Merge from upstream/master to fix an issue reported by UBSan.

Also update BUILD.gn to include 2 new headers under `msinttypes/`.
Do not include the bin/ directory which contains bad characters that the
presubmit catches.

TEST=Built fuchsia with ubsan and runtests, and ran GI tests

Bug: 39857
Change-Id: Id5dd7288af79e042d4c2efd6ed5ead411a08f4b6
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..6f598bb
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,22 @@
+# Set the default behavior, in case people don't have core.autocrlf set.
+* text=auto
+
+# Explicitly declare text files you want to always be normalized and converted
+# to native line endings on checkout.
+*.cpp text
+*.h text
+*.txt text
+*.md text
+*.cmake text
+*.svg text
+*.dot text
+*.yml text
+*.in text
+*.sh text
+*.autopkg text
+Dockerfile text
+
+# Denote all files that are truly binary and should not be modified.
+*.png binary
+*.jpg binary
+*.json binary
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..1d3073f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,28 @@
+/bin/*
+!/bin/data
+!/bin/encodings
+!/bin/jsonchecker
+!/bin/types
+/build
+/doc/html
+/doc/doxygen_*.db
+*.a
+
+# Temporary files created during CMake build
+CMakeCache.txt
+CMakeFiles
+cmake_install.cmake
+CTestTestfile.cmake
+Makefile
+RapidJSON*.cmake
+RapidJSON.pc
+Testing
+/googletest
+install_manifest.txt
+Doxyfile
+Doxyfile.zh-cn
+DartConfiguration.tcl
+*.nupkg
+
+# Files created by OS
+*.DS_Store
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..5e41f7c
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "thirdparty/gtest"]
+	path = thirdparty/gtest
+	url = https://github.com/google/googletest.git
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..df821a7
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,99 @@
+sudo: required
+dist: trusty
+group: edge
+
+language: cpp
+cache:
+  - ccache
+
+env:
+  global:
+    - USE_CCACHE=1
+    - CCACHE_SLOPPINESS=pch_defines,time_macros
+    - CCACHE_COMPRESS=1
+    - CCACHE_MAXSIZE=100M
+    - ARCH_FLAGS_x86='-m32'        # #266: don't use SSE on 32-bit
+    - ARCH_FLAGS_x86_64='-msse4.2' #       use SSE4.2 on 64-bit
+    - GITHUB_REPO='Tencent/rapidjson'
+    - secure: "HrsaCb+N66EG1HR+LWH1u51SjaJyRwJEDzqJGYMB7LJ/bfqb9mWKF1fLvZGk46W5t7TVaXRDD5KHFx9DPWvKn4gRUVkwTHEy262ah5ORh8M6n/6VVVajeV/AYt2C0sswdkDBDO4Xq+xy5gdw3G8s1A4Inbm73pUh+6vx+7ltBbk="
+
+before_install:
+    - sudo apt-add-repository -y ppa:ubuntu-toolchain-r/test
+    - sudo apt-get update -qq
+    - sudo apt-get install -y cmake valgrind g++-multilib libc6-dbg:i386
+
+matrix:
+  include:
+    # gcc
+    - env: CONF=release ARCH=x86    CXX11=ON
+      compiler: gcc
+    - env: CONF=release ARCH=x86_64 CXX11=ON
+      compiler: gcc
+    - env: CONF=debug   ARCH=x86    CXX11=OFF
+      compiler: gcc
+    - env: CONF=debug   ARCH=x86_64 CXX11=OFF
+      compiler: gcc
+    # clang
+    - env: CONF=debug   ARCH=x86    CXX11=ON CCACHE_CPP2=yes
+      compiler: clang
+    - env: CONF=debug   ARCH=x86_64 CXX11=ON CCACHE_CPP2=yes
+      compiler: clang
+    - env: CONF=debug   ARCH=x86    CXX11=OFF CCACHE_CPP2=yes
+      compiler: clang
+    - env: CONF=debug   ARCH=x86_64 CXX11=OFF CCACHE_CPP2=yes
+      compiler: clang
+    - env: CONF=release ARCH=x86    CXX11=ON CCACHE_CPP2=yes
+      compiler: clang
+    - env: CONF=release ARCH=x86_64 CXX11=ON CCACHE_CPP2=yes
+      compiler: clang
+    # coverage report
+    - env: CONF=debug   ARCH=x86    CXX11=ON GCOV_FLAGS='--coverage'
+      compiler: gcc
+      cache:
+        - ccache
+        - pip
+      after_success:
+        - pip install --user cpp-coveralls
+        - coveralls -r .. --gcov-options '\-lp' -e thirdparty -e example -e test -e build/CMakeFiles -e include/rapidjson/msinttypes -e include/rapidjson/internal/meta.h -e include/rapidjson/error/en.h
+    - env: CONF=debug   ARCH=x86_64 GCOV_FLAGS='--coverage'
+      compiler: gcc
+      cache:
+        - ccache
+        - pip
+      after_success:
+        - pip install --user cpp-coveralls
+        - coveralls -r .. --gcov-options '\-lp' -e thirdparty -e example -e test -e build/CMakeFiles -e include/rapidjson/msinttypes -e include/rapidjson/internal/meta.h -e include/rapidjson/error/en.h
+    - script: # Documentation task
+      - cd build
+      - cmake .. -DRAPIDJSON_HAS_STDSTRING=ON -DCMAKE_VERBOSE_MAKEFILE=ON
+      - make travis_doc
+      cache: false
+      addons:
+        apt:
+          packages:
+            - doxygen
+
+before_script:
+    - ccache -s
+      #   hack to avoid Valgrind bug (https://bugs.kde.org/show_bug.cgi?id=326469),
+      #   exposed by merging PR#163 (using -march=native)
+      #   TODO: Since this bug is already fixed. Remove this when valgrind can be upgraded.
+    - sed -i "s/-march=native//" CMakeLists.txt
+    - mkdir build
+
+script:
+  - if [ "$CXX" = "clang++" ]; then export CXXFLAGS="-stdlib=libc++ ${CXXFLAGS}"; fi
+  - >
+      eval "ARCH_FLAGS=\${ARCH_FLAGS_${ARCH}}" ;
+      (cd build && cmake
+      -DRAPIDJSON_HAS_STDSTRING=ON
+      -DRAPIDJSON_BUILD_CXX11=$CXX11
+      -DCMAKE_VERBOSE_MAKEFILE=ON
+      -DCMAKE_BUILD_TYPE=$CONF
+      -DCMAKE_CXX_FLAGS="$ARCH_FLAGS $GCOV_FLAGS"
+      -DCMAKE_EXE_LINKER_FLAGS=$GCOV_FLAGS
+      ..)
+  - cd build
+  - make tests -j 2
+  - make examples -j 2
+  - ctest -j 2 -V `[ "$CONF" = "release" ] || echo "-E perftest"`
diff --git a/BUILD.gn b/BUILD.gn
index cbb9f0c..92b7a01 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -69,6 +69,8 @@
     "include/rapidjson/istreamwrapper.h",
     "include/rapidjson/memorybuffer.h",
     "include/rapidjson/memorystream.h",
+    "include/rapidjson/msinttypes/inttypes.h",
+    "include/rapidjson/msinttypes/stdint.h",
     "include/rapidjson/ostreamwrapper.h",
     "include/rapidjson/pointer.h",
     "include/rapidjson/prettywriter.h",
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..1c580bd
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,158 @@
+# Change Log
+All notable changes to this project will be documented in this file.
+This project adheres to [Semantic Versioning](http://semver.org/).
+
+## [Unreleased]
+
+## 1.1.0 - 2016-08-25
+
+### Added
+* Add GenericDocument ctor overload to specify JSON type (#369)
+* Add FAQ (#372, #373, #374, #376)
+* Add forward declaration header `fwd.h`
+* Add @PlatformIO Library Registry manifest file (#400)
+* Implement assignment operator for BigInteger (#404)
+* Add comments support (#443)
+* Adding coapp definition (#460)
+* documenttest.cpp: EXPECT_THROW when checking empty allocator (470)
+* GenericDocument: add implicit conversion to ParseResult (#480)
+* Use <wchar.h> with C++ linkage on Windows ARM (#485)
+* Detect little endian for Microsoft ARM targets 
+* Check Nan/Inf when writing a double (#510)
+* Add JSON Schema Implementation (#522)
+* Add iostream wrapper (#530)
+* Add Jsonx example for converting JSON into JSONx (a XML format) (#531)
+* Add optional unresolvedTokenIndex parameter to Pointer::Get() (#532)
+* Add encoding validation option for Writer/PrettyWriter (#534)
+* Add Writer::SetMaxDecimalPlaces() (#536)
+* Support {0, } and {0, m} in Regex (#539)
+* Add Value::Get/SetFloat(), Value::IsLossLessFloat/Double() (#540)
+* Add stream position check to reader unit tests (#541)
+* Add Templated accessors and range-based for (#542)
+* Add (Pretty)Writer::RawValue() (#543)
+* Add Document::Parse(std::string), Document::Parse(const char*, size_t length) and related APIs. (#553)
+* Add move constructor for GenericSchemaDocument (#554)
+* Add VS2010 and VS2015 to AppVeyor CI (#555)
+* Add parse-by-parts example (#556, #562)
+* Support parse number as string (#564, #589)
+* Add kFormatSingleLineArray for PrettyWriter (#577)
+* Added optional support for trailing commas (#584)
+* Added filterkey and filterkeydom examples (#615)
+* Added npm docs (#639)
+* Allow options for writing and parsing NaN/Infinity (#641)
+* Add std::string overload to PrettyWriter::Key() when RAPIDJSON_HAS_STDSTRING is defined (#698)
+
+### Fixed
+* Fix gcc/clang/vc warnings (#350, #394, #397, #444, #447, #473, #515, #582, #589, #595, #667)
+* Fix documentation (#482, #511, #550, #557, #614, #635, #660)
+* Fix emscripten alignment issue (#535)
+* Fix missing allocator to uses of AddMember in document (#365)
+* CMake will no longer complain that the minimum CMake version is not specified (#501)
+* Make it usable with old VC8 (VS2005) (#383)
+* Prohibit C++11 move from Document to Value (#391)
+* Try to fix incorrect 64-bit alignment (#419)
+* Check return of fwrite to avoid warn_unused_result build failures (#421)
+* Fix UB in GenericDocument::ParseStream (#426)
+* Keep Document value unchanged on parse error (#439)
+* Add missing return statement (#450)
+* Fix Document::Parse(const Ch*) for transcoding (#478)
+* encodings.h: fix typo in preprocessor condition (#495)
+* Custom Microsoft headers are necessary only for Visual Studio 2012 and lower (#559)
+* Fix memory leak for invalid regex (26e69ffde95ba4773ab06db6457b78f308716f4b)
+* Fix a bug in schema minimum/maximum keywords for 64-bit integer (e7149d665941068ccf8c565e77495521331cf390)
+* Fix a crash bug in regex (#605)
+* Fix schema "required" keyword cannot handle duplicated keys (#609)
+* Fix cmake CMP0054 warning (#612)
+* Added missing include guards in istreamwrapper.h and ostreamwrapper.h (#634)
+* Fix undefined behaviour (#646)
+* Fix buffer overrun using PutN (#673)
+* Fix rapidjson::value::Get<std::string>() may returns wrong data (#681)
+* Add Flush() for all value types (#689)
+* Handle malloc() fail in PoolAllocator (#691)
+* Fix builds on x32 platform. #703
+
+### Changed
+* Clarify problematic JSON license (#392)
+* Move Travis to container based infrastructure (#504, #558)
+* Make whitespace array more compact (#513)
+* Optimize Writer::WriteString() with SIMD (#544)
+* x86-64 48-bit pointer optimization for GenericValue (#546)
+* Define RAPIDJSON_HAS_CXX11_RVALUE_REFS directly in clang (#617)
+* Make GenericSchemaDocument constructor explicit (#674)
+* Optimize FindMember when use std::string (#690)
+
+## [1.0.2] - 2015-05-14
+
+### Added
+* Add Value::XXXMember(...) overloads for std::string (#335)
+
+### Fixed
+* Include rapidjson.h for all internal/error headers.
+* Parsing some numbers incorrectly in full-precision mode (`kFullPrecisionParseFlag`) (#342)
+* Fix some numbers parsed incorrectly (#336)
+* Fix alignment of 64bit platforms (#328)
+* Fix MemoryPoolAllocator::Clear() to clear user-buffer (0691502573f1afd3341073dd24b12c3db20fbde4)
+
+### Changed
+* CMakeLists for include as a thirdparty in projects (#334, #337)
+* Change Document::ParseStream() to use stack allocator for Reader (ffbe38614732af8e0b3abdc8b50071f386a4a685) 
+
+## [1.0.1] - 2015-04-25
+
+### Added
+* Changelog following [Keep a CHANGELOG](https://github.com/olivierlacan/keep-a-changelog) suggestions.
+
+### Fixed
+* Parsing of some numbers (e.g. "1e-00011111111111") causing assertion (#314).
+* Visual C++ 32-bit compilation error in `diyfp.h` (#317).
+
+## [1.0.0] - 2015-04-22
+
+### Added
+* 100% [Coverall](https://coveralls.io/r/Tencent/rapidjson?branch=master) coverage.
+* Version macros (#311)
+
+### Fixed
+* A bug in trimming long number sequence (4824f12efbf01af72b8cb6fc96fae7b097b73015).
+* Double quote in unicode escape (#288).
+* Negative zero roundtrip (double only) (#289).
+* Standardize behavior of `memcpy()` and `malloc()` (0c5c1538dcfc7f160e5a4aa208ddf092c787be5a, #305, 0e8bbe5e3ef375e7f052f556878be0bd79e9062d).
+
+### Removed
+* Remove an invalid `Document::ParseInsitu()` API (e7f1c6dd08b522cfcf9aed58a333bd9a0c0ccbeb).
+
+## 1.0-beta - 2015-04-8
+
+### Added
+* RFC 7159 (#101)
+* Optional Iterative Parser (#76)
+* Deep-copy values (#20)
+* Error code and message (#27)
+* ASCII Encoding (#70)
+* `kParseStopWhenDoneFlag` (#83)
+* `kParseFullPrecisionFlag` (881c91d696f06b7f302af6d04ec14dd08db66ceb)
+* Add `Key()` to handler concept (#134)
+* C++11 compatibility and support (#128)
+* Optimized number-to-string and vice versa conversions (#137, #80)
+* Short-String Optimization (#131)
+* Local stream optimization by traits (#32)
+* Travis & Appveyor Continuous Integration, with Valgrind verification (#24, #242)
+* Redo all documentation (English, Simplified Chinese)
+
+### Changed
+* Copyright ownership transferred to THL A29 Limited (a Tencent company).
+* Migrating from Premake to CMAKE (#192)
+* Resolve all warning reports
+
+### Removed
+* Remove other JSON libraries for performance comparison (#180)
+
+## 0.11 - 2012-11-16
+
+## 0.1 - 2011-11-18
+
+[Unreleased]: https://github.com/Tencent/rapidjson/compare/v1.1.0...HEAD
+[1.1.0]: https://github.com/Tencent/rapidjson/compare/v1.0.2...v1.1.0
+[1.0.2]: https://github.com/Tencent/rapidjson/compare/v1.0.1...v1.0.2
+[1.0.1]: https://github.com/Tencent/rapidjson/compare/v1.0.0...v1.0.1
+[1.0.0]: https://github.com/Tencent/rapidjson/compare/v1.0-beta...v1.0.0
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 57c6801..0275672 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,13 +1,221 @@
-# Copyright 2018 The Fuchsia Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+if(POLICY CMP0025)
+  # detect Apple's Clang
+  cmake_policy(SET CMP0025 NEW)
+endif()
+if(POLICY CMP0054)
+  cmake_policy(SET CMP0054 NEW)
+endif()
 
-cmake_minimum_required(VERSION 3.0.0)
+SET(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules)
 
-project(rapidjson)
+PROJECT(RapidJSON CXX)
 
-add_library(rapidjson INTERFACE)
-target_include_directories(rapidjson INTERFACE
-  "${CMAKE_CURRENT_SOURCE_DIR}"
-  "${CMAKE_CURRENT_SOURCE_DIR}/include"
-)
+set(LIB_MAJOR_VERSION "1")
+set(LIB_MINOR_VERSION "1")
+set(LIB_PATCH_VERSION "0")
+set(LIB_VERSION_STRING "${LIB_MAJOR_VERSION}.${LIB_MINOR_VERSION}.${LIB_PATCH_VERSION}")
+
+# compile in release with debug info mode by default
+if(NOT CMAKE_BUILD_TYPE)
+    set(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING "Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel." FORCE)
+endif()
+
+# Build all binaries in a separate directory
+SET(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
+
+option(RAPIDJSON_BUILD_DOC "Build rapidjson documentation." ON)
+option(RAPIDJSON_BUILD_EXAMPLES "Build rapidjson examples." ON)
+option(RAPIDJSON_BUILD_TESTS "Build rapidjson perftests and unittests." ON)
+option(RAPIDJSON_BUILD_THIRDPARTY_GTEST
+    "Use gtest installation in `thirdparty/gtest` by default if available" OFF)
+
+option(RAPIDJSON_BUILD_CXX11 "Build rapidjson with C++11 (gcc/clang)" ON)
+if(RAPIDJSON_BUILD_CXX11)
+    set(CMAKE_CXX_STANDARD 11)
+    set(CMAKE_CXX_STANDARD_REQUIRED TRUE)
+endif()
+
+option(RAPIDJSON_BUILD_ASAN "Build rapidjson with address sanitizer (gcc/clang)" OFF)
+option(RAPIDJSON_BUILD_UBSAN "Build rapidjson with undefined behavior sanitizer (gcc/clang)" OFF)
+
+option(RAPIDJSON_ENABLE_INSTRUMENTATION_OPT "Build rapidjson with -march or -mcpu options" ON)
+
+option(RAPIDJSON_HAS_STDSTRING "" OFF)
+if(RAPIDJSON_HAS_STDSTRING)
+    add_definitions(-DRAPIDJSON_HAS_STDSTRING)
+endif()
+
+find_program(CCACHE_FOUND ccache)
+if(CCACHE_FOUND)
+    set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache)
+    set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache)
+    if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Qunused-arguments -fcolor-diagnostics")
+    endif()
+endif(CCACHE_FOUND)
+
+if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+    if(RAPIDJSON_ENABLE_INSTRUMENTATION_OPT AND NOT CMAKE_CROSSCOMPILING)
+        if(CMAKE_SYSTEM_PROCESSOR STREQUAL "powerpc" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "ppc64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "ppc64le")
+          set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mcpu=native")
+        else()
+          #FIXME: x86 is -march=native, but doesn't mean every arch is this option. To keep original project's compatibility, I leave this except POWER.
+          set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native")
+        endif()
+    endif()
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Werror")
+    set(EXTRA_CXX_FLAGS -Weffc++ -Wswitch-default -Wfloat-equal -Wconversion -Wsign-conversion)
+    if (RAPIDJSON_BUILD_CXX11 AND CMAKE_VERSION VERSION_LESS 3.1)
+        if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.7.0")
+            set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x")
+        else()
+            set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
+        endif()
+    endif()
+    if (RAPIDJSON_BUILD_ASAN)
+        if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.8.0")
+            message(FATAL_ERROR "GCC < 4.8 doesn't support the address sanitizer")
+        else()
+            set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address")
+        endif()
+    endif()
+    if (RAPIDJSON_BUILD_UBSAN)
+        if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.9.0")
+            message(FATAL_ERROR "GCC < 4.9 doesn't support the undefined behavior sanitizer")
+        else()
+            set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined")
+        endif()
+    endif()
+elseif (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+    if(NOT CMAKE_CROSSCOMPILING)
+      if(CMAKE_SYSTEM_PROCESSOR STREQUAL "powerpc" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "ppc64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "ppc64le")
+        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mcpu=native")
+      else()
+        #FIXME: x86 is -march=native, but doesn't mean every arch is this option. To keep original project's compatibility, I leave this except POWER.
+        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native")
+      endif()
+    endif()
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Werror -Wno-missing-field-initializers")
+    set(EXTRA_CXX_FLAGS -Weffc++ -Wswitch-default -Wfloat-equal -Wconversion -Wimplicit-fallthrough)
+    if (RAPIDJSON_BUILD_CXX11 AND CMAKE_VERSION VERSION_LESS 3.1)
+        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
+    endif()
+    if (RAPIDJSON_BUILD_ASAN)
+        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address")
+    endif()
+    if (RAPIDJSON_BUILD_UBSAN)
+        if (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
+            set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined-trap -fsanitize-undefined-trap-on-error")
+        else()
+            set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined")
+        endif()
+    endif()
+elseif (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+    add_definitions(-D_CRT_SECURE_NO_WARNINGS=1)
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc")
+elseif (CMAKE_CXX_COMPILER_ID MATCHES "XL")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -qarch=auto")
+endif()
+
+#add extra search paths for libraries and includes
+SET(INCLUDE_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/include" CACHE PATH "The directory the headers are installed in")
+SET(LIB_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/lib" CACHE STRING "Directory where lib will install")
+SET(DOC_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/share/doc/${PROJECT_NAME}" CACHE PATH "Path to the documentation")
+
+IF(UNIX OR CYGWIN)
+    SET(_CMAKE_INSTALL_DIR "${LIB_INSTALL_DIR}/cmake/${PROJECT_NAME}")
+ELSEIF(WIN32)
+    SET(_CMAKE_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/cmake")
+ENDIF()
+SET(CMAKE_INSTALL_DIR "${_CMAKE_INSTALL_DIR}" CACHE PATH "The directory cmake files are installed in")
+
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include)
+
+if(RAPIDJSON_BUILD_DOC)
+    add_subdirectory(doc)
+endif()
+
+add_custom_target(travis_doc)
+add_custom_command(TARGET travis_doc
+    COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/travis-doxygen.sh)
+
+if(RAPIDJSON_BUILD_EXAMPLES)
+    add_subdirectory(example)
+endif()
+
+if(RAPIDJSON_BUILD_TESTS)
+    if(MSVC11)
+        # required for VS2012 due to missing support for variadic templates
+        add_definitions(-D_VARIADIC_MAX=10)
+    endif(MSVC11)
+    add_subdirectory(test)
+    include(CTest)
+endif()
+
+# pkg-config
+IF (UNIX OR CYGWIN)
+  CONFIGURE_FILE (${CMAKE_CURRENT_SOURCE_DIR}/${PROJECT_NAME}.pc.in
+                  ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc
+                  @ONLY)
+  INSTALL (FILES ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc
+      DESTINATION "${LIB_INSTALL_DIR}/pkgconfig"
+      COMPONENT pkgconfig)
+ENDIF()
+
+install(FILES readme.md
+        DESTINATION "${DOC_INSTALL_DIR}"
+        COMPONENT doc)
+
+install(DIRECTORY include/rapidjson
+    DESTINATION "${INCLUDE_INSTALL_DIR}"
+    COMPONENT dev)
+
+install(DIRECTORY example/
+    DESTINATION "${DOC_INSTALL_DIR}/examples"
+    COMPONENT examples
+    # Following patterns are for excluding the intermediate/object files
+    # from an install of in-source CMake build.
+    PATTERN "CMakeFiles" EXCLUDE
+    PATTERN "Makefile" EXCLUDE
+    PATTERN "cmake_install.cmake" EXCLUDE)
+
+# Provide config and version files to be used by other applications
+# ===============================
+
+################################################################################
+# Export package for use from the build tree
+EXPORT( PACKAGE ${PROJECT_NAME} )
+
+# Create the RapidJSONConfig.cmake file for other cmake projects.
+# ... for the build tree
+SET( CONFIG_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
+SET( CONFIG_DIR ${CMAKE_CURRENT_BINARY_DIR})
+SET( ${PROJECT_NAME}_INCLUDE_DIR "\${${PROJECT_NAME}_SOURCE_DIR}/include" )
+
+CONFIGURE_FILE( ${CMAKE_CURRENT_SOURCE_DIR}/${PROJECT_NAME}Config.cmake.in
+    ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake @ONLY )
+CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/${PROJECT_NAME}ConfigVersion.cmake.in
+    ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake @ONLY)
+
+# ... for the install tree
+SET( CMAKECONFIG_INSTALL_DIR ${LIB_INSTALL_DIR}/cmake/${PROJECT_NAME} )
+FILE( RELATIVE_PATH REL_INCLUDE_DIR
+    "${CMAKECONFIG_INSTALL_DIR}"
+    "${CMAKE_INSTALL_PREFIX}/include" )
+
+SET( ${PROJECT_NAME}_INCLUDE_DIR "\${${PROJECT_NAME}_CMAKE_DIR}/${REL_INCLUDE_DIR}" )
+SET( CONFIG_SOURCE_DIR )
+SET( CONFIG_DIR )
+CONFIGURE_FILE( ${CMAKE_CURRENT_SOURCE_DIR}/${PROJECT_NAME}Config.cmake.in
+    ${CMAKE_CURRENT_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/${PROJECT_NAME}Config.cmake @ONLY )
+
+INSTALL(FILES "${CMAKE_CURRENT_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/${PROJECT_NAME}Config.cmake"
+        DESTINATION ${CMAKECONFIG_INSTALL_DIR} )
+
+# Install files
+INSTALL(FILES
+    ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake
+    ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake
+    DESTINATION "${CMAKE_INSTALL_DIR}"
+    COMPONENT dev)
diff --git a/CMakeModules/FindGTestSrc.cmake b/CMakeModules/FindGTestSrc.cmake
new file mode 100644
index 0000000..f3cb8c9
--- /dev/null
+++ b/CMakeModules/FindGTestSrc.cmake
@@ -0,0 +1,30 @@
+
+SET(GTEST_SEARCH_PATH
+    "${GTEST_SOURCE_DIR}"
+    "${CMAKE_CURRENT_LIST_DIR}/../thirdparty/gtest/googletest")
+
+IF(UNIX)
+    IF(RAPIDJSON_BUILD_THIRDPARTY_GTEST)
+        LIST(APPEND GTEST_SEARCH_PATH "/usr/src/gtest")
+    ELSE()
+        LIST(INSERT GTEST_SEARCH_PATH 1 "/usr/src/gtest")
+    ENDIF()
+ENDIF()
+
+FIND_PATH(GTEST_SOURCE_DIR
+    NAMES CMakeLists.txt src/gtest_main.cc
+    PATHS ${GTEST_SEARCH_PATH})
+
+
+# Debian installs gtest include directory in /usr/include, thus need to look
+# for include directory separately from source directory.
+FIND_PATH(GTEST_INCLUDE_DIR
+    NAMES gtest/gtest.h
+    PATH_SUFFIXES include
+    HINTS ${GTEST_SOURCE_DIR}
+    PATHS ${GTEST_SEARCH_PATH})
+
+INCLUDE(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(GTestSrc DEFAULT_MSG
+    GTEST_SOURCE_DIR
+    GTEST_INCLUDE_DIR)
diff --git a/RapidJSON.pc.in b/RapidJSON.pc.in
new file mode 100644
index 0000000..6afb079
--- /dev/null
+++ b/RapidJSON.pc.in
@@ -0,0 +1,7 @@
+includedir=@INCLUDE_INSTALL_DIR@
+
+Name: @PROJECT_NAME@
+Description: A fast JSON parser/generator for C++ with both SAX/DOM style API
+Version: @LIB_VERSION_STRING@
+URL: https://github.com/Tencent/rapidjson
+Cflags: -I${includedir}
diff --git a/RapidJSONConfig.cmake.in b/RapidJSONConfig.cmake.in
new file mode 100644
index 0000000..e3c65a5
--- /dev/null
+++ b/RapidJSONConfig.cmake.in
@@ -0,0 +1,15 @@
+################################################################################
+# RapidJSON source dir
+set( RapidJSON_SOURCE_DIR "@CONFIG_SOURCE_DIR@")
+
+################################################################################
+# RapidJSON build dir
+set( RapidJSON_DIR "@CONFIG_DIR@")
+
+################################################################################
+# Compute paths
+get_filename_component(RapidJSON_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
+
+set( RapidJSON_INCLUDE_DIR  "@RapidJSON_INCLUDE_DIR@" )
+set( RapidJSON_INCLUDE_DIRS  "@RapidJSON_INCLUDE_DIR@" )
+message(STATUS "RapidJSON found. Headers: ${RapidJSON_INCLUDE_DIRS}")
diff --git a/RapidJSONConfigVersion.cmake.in b/RapidJSONConfigVersion.cmake.in
new file mode 100644
index 0000000..25741fc
--- /dev/null
+++ b/RapidJSONConfigVersion.cmake.in
@@ -0,0 +1,10 @@
+SET(PACKAGE_VERSION "@LIB_VERSION_STRING@")
+
+IF (PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
+  SET(PACKAGE_VERSION_EXACT "true")
+ENDIF (PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
+IF (NOT PACKAGE_FIND_VERSION VERSION_GREATER PACKAGE_VERSION)
+  SET(PACKAGE_VERSION_COMPATIBLE "true")
+ELSE (NOT PACKAGE_FIND_VERSION VERSION_GREATER PACKAGE_VERSION)
+  SET(PACKAGE_VERSION_UNSUITABLE "true")
+ENDIF (NOT PACKAGE_FIND_VERSION VERSION_GREATER PACKAGE_VERSION)
diff --git a/appveyor.yml b/appveyor.yml
new file mode 100644
index 0000000..376dc19
--- /dev/null
+++ b/appveyor.yml
@@ -0,0 +1,54 @@
+version: 1.1.0.{build}
+
+configuration:
+- Debug
+- Release
+
+environment:
+  matrix:
+  # - VS_VERSION: 9 2008
+  #   VS_PLATFORM: win32
+  # - VS_VERSION: 9 2008
+  #   VS_PLATFORM: x64
+  - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2013
+    VS_VERSION: 10 2010
+    VS_PLATFORM: win32
+  - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2013
+    VS_VERSION: 10 2010
+    VS_PLATFORM: x64
+  - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2013
+    VS_VERSION: 11 2012
+    VS_PLATFORM: win32
+  - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2013
+    VS_VERSION: 11 2012
+    VS_PLATFORM: x64
+  - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2013
+    VS_VERSION: 12 2013
+    VS_PLATFORM: win32
+  - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2013
+    VS_VERSION: 12 2013
+    VS_PLATFORM: x64
+  - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+    VS_VERSION: 14 2015
+    VS_PLATFORM: win32
+  - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+    VS_VERSION: 14 2015
+    VS_PLATFORM: x64
+  - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
+    VS_VERSION: 15 2017
+    VS_PLATFORM: win32
+  - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
+    VS_VERSION: 15 2017
+    VS_PLATFORM: x64
+
+before_build:
+- git submodule update --init --recursive
+- cmake -H. -BBuild/VS -G "Visual Studio %VS_VERSION%" -DCMAKE_GENERATOR_PLATFORM=%VS_PLATFORM% -DCMAKE_VERBOSE_MAKEFILE=ON -DBUILD_SHARED_LIBS=true -Wno-dev
+
+build:
+  project: Build\VS\RapidJSON.sln
+  parallel: true
+  verbosity: minimal
+
+test_script:
+- cd Build\VS && if %CONFIGURATION%==Debug (ctest --verbose -E perftest --build-config %CONFIGURATION%) else (ctest --verbose --build-config %CONFIGURATION%)
diff --git a/contrib/natvis/LICENSE b/contrib/natvis/LICENSE
new file mode 100644
index 0000000..f57da96
--- /dev/null
+++ b/contrib/natvis/LICENSE
@@ -0,0 +1,45 @@
+The MIT License (MIT)
+
+Copyright (c) 2017 Bart Muzzin
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+Derived from:
+
+The MIT License (MIT)
+
+Copyright (c) 2015 mojmir svoboda
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/contrib/natvis/README.md b/contrib/natvis/README.md
new file mode 100644
index 0000000..9685c7f
--- /dev/null
+++ b/contrib/natvis/README.md
@@ -0,0 +1,7 @@
+# rapidjson.natvis
+
+This file can be used as a [Visual Studio Visualizer](https://docs.microsoft.com/en-gb/visualstudio/debugger/create-custom-views-of-native-objects) to aid in visualizing rapidjson structures within the Visual Studio debugger. Natvis visualizers are supported in Visual Studio 2012 and later. To install, copy the file into this directory:
+
+`%USERPROFILE%\Documents\Visual Studio 2012\Visualizers`
+
+Each version of Visual Studio has a similar directory, it must be copied into each directory to be used with that particular version. In Visual Studio 2015 and later, this can be done without restarting Visual Studio (a new debugging session must be started).
diff --git a/contrib/natvis/rapidjson.natvis b/contrib/natvis/rapidjson.natvis
new file mode 100644
index 0000000..a804b7b
--- /dev/null
+++ b/contrib/natvis/rapidjson.natvis
@@ -0,0 +1,38 @@
+<?xml version="1.0" encoding="utf-8"?>
+<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">
+	<!-- rapidjson::GenericValue - basic support -->
+	<Type Name="rapidjson::GenericValue&lt;*,*&gt;">
+		<DisplayString Condition="(data_.f.flags &amp; kTypeMask) == kNullType">null</DisplayString>
+		<DisplayString Condition="data_.f.flags == kTrueFlag">true</DisplayString>
+		<DisplayString Condition="data_.f.flags == kFalseFlag">false</DisplayString>
+		<DisplayString Condition="data_.f.flags == kShortStringFlag">{data_.ss.str}</DisplayString>
+		<DisplayString Condition="(data_.f.flags &amp; kTypeMask) == kStringType">{(const char*)((size_t)data_.s.str &amp; 0x0000FFFFFFFFFFFF)}</DisplayString>
+		<DisplayString Condition="(data_.f.flags &amp; kNumberIntFlag) == kNumberIntFlag">{data_.n.i.i}</DisplayString>
+		<DisplayString Condition="(data_.f.flags &amp; kNumberUintFlag) == kNumberUintFlag">{data_.n.u.u}</DisplayString>
+		<DisplayString Condition="(data_.f.flags &amp; kNumberInt64Flag) == kNumberInt64Flag">{data_.n.i64}</DisplayString>
+		<DisplayString Condition="(data_.f.flags &amp; kNumberUint64Flag) == kNumberUint64Flag">{data_.n.u64}</DisplayString>
+		<DisplayString Condition="(data_.f.flags &amp; kNumberDoubleFlag) == kNumberDoubleFlag">{data_.n.d}</DisplayString>
+		<DisplayString Condition="data_.f.flags == kObjectType">Object members={data_.o.size}</DisplayString>
+		<DisplayString Condition="data_.f.flags == kArrayType">Array members={data_.a.size}</DisplayString>
+		<Expand>
+			<Item Condition="data_.f.flags == kObjectType" Name="[size]">data_.o.size</Item>
+			<Item Condition="data_.f.flags == kObjectType" Name="[capacity]">data_.o.capacity</Item>
+			<ArrayItems Condition="data_.f.flags == kObjectType">
+				<Size>data_.o.size</Size>
+				<!-- NOTE: Rapidjson stores some extra data in the high bits of pointers, which is why the mask -->
+				<ValuePointer>(rapidjson::GenericMember&lt;$T1,$T2&gt;*)(((size_t)data_.o.members) &amp; 0x0000FFFFFFFFFFFF)</ValuePointer>
+			</ArrayItems>
+
+			<Item Condition="data_.f.flags == kArrayType" Name="[size]">data_.a.size</Item>
+			<Item Condition="data_.f.flags == kArrayType" Name="[capacity]">data_.a.capacity</Item>
+			<ArrayItems Condition="data_.f.flags == kArrayType">
+				<Size>data_.a.size</Size>
+				<!-- NOTE: Rapidjson stores some extra data in the high bits of pointers, which is why the mask -->
+				<ValuePointer>(rapidjson::GenericValue&lt;$T1,$T2&gt;*)(((size_t)data_.a.elements) &amp; 0x0000FFFFFFFFFFFF)</ValuePointer>
+			</ArrayItems>
+
+		</Expand>
+	</Type>
+
+</AutoVisualizer>
+
diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt
new file mode 100644
index 0000000..c5345ba
--- /dev/null
+++ b/doc/CMakeLists.txt
@@ -0,0 +1,27 @@
+find_package(Doxygen)
+
+IF(NOT DOXYGEN_FOUND)
+    MESSAGE(STATUS "No Doxygen found. Documentation won't be built")
+ELSE()
+    file(GLOB SOURCES ${CMAKE_CURRENT_LIST_DIR}/../include/*)
+    file(GLOB MARKDOWN_DOC ${CMAKE_CURRENT_LIST_DIR}/../doc/*.md)
+    list(APPEND MARKDOWN_DOC ${CMAKE_CURRENT_LIST_DIR}/../readme.md)
+
+    CONFIGURE_FILE(Doxyfile.in Doxyfile @ONLY)
+    CONFIGURE_FILE(Doxyfile.zh-cn.in Doxyfile.zh-cn @ONLY)
+
+    file(GLOB DOXYFILES ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile*)
+    
+    add_custom_command(OUTPUT html
+        COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
+        COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile.zh-cn
+        COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/html
+        DEPENDS ${MARKDOWN_DOC} ${SOURCES} ${DOXYFILES}
+        WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/../
+        )
+
+    add_custom_target(doc ALL DEPENDS html)
+    install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/html
+        DESTINATION ${DOC_INSTALL_DIR}
+        COMPONENT doc)
+ENDIF()
diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in
new file mode 100644
index 0000000..6e79f93
--- /dev/null
+++ b/doc/Doxyfile.in
@@ -0,0 +1,2369 @@
+# Doxyfile 1.8.7
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all text
+# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
+# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
+# for the list of possible encodings.
+# The default value is: UTF-8.
+
+DOXYFILE_ENCODING      = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
+
+PROJECT_NAME           = RapidJSON
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
+
+PROJECT_NUMBER         =
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF          = "A fast JSON parser/generator for C++ with both SAX/DOM style API"
+
+# With the PROJECT_LOGO tag one can specify an logo or icon that is included in
+# the documentation. The maximum height of the logo should not exceed 55 pixels
+# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
+# to the output directory.
+
+PROJECT_LOGO           =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
+
+OUTPUT_DIRECTORY       = @CMAKE_CURRENT_BINARY_DIR@
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise cause
+# performance problems for the file system.
+# The default value is: NO.
+
+CREATE_SUBDIRS         = NO
+
+# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
+# characters to appear in the names of generated files. If set to NO, non-ASCII
+# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
+# U+3044.
+# The default value is: NO.
+
+ALLOW_UNICODE_NAMES    = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
+# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
+# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
+# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
+# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
+# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
+# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
+# Ukrainian and Vietnamese.
+# The default value is: English.
+
+OUTPUT_LANGUAGE        = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
+
+BRIEF_MEMBER_DESC      = YES
+
+# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+# The default value is: YES.
+
+REPEAT_BRIEF           = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity):The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
+
+ABBREVIATE_BRIEF       = "The $name class" \
+                         "The $name widget" \
+                         "The $name file" \
+                         is \
+                         provides \
+                         specifies \
+                         contains \
+                         represents \
+                         a \
+                         an \
+                         the
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# doxygen will generate a detailed section even if there is only a brief
+# description.
+# The default value is: NO.
+
+ALWAYS_DETAILED_SEC    = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+# The default value is: NO.
+
+INLINE_INHERITED_MEMB  = NO
+
+# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
+# before files name in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used
+# The default value is: YES.
+
+FULL_PATH_NAMES        = YES
+
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
+
+STRIP_FROM_PATH        =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
+
+STRIP_FROM_INC_PATH    =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful if your file system doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
+
+SHORT_NAMES            = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
+
+JAVADOC_AUTOBRIEF      = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
+
+QT_AUTOBRIEF           = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that rational rose comments are
+# not recognized any more.
+# The default value is: NO.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
+
+INHERIT_DOCS           = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
+# new page for each member. If set to NO, the documentation of a member will be
+# part of the file/class/namespace that contains it.
+# The default value is: NO.
+
+SEPARATE_MEMBER_PAGES  = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
+
+TAB_SIZE               = 4
+
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines.
+
+ALIASES                =
+
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding "class=itcl::class"
+# will allow you to use the command class in the itcl::class meaning.
+
+TCL_SUBST              =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_FOR_C  = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_JAVA   = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
+
+OPTIMIZE_FOR_FORTRAN   = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_VHDL   = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
+# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
+# Fortran. In the latter case the parser tries to guess whether the code is fixed
+# or free formatted code, this is the default for Fortran type files), VHDL. For
+# instance to make doxygen treat .inc files as Fortran files (default is PHP),
+# and .f files as C (default is Fortran), use: inc=Fortran f=C.
+#
+# Note For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
+
+EXTENSION_MAPPING      =
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibilities issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT       = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word
+# or globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT       = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT    = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT        = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
+SIP_SUPPORT            = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen to replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+
+IDL_PROPERTY_SUPPORT   = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+# The default value is: NO.
+
+DISTRIBUTE_GROUP_DOC   = NO
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
+
+SUBGROUPING            = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
+
+INLINE_GROUPED_CLASSES = YES
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
+
+INLINE_SIMPLE_STRUCTS  = NO
+
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
+
+TYPEDEF_HIDES_STRUCT   = NO
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
+
+LOOKUP_CACHE_SIZE      = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+
+EXTRACT_ALL            = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIVATE        = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PACKAGE        = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
+
+EXTRACT_STATIC         = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
+
+EXTRACT_LOCAL_CLASSES  = YES
+
+# This flag is only useful for Objective-C code. When set to YES local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO only methods in the interface are
+# included.
+# The default value is: NO.
+
+EXTRACT_LOCAL_METHODS  = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous namespace
+# are hidden.
+# The default value is: NO.
+
+EXTRACT_ANON_NSPACES   = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_MEMBERS     = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO these classes will be included in the various overviews. This option has
+# no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_CLASSES     = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# (class|struct|union) declarations. If set to NO these declarations will be
+# included in the documentation.
+# The default value is: NO.
+
+HIDE_FRIEND_COMPOUNDS  = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
+HIDE_IN_BODY_DOCS      = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
+INTERNAL_DOCS          = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+# names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+# The default value is: system dependent.
+
+CASE_SENSE_NAMES       = NO
+
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES the
+# scope will be hidden.
+# The default value is: NO.
+
+HIDE_SCOPE_NAMES       = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
+
+SHOW_INCLUDE_FILES     = YES
+
+# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
+# grouped member an include statement to the documentation, telling the reader
+# which file to include in order to use the member.
+# The default value is: NO.
+
+SHOW_GROUPED_MEMB_INC  = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
+
+FORCE_LOCAL_INCLUDES   = NO
+
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
+
+INLINE_INFO            = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order.
+# The default value is: YES.
+
+SORT_MEMBER_DOCS       = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order. Note that
+# this will also influence the order of the classes in the class list.
+# The default value is: NO.
+
+SORT_BRIEF_DOCS        = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
+
+SORT_GROUP_NAMES       = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
+
+SORT_BY_SCOPE_NAME     = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
+STRICT_PROTO_MATCHING  = NO
+
+# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the
+# todo list. This list is created by putting \todo commands in the
+# documentation.
+# The default value is: YES.
+
+GENERATE_TODOLIST      = YES
+
+# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the
+# test list. This list is created by putting \test commands in the
+# documentation.
+# The default value is: YES.
+
+GENERATE_TESTLIST      = YES
+
+# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
+GENERATE_BUGLIST       = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
+
+ENABLED_SECTIONS       = $(RAPIDJSON_SECTIONS)
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES  = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES the list
+# will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES        = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES             = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES        = NO
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# by doxygen. Whatever the program writes to standard output is used as the file
+# version. For an example see the documentation.
+
+FILE_VERSION_FILTER    =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
+LAYOUT_FILE            =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. Do not use file names with spaces, bibtex cannot handle them. See
+# also \cite for info how to create references.
+
+CITE_BIB_FILES         =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
+QUIET                  = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
+WARNINGS               = YES
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
+WARN_IF_UNDOCUMENTED   = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
+
+WARN_IF_DOC_ERROR      = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO doxygen will only warn about wrong or incomplete parameter
+# documentation, but not about the absence of documentation.
+# The default value is: NO.
+
+WARN_NO_PARAMDOC       = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
+
+WARN_FORMAT            = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
+
+WARN_LOGFILE           =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces.
+# Note: If this tag is empty the current directory is searched.
+
+INPUT                  = readme.md \
+                         CHANGELOG.md \
+                         include/rapidjson/rapidjson.h \
+                         include/ \
+                         doc/features.md \
+                         doc/tutorial.md \
+                         doc/pointer.md \
+                         doc/stream.md \
+                         doc/encoding.md \
+                         doc/dom.md \
+                         doc/sax.md \
+                         doc/schema.md \
+                         doc/performance.md \
+                         doc/internals.md \
+                         doc/faq.md
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: http://www.gnu.org/software/libiconv) for the list of
+# possible encodings.
+# The default value is: UTF-8.
+
+INPUT_ENCODING         = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank the
+# following patterns are tested: *.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
+# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
+# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
+# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
+# *.qsf, *.as and *.js.
+
+FILE_PATTERNS          = *.c \
+                         *.cc \
+                         *.cxx \
+                         *.cpp \
+                         *.h \
+                         *.hh \
+                         *.hxx \
+                         *.hpp \
+                         *.inc \
+                         *.md
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
+
+RECURSIVE              = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE                = ./include/rapidjson/msinttypes/
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+# The default value is: NO.
+
+EXCLUDE_SYMLINKS       = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS       =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS        = internal
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH           =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS       = *
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE      = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH             = ./doc
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+
+INPUT_FILTER           =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS        =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER ) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES    = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE = readme.md
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER         = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
+
+INLINE_SOURCES         = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
+
+STRIP_CODE_COMMENTS    = NO
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# function all documented functions referencing it will be listed.
+# The default value is: NO.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
+REFERENCES_RELATION    = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS        = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see http://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS              = NO
+
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS       = YES
+
+# If the CLANG_ASSISTED_PARSING tag is set to YES, then doxygen will use the
+# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
+# cost of reduced performance. This can be particularly helpful with template
+# rich C++ code for which doxygen's built-in parser lacks the necessary type
+# information.
+# Note: The availability of this option depends on whether or not doxygen was
+# compiled with the --with-libclang option.
+# The default value is: NO.
+
+CLANG_ASSISTED_PARSING = NO
+
+# If clang assisted parsing is enabled you can provide the compiler with command
+# line options that you would normally use when invoking the compiler. Note that
+# the include paths will already be set by doxygen for the files and directories
+# specified with INPUT and INCLUDE_PATH.
+# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
+
+CLANG_OPTIONS          =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX     = NO
+
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+COLS_IN_ALPHA_INDEX    = 5
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX          =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
+# The default value is: YES.
+
+GENERATE_HTML          = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT            = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION    = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML, the header file must include any scripts and style sheets
+# that doxygen needs, which depend on the configuration options used (e.g.
+# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER            = ./doc/misc/header.html
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER            = ./doc/misc/footer.html
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET        =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user-
+# defined cascading style sheet that is included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet file to the output directory. For an example
+# see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_STYLESHEET  = ./doc/misc/doxygenextra.css
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES       =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the stylesheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE    = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT    = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA  = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP         = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS  = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries 1 will produce a full collapsed tree by default. 0 is a special value
+# representing an infinite number of entries and will result in a full expanded
+# tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET        = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME        = "Doxygen generated docs"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID       = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME  = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP      = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE               =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler ( hhc.exe). If non-empty
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION           =
+
+# The GENERATE_CHI flag controls if a separate .chi index file is generated (
+# YES) or that it should be included in the master .chm file ( NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI           = NO
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_INDEX_ENCODING     =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated (
+# YES) or a normal table of contents ( NO) in the .chm file. Furthermore it
+# enables the Previous and Next buttons.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+BINARY_TOC             = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+TOC_EXPAND             = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_QHP           = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QCH_FILE               =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_NAMESPACE          = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_VIRTUAL_FOLDER     = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_NAME   =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_ATTRS  =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_SECT_FILTER_ATTRS  =
+
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHG_LOCATION           =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated, together with the HTML files, they form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files needs
+# to be copied into the plugins directory of eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP   = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID         = org.doxygen.Project
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX          = YES
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW      = YES
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE   = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH         = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW    = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE       = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT    = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using prerendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX            = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT         = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH        = http://www.mathjax.org/mathjax
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS     =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE       =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SEARCHENGINE           = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using Javascript. There
+# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
+# setting. When disabled, doxygen will generate a PHP script for searching and
+# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
+# and searching needs to be provided by external tools. See the section
+# "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SERVER_BASED_SEARCH    = NO
+
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH        = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL       =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE        = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID     =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of
+# the project to a relative location where the documentation can be found. The format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS  =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
+# The default value is: YES.
+
+GENERATE_LATEX         = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_OUTPUT           = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when enabling USE_PDFLATEX this option is only used for generating
+# bitmaps for formulas in the HTML output, but not in the Makefile that is
+# written to the output directory.
+# The default file is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_CMD_NAME         = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+MAKEINDEX_CMD_NAME     = makeindex
+
+# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+COMPACT_LATEX          = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PAPER_TYPE             = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. To get the times font for
+# instance you can specify
+# EXTRA_PACKAGES=times
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+EXTRA_PACKAGES         =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber. Doxygen will
+# replace them by respectively the title of the page, the current date and time,
+# only the current date, the version number of doxygen, the project name (see
+# PROJECT_NAME), or the project number (see PROJECT_NUMBER).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HEADER           =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_FOOTER           =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES      =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PDF_HYPERLINKS         = YES
+
+# If the LATEX_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES to get a
+# higher quality PDF documentation.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+USE_PDFLATEX           = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BATCHMODE        = NO
+
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HIDE_INDICES     = NO
+
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_SOURCE_CODE      = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. See
+# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BIB_STYLE        = plain
+
+#---------------------------------------------------------------------------
+# Configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
+
+GENERATE_RTF           = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_OUTPUT             = rtf
+
+# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+COMPACT_RTF            = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_HYPERLINKS         = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's config
+# file, i.e. a series of assignments. You only have to provide replacements,
+# missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_STYLESHEET_FILE    =
+
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's config file. A template extensions file can be generated
+# using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_EXTENSIONS_FILE    =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
+
+GENERATE_MAN           = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_OUTPUT             = man
+
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_EXTENSION          = .3
+
+# The MAN_SUBDIR tag determines the name of the directory created within
+# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
+# MAN_EXTENSION with the initial . removed.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_SUBDIR             =
+
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_LINKS              = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
+
+GENERATE_XML           = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_OUTPUT             = xml
+
+# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_PROGRAMLISTING     = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
+GENERATE_DOCBOOK       = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_OUTPUT         = docbook
+
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen
+# Definitions (see http://autogen.sf.net) file that captures the structure of
+# the code including all documentation. Note that this feature is still
+# experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_AUTOGEN_DEF   = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_PERLMOD       = NO
+
+# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_LATEX          = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_PRETTY         = YES
+
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
+
+ENABLE_PREPROCESSING   = YES
+
+# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names
+# in the source code. If set to NO only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+MACRO_EXPANSION        = YES
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_ONLY_PREDEF     = YES
+
+# If the SEARCH_INCLUDES tag is set to YES the includes files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SEARCH_INCLUDES        = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
+
+INCLUDE_PATH           =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will be
+# used.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+INCLUDE_FILE_PATTERNS  =
+
+# The PREDEFINED tag can be used to specify one or more macro names that are
+# defined before the preprocessor is started (similar to the -D option of e.g.
+# gcc). The argument of the tag is a list of macros of the form: name or
+# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
+# is assumed. To prevent a macro definition from being undefined via #undef or
+# recursively expanded use the := operator instead of the = operator.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+PREDEFINED             = \
+	RAPIDJSON_DOXYGEN_RUNNING \
+	RAPIDJSON_NAMESPACE_BEGIN="namespace rapidjson {" \
+	RAPIDJSON_NAMESPACE_END="}" \
+	RAPIDJSON_REMOVEFPTR_(x)=x \
+	RAPIDJSON_ENABLEIF_RETURN(cond,returntype)="RAPIDJSON_REMOVEFPTR_ returntype" \
+	RAPIDJSON_DISABLEIF_RETURN(cond,returntype)="RAPIDJSON_REMOVEFPTR_ returntype"
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
+# tag can be used to specify a list of macro names that should be expanded. The
+# macro definition that is found in the sources will be used. Use the PREDEFINED
+# tag if you want to use a different macro definition that overrules the
+# definition found in the source code.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_AS_DEFINED      = \
+	RAPIDJSON_NOEXCEPT
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+# remove all references to function-like macros that are alone on a line, have
+# an all uppercase name, and do not end with a semicolon. Such function macros
+# are typically used for boiler-plate code, and will confuse the parser if not
+# removed.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SKIP_FUNCTION_MACROS   = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES tag can be used to specify one or more tag files. For each tag
+# file the location of the external documentation should be added. The format of
+# a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where loc1 and loc2 can be relative or absolute paths or URLs. See the
+# section "Linking to external documentation" for more information about the use
+# of tag files.
+# Note: Each tag file must have a unique name (where the name does NOT include
+# the path). If a tag file is not located in the directory in which doxygen is
+# run, you must also specify the path to the tagfile here.
+
+TAGFILES               =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
+# tag file that is based on the input files it reads. See section "Linking to
+# external documentation" for more information about the usage of tag files.
+
+GENERATE_TAGFILE       =
+
+# If the ALLEXTERNALS tag is set to YES all external class will be listed in the
+# class index. If set to NO only the inherited external classes will be listed.
+# The default value is: NO.
+
+ALLEXTERNALS           = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in
+# the modules index. If set to NO, only the current project's groups will be
+# listed.
+# The default value is: YES.
+
+EXTERNAL_GROUPS        = YES
+
+# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
+# the related pages index. If set to NO, only the current project's pages will
+# be listed.
+# The default value is: YES.
+
+EXTERNAL_PAGES         = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of 'which perl').
+# The default file (with absolute path) is: /usr/bin/perl.
+
+PERL_PATH              = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram
+# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
+# NO turns the diagrams off. Note that this option also works with HAVE_DOT
+# disabled, but it is recommended to install and use dot, since it yields more
+# powerful graphs.
+# The default value is: YES.
+
+CLASS_DIAGRAMS         = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see:
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH            =
+
+# You can include diagrams made with dia in doxygen documentation. Doxygen will
+# then run dia to produce the diagram and insert it in the documentation. The
+# DIA_PATH tag allows you to specify the directory where the dia binary resides.
+# If left empty dia is assumed to be found in the default search path.
+
+DIA_PATH               =
+
+# If set to YES, the inheritance and collaboration graphs will hide inheritance
+# and usage relations if the target is undocumented or is not a class.
+# The default value is: YES.
+
+HIDE_UNDOC_RELATIONS   = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz (see:
+# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# Bell Labs. The other options in this section have no effect if this option is
+# set to NO
+# The default value is: NO.
+
+HAVE_DOT               = NO
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+# to run in parallel. When set to 0 doxygen will base this on the number of
+# processors available in the system. You can set it explicitly to a value
+# larger than 0 to get control over the balance between CPU load and processing
+# speed.
+# Minimum value: 0, maximum value: 32, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_NUM_THREADS        = 0
+
+# When you want a differently looking font in the dot files that doxygen
+# generates you can specify the font name using DOT_FONTNAME. You need to make
+# sure dot is able to find the font, which can be done by putting it in a
+# standard location or by setting the DOTFONTPATH environment variable or by
+# setting DOT_FONTPATH to the directory containing the font.
+# The default value is: Helvetica.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTNAME           = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
+# dot graphs.
+# Minimum value: 4, maximum value: 24, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTSIZE           = 10
+
+# By default doxygen will tell dot to use the default font as specified with
+# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
+# the path where dot can find it using this tag.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTPATH           =
+
+# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+# each documented class showing the direct and indirect inheritance relations.
+# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CLASS_GRAPH            = YES
+
+# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+# graph for each documented class showing the direct and indirect implementation
+# dependencies (inheritance, containment, and class references variables) of the
+# class with other documented classes.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+COLLABORATION_GRAPH    = YES
+
+# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+# groups, showing the direct groups dependencies.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GROUP_GRAPHS           = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LOOK               = NO
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
+# class node. If there are many fields or methods and many nodes the graph may
+# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
+# number of items for each type to make the size more manageable. Set this to 0
+# for no limit. Note that the threshold may be exceeded by 50% before the limit
+# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
+# but if the number exceeds 15, the total amount of fields shown is limited to
+# 10.
+# Minimum value: 0, maximum value: 100, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LIMIT_NUM_FIELDS   = 10
+
+# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+# collaboration graphs will show the relations between templates and their
+# instances.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+TEMPLATE_RELATIONS     = NO
+
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDE_GRAPH          = YES
+
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDED_BY_GRAPH      = YES
+
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALL_GRAPH             = NO
+
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALLER_GRAPH           = NO
+
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a
+# graphical hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GRAPHICAL_HIERARCHY    = YES
+
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DIRECTORY_GRAPH        = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot.
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, jpg, gif and svg.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_IMAGE_FORMAT       = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
+# the SVG files visible. Older versions of IE do not have SVG support.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INTERACTIVE_SVG        = NO
+
+# The DOT_PATH tag can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_PATH               =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the \dotfile
+# command).
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOTFILE_DIRS           =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
+
+MSCFILE_DIRS           =
+
+# The DIAFILE_DIRS tag can be used to specify one or more directories that
+# contain dia files that are included in the documentation (see the \diafile
+# command).
+
+DIAFILE_DIRS           =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+# that will be shown in the graph. If the number of nodes in a graph becomes
+# larger than this value, doxygen will truncate the graph, which is visualized
+# by representing a node as a red box. Note that if the number of direct
+# children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
+# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# Minimum value: 0, maximum value: 10000, default value: 50.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_GRAPH_MAX_NODES    = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
+# generated by dot. A depth value of 3 means that only nodes reachable from the
+# root by following a path via at most 3 edges will be shown. Nodes that lay
+# further from the root node will be omitted. Note that setting this option to 1
+# or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+# Minimum value: 0, maximum value: 1000, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+MAX_DOT_GRAPH_DEPTH    = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not seem
+# to support this out of the box.
+#
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_TRANSPARENT        = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10) support
+# this, this feature is disabled by default.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_MULTI_TARGETS      = NO
+
+# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+# explaining the meaning of the various boxes and arrows in the dot generated
+# graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GENERATE_LEGEND        = YES
+
+# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot
+# files that are used to generate the various graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_CLEANUP            = YES
diff --git a/doc/Doxyfile.zh-cn.in b/doc/Doxyfile.zh-cn.in
new file mode 100644
index 0000000..6a08f72
--- /dev/null
+++ b/doc/Doxyfile.zh-cn.in
@@ -0,0 +1,2369 @@
+# Doxyfile 1.8.7
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all text
+# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
+# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
+# for the list of possible encodings.
+# The default value is: UTF-8.
+
+DOXYFILE_ENCODING      = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
+
+PROJECT_NAME           = RapidJSON
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
+
+PROJECT_NUMBER         =
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF          = "一个C++快速JSON解析器及生成器,包含SAX/DOM风格API"
+
+# With the PROJECT_LOGO tag one can specify an logo or icon that is included in
+# the documentation. The maximum height of the logo should not exceed 55 pixels
+# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
+# to the output directory.
+
+PROJECT_LOGO           =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
+
+OUTPUT_DIRECTORY       = @CMAKE_CURRENT_BINARY_DIR@
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise cause
+# performance problems for the file system.
+# The default value is: NO.
+
+CREATE_SUBDIRS         = NO
+
+# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
+# characters to appear in the names of generated files. If set to NO, non-ASCII
+# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
+# U+3044.
+# The default value is: NO.
+
+ALLOW_UNICODE_NAMES    = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
+# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
+# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
+# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
+# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
+# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
+# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
+# Ukrainian and Vietnamese.
+# The default value is: English.
+
+OUTPUT_LANGUAGE        = Chinese
+
+# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
+
+BRIEF_MEMBER_DESC      = YES
+
+# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+# The default value is: YES.
+
+REPEAT_BRIEF           = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity):The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
+
+ABBREVIATE_BRIEF       = "The $name class" \
+                         "The $name widget" \
+                         "The $name file" \
+                         is \
+                         provides \
+                         specifies \
+                         contains \
+                         represents \
+                         a \
+                         an \
+                         the
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# doxygen will generate a detailed section even if there is only a brief
+# description.
+# The default value is: NO.
+
+ALWAYS_DETAILED_SEC    = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+# The default value is: NO.
+
+INLINE_INHERITED_MEMB  = NO
+
+# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
+# before files name in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used
+# The default value is: YES.
+
+FULL_PATH_NAMES        = YES
+
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
+
+STRIP_FROM_PATH        =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
+
+STRIP_FROM_INC_PATH    =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful if your file system doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
+
+SHORT_NAMES            = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
+
+JAVADOC_AUTOBRIEF      = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
+
+QT_AUTOBRIEF           = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that rational rose comments are
+# not recognized any more.
+# The default value is: NO.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
+
+INHERIT_DOCS           = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
+# new page for each member. If set to NO, the documentation of a member will be
+# part of the file/class/namespace that contains it.
+# The default value is: NO.
+
+SEPARATE_MEMBER_PAGES  = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
+
+TAB_SIZE               = 4
+
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines.
+
+ALIASES                =
+
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding "class=itcl::class"
+# will allow you to use the command class in the itcl::class meaning.
+
+TCL_SUBST              =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_FOR_C  = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_JAVA   = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
+
+OPTIMIZE_FOR_FORTRAN   = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_VHDL   = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
+# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
+# Fortran. In the latter case the parser tries to guess whether the code is fixed
+# or free formatted code, this is the default for Fortran type files), VHDL. For
+# instance to make doxygen treat .inc files as Fortran files (default is PHP),
+# and .f files as C (default is Fortran), use: inc=Fortran f=C.
+#
+# Note: For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
+
+EXTENSION_MAPPING      =
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibilities issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT       = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word
+# or globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT       = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT    = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT        = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
+SIP_SUPPORT            = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen to replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+
+IDL_PROPERTY_SUPPORT   = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+# The default value is: NO.
+
+DISTRIBUTE_GROUP_DOC   = NO
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
+
+SUBGROUPING            = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
+
+INLINE_GROUPED_CLASSES = YES
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
+
+INLINE_SIMPLE_STRUCTS  = NO
+
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
+
+TYPEDEF_HIDES_STRUCT   = NO
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
+
+LOOKUP_CACHE_SIZE      = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+
+EXTRACT_ALL            = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIVATE        = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PACKAGE        = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
+
+EXTRACT_STATIC         = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
+
+EXTRACT_LOCAL_CLASSES  = YES
+
+# This flag is only useful for Objective-C code. When set to YES local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO only methods in the interface are
+# included.
+# The default value is: NO.
+
+EXTRACT_LOCAL_METHODS  = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous namespace
+# are hidden.
+# The default value is: NO.
+
+EXTRACT_ANON_NSPACES   = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_MEMBERS     = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO these classes will be included in the various overviews. This option has
+# no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_CLASSES     = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# (class|struct|union) declarations. If set to NO these declarations will be
+# included in the documentation.
+# The default value is: NO.
+
+HIDE_FRIEND_COMPOUNDS  = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
+HIDE_IN_BODY_DOCS      = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
+INTERNAL_DOCS          = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+# names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+# The default value is: system dependent.
+
+CASE_SENSE_NAMES       = NO
+
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES the
+# scope will be hidden.
+# The default value is: NO.
+
+HIDE_SCOPE_NAMES       = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
+
+SHOW_INCLUDE_FILES     = YES
+
+# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
+# grouped member an include statement to the documentation, telling the reader
+# which file to include in order to use the member.
+# The default value is: NO.
+
+SHOW_GROUPED_MEMB_INC  = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
+
+FORCE_LOCAL_INCLUDES   = NO
+
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
+
+INLINE_INFO            = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order.
+# The default value is: YES.
+
+SORT_MEMBER_DOCS       = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order. Note that
+# this will also influence the order of the classes in the class list.
+# The default value is: NO.
+
+SORT_BRIEF_DOCS        = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
+
+SORT_GROUP_NAMES       = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
+
+SORT_BY_SCOPE_NAME     = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
+STRICT_PROTO_MATCHING  = NO
+
+# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the
+# todo list. This list is created by putting \todo commands in the
+# documentation.
+# The default value is: YES.
+
+GENERATE_TODOLIST      = YES
+
+# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the
+# test list. This list is created by putting \test commands in the
+# documentation.
+# The default value is: YES.
+
+GENERATE_TESTLIST      = YES
+
+# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
+GENERATE_BUGLIST       = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
+
+ENABLED_SECTIONS       = $(RAPIDJSON_SECTIONS)
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES  = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES the list
+# will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES        = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES             = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES        = NO
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command command input-file, where command is the value of the
+# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
+# by doxygen. Whatever the program writes to standard output is used as the file
+# version. For an example see the documentation.
+
+FILE_VERSION_FILTER    =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
+LAYOUT_FILE            =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. Do not use file names with spaces, bibtex cannot handle them. See
+# also \cite for info how to create references.
+
+CITE_BIB_FILES         =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
+QUIET                  = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
+WARNINGS               = YES
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
+WARN_IF_UNDOCUMENTED   = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
+
+WARN_IF_DOC_ERROR      = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO doxygen will only warn about wrong or incomplete parameter
+# documentation, but not about the absence of documentation.
+# The default value is: NO.
+
+WARN_NO_PARAMDOC       = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
+
+WARN_FORMAT            = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
+
+WARN_LOGFILE           =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces.
+# Note: If this tag is empty the current directory is searched.
+
+INPUT                  = readme.zh-cn.md \
+                         CHANGELOG.md \
+                         include/rapidjson/rapidjson.h \
+                         include/ \
+                         doc/features.zh-cn.md \
+                         doc/tutorial.zh-cn.md \
+                         doc/pointer.zh-cn.md \
+                         doc/stream.zh-cn.md \
+                         doc/encoding.zh-cn.md \
+                         doc/dom.zh-cn.md \
+                         doc/sax.zh-cn.md \
+                         doc/schema.zh-cn.md \
+                         doc/performance.zh-cn.md \
+                         doc/internals.zh-cn.md \
+                         doc/faq.zh-cn.md
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: http://www.gnu.org/software/libiconv) for the list of
+# possible encodings.
+# The default value is: UTF-8.
+
+INPUT_ENCODING         = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank the
+# following patterns are tested: *.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
+# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
+# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
+# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
+# *.qsf, *.as and *.js.
+
+FILE_PATTERNS          = *.c \
+                         *.cc \
+                         *.cxx \
+                         *.cpp \
+                         *.h \
+                         *.hh \
+                         *.hxx \
+                         *.hpp \
+                         *.inc \
+                         *.md
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
+
+RECURSIVE              = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE                = ./include/rapidjson/msinttypes/
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+# The default value is: NO.
+
+EXCLUDE_SYMLINKS       = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS       =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS        = internal
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH           =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS       = *
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE      = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH             = ./doc
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+
+INPUT_FILTER           =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS        =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES    = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE = readme.zh-cn.md
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER         = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
+
+INLINE_SOURCES         = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
+
+STRIP_CODE_COMMENTS    = NO
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# function all documented functions referencing it will be listed.
+# The default value is: NO.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
+REFERENCES_RELATION    = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS        = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see http://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS              = NO
+
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS       = YES
+
+# If the CLANG_ASSISTED_PARSING tag is set to YES, then doxygen will use the
+# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
+# cost of reduced performance. This can be particularly helpful with template
+# rich C++ code for which doxygen's built-in parser lacks the necessary type
+# information.
+# Note: The availability of this option depends on whether or not doxygen was
+# compiled with the --with-libclang option.
+# The default value is: NO.
+
+CLANG_ASSISTED_PARSING = NO
+
+# If clang assisted parsing is enabled you can provide the compiler with command
+# line options that you would normally use when invoking the compiler. Note that
+# the include paths will already be set by doxygen for the files and directories
+# specified with INPUT and INCLUDE_PATH.
+# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
+
+CLANG_OPTIONS          =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX     = NO
+
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+COLS_IN_ALPHA_INDEX    = 5
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX          =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
+# The default value is: YES.
+
+GENERATE_HTML          = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT            = html/zh-cn
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION    = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML the header file that includes any scripts and style sheets
+# that doxygen needs, which is dependent on the configuration options used (e.g.
+# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER            = ./doc/misc/header.html
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER            = ./doc/misc/footer.html
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET        =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user-
+# defined cascading style sheet that is included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet file to the output directory. For an example
+# see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_STYLESHEET  = ./doc/misc/doxygenextra.css
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES       =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the stylesheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE    = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT    = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, the value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA  = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP         = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS  = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries 1 will produce a full collapsed tree by default. 0 is a special value
+# representing an infinite number of entries and will result in a full expanded
+# tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET        = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME        = "Doxygen generated docs"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID       = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME  = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP      = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE               =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler ( hhc.exe). If non-empty
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION           =
+
+# The GENERATE_CHI flag controls if a separate .chi index file is generated (
+# YES) or that it should be included in the master .chm file ( NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI           = NO
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_INDEX_ENCODING     =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated (
+# YES) or a normal table of contents ( NO) in the .chm file. Furthermore it
+# enables the Previous and Next buttons.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+BINARY_TOC             = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+TOC_EXPAND             = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_QHP           = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QCH_FILE               =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_NAMESPACE          = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_VIRTUAL_FOLDER     = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_NAME   =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_ATTRS  =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_SECT_FILTER_ATTRS  =
+
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHG_LOCATION           =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated, together with the HTML files, they form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files needs
+# to be copied into the plugins directory of eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP   = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID         = org.doxygen.Project
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX          = YES
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW      = YES
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE   = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH         = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW    = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE       = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT    = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using prerendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX            = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT         = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH        = http://www.mathjax.org/mathjax
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS     =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE       =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SEARCHENGINE           = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using Javascript. There
+# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
+# setting. When disabled, doxygen will generate a PHP script for searching and
+# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
+# and searching needs to be provided by external tools. See the section
+# "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SERVER_BASED_SEARCH    = NO
+
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH        = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL       =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE        = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID     =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
+# to a relative location where the documentation can be found. The format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS  =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
+# The default value is: YES.
+
+GENERATE_LATEX         = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_OUTPUT           = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when enabling USE_PDFLATEX this option is only used for generating
+# bitmaps for formulas in the HTML output, but not in the Makefile that is
+# written to the output directory.
+# The default file is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_CMD_NAME         = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+MAKEINDEX_CMD_NAME     = makeindex
+
+# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+COMPACT_LATEX          = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PAPER_TYPE             = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. To get the times font for
+# instance you can specify
+# EXTRA_PACKAGES=times
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+EXTRA_PACKAGES         =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber. Doxygen will
+# replace them by respectively the title of the page, the current date and time,
+# only the current date, the version number of doxygen, the project name (see
+# PROJECT_NAME), or the project number (see PROJECT_NUMBER).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HEADER           =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_FOOTER           =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES      =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PDF_HYPERLINKS         = YES
+
+# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES to get a
+# higher quality PDF documentation.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+USE_PDFLATEX           = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BATCHMODE        = NO
+
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HIDE_INDICES     = NO
+
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_SOURCE_CODE      = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. See
+# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BIB_STYLE        = plain
+
+#---------------------------------------------------------------------------
+# Configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
+
+GENERATE_RTF           = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_OUTPUT             = rtf
+
+# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+COMPACT_RTF            = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_HYPERLINKS         = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's config
+# file, i.e. a series of assignments. You only have to provide replacements,
+# missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_STYLESHEET_FILE    =
+
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's config file. A template extensions file can be generated
+# using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_EXTENSIONS_FILE    =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
+
+GENERATE_MAN           = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_OUTPUT             = man
+
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_EXTENSION          = .3
+
+# The MAN_SUBDIR tag determines the name of the directory created within
+# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
+# MAN_EXTENSION with the initial . removed.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_SUBDIR             =
+
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_LINKS              = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
+
+GENERATE_XML           = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_OUTPUT             = xml
+
+# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_PROGRAMLISTING     = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
+GENERATE_DOCBOOK       = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_OUTPUT         = docbook
+
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen
+# Definitions (see http://autogen.sf.net) file that captures the structure of
+# the code including all documentation. Note that this feature is still
+# experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_AUTOGEN_DEF   = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_PERLMOD       = NO
+
+# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_LATEX          = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_PRETTY         = YES
+
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
+
+ENABLE_PREPROCESSING   = YES
+
+# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names
+# in the source code. If set to NO only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+MACRO_EXPANSION        = YES
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_ONLY_PREDEF     = YES
+
+# If the SEARCH_INCLUDES tag is set to YES the includes files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SEARCH_INCLUDES        = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
+
+INCLUDE_PATH           =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will be
+# used.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+INCLUDE_FILE_PATTERNS  =
+
+# The PREDEFINED tag can be used to specify one or more macro names that are
+# defined before the preprocessor is started (similar to the -D option of e.g.
+# gcc). The argument of the tag is a list of macros of the form: name or
+# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
+# is assumed. To prevent a macro definition from being undefined via #undef or
+# recursively expanded use the := operator instead of the = operator.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+PREDEFINED             = \
+	RAPIDJSON_DOXYGEN_RUNNING \
+	RAPIDJSON_NAMESPACE_BEGIN="namespace rapidjson {" \
+	RAPIDJSON_NAMESPACE_END="}" \
+	RAPIDJSON_REMOVEFPTR_(x)=x \
+	RAPIDJSON_ENABLEIF_RETURN(cond,returntype)="RAPIDJSON_REMOVEFPTR_ returntype" \
+	RAPIDJSON_DISABLEIF_RETURN(cond,returntype)="RAPIDJSON_REMOVEFPTR_ returntype"
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
+# tag can be used to specify a list of macro names that should be expanded. The
+# macro definition that is found in the sources will be used. Use the PREDEFINED
+# tag if you want to use a different macro definition that overrules the
+# definition found in the source code.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_AS_DEFINED      = \
+	RAPIDJSON_NOEXCEPT
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+# remove all references to function-like macros that are alone on a line, have
+# an all uppercase name, and do not end with a semicolon. Such function macros
+# are typically used for boiler-plate code, and will confuse the parser if not
+# removed.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SKIP_FUNCTION_MACROS   = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES tag can be used to specify one or more tag files. For each tag
+# file the location of the external documentation should be added. The format of
+# a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where loc1 and loc2 can be relative or absolute paths or URLs. See the
+# section "Linking to external documentation" for more information about the use
+# of tag files.
+# Note: Each tag file must have a unique name (where the name does NOT include
+# the path). If a tag file is not located in the directory in which doxygen is
+# run, you must also specify the path to the tagfile here.
+
+TAGFILES               =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
+# tag file that is based on the input files it reads. See section "Linking to
+# external documentation" for more information about the usage of tag files.
+
+GENERATE_TAGFILE       =
+
+# If the ALLEXTERNALS tag is set to YES all external class will be listed in the
+# class index. If set to NO only the inherited external classes will be listed.
+# The default value is: NO.
+
+ALLEXTERNALS           = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in
+# the modules index. If set to NO, only the current project's groups will be
+# listed.
+# The default value is: YES.
+
+EXTERNAL_GROUPS        = YES
+
+# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
+# the related pages index. If set to NO, only the current project's pages will
+# be listed.
+# The default value is: YES.
+
+EXTERNAL_PAGES         = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of 'which perl').
+# The default file (with absolute path) is: /usr/bin/perl.
+
+PERL_PATH              = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram
+# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
+# NO turns the diagrams off. Note that this option also works with HAVE_DOT
+# disabled, but it is recommended to install and use dot, since it yields more
+# powerful graphs.
+# The default value is: YES.
+
+CLASS_DIAGRAMS         = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see:
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH            =
+
+# You can include diagrams made with dia in doxygen documentation. Doxygen will
+# then run dia to produce the diagram and insert it in the documentation. The
+# DIA_PATH tag allows you to specify the directory where the dia binary resides.
+# If left empty dia is assumed to be found in the default search path.
+
+DIA_PATH               =
+
+# If set to YES, the inheritance and collaboration graphs will hide inheritance
+# and usage relations if the target is undocumented or is not a class.
+# The default value is: YES.
+
+HIDE_UNDOC_RELATIONS   = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz (see:
+# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# Bell Labs. The other options in this section have no effect if this option is
+# set to NO
+# The default value is: NO.
+
+HAVE_DOT               = NO
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+# to run in parallel. When set to 0 doxygen will base this on the number of
+# processors available in the system. You can set it explicitly to a value
+# larger than 0 to get control over the balance between CPU load and processing
+# speed.
+# Minimum value: 0, maximum value: 32, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_NUM_THREADS        = 0
+
+# When you want a differently looking font in the dot files that doxygen
+# generates you can specify the font name using DOT_FONTNAME. You need to make
+# sure dot is able to find the font, which can be done by putting it in a
+# standard location or by setting the DOTFONTPATH environment variable or by
+# setting DOT_FONTPATH to the directory containing the font.
+# The default value is: Helvetica.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTNAME           = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
+# dot graphs.
+# Minimum value: 4, maximum value: 24, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTSIZE           = 10
+
+# By default doxygen will tell dot to use the default font as specified with
+# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
+# the path where dot can find it using this tag.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTPATH           =
+
+# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+# each documented class showing the direct and indirect inheritance relations.
+# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CLASS_GRAPH            = YES
+
+# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+# graph for each documented class showing the direct and indirect implementation
+# dependencies (inheritance, containment, and class references variables) of the
+# class with other documented classes.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+COLLABORATION_GRAPH    = YES
+
+# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+# groups, showing the direct groups dependencies.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GROUP_GRAPHS           = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LOOK               = NO
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
+# class node. If there are many fields or methods and many nodes the graph may
+# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
+# number of items for each type to make the size more manageable. Set this to 0
+# for no limit. Note that the threshold may be exceeded by 50% before the limit
+# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
+# but if the number exceeds 15, the total amount of fields shown is limited to
+# 10.
+# Minimum value: 0, maximum value: 100, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LIMIT_NUM_FIELDS   = 10
+
+# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+# collaboration graphs will show the relations between templates and their
+# instances.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+TEMPLATE_RELATIONS     = NO
+
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDE_GRAPH          = YES
+
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDED_BY_GRAPH      = YES
+
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALL_GRAPH             = NO
+
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALLER_GRAPH           = NO
+
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a graphical
+# hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GRAPHICAL_HIERARCHY    = YES
+
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DIRECTORY_GRAPH        = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot.
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, jpg, gif and svg.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_IMAGE_FORMAT       = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
+# the SVG files visible. Older versions of IE do not have SVG support.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INTERACTIVE_SVG        = NO
+
+# The DOT_PATH tag can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_PATH               =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the \dotfile
+# command).
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOTFILE_DIRS           =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
+
+MSCFILE_DIRS           =
+
+# The DIAFILE_DIRS tag can be used to specify one or more directories that
+# contain dia files that are included in the documentation (see the \diafile
+# command).
+
+DIAFILE_DIRS           =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+# that will be shown in the graph. If the number of nodes in a graph becomes
+# larger than this value, doxygen will truncate the graph, which is visualized
+# by representing a node as a red box. Note that if the number of direct
+# children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
+# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# Minimum value: 0, maximum value: 10000, default value: 50.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_GRAPH_MAX_NODES    = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
+# generated by dot. A depth value of 3 means that only nodes reachable from the
+# root by following a path via at most 3 edges will be shown. Nodes that lay
+# further from the root node will be omitted. Note that setting this option to 1
+# or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+# Minimum value: 0, maximum value: 1000, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+MAX_DOT_GRAPH_DEPTH    = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not seem
+# to support this out of the box.
+#
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_TRANSPARENT        = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10) support
+# this, this feature is disabled by default.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_MULTI_TARGETS      = NO
+
+# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+# explaining the meaning of the various boxes and arrows in the dot generated
+# graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GENERATE_LEGEND        = YES
+
+# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot
+# files that are used to generate the various graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_CLEANUP            = YES
diff --git a/doc/diagram/architecture.dot b/doc/diagram/architecture.dot
new file mode 100644
index 0000000..c816c87
--- /dev/null
+++ b/doc/diagram/architecture.dot
@@ -0,0 +1,50 @@
+digraph {
+	compound=true
+	fontname="Inconsolata, Consolas"
+	fontsize=10
+	margin="0,0"
+	ranksep=0.2
+	nodesep=0.5
+	penwidth=0.5
+	colorscheme=spectral7
+	
+	node [shape=box, fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5, style=filled, fillcolor=white]
+	edge [fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5]
+
+	subgraph cluster1 {
+		margin="10,10"
+		labeljust="left"
+		label = "SAX"
+		style=filled
+		fillcolor=6
+
+		Reader -> Writer [style=invis]
+	}
+
+	subgraph cluster2 {
+		margin="10,10"
+		labeljust="left"
+		label = "DOM"
+		style=filled
+		fillcolor=7
+
+		Value
+		Document
+	}
+
+	Handler [label="<<concept>>\nHandler"]
+
+	{
+		edge [arrowtail=onormal, dir=back]
+		Value -> Document
+		Handler -> Document
+		Handler -> Writer
+	}
+
+	{
+		edge [arrowhead=vee, style=dashed, constraint=false]
+		Reader -> Handler [label="calls"]
+		Value -> Handler [label="calls"]
+		Document -> Reader [label="uses"]
+	}
+}
\ No newline at end of file
diff --git a/doc/diagram/architecture.png b/doc/diagram/architecture.png
new file mode 100644
index 0000000..556c7e7
--- /dev/null
+++ b/doc/diagram/architecture.png
Binary files differ
diff --git a/doc/diagram/insituparsing.dot b/doc/diagram/insituparsing.dot
new file mode 100644
index 0000000..eca0e38
--- /dev/null
+++ b/doc/diagram/insituparsing.dot
@@ -0,0 +1,65 @@
+digraph {
+	compound=true
+	fontname="Inconsolata, Consolas"
+	fontsize=10
+	margin="0,0"
+	ranksep=0.2
+	penwidth=0.5
+	
+	node [fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5]
+	edge [fontname="Inconsolata, Consolas", fontsize=10, arrowhead=normal]
+
+	{
+		node [shape=record, fontsize="8", margin="0.04", height=0.2, color=gray]
+		oldjson [label="\{|\"|m|s|g|\"|:|\"|H|e|l|l|o|\\|n|W|o|r|l|d|!|\"|,|\"|\\|u|0|0|7|3|t|a|r|s|\"|:|1|0|\}", xlabel="Before Parsing"]
+		//newjson [label="\{|\"|<a>m|s|g|\\0|:|\"|<b>H|e|l|l|o|\\n|W|o|r|l|d|!|\\0|\"|,|\"|<c>s|t|a|r|s|\\0|t|a|r|s|:|1|0|\}", xlabel="After Parsing"]
+		newjson [shape=plaintext, label=<
+<table BORDER="0" CELLBORDER="1" CELLSPACING="0" CELLPADDING="2"><tr>
+<td>{</td>
+<td>"</td><td port="a">m</td><td>s</td><td>g</td><td bgcolor="yellow">\\0</td>
+<td>:</td>
+<td>"</td><td port="b">H</td><td>e</td><td>l</td><td>l</td><td>o</td><td bgcolor="yellow">\\n</td><td bgcolor="yellow">W</td><td bgcolor="yellow">o</td><td bgcolor="yellow">r</td><td bgcolor="yellow">l</td><td bgcolor="yellow">d</td><td bgcolor="yellow">!</td><td bgcolor="yellow">\\0</td><td>"</td>
+<td>,</td>
+<td>"</td><td port="c" bgcolor="yellow">s</td><td bgcolor="yellow">t</td><td bgcolor="yellow">a</td><td bgcolor="yellow">r</td><td bgcolor="yellow">s</td><td bgcolor="yellow">\\0</td><td>t</td><td>a</td><td>r</td><td>s</td>
+<td>:</td>
+<td>1</td><td>0</td>
+<td>}</td>
+</tr></table>
+>, xlabel="After Parsing"]
+	}
+
+	subgraph cluster1 {
+		margin="10,10"
+		labeljust="left"
+		label = "Document by In situ Parsing"
+		style=filled
+		fillcolor=gray95
+		node [shape=Mrecord, style=filled, colorscheme=spectral7]
+		
+		root [label="{object|}", fillcolor=3]
+
+		{			
+			msg [label="{string|<a>}", fillcolor=5]
+			helloworld [label="{string|<a>}", fillcolor=5]
+			stars [label="{string|<a>}", fillcolor=5]
+			ten [label="{number|10}", fillcolor=6]
+		}
+	}
+
+	oldjson -> root [label=" ParseInsitu()" lhead="cluster1"]
+	edge [arrowhead=vee]
+	root -> { msg; stars }
+
+	edge [arrowhead="none"]
+	msg  -> helloworld
+	stars -> ten
+
+	{
+		edge [arrowhead=vee, arrowtail=dot, arrowsize=0.5, dir=both, tailclip=false]
+		msg:a:c -> newjson:a
+		helloworld:a:c -> newjson:b
+		stars:a:c -> newjson:c
+	}
+
+	//oldjson -> newjson [style=invis]
+}
\ No newline at end of file
diff --git a/doc/diagram/insituparsing.png b/doc/diagram/insituparsing.png
new file mode 100644
index 0000000..4400c88
--- /dev/null
+++ b/doc/diagram/insituparsing.png
Binary files differ
diff --git a/doc/diagram/iterative-parser-states-diagram.dot b/doc/diagram/iterative-parser-states-diagram.dot
new file mode 100644
index 0000000..82ebfe1
--- /dev/null
+++ b/doc/diagram/iterative-parser-states-diagram.dot
@@ -0,0 +1,62 @@
+digraph {
+    fontname="Inconsolata, Consolas"
+    fontsize=10
+    margin="0,0"
+    penwidth=0.0
+    
+    node [fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5]
+    edge [fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5]
+
+    node [shape = doublecircle]; Start; Finish;
+    node [shape = box; style = "rounded, filled"; fillcolor=white ];
+
+    Start -> ArrayInitial [label=" ["];
+    Start -> ObjectInitial [label=" {"];
+
+    subgraph clusterArray {
+        margin="10,10"
+        style=filled
+        fillcolor=gray95
+        label = "Array"
+        
+        ArrayInitial; Element; ElementDelimiter; ArrayFinish;
+    }
+
+    subgraph clusterObject {
+        margin="10,10"
+        style=filled
+        fillcolor=gray95
+        label = "Object"
+
+        ObjectInitial; MemberKey; KeyValueDelimiter; MemberValue; MemberDelimiter; ObjectFinish;
+    }
+
+    ArrayInitial -> ArrayInitial [label="["];
+    ArrayInitial -> ArrayFinish [label=" ]"];
+    ArrayInitial -> ObjectInitial [label="{", constraint=false];
+    ArrayInitial -> Element [label="string\nfalse\ntrue\nnull\nnumber"];
+
+    Element -> ArrayFinish [label="]"];
+    Element -> ElementDelimiter [label=","];
+
+    ElementDelimiter -> ArrayInitial [label=" ["];
+    ElementDelimiter -> ObjectInitial [label="{"];
+    ElementDelimiter -> Element [label="string\nfalse\ntrue\nnull\nnumber"];
+
+    ObjectInitial -> ObjectFinish [label=" }"];
+    ObjectInitial -> MemberKey [label=" string "];
+
+    MemberKey -> KeyValueDelimiter [label=":"];
+
+    KeyValueDelimiter -> ArrayInitial [label="["];
+    KeyValueDelimiter -> ObjectInitial [label=" {"];
+    KeyValueDelimiter -> MemberValue [label=" string\n false\n true\n null\n number"];
+
+    MemberValue -> ObjectFinish [label="}"];
+    MemberValue -> MemberDelimiter [label=","];
+
+    MemberDelimiter -> MemberKey [label=" string "];
+
+    ArrayFinish -> Finish;
+    ObjectFinish -> Finish;
+}
diff --git a/doc/diagram/iterative-parser-states-diagram.png b/doc/diagram/iterative-parser-states-diagram.png
new file mode 100644
index 0000000..f315494
--- /dev/null
+++ b/doc/diagram/iterative-parser-states-diagram.png
Binary files differ
diff --git a/doc/diagram/makefile b/doc/diagram/makefile
new file mode 100644
index 0000000..3483977
--- /dev/null
+++ b/doc/diagram/makefile
@@ -0,0 +1,10 @@
+# Render each Graphviz .dot diagram in this directory to PNG and PDF.
+%.pdf: %.dot
+	dot $< -Tpdf -o $@
+
+%.png: %.dot
+	dot $< -Tpng -o $@
+
+# := expands the glob once at parse time instead of on every reference.
+DOTFILES := $(basename $(wildcard *.dot))
+.PHONY: all
+all: $(addsuffix .png, $(DOTFILES)) $(addsuffix .pdf, $(DOTFILES))
diff --git a/doc/diagram/move1.dot b/doc/diagram/move1.dot
new file mode 100644
index 0000000..a7c1464
--- /dev/null
+++ b/doc/diagram/move1.dot
@@ -0,0 +1,47 @@
+digraph {
+	compound=true
+	fontname="Inconsolata, Consolas"
+	fontsize=10
+	margin="0,0"
+	ranksep=0.2
+	penwidth=0.5
+
+	node [fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5]
+	edge [fontname="Inconsolata, Consolas", fontsize=10, arrowhead=normal]
+
+	subgraph cluster1 {
+		margin="10,10"
+		labeljust="left"
+		label = "Before"
+		style=filled
+		fillcolor=gray95
+
+		node [shape=Mrecord, style=filled, colorscheme=spectral7]
+
+		{
+			rank = same
+			b1 [label="{b:number|456}", fillcolor=6]
+			a1 [label="{a:number|123}", fillcolor=6]
+		}
+
+		a1 -> b1 [style="dashed", label="Move", dir=back]
+	}
+
+	subgraph cluster2 {
+		margin="10,10"
+		labeljust="left"
+		label = "After"
+		style=filled
+		fillcolor=gray95
+
+		node [shape=Mrecord, style=filled, colorscheme=spectral7]
+
+		{
+			rank = same
+			b2 [label="{b:null|}", fillcolor=1]
+			a2 [label="{a:number|456}", fillcolor=6]
+		}
+		a2 -> b2 [style=invis, dir=back]
+	}
+	b1 -> b2 [style=invis]
+}
\ No newline at end of file
diff --git a/doc/diagram/move1.png b/doc/diagram/move1.png
new file mode 100644
index 0000000..ab322d0
--- /dev/null
+++ b/doc/diagram/move1.png
Binary files differ
diff --git a/doc/diagram/move2.dot b/doc/diagram/move2.dot
new file mode 100644
index 0000000..2319871
--- /dev/null
+++ b/doc/diagram/move2.dot
@@ -0,0 +1,62 @@
+digraph {
+	compound=true
+	fontname="Inconsolata, Consolas"
+	fontsize=10
+	margin="0,0"
+	ranksep=0.2
+	penwidth=0.5
+
+	node [fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5]
+	edge [fontname="Inconsolata, Consolas", fontsize=10, arrowhead=normal]
+
+	subgraph cluster1 {
+		margin="10,10"
+		labeljust="left"
+		label = "Before Copying (Hypothetic)"
+		style=filled
+		fillcolor=gray95
+
+		node [shape=Mrecord, style=filled, colorscheme=spectral7]
+
+		c1 [label="{contacts:array|}", fillcolor=4]
+		c11 [label="{|}"]
+		c12 [label="{|}"]
+		c13 [shape="none", label="...", style="solid"]
+		o1 [label="{o:object|}", fillcolor=3]
+		ghost [label="{o:object|}", style=invis]
+
+		c1 -> o1 [style="dashed", label="AddMember", constraint=false]
+
+		edge [arrowhead=vee]
+		c1 -> { c11; c12; c13 }
+		o1 -> ghost [style=invis]
+	}
+
+	subgraph cluster2 {
+		margin="10,10"
+		labeljust="left"
+		label = "After Copying (Hypothetic)"
+		style=filled
+		fillcolor=gray95
+
+		node [shape=Mrecord, style=filled, colorscheme=spectral7]
+
+		c2 [label="{contacts:array|}", fillcolor=4]
+		c3 [label="{array|}", fillcolor=4]
+		c21 [label="{|}"]
+		c22 [label="{|}"]
+		c23 [shape=none, label="...", style="solid"]
+		o2 [label="{o:object|}", fillcolor=3]
+		cs [label="{string|\"contacts\"}", fillcolor=5]
+		c31 [label="{|}"]
+		c32 [label="{|}"]
+		c33 [shape="none", label="...", style="solid"]
+
+		edge [arrowhead=vee]
+		c2 -> { c21; c22; c23 }
+		o2 -> cs
+		cs -> c3 [arrowhead=none]
+		c3 -> { c31; c32; c33 }
+	}
+	ghost -> o2 [style=invis]
+}
diff --git a/doc/diagram/move2.png b/doc/diagram/move2.png
new file mode 100644
index 0000000..8d4fc5b
--- /dev/null
+++ b/doc/diagram/move2.png
Binary files differ
diff --git a/doc/diagram/move3.dot b/doc/diagram/move3.dot
new file mode 100644
index 0000000..57adb4f
--- /dev/null
+++ b/doc/diagram/move3.dot
@@ -0,0 +1,60 @@
+digraph {
+	compound=true
+	fontname="Inconsolata, Consolas"
+	fontsize=10
+	margin="0,0"
+	ranksep=0.2
+	penwidth=0.5
+	forcelabels=true
+
+	node [fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5]
+	edge [fontname="Inconsolata, Consolas", fontsize=10, arrowhead=normal]
+
+	subgraph cluster1 {
+		margin="10,10"
+		labeljust="left"
+		label = "Before Moving"
+		style=filled
+		fillcolor=gray95
+
+		node [shape=Mrecord, style=filled, colorscheme=spectral7]
+
+		c1 [label="{contacts:array|}", fillcolor=4]
+		c11 [label="{|}"]
+		c12 [label="{|}"]
+		c13 [shape=none, label="...", style="solid"]
+		o1 [label="{o:object|}", fillcolor=3]
+		ghost [label="{o:object|}", style=invis]
+
+		c1 -> o1 [style="dashed", constraint=false, label="AddMember"]
+
+		edge [arrowhead=vee]
+		c1 -> { c11; c12; c13 }
+		o1 -> ghost [style=invis]
+	}
+
+	subgraph cluster2 {
+		margin="10,10"
+		labeljust="left"
+		label = "After Moving"
+		style=filled
+		fillcolor=gray95
+
+		node [shape=Mrecord, style=filled, colorscheme=spectral7]
+
+		c2 [label="{contacts:null|}", fillcolor=1]
+		c3 [label="{array|}", fillcolor=4]
+		c21 [label="{|}"]
+		c22 [label="{|}"]
+		c23 [shape="none", label="...", style="solid"]
+		o2 [label="{o:object|}", fillcolor=3]
+		cs [label="{string|\"contacts\"}", fillcolor=5]
+		c2 -> o2 [style="dashed", constraint=false, label="AddMember", style=invis]
+
+		edge [arrowhead=vee]
+		c3 -> { c21; c22; c23 }
+		o2 -> cs
+		cs -> c3 [arrowhead=none]
+	}
+	ghost -> o2 [style=invis]
+}
diff --git a/doc/diagram/move3.png b/doc/diagram/move3.png
new file mode 100644
index 0000000..558470f
--- /dev/null
+++ b/doc/diagram/move3.png
Binary files differ
diff --git a/doc/diagram/normalparsing.dot b/doc/diagram/normalparsing.dot
new file mode 100644
index 0000000..b15941b
--- /dev/null
+++ b/doc/diagram/normalparsing.dot
@@ -0,0 +1,56 @@
+digraph {
+	compound=true
+	fontname="Inconsolata, Consolas"
+	fontsize=10
+	margin="0,0"
+	ranksep=0.2
+	penwidth=0.5
+	
+	node [fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5]
+	edge [fontname="Inconsolata, Consolas", fontsize=10, arrowhead=normal]
+
+	{
+		node [shape=record, fontsize="8", margin="0.04", height=0.2, color=gray]
+		normaljson [label="\{|\"|m|s|g|\"|:|\"|H|e|l|l|o|\\|n|W|o|r|l|d|!|\"|,|\"|\\|u|0|0|7|3|t|a|r|s\"|:|1|0|\}"]
+
+		{
+			rank = same
+			msgstring  [label="m|s|g|\\0"]
+			helloworldstring  [label="H|e|l|l|o|\\n|W|o|r|l|d|!|\\0"]
+			starsstring [label="s|t|a|r|s\\0"]
+		}
+	}
+
+	subgraph cluster1 {
+		margin="10,10"
+		labeljust="left"
+		label = "Document by Normal Parsing"
+		style=filled
+		fillcolor=gray95
+		node [shape=Mrecord, style=filled, colorscheme=spectral7]
+		
+		root [label="{object|}", fillcolor=3]
+
+		{			
+			msg [label="{string|<a>}", fillcolor=5]
+			helloworld [label="{string|<a>}", fillcolor=5]
+			stars [label="{string|<a>}", fillcolor=5]
+			ten [label="{number|10}", fillcolor=6]
+		}
+	}
+
+	normaljson -> root [label=" Parse()" lhead="cluster1"]
+	edge [arrowhead=vee]
+	root -> { msg; stars }
+
+	edge [arrowhead="none"]
+	msg  -> helloworld
+	stars -> ten
+
+	edge [arrowhead=vee, arrowtail=dot, arrowsize=0.5, dir=both, tailclip=false]
+	msg:a:c -> msgstring:w
+	helloworld:a:c -> helloworldstring:w
+	stars:a:c -> starsstring:w
+
+	msgstring -> helloworldstring -> starsstring [style=invis]
+}
\ No newline at end of file
diff --git a/doc/diagram/normalparsing.png b/doc/diagram/normalparsing.png
new file mode 100644
index 0000000..702512c
--- /dev/null
+++ b/doc/diagram/normalparsing.png
Binary files differ
diff --git a/doc/diagram/simpledom.dot b/doc/diagram/simpledom.dot
new file mode 100644
index 0000000..959cdbb
--- /dev/null
+++ b/doc/diagram/simpledom.dot
@@ -0,0 +1,54 @@
+digraph {
+	compound=true
+	fontname="Inconsolata, Consolas"
+	fontsize=10
+	margin="0,0"
+	ranksep=0.2
+	penwidth=0.5
+	
+	node [fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5]
+	edge [fontname="Inconsolata, Consolas", fontsize=10, arrowhead=normal]
+
+	{
+		node [shape=record, fontsize="8", margin="0.04", height=0.2, color=gray]
+		srcjson [label="\{|\"|p|r|o|j|e|c|t|\"|:|\"|r|a|p|i|d|j|s|o|n|\"|,|\"|s|t|a|r|s|\"|:|1|0|\}"]
+		dstjson [label="\{|\"|p|r|o|j|e|c|t|\"|:|\"|r|a|p|i|d|j|s|o|n|\"|,|\"|s|t|a|r|s|\"|:|1|1|\}"]
+	}
+
+	{
+		node [shape="box", style="filled", fillcolor="gray95"]
+		Document2 [label="(Modified) Document"]
+		Writer
+	}
+
+	subgraph cluster1 {
+		margin="10,10"
+		labeljust="left"
+		label = "Document"
+		style=filled
+		fillcolor=gray95
+		node [shape=Mrecord, style=filled, colorscheme=spectral7]
+		
+		root [label="{object|}", fillcolor=3]
+
+		{			
+			project [label="{string|\"project\"}", fillcolor=5]
+			rapidjson [label="{string|\"rapidjson\"}", fillcolor=5]
+			stars [label="{string|\"stars\"}", fillcolor=5]
+			ten [label="{number|10}", fillcolor=6]
+		}
+
+		edge [arrowhead=vee]
+		root -> { project; stars }
+
+		edge [arrowhead="none"]
+		project -> rapidjson
+		stars -> ten
+	}
+
+	srcjson -> root [label=" Parse()", lhead="cluster1"]
+
+	ten -> Document2 [label=" Increase \"stars\"", ltail="cluster1" ]
+	Document2  -> Writer [label=" Traverse DOM by Accept()"]
+	Writer -> dstjson [label=" Output to StringBuffer"]
+}
\ No newline at end of file
diff --git a/doc/diagram/simpledom.png b/doc/diagram/simpledom.png
new file mode 100644
index 0000000..38d9c5d
--- /dev/null
+++ b/doc/diagram/simpledom.png
Binary files differ
diff --git a/doc/diagram/tutorial.dot b/doc/diagram/tutorial.dot
new file mode 100644
index 0000000..138ddc3
--- /dev/null
+++ b/doc/diagram/tutorial.dot
@@ -0,0 +1,58 @@
+digraph {
+	compound=true
+	fontname="Inconsolata, Consolas"
+	fontsize=10
+	margin="0,0"
+	ranksep=0.2
+	penwidth=0.5
+	
+	node [fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5]
+	edge [fontname="Inconsolata, Consolas", fontsize=10]
+
+	subgraph cluster1 {
+		margin="10,10"
+		labeljust="left"
+		label = "Document"
+		style=filled
+		fillcolor=gray95
+		node [shape=Mrecord, style=filled, colorscheme=spectral7]
+		
+		root [label="{object|}", fillcolor=3]
+
+		{			
+			hello [label="{string|\"hello\"}", fillcolor=5]
+			t [label="{string|\"t\"}", fillcolor=5]
+			f [label="{string|\"f\"}", fillcolor=5]
+			n [label="{string|\"n\"}", fillcolor=5]
+			i [label="{string|\"i\"}", fillcolor=5]
+			pi [label="{string|\"pi\"}", fillcolor=5]
+			a [label="{string|\"a\"}", fillcolor=5]
+
+			world [label="{string|\"world\"}", fillcolor=5]
+			true [label="{true|}", fillcolor=7]
+			false [label="{false|}", fillcolor=2]
+			null [label="{null|}", fillcolor=1]
+			i1 [label="{number|123}", fillcolor=6]
+			pi1 [label="{number|3.1416}", fillcolor=6]
+			array [label="{array|size=4}", fillcolor=4]
+
+			a1 [label="{number|1}", fillcolor=6]
+			a2 [label="{number|2}", fillcolor=6]
+			a3 [label="{number|3}", fillcolor=6]
+			a4 [label="{number|4}", fillcolor=6]
+		}
+
+		edge [arrowhead=vee]
+		root -> { hello; t; f; n; i; pi; a }		
+		array -> { a1; a2; a3; a4 }
+
+		edge [arrowhead=none]
+		hello -> world
+		t -> true
+		f -> false
+		n -> null
+		i -> i1
+		pi -> pi1
+		a -> array
+	}
+}
\ No newline at end of file
diff --git a/doc/diagram/tutorial.png b/doc/diagram/tutorial.png
new file mode 100644
index 0000000..8a12924
--- /dev/null
+++ b/doc/diagram/tutorial.png
Binary files differ
diff --git a/doc/diagram/utilityclass.dot b/doc/diagram/utilityclass.dot
new file mode 100644
index 0000000..1492a8a
--- /dev/null
+++ b/doc/diagram/utilityclass.dot
@@ -0,0 +1,73 @@
+digraph {
+	rankdir=LR
+	compound=true
+	fontname="Inconsolata, Consolas"
+	fontsize=10
+	margin="0,0"
+	ranksep=0.3
+	nodesep=0.15
+	penwidth=0.5
+	colorscheme=spectral7
+	
+	node [shape=box, fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5, style=filled, fillcolor=white]
+	edge [fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5]
+
+	subgraph cluster0 {
+		style=filled
+		fillcolor=4
+
+		Encoding [label="<<concept>>\nEncoding"]
+
+		edge [arrowtail=onormal, dir=back]
+		Encoding -> { UTF8; UTF16; UTF32; ASCII; AutoUTF }
+		UTF16 -> { UTF16LE; UTF16BE }
+		UTF32 -> { UTF32LE; UTF32BE }
+	}
+
+	subgraph cluster1 {
+		style=filled
+		fillcolor=5
+
+		Stream [label="<<concept>>\nStream"]
+		InputByteStream [label="<<concept>>\nInputByteStream"]
+		OutputByteStream [label="<<concept>>\nOutputByteStream"]
+
+		edge [arrowtail=onormal, dir=back]
+		Stream -> { 
+			StringStream; InsituStringStream; StringBuffer; 
+			EncodedInputStream; EncodedOutputStream; 
+			AutoUTFInputStream; AutoUTFOutputStream 
+			InputByteStream; OutputByteStream
+		}
+
		InputByteStream ->	{ MemoryStream; FileReadStream }
+		OutputByteStream -> { MemoryBuffer; FileWriteStream } 
+	}
+
+	subgraph cluster2 {
+		style=filled
+		fillcolor=3
+
+		Allocator [label="<<concept>>\nAllocator"]
+
+		edge [arrowtail=onormal, dir=back]
+		Allocator -> { CrtAllocator; MemoryPoolAllocator }
+	}
+
+	{
+		edge [arrowtail=odiamond, arrowhead=vee, dir=both]
+		EncodedInputStream -> InputByteStream
+		EncodedOutputStream -> OutputByteStream
+		AutoUTFInputStream -> InputByteStream
+		AutoUTFOutputStream -> OutputByteStream
+		MemoryPoolAllocator -> Allocator [label="base", tailport=s]
+	}
+
+	{
+		edge [arrowhead=vee, style=dashed]
+		AutoUTFInputStream -> AutoUTF
+		AutoUTFOutputStream -> AutoUTF
+	}
+
+	//UTF32LE -> Stream [style=invis]
+}
\ No newline at end of file
diff --git a/doc/diagram/utilityclass.png b/doc/diagram/utilityclass.png
new file mode 100644
index 0000000..ce029a4
--- /dev/null
+++ b/doc/diagram/utilityclass.png
Binary files differ
diff --git a/doc/dom.md b/doc/dom.md
new file mode 100644
index 0000000..0079b64
--- /dev/null
+++ b/doc/dom.md
@@ -0,0 +1,280 @@
+# DOM
+
+Document Object Model (DOM) is an in-memory representation of JSON for query and manipulation. The basic usage of DOM is described in [Tutorial](doc/tutorial.md). This section will describe some details and more advanced usages.
+
+[TOC]
+
+# Template {#Template}
+
+In the tutorial, `Value` and `Document` were used. Similarly to `std::string`, these are actually `typedef`s of template classes:
+
+~~~~~~~~~~cpp
+namespace rapidjson {
+
+template <typename Encoding, typename Allocator = MemoryPoolAllocator<> >
+class GenericValue {
+    // ...
+};
+
+template <typename Encoding, typename Allocator = MemoryPoolAllocator<> >
+class GenericDocument : public GenericValue<Encoding, Allocator> {
+    // ...
+};
+
+typedef GenericValue<UTF8<> > Value;
+typedef GenericDocument<UTF8<> > Document;
+
+} // namespace rapidjson
+~~~~~~~~~~
+
+User can customize these template parameters.
+
+## Encoding {#Encoding}
+
+The `Encoding` parameter specifies the encoding of JSON String value in memory. Possible options are `UTF8`, `UTF16`, `UTF32`. Note that, these 3 types are also template class. `UTF8<>` is `UTF8<char>`, which means using char to store the characters. You may refer to [Encoding](doc/encoding.md) for details.
+
+Suppose a Windows application would query localization strings stored in JSON files. Unicode-enabled functions in Windows use UTF-16 (wide character) encoding. No matter what encoding was used in JSON files, we can store the strings in UTF-16 in memory.
+
+~~~~~~~~~~cpp
+using namespace rapidjson;
+
+typedef GenericDocument<UTF16<> > WDocument;
+typedef GenericValue<UTF16<> > WValue;
+
+FILE* fp = fopen("localization.json", "rb"); // non-Windows use "r"
+
+char readBuffer[256];
+FileReadStream bis(fp, readBuffer, sizeof(readBuffer));
+
+AutoUTFInputStream<unsigned, FileReadStream> eis(bis);  // wraps bis into eis
+
+WDocument d;
+d.ParseStream<0, AutoUTF<unsigned> >(eis);
+
+const WValue locale(L"ja"); // Japanese
+
+MessageBoxW(hWnd, d[locale].GetString(), L"Test", MB_OK);
+~~~~~~~~~~
+
+## Allocator {#Allocator}
+
+The `Allocator` defines which allocator class is used when allocating/deallocating memory for `Document`/`Value`. `Document` owns, or references, an `Allocator` instance. On the other hand, `Value` does not do so, in order to reduce memory consumption.
+
+The default allocator used in `GenericDocument` is `MemoryPoolAllocator`. This allocator actually allocates memory sequentially, and cannot deallocate it one by one. This is very suitable when parsing a JSON into a DOM tree.
+
+Another allocator is `CrtAllocator`, of which CRT is short for C RunTime library. This allocator simply calls the standard `malloc()`/`realloc()`/`free()`. When there is a lot of add and remove operations, this allocator may be preferred. But this allocator is far less efficient than `MemoryPoolAllocator`.
+
+# Parsing {#Parsing}
+
+`Document` provides several functions for parsing. In below, (1) is the fundamental function, while the others are helpers which call (1).
+
+~~~~~~~~~~cpp
+using namespace rapidjson;
+
+// (1) Fundamental
+template <unsigned parseFlags, typename SourceEncoding, typename InputStream>
+GenericDocument& GenericDocument::ParseStream(InputStream& is);
+
+// (2) Using the same Encoding for stream
+template <unsigned parseFlags, typename InputStream>
+GenericDocument& GenericDocument::ParseStream(InputStream& is);
+
+// (3) Using default parse flags
+template <typename InputStream>
+GenericDocument& GenericDocument::ParseStream(InputStream& is);
+
+// (4) In situ parsing
+template <unsigned parseFlags>
+GenericDocument& GenericDocument::ParseInsitu(Ch* str);
+
+// (5) In situ parsing, using default parse flags
+GenericDocument& GenericDocument::ParseInsitu(Ch* str);
+
+// (6) Normal parsing of a string
+template <unsigned parseFlags, typename SourceEncoding>
+GenericDocument& GenericDocument::Parse(const Ch* str);
+
+// (7) Normal parsing of a string, using same Encoding of Document
+template <unsigned parseFlags>
+GenericDocument& GenericDocument::Parse(const Ch* str);
+
+// (8) Normal parsing of a string, using default parse flags
+GenericDocument& GenericDocument::Parse(const Ch* str);
+~~~~~~~~~~
+
+The examples of [tutorial](doc/tutorial.md) uses (8) for normal parsing of string. The examples of [stream](doc/stream.md) uses the first three. *In situ* parsing will be described soon.
+
+The `parseFlags` are combination of the following bit-flags:
+
+Parse flags                   | Meaning
+------------------------------|-----------------------------------
+`kParseNoFlags`               | No flag is set.
+`kParseDefaultFlags`          | Default parse flags. It is equal to macro `RAPIDJSON_PARSE_DEFAULT_FLAGS`, which is defined as `kParseNoFlags`.
+`kParseInsituFlag`            | In-situ (destructive) parsing.
+`kParseValidateEncodingFlag`  | Validate encoding of JSON strings.
+`kParseIterativeFlag`         | Iterative(constant complexity in terms of function call stack size) parsing.
+`kParseStopWhenDoneFlag`      | After parsing a complete JSON root from stream, stop further processing the rest of stream. When this flag is used, parser will not generate `kParseErrorDocumentRootNotSingular` error. Using this flag for parsing multiple JSONs in the same stream.
+`kParseFullPrecisionFlag`     | Parse number in full precision (slower). If this flag is not set, the normal precision (faster) is used. Normal precision has maximum 3 [ULP](http://en.wikipedia.org/wiki/Unit_in_the_last_place) error.
+`kParseCommentsFlag`          | Allow one-line `// ...` and multi-line `/* ... */` comments (relaxed JSON syntax).
+`kParseNumbersAsStringsFlag`  | Parse numerical type values as strings.
+`kParseTrailingCommasFlag`    | Allow trailing commas at the end of objects and arrays (relaxed JSON syntax).
+`kParseNanAndInfFlag`         | Allow parsing `NaN`, `Inf`, `Infinity`, `-Inf` and `-Infinity` as `double` values (relaxed JSON syntax).
+
+By using a non-type template parameter, instead of a function parameter, C++ compiler can generate code which is optimized for specified combinations, improving speed, and reducing code size (if only using a single specialization). The downside is the flags needed to be determined in compile-time.
+
+The `SourceEncoding` parameter defines what encoding is in the stream. This can differ from the `Encoding` of the `Document`. See the [Transcoding and Validation](#TranscodingAndValidation) section for details.
+
+And the `InputStream` is type of input stream.
+
+## Parse Error {#ParseError}
+
+When the parse processing succeeded, the `Document` contains the parse results. When there is an error, the original DOM is *unchanged*. And the error state of parsing can be obtained by `bool HasParseError()`,  `ParseErrorCode GetParseError()` and `size_t GetErrorOffset()`.
+
+Parse Error Code                            | Description
+--------------------------------------------|---------------------------------------------------
+`kParseErrorNone`                           | No error.
+`kParseErrorDocumentEmpty`                  | The document is empty.
+`kParseErrorDocumentRootNotSingular`        | The document root must not be followed by other values.
+`kParseErrorValueInvalid`                   | Invalid value.
+`kParseErrorObjectMissName`                 | Missing a name for object member.
+`kParseErrorObjectMissColon`                | Missing a colon after a name of object member.
+`kParseErrorObjectMissCommaOrCurlyBracket`  | Missing a comma or `}` after an object member.
+`kParseErrorArrayMissCommaOrSquareBracket`  | Missing a comma or `]` after an array element.
+`kParseErrorStringUnicodeEscapeInvalidHex`  | Incorrect hex digit after `\\u` escape in string.
+`kParseErrorStringUnicodeSurrogateInvalid`  | The surrogate pair in string is invalid.
+`kParseErrorStringEscapeInvalid`            | Invalid escape character in string.
+`kParseErrorStringMissQuotationMark`        | Missing a closing quotation mark in string.
+`kParseErrorStringInvalidEncoding`          | Invalid encoding in string.
+`kParseErrorNumberTooBig`                   | Number too big to be stored in `double`.
+`kParseErrorNumberMissFraction`             | Miss fraction part in number.
+`kParseErrorNumberMissExponent`             | Miss exponent in number.
+
+The offset of error is defined as the character number from beginning of stream. Currently RapidJSON does not keep track of line number.
+
+To get an error message, RapidJSON provides English messages in `rapidjson/error/en.h`. Users can customize them for other locales, or use a custom localization system.
+
+Here shows an example of parse error handling.
+
+~~~~~~~~~~cpp
+#include "rapidjson/document.h"
+#include "rapidjson/error/en.h"
+
+// ...
+Document d;
+if (d.Parse(json).HasParseError()) {
+    fprintf(stderr, "\nError(offset %u): %s\n", 
+        (unsigned)d.GetErrorOffset(),
+        GetParseError_En(d.GetParseError()));
+    // ...
+}
+~~~~~~~~~~
+
+## In Situ Parsing {#InSituParsing}
+
+From [Wikipedia](http://en.wikipedia.org/wiki/In_situ):
+
+> *In situ* ... is a Latin phrase that translates literally to "on site" or "in position". It means "locally", "on site", "on the premises" or "in place" to describe an event where it takes place, and is used in many different contexts.
+> ...
+> (In computer science) An algorithm is said to be an in situ algorithm, or in-place algorithm, if the extra amount of memory required to execute the algorithm is O(1), that is, does not exceed a constant no matter how large the input. For example, heapsort is an in situ sorting algorithm.
+
+In the normal parsing process, a large overhead is to decode JSON strings and copy them to other buffers. *In situ* parsing decodes those JSON strings at the place where they are stored. It is possible in JSON because the length of a decoded string is always shorter than or equal to the one in JSON. In this context, decoding a JSON string means to process the escapes, such as `"\n"`, `"\u1234"`, etc., and add a null terminator (`'\0'`) at the end of the string.
+
+The following diagrams compare normal and *in situ* parsing. The JSON string values contain pointers to the decoded string.
+
+![normal parsing](diagram/normalparsing.png)
+
+In normal parsing, the decoded strings are copied to freshly allocated buffers. `"\\n"` (2 characters) is decoded as `"\n"` (1 character). `"\\u0073"` (6 characters) is decoded as `"s"` (1 character).
+
+![insitu parsing](diagram/insituparsing.png)
+
+*In situ* parsing just modifies the original JSON. Updated characters are highlighted in the diagram. If the JSON string does not contain an escape character, such as `"msg"`, the parsing process merely replaces the closing double quotation mark with a null character.
+
+Since *in situ* parsing modifies the input, the parsing API needs `char*` instead of `const char*`.
+
+~~~~~~~~~~cpp
+// Read whole file into a buffer
+FILE* fp = fopen("test.json", "r");
+fseek(fp, 0, SEEK_END);
+size_t filesize = (size_t)ftell(fp);
+fseek(fp, 0, SEEK_SET);
+char* buffer = (char*)malloc(filesize + 1);
+size_t readLength = fread(buffer, 1, filesize, fp);
+buffer[readLength] = '\0';
+fclose(fp);
+
+// In situ parsing the buffer into d, buffer will also be modified
+Document d;
+d.ParseInsitu(buffer);
+
+// Query/manipulate the DOM here...
+
+free(buffer);
+// Note: At this point, d may have dangling pointers pointed to the deallocated buffer.
+~~~~~~~~~~
+
+The JSON strings are marked as const-string. But they may not be really "constant". The life cycle of it depends on the JSON buffer.
+
+In situ parsing minimizes allocation overheads and memory copying. Generally this improves cache coherence, which is an important factor of performance in modern computer.
+
+There are some limitations of *in situ* parsing:
+
+1. The whole JSON is in memory.
+2. The source encoding in stream and target encoding in document must be the same.
+3. The buffer needs to be retained until the document is no longer used.
+4. If the DOM need to be used for long period after parsing, and there are few JSON strings in the DOM, retaining the buffer may be a memory waste.
+
+*In situ* parsing is mostly suitable for short-term JSON that only needs to be processed once, and then be released from memory. In practice, these situations are very common, for example, deserializing JSON to C++ objects, processing web requests represented in JSON, etc.
+
+## Transcoding and Validation {#TranscodingAndValidation}
+
+RapidJSON supports conversion between Unicode formats (officially termed UCS Transformation Format) internally. During DOM parsing, the source encoding of the stream can be different from the encoding of the DOM. For example, the source stream contains a UTF-8 JSON, while the DOM is using UTF-16 encoding. There is an example code in [EncodedInputStream](doc/stream.md).
+
+When writing a JSON from DOM to output stream, transcoding can also be used. An example is in [EncodedOutputStream](doc/stream.md).
+
+During transcoding, the source string is decoded into Unicode code points, and then the code points are encoded in the target format. During decoding, it will validate the byte sequence in the source string. If it is not a valid sequence, the parser will be stopped with the `kParseErrorStringInvalidEncoding` error.
+
+When the source encoding of stream is the same as encoding of DOM, by default, the parser will *not* validate the sequence. User may use `kParseValidateEncodingFlag` to force validation.
+
+# Techniques {#Techniques}
+
+Some techniques about using DOM API is discussed here.
+
+## DOM as SAX Event Publisher
+
+In RapidJSON, stringifying a DOM with `Writer` may look a little bit weird.
+
+~~~~~~~~~~cpp
+// ...
+Writer<StringBuffer> writer(buffer);
+d.Accept(writer);
+~~~~~~~~~~
+
+Actually, `Value::Accept()` is responsible for publishing SAX events about the value to the handler. With this design, `Value` and `Writer` are decoupled. `Value` can generate SAX events, and `Writer` can handle those events.
+
+User may create custom handlers for transforming the DOM into other formats. For example, a handler which converts the DOM into XML.
+
+For more about SAX events and handler, please refer to [SAX](doc/sax.md).
+
+## User Buffer {#UserBuffer}
+
+Some applications may try to avoid memory allocations whenever possible.
+
+`MemoryPoolAllocator` can support this by letting the user provide a buffer. The buffer can be on the program stack, or a "scratch buffer" which is statically allocated (a static/global array) for storing temporary data.
+
+`MemoryPoolAllocator` will use the user buffer to satisfy allocations. When the user buffer is used up, it will allocate a chunk of memory from the base allocator (by default the `CrtAllocator`).
+
+Here is an example of using stack memory. The first allocator is for storing values, while the second allocator is for storing temporary data during parsing.
+
+~~~~~~~~~~cpp
+typedef GenericDocument<UTF8<>, MemoryPoolAllocator<>, MemoryPoolAllocator<>> DocumentType;
+char valueBuffer[4096];
+char parseBuffer[1024];
+MemoryPoolAllocator<> valueAllocator(valueBuffer, sizeof(valueBuffer));
+MemoryPoolAllocator<> parseAllocator(parseBuffer, sizeof(parseBuffer));
+DocumentType d(&valueAllocator, sizeof(parseBuffer), &parseAllocator);
+d.Parse(json);
+~~~~~~~~~~
+
+If the total size of allocation is less than 4096+1024 bytes during parsing, this code does not invoke any heap allocation (via `new` or `malloc()`) at all.
+
+User can query the current memory consumption in bytes via `MemoryPoolAllocator::Size()`. And then user can determine a suitable size of user buffer.
diff --git a/doc/dom.zh-cn.md b/doc/dom.zh-cn.md
new file mode 100644
index 0000000..9743b7a
--- /dev/null
+++ b/doc/dom.zh-cn.md
@@ -0,0 +1,284 @@
+# DOM
+
+文档对象模型(Document Object Model, DOM)是一种置于内存中的 JSON 表示方式,以供查询及操作。我们已于 [教程](doc/tutorial.zh-cn.md) 中介绍了 DOM 的基本用法,本节将讲述一些细节及高级用法。
+
+[TOC]
+
+# 模板 {#Template}
+
+教程中使用了 `Value` 和 `Document` 类型。与 `std::string` 相似,这些类型其实是两个模板类的 `typedef`:
+
+~~~~~~~~~~cpp
+namespace rapidjson {
+
+template <typename Encoding, typename Allocator = MemoryPoolAllocator<> >
+class GenericValue {
+    // ...
+};
+
+template <typename Encoding, typename Allocator = MemoryPoolAllocator<> >
+class GenericDocument : public GenericValue<Encoding, Allocator> {
+    // ...
+};
+
+typedef GenericValue<UTF8<> > Value;
+typedef GenericDocument<UTF8<> > Document;
+
+} // namespace rapidjson
+~~~~~~~~~~
+
+使用者可以自定义这些模板参数。
+
+## 编码 {#Encoding}
+
+`Encoding` 参数指明在内存中的 JSON String 使用哪种编码。可行的选项有 `UTF8`、`UTF16`、`UTF32`。要注意这 3 个类型其实也是模板类。`UTF8<>` 等同 `UTF8<char>`,这代表它使用 `char` 来存储字符串。更多细节可以参考 [编码](doc/encoding.zh-cn.md)。
+
+这里是一个例子。假设一个 Windows 应用软件希望查询存储于 JSON 中的本地化字符串。Windows 中含 Unicode 的函数使用 UTF-16(宽字符)编码。无论 JSON 文件使用哪种编码,我们都可以把字符串以 UTF-16 形式存储在内存。
+
+~~~~~~~~~~cpp
+using namespace rapidjson;
+
+typedef GenericDocument<UTF16<> > WDocument;
+typedef GenericValue<UTF16<> > WValue;
+
+FILE* fp = fopen("localization.json", "rb"); // 非 Windows 平台使用 "r"
+
+char readBuffer[256];
+FileReadStream bis(fp, readBuffer, sizeof(readBuffer));
+
+AutoUTFInputStream<unsigned, FileReadStream> eis(bis);  // 包装 bis 成 eis
+
+WDocument d;
+d.ParseStream<0, AutoUTF<unsigned> >(eis);
+
+const WValue locale(L"ja"); // Japanese
+
+MessageBoxW(hWnd, d[locale].GetString(), L"Test", MB_OK);
+~~~~~~~~~~
+
+## 分配器 {#Allocator}
+
+`Allocator` 定义当 `Document`/`Value` 分配或释放内存时使用哪个分配类。`Document` 拥有或引用到一个 `Allocator` 实例。而为了节省内存,`Value` 没有这么做。
+
+`GenericDocument` 的缺省分配器是 `MemoryPoolAllocator`。此分配器实际上会顺序地分配内存,并且不能逐一释放。当要解析一个 JSON 并生成 DOM,这种分配器是非常合适的。
+
+RapidJSON 还提供另一个分配器 `CrtAllocator`,当中 CRT 是 C 运行库(C RunTime library)的缩写。此分配器简单地调用标准的 `malloc()`/`realloc()`/`free()`。当我们需要许多增减操作,这种分配器会更为适合。然而这种分配器远远比 `MemoryPoolAllocator` 低效。
+
+# 解析 {#Parsing}
+
+`Document` 提供几个解析函数。以下的 (1) 是根本的函数,其他都是调用 (1) 的协助函数。
+
+~~~~~~~~~~cpp
+using namespace rapidjson;
+
+// (1) 根本
+template <unsigned parseFlags, typename SourceEncoding, typename InputStream>
+GenericDocument& GenericDocument::ParseStream(InputStream& is);
+
+// (2) 使用流的编码
+template <unsigned parseFlags, typename InputStream>
+GenericDocument& GenericDocument::ParseStream(InputStream& is);
+
+// (3) 使用缺省标志
+template <typename InputStream>
+GenericDocument& GenericDocument::ParseStream(InputStream& is);
+
+// (4) 原位解析
+template <unsigned parseFlags>
+GenericDocument& GenericDocument::ParseInsitu(Ch* str);
+
+// (5) 原位解析,使用缺省标志
+GenericDocument& GenericDocument::ParseInsitu(Ch* str);
+
+// (6) 正常解析一个字符串
+template <unsigned parseFlags, typename SourceEncoding>
+GenericDocument& GenericDocument::Parse(const Ch* str);
+
+// (7) 正常解析一个字符串,使用 Document 的编码
+template <unsigned parseFlags>
+GenericDocument& GenericDocument::Parse(const Ch* str);
+
+// (8) 正常解析一个字符串,使用缺省标志
+GenericDocument& GenericDocument::Parse(const Ch* str);
+~~~~~~~~~~
+
+[教程](doc/tutorial.zh-cn.md) 中的例子使用 (8) 去正常解析字符串。而 [流](doc/stream.zh-cn.md) 的例子使用前 3 个函数。我们将稍后介绍原位(*In situ*)解析。
+
+`parseFlags` 是以下位标志的组合:
+
+解析位标志                    | 意义
+------------------------------|-----------------------------------
+`kParseNoFlags`               | 没有任何标志。
+`kParseDefaultFlags`          | 缺省的解析选项。它等于 `RAPIDJSON_PARSE_DEFAULT_FLAGS` 宏,此宏定义为 `kParseNoFlags`。
+`kParseInsituFlag`            | 原位(破坏性)解析。
+`kParseValidateEncodingFlag`  | 校验 JSON 字符串的编码。
+`kParseIterativeFlag`         | 迭代式(调用堆栈大小为常数复杂度)解析。
+`kParseStopWhenDoneFlag`      | 当从流解析了一个完整的 JSON 根节点之后,停止继续处理余下的流。当使用了此标志,解析器便不会产生 `kParseErrorDocumentRootNotSingular` 错误。可使用本标志去解析同一个流里的多个 JSON。
+`kParseFullPrecisionFlag`     | 使用完整的精确度去解析数字(较慢)。如不设置此标志,则会使用正常的精确度(较快)。正常精确度会有最多 3 个 [ULP](http://en.wikipedia.org/wiki/Unit_in_the_last_place) 的误差。
+`kParseCommentsFlag`          | 容许单行 `// ...` 及多行 `/* ... */` 注释(放宽的 JSON 语法)。
+`kParseNumbersAsStringsFlag`  | 把数字类型解析成字符串。
+`kParseTrailingCommasFlag`    | 容许在对象和数组结束前含有逗号(放宽的 JSON 语法)。
+`kParseNanAndInfFlag`         | 容许 `NaN`、`Inf`、`Infinity`、`-Inf` 及 `-Infinity` 作为 `double` 值(放宽的 JSON 语法)。
+
+由于使用了非类型模板参数,而不是函数参数,C++ 编译器能为个别组合生成代码,以改善性能及减少代码尺寸(当只用单种特化)。缺点是需要在编译期决定标志。
+
+`SourceEncoding` 参数定义流使用了什么编码。这与 `Document` 的 `Encoding` 不相同。细节可参考 [转码和校验](#TranscodingAndValidation) 一节。
+
+此外 `InputStream` 是输入流的类型。
+
+## 解析错误 {#ParseError}
+
+当解析过程顺利完成,`Document` 便会含有解析结果。当过程出现错误,原来的 DOM 会*维持不变*。可使用 `bool HasParseError()`、`ParseErrorCode GetParseError()` 及 `size_t GetErrorOffset()` 获取解析的错误状态。
+
+解析错误代号                                | 描述
+--------------------------------------------|---------------------------------------------------
+`kParseErrorNone`                           | 无错误。
+`kParseErrorDocumentEmpty`                  | 文档是空的。
+`kParseErrorDocumentRootNotSingular`        | 文档的根后面不能有其它值。
+`kParseErrorValueInvalid`                   | 不合法的值。
+`kParseErrorObjectMissName`                 | Object 成员缺少名字。
+`kParseErrorObjectMissColon`                | Object 成员名字后缺少冒号。
+`kParseErrorObjectMissCommaOrCurlyBracket`  | Object 成员后缺少逗号或 `}`。
+`kParseErrorArrayMissCommaOrSquareBracket`  | Array 元素后缺少逗号或 `]` 。
+`kParseErrorStringUnicodeEscapeInvalidHex`  | String 中的 `\\u` 转义符后含非十六进位数字。
+`kParseErrorStringUnicodeSurrogateInvalid`  | String 中的代理对(surrogate pair)不合法。
+`kParseErrorStringEscapeInvalid`            | String 含非法转义字符。
+`kParseErrorStringMissQuotationMark`        | String 缺少关闭引号。
+`kParseErrorStringInvalidEncoding`          | String 含非法编码。
+`kParseErrorNumberTooBig`                   | Number 的值太大,不能存储于 `double`。
+`kParseErrorNumberMissFraction`             | Number 缺少了小数部分。
+`kParseErrorNumberMissExponent`             | Number 缺少了指数。
+
+错误的偏移量定义为从流开始至错误处的字符数量。目前 RapidJSON 不记录错误行号。
+
+要取得错误讯息,RapidJSON 在 `rapidjson/error/en.h` 中提供了英文错误讯息。使用者可以修改它用于其他语言环境,或使用一个自定义的本地化系统。
+
+以下是一个处理错误的例子。
+
+~~~~~~~~~~cpp
+#include "rapidjson/document.h"
+#include "rapidjson/error/en.h"
+
+// ...
+Document d;
+if (d.Parse(json).HasParseError()) {
+    fprintf(stderr, "\nError(offset %u): %s\n", 
+        (unsigned)d.GetErrorOffset(),
+        GetParseError_En(d.GetParseErrorCode()));
+    // ...
+}
+~~~~~~~~~~
+
+## 原位解析 {#InSituParsing}
+
+根据 [维基百科](http://en.wikipedia.org/wiki/In_situ):
+
+> *In situ* ... is a Latin phrase that translates literally to "on site" or "in position". It means "locally", "on site", "on the premises" or "in place" to describe an event where it takes place, and is used in many different contexts.
+> ...
+> (In computer science) An algorithm is said to be an in situ algorithm, or in-place algorithm, if the extra amount of memory required to execute the algorithm is O(1), that is, does not exceed a constant no matter how large the input. For example, heapsort is an in situ sorting algorithm.
+
+> 翻译:*In situ*……是一个拉丁文片语,字面上的意思是指「现场」、「在位置」。在许多不同语境中,它描述一个事件发生的位置,意指「本地」、「现场」、「在处所」、「就位」。
+> ……
+> (在计算机科学中)一个算法若称为原位算法,或在位算法,是指执行该算法所需的额外内存空间是 O(1) 的,换句话说,无论输入大小都只需要常数空间。例如,堆排序是一个原位排序算法。
+
+在正常的解析过程中,对 JSON string 解码并复制至其他缓冲区是一个很大的开销。原位解析(*in situ* parsing)把这些 JSON string 直接解码于它原来存储的地方。由于解码后的 string 长度总是短于或等于原来储存于 JSON 的 string,所以这是可行的。在这个语境下,对 JSON string 进行解码是指处理转义符,如 `"\n"`、`"\u1234"` 等,以及在 string 末端加入空终止符号 (`'\0'`)。
+
+以下的图比较正常及原位解析。JSON string 值包含指向解码后的字符串。
+
+![正常解析](diagram/normalparsing.png)
+
+在正常解析中,解码后的字符串被复制至全新分配的缓冲区中。`"\\n"`(2 个字符)被解码成 `"\n"`(1 个字符)。`"\\u0073"`(6 个字符)被解码成 `"s"`(1 个字符)。
+
+![原位解析](diagram/insituparsing.png)
+
+原位解析直接修改了原来的 JSON。图中高亮了被更新的字符。若 JSON string 不含转义符,例如 `"msg"`,那么解析过程仅仅是以空字符代替结束双引号。
+
+由于原位解析修改了输入,其解析 API 需要 `char*` 而非 `const char*`。
+
+~~~~~~~~~~cpp
+// 把整个文件读入 buffer
+FILE* fp = fopen("test.json", "r");
+fseek(fp, 0, SEEK_END);
+size_t filesize = (size_t)ftell(fp);
+fseek(fp, 0, SEEK_SET);
+char* buffer = (char*)malloc(filesize + 1);
+size_t readLength = fread(buffer, 1, filesize, fp);
+buffer[readLength] = '\0';
+fclose(fp);
+
+// 原位解析 buffer 至 d,buffer 内容会被修改。
+Document d;
+d.ParseInsitu(buffer);
+
+// 在此查询、修改 DOM……
+
+free(buffer);
+// 注意:在这个位置,d 可能含有指向已被释放的 buffer 的悬空指针
+~~~~~~~~~~
+
+JSON string 会被打上 const-string 的标志。但它们可能并非真正的「常数」。它的生命周期取决于存储 JSON 的缓冲区。
+
+原位解析把分配开销及内存复制减至最小。通常这样做能改善缓存一致性,而这对现代计算机来说是一个重要的性能因素。
+
+原位解析有以下限制:
+
+1. 整个 JSON 须存储在内存之中。
+2. 流的来源编码与文档的目标编码必须相同。
+3. 需要保留缓冲区,直至文档不再被使用。
+4. 若 DOM 需要在解析后被长期使用,而 DOM 内只有很少 JSON string,保留缓冲区可能造成内存浪费。
+
+原位解析最适合用于短期的、用完即弃的 JSON。实际应用中,这些场合是非常普遍的,例如反序列化 JSON 至 C++ 对象、处理以 JSON 表示的 web 请求等。
+
+## 转码与校验 {#TranscodingAndValidation}
+
+RapidJSON 内部支持不同 Unicode 格式(正式的术语是 UCS 变换格式)间的转换。在 DOM 解析时,流的来源编码与 DOM 的编码可以不同。例如,来源流可能含有 UTF-8 的 JSON,而 DOM 则使用 UTF-16 编码。在 [EncodedInputStream](doc/stream.zh-cn.md) 一节里有一个例子。
+
+当从 DOM 输出一个 JSON 至输出流之时,也可以使用转码功能。在 [EncodedOutputStream](doc/stream.zh-cn.md) 一节里有一个例子。
+
+在转码过程中,会把来源 string 解码成 Unicode 码点,然后把码点编码成目标格式。在解码时,它会校验来源 string 的字节序列是否合法。若遇上非合法序列,解析器会停止并返回 `kParseErrorStringInvalidEncoding` 错误。
+
+当来源编码与 DOM 的编码相同,解析器缺省地*不会*校验序列。使用者可开启 `kParseValidateEncodingFlag` 去强制校验。
+
+# 技巧 {#Techniques}
+
+这里讨论一些 DOM API 的使用技巧。
+
+## 把 DOM 作为 SAX 事件发表者
+
+在 RapidJSON 中,利用 `Writer` 把 DOM 生成 JSON 的做法,看来有点奇怪。
+
+~~~~~~~~~~cpp
+// ...
+Writer<StringBuffer> writer(buffer);
+d.Accept(writer);
+~~~~~~~~~~
+
+实际上,`Value::Accept()` 是负责发布该值相关的 SAX 事件至处理器的。通过这个设计,`Value` 及 `Writer` 解除了耦合。`Value` 可生成 SAX 事件,而 `Writer` 则可以处理这些事件。
+
+使用者可以创建自定义的处理器,去把 DOM 转换成其它格式。例如,一个把 DOM 转换成 XML 的处理器。
+
+要知道更多关于 SAX 事件与处理器,可参阅 [SAX](doc/sax.zh-cn.md)。
+
+## 使用者缓冲区 {#UserBuffer}
+
+许多应用软件可能需要尽量减少内存分配。
+
+`MemoryPoolAllocator` 可以帮助这方面,它容许使用者提供一个缓冲区。该缓冲区可能置于程序堆栈,或是一个静态分配的「草稿缓冲区(scratch buffer)」(一个静态/全局的数组),用于储存临时数据。
+
+`MemoryPoolAllocator` 会先用使用者缓冲区去解决分配请求。当使用者缓冲区用完,就会从基础分配器(缺省为 `CrtAllocator`)分配一块内存。
+
+以下是使用堆栈内存的例子,第一个分配器用于存储值,第二个用于解析时的临时缓冲。
+
+~~~~~~~~~~cpp
+typedef GenericDocument<UTF8<>, MemoryPoolAllocator<>, MemoryPoolAllocator<>> DocumentType;
+char valueBuffer[4096];
+char parseBuffer[1024];
+MemoryPoolAllocator<> valueAllocator(valueBuffer, sizeof(valueBuffer));
+MemoryPoolAllocator<> parseAllocator(parseBuffer, sizeof(parseBuffer));
+DocumentType d(&valueAllocator, sizeof(parseBuffer), &parseAllocator);
+d.Parse(json);
+~~~~~~~~~~
+
+若解析时分配总量少于 4096+1024 字节时,这段代码不会造成任何堆内存分配(经 `new` 或 `malloc()`)。
+
+使用者可以通过 `MemoryPoolAllocator::Size()` 查询当前已分配的内存大小。那么使用者可以拟定使用者缓冲区的合适大小。
diff --git a/doc/encoding.md b/doc/encoding.md
new file mode 100644
index 0000000..e663aea
--- /dev/null
+++ b/doc/encoding.md
@@ -0,0 +1,146 @@
+# Encoding
+
+According to [ECMA-404](http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf),
+
+> (in Introduction) JSON text is a sequence of Unicode code points.
+
+The earlier [RFC4627](http://www.ietf.org/rfc/rfc4627.txt) stated that,
+
+> (in §3) JSON text SHALL be encoded in Unicode.  The default encoding is UTF-8.
+
+> (in §6) JSON may be represented using UTF-8, UTF-16, or UTF-32. When JSON is written in UTF-8, JSON is 8bit compatible.  When JSON is written in UTF-16 or UTF-32, the binary content-transfer-encoding must be used.
+
+RapidJSON supports various encodings. It can also validate the encodings of JSON, and transcoding JSON among encodings. All these features are implemented internally, without the need for external libraries (e.g. [ICU](http://site.icu-project.org/)).
+
+[TOC]
+
+# Unicode {#Unicode}
+From [Unicode's official website](http://www.unicode.org/standard/WhatIsUnicode.html):
+> Unicode provides a unique number for every character, 
+> no matter what the platform,
+> no matter what the program,
+> no matter what the language.
+
+Those unique numbers are called code points, which is in the range `0x0` to `0x10FFFF`.
+
+## Unicode Transformation Format {#UTF}
+
+There are various encodings for storing Unicode code points. These are called Unicode Transformation Format (UTF). RapidJSON supports the most commonly used UTFs, including
+
+* UTF-8: 8-bit variable-width encoding. It maps a code point to 1–4 bytes.
+* UTF-16: 16-bit variable-width encoding. It maps a code point to 1–2 16-bit code units (i.e., 2–4 bytes).
+* UTF-32: 32-bit fixed-width encoding. It directly maps a code point to a single 32-bit code unit (i.e. 4 bytes).
+
+For UTF-16 and UTF-32, the byte order (endianness) does matter. Within computer memory, they are often stored in the computer's endianness. However, when it is stored in file or transferred over network, we need to state the byte order of the byte sequence, either little-endian (LE) or big-endian (BE). 
+
+RapidJSON provide these encodings via the structs in `rapidjson/encodings.h`:
+
+~~~~~~~~~~cpp
+namespace rapidjson {
+
+template<typename CharType = char>
+struct UTF8;
+
+template<typename CharType = wchar_t>
+struct UTF16;
+
+template<typename CharType = wchar_t>
+struct UTF16LE;
+
+template<typename CharType = wchar_t>
+struct UTF16BE;
+
+template<typename CharType = unsigned>
+struct UTF32;
+
+template<typename CharType = unsigned>
+struct UTF32LE;
+
+template<typename CharType = unsigned>
+struct UTF32BE;
+
+} // namespace rapidjson
+~~~~~~~~~~
+
+For processing text in memory, we normally use `UTF8`, `UTF16` or `UTF32`. For processing text via I/O, we may use `UTF8`, `UTF16LE`, `UTF16BE`, `UTF32LE` or `UTF32BE`.
+
+When using the DOM-style API, the `Encoding` template parameter in `GenericValue<Encoding>` and `GenericDocument<Encoding>` indicates the encoding to be used to represent JSON string in memory. So normally we will use `UTF8`, `UTF16` or `UTF32` for this template parameter. The choice depends on operating systems and other libraries that the application is using. For example, Windows API represents Unicode characters in UTF-16, while most Linux distributions and applications prefer UTF-8.
+
+Example of UTF-16 DOM declaration:
+
+~~~~~~~~~~cpp
+typedef GenericDocument<UTF16<> > WDocument;
+typedef GenericValue<UTF16<> > WValue;
+~~~~~~~~~~
+
+For a detailed example, please check the example in the [DOM's Encoding](doc/stream.md) section.
+
+## Character Type {#CharacterType}
+
+As shown in the declaration, each encoding has a `CharType` template parameter. Actually, it may be a little bit confusing, but each `CharType` stores a code unit, not a character (code point). As mentioned in previous section, a code point may be encoded to 1–4 code units for UTF-8.
+
+For `UTF16(LE|BE)`, `UTF32(LE|BE)`, the `CharType` must be integer type of at least 2 and 4 bytes  respectively.
+
+Note that C++11 introduces `char16_t` and `char32_t`, which can be used for `UTF16` and `UTF32` respectively.
+
+## AutoUTF {#AutoUTF}
+
+Previous encodings are statically bound in compile-time. In other words, user must know exactly which encodings will be used in the memory or streams. However, sometimes we may need to read/write files of different encodings. The encoding needed to be decided in runtime.
+
+`AutoUTF` is an encoding designed for this purpose. It chooses which encoding to be used according to the input or output stream. Currently, it should be used with `EncodedInputStream` and `EncodedOutputStream`.
+
+## ASCII {#ASCII}
+
+Although the JSON standards did not mention [ASCII](http://en.wikipedia.org/wiki/ASCII), sometimes we would like to write 7-bit ASCII JSON for applications that cannot handle UTF-8. Since any JSON can represent Unicode characters in the escaped sequence `\uXXXX`, JSON can always be encoded in ASCII.
+
+Here is an example for writing a UTF-8 DOM into ASCII:
+
+~~~~~~~~~~cpp
+using namespace rapidjson;
+Document d; // UTF8<>
+// ...
+StringBuffer buffer;
+Writer<StringBuffer, Document::EncodingType, ASCII<> > writer(buffer);
+d.Accept(writer);
+std::cout << buffer.GetString();
+~~~~~~~~~~
+
+ASCII can be used in input stream. If the input stream contains bytes with values above 127, it will cause `kParseErrorStringInvalidEncoding` error.
+
+ASCII *cannot* be used in memory (encoding of `Document` or target encoding of `Reader`), as it cannot represent Unicode code points.
+
+# Validation & Transcoding {#ValidationTranscoding}
+
+When RapidJSON parses a JSON, it can validate the input JSON, whether it is a valid sequence of a specified encoding. This option can be turned on by adding `kParseValidateEncodingFlag` in `parseFlags` template parameter.
+
+If the input encoding and output encoding is different, `Reader` and `Writer` will automatically transcode (convert) the text. In this case, `kParseValidateEncodingFlag` is not necessary, as it must decode the input sequence. And if the sequence was unable to be decoded, it must be invalid.
+
+## Transcoder {#Transcoder}
+
+Although the encoding functions in RapidJSON are designed for JSON parsing/generation, user may abuse them for transcoding of non-JSON strings.
+
+Here is an example for transcoding a string from UTF-8 to UTF-16:
+
+~~~~~~~~~~cpp
+#include "rapidjson/encodings.h"
+
+using namespace rapidjson;
+
+const char* s = "..."; // UTF-8 string
+StringStream source(s);
+GenericStringBuffer<UTF16<> > target;
+
+bool hasError = false;
+while (source.Peek() != '\0')
+    if (!Transcoder<UTF8<>, UTF16<> >::Transcode(source, target)) {
+        hasError = true;
+        break;
+    }
+
+if (!hasError) {
+    const wchar_t* t = target.GetString();
+    // ...
+}
+~~~~~~~~~~
+
+You may also use `AutoUTF` and the associated streams for setting source/target encoding in runtime.
diff --git a/doc/encoding.zh-cn.md b/doc/encoding.zh-cn.md
new file mode 100644
index 0000000..808ba52
--- /dev/null
+++ b/doc/encoding.zh-cn.md
@@ -0,0 +1,152 @@
+# 编码
+
+根据 [ECMA-404](http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf):
+
+> (in Introduction) JSON text is a sequence of Unicode code points.
+> 
+> 翻译:JSON 文本是 Unicode 码点的序列。
+
+较早的 [RFC4627](http://www.ietf.org/rfc/rfc4627.txt) 申明:
+
+> (in §3) JSON text SHALL be encoded in Unicode.  The default encoding is UTF-8.
+> 
+> 翻译:JSON 文本应该以 Unicode 编码。缺省的编码为 UTF-8。
+
+> (in §6) JSON may be represented using UTF-8, UTF-16, or UTF-32. When JSON is written in UTF-8, JSON is 8bit compatible.  When JSON is written in UTF-16 or UTF-32, the binary content-transfer-encoding must be used.
+> 
+> 翻译:JSON 可使用 UTF-8、UTF-16 或 UTF-32 表示。当 JSON 以 UTF-8 写入,该 JSON 是 8 位兼容的。当 JSON 以 UTF-16 或 UTF-32 写入,就必须使用二进制的内容传送编码。
+
+RapidJSON 支持多种编码。它也能检查 JSON 的编码,以及在不同编码中进行转码。所有这些功能都是在内部实现,无需使用外部的程序库(如 [ICU](http://site.icu-project.org/))。
+
+[TOC]
+
+# Unicode {#Unicode}
+根据 [Unicode 的官方网站](http://www.unicode.org/standard/translations/t-chinese.html):
+>Unicode 给每个字符提供了一个唯一的数字,
+不论是什么平台、
+不论是什么程序、
+不论是什么语言。
+
+这些唯一数字称为码点(code point),其范围介乎 `0x0` 至 `0x10FFFF` 之间。
+
+## Unicode 转换格式 {#UTF}
+
+存储 Unicode 码点有多种编码方式。这些称为 Unicode 转换格式(Unicode Transformation Format, UTF)。RapidJSON 支持最常用的 UTF,包括:
+
+* UTF-8:8 位可变长度编码。它把一个码点映射至 1 至 4 个字节。
+* UTF-16:16 位可变长度编码。它把一个码点映射至 1 至 2 个 16 位编码单元(即 2 至 4 个字节)。
+* UTF-32:32 位固定长度编码。它直接把码点映射至单个 32 位编码单元(即 4 字节)。
+
+对于 UTF-16 及 UTF-32 来说,字节序(endianness)是有影响的。在内存中,它们通常都是以该计算机的字节序来存储。然而,当要储存在文件中或在网上传输,我们需要指明字节序列的字节序,是小端(little endian, LE)还是大端(big-endian, BE)。 
+
+RapidJSON 通过 `rapidjson/encodings.h` 中的 struct 去提供各种编码:
+
+~~~~~~~~~~cpp
+namespace rapidjson {
+
+template<typename CharType = char>
+struct UTF8;
+
+template<typename CharType = wchar_t>
+struct UTF16;
+
+template<typename CharType = wchar_t>
+struct UTF16LE;
+
+template<typename CharType = wchar_t>
+struct UTF16BE;
+
+template<typename CharType = unsigned>
+struct UTF32;
+
+template<typename CharType = unsigned>
+struct UTF32LE;
+
+template<typename CharType = unsigned>
+struct UTF32BE;
+
+} // namespace rapidjson
+~~~~~~~~~~
+
+对于在内存中的文本,我们正常会使用 `UTF8`、`UTF16` 或 `UTF32`。对于处理经过 I/O 的文本,我们可使用 `UTF8`、`UTF16LE`、`UTF16BE`、`UTF32LE` 或 `UTF32BE`。
+
+当使用 DOM 风格的 API,`GenericValue<Encoding>` 及 `GenericDocument<Encoding>` 里的 `Encoding` 模板参数是用于指明内存中存储的 JSON 字符串使用哪种编码。因此通常我们会在此参数中使用 `UTF8`、`UTF16` 或 `UTF32`。如何选择,视乎应用软件所使用的操作系统及其他程序库。例如,Windows API 使用 UTF-16 表示 Unicode 字符,而多数的 Linux 发行版本及应用软件则更喜欢 UTF-8。
+
+使用 UTF-16 的 DOM 声明例子:
+
+~~~~~~~~~~cpp
+typedef GenericDocument<UTF16<> > WDocument;
+typedef GenericValue<UTF16<> > WValue;
+~~~~~~~~~~
+
+可以在 [DOM's Encoding](doc/stream.zh-cn.md) 一节看到更详细的使用例子。
+
+## 字符类型 {#CharacterType}
+
+从之前的声明中可以看到,每个编码都有一个 `CharType` 模板参数。这可能比较容易混淆,实际上,每个 `CharType` 存储一个编码单元,而不是一个字符(码点)。如之前所谈及,在 UTF-8 中一个码点可能会编码成 1 至 4 个编码单元。
+
+对于 `UTF16(LE|BE)` 及 `UTF32(LE|BE)` 来说,`CharType` 必须分别是一个至少 2 及 4 字节的整数类型。
+
+注意 C++11 新添了 `char16_t` 及 `char32_t` 类型,也可分别用于 `UTF16` 及 `UTF32`。
+
+## AutoUTF {#AutoUTF}
+
+上述所介绍的编码都是在编译期静态绑定的。换句话说,使用者必须知道内存或流之中使用了哪种编码。然而,有时候我们可能需要读写不同编码的文件,而且这些编码需要在运行时才能决定。
+
+`AutoUTF` 是为此而设计的编码。它根据输入或输出流来选择使用哪种编码。目前它应该与 `EncodedInputStream` 及 `EncodedOutputStream` 结合使用。
+
+## ASCII {#ASCII}
+
+虽然 JSON 标准并未提及 [ASCII](http://en.wikipedia.org/wiki/ASCII),有时候我们希望写入 7 位的 ASCII JSON,以供未能处理 UTF-8 的应用程序使用。由于任何 JSON 都可以把 Unicode 字符表示为 `\uXXXX` 转义序列,JSON 总是可用 ASCII 来编码。
+
+以下的例子把 UTF-8 的 DOM 写成 ASCII 的 JSON:
+
+~~~~~~~~~~cpp
+using namespace rapidjson;
+Document d; // UTF8<>
+// ...
+StringBuffer buffer;
+Writer<StringBuffer, Document::EncodingType, ASCII<> > writer(buffer);
+d.Accept(writer);
+std::cout << buffer.GetString();
+~~~~~~~~~~
+
+ASCII 可用于输入流。当输入流包含大于 127 的字节,就会导致 `kParseErrorStringInvalidEncoding` 错误。
+
+ASCII *不能* 用于内存(`Document` 的编码,或 `Reader` 的目标编码),因为它不能表示 Unicode 码点。
+
+# 校验及转码 {#ValidationTranscoding}
+
+当 RapidJSON 解析一个 JSON 时,它能校验输入 JSON,判断它是否所标明编码的合法序列。要开启此选项,请把 `kParseValidateEncodingFlag` 加入 `parseFlags` 模板参数。
+
+若输入编码和输出编码并不相同,`Reader` 及 `Writer` 会自动把文本转码。在这种情况下,并不需要 `kParseValidateEncodingFlag`,因为它必须解码输入序列。若序列不能被解码,它必然是不合法的。
+
+## 转码器 {#Transcoder}
+
+虽然 RapidJSON 的编码功能是为 JSON 解析/生成而设计,使用者也可以“滥用”它们来为非 JSON 字符串转码。
+
+以下的例子把 UTF-8 字符串转码成 UTF-16:
+
+~~~~~~~~~~cpp
+#include "rapidjson/encodings.h"
+
+using namespace rapidjson;
+
+const char* s = "..."; // UTF-8 string
+StringStream source(s);
+GenericStringBuffer<UTF16<> > target;
+
+bool hasError = false;
+while (source.Peek() != '\0')
+    if (!Transcoder<UTF8<>, UTF16<> >::Transcode(source, target)) {
+        hasError = true;
+        break;
+    }
+
+if (!hasError) {
+    const wchar_t* t = target.GetString();
+    // ...
+}
+~~~~~~~~~~
+
+你也可以用 `AutoUTF` 及对应的流来在运行时设置内源/目的之编码。
diff --git a/doc/faq.md b/doc/faq.md
new file mode 100644
index 0000000..9abfdf1
--- /dev/null
+++ b/doc/faq.md
@@ -0,0 +1,289 @@
+# FAQ
+
+[TOC]
+
+## General
+
+1. What is RapidJSON?
+
+   RapidJSON is a C++ library for parsing and generating JSON. You may check all [features](doc/features.md) of it.
+
+2. Why is RapidJSON named so?
+
+   It is inspired by [RapidXML](http://rapidxml.sourceforge.net/), which is a fast XML DOM parser.
+
+3. Is RapidJSON similar to RapidXML?
+
+   RapidJSON borrowed some designs of RapidXML, including *in situ* parsing, header-only library. But the two APIs are completely different. Also RapidJSON provide many features that are not in RapidXML.
+
+4. Is RapidJSON free?
+
+   Yes, it is free under MIT license. It can be used in commercial applications. Please check the details in [license.txt](https://github.com/Tencent/rapidjson/blob/master/license.txt).
+
+5. Is RapidJSON small? What are its dependencies? 
+
+   Yes. A simple executable which parses a JSON and prints its statistics is less than 30KB on Windows.
+
+   RapidJSON depends on C++ standard library only.
+
+6. How to install RapidJSON?
+
+   Check [Installation section](https://miloyip.github.io/rapidjson/).
+
+7. Can RapidJSON run on my platform?
+
+   RapidJSON has been tested in many combinations of operating systems, compilers and CPU architecture by the community. But we cannot ensure that it can be run on your particular platform. Building and running the unit test suite will give you the answer.
+
+8. Does RapidJSON support C++03? C++11?
+
+   RapidJSON was firstly implemented for C++03. Later it added optional support of some C++11 features (e.g., move constructor, `noexcept`). RapidJSON shall be compatible with C++03 or C++11 compliant compilers.
+
+9. Does RapidJSON really work in real applications?
+
+   Yes. It is deployed in both client and server real applications. A community member reported that RapidJSON in their system parses 50 million JSONs daily.
+
+10. How is RapidJSON tested?
+
+   RapidJSON contains a unit test suite for automatic testing. [Travis](https://travis-ci.org/Tencent/rapidjson/)(for Linux) and [AppVeyor](https://ci.appveyor.com/project/Tencent/rapidjson/)(for Windows) will compile and run the unit test suite for all modifications. The test process also uses Valgrind (in Linux) to detect memory leaks.
+
+11. Is RapidJSON well documented?
+
+   RapidJSON provides a user guide and API documentation.
+
+12. Are there alternatives?
+
+   Yes, there are a lot of alternatives. For example, [nativejson-benchmark](https://github.com/miloyip/nativejson-benchmark) has a listing of open-source C/C++ JSON libraries. [json.org](http://www.json.org/) also has a list.
+
+## JSON
+
+1. What is JSON?
+
+   JSON (JavaScript Object Notation) is a lightweight data-interchange format. It uses human readable text format. More details of JSON can be referred to [RFC7159](http://www.ietf.org/rfc/rfc7159.txt) and [ECMA-404](http://www.ecma-international.org/publications/standards/Ecma-404.htm).
+
+2. What are applications of JSON?
+
+   JSON are commonly used in web applications for transferring structured data. It is also used as a file format for data persistence.
+
+3. Does RapidJSON conform to the JSON standard?
+
+   Yes. RapidJSON is fully compliant with [RFC7159](http://www.ietf.org/rfc/rfc7159.txt) and [ECMA-404](http://www.ecma-international.org/publications/standards/Ecma-404.htm). It can handle corner cases, such as supporting null character and surrogate pairs in JSON strings.
+
+4. Does RapidJSON support relaxed syntax?
+
+   Currently no. RapidJSON only supports the strict standardized format. Support of relaxed syntax is under discussion in this [issue](https://github.com/Tencent/rapidjson/issues/36).
+
+## DOM and SAX
+
+1. What is DOM style API?
+
+   Document Object Model (DOM) is an in-memory representation of JSON for query and manipulation.
+
+2. What is SAX style API?
+
+   SAX is an event-driven API for parsing and generation.
+
+3. Should I choose DOM or SAX?
+
+   DOM is easy for query and manipulation. SAX is very fast and memory-saving but often more difficult to be applied.
+
+4. What is *in situ* parsing?
+
+   *in situ* parsing decodes the JSON strings directly into the input JSON. This is an optimization which can reduce memory consumption and improve performance, but the input JSON will be modified. Check [in-situ parsing](doc/dom.md) for details.
+
+5. When does parsing generate an error?
+
+   The parser generates an error when the input JSON contains invalid syntax, or a value cannot be represented (a number is too big), or the handler of the parser terminates the parsing. Check [parse error](doc/dom.md) for details.
+
+6. What error information is provided? 
+
+   The error is stored in `ParseResult`, which includes the error code and offset (number of characters from the beginning of JSON). The error code can be translated into human-readable error message.
+
+7. Why not just using `double` to represent JSON number?
+
+   Some applications use 64-bit unsigned/signed integers, and these integers cannot be converted into `double` without loss of precision. So the parser detects whether a JSON number is convertible to different types of integers and/or `double`.
+
+8. How to clear-and-minimize a document or value?
+
+   Call one of the `SetXXX()` methods - they call destructor which deallocates DOM data:
+
+   ~~~~~~~~~~cpp
+   Document d;
+   ...
+   d.SetObject();  // clear and minimize
+   ~~~~~~~~~~
+
+   Alternatively, use equivalent of the [C++ swap with temporary idiom](https://en.wikibooks.org/wiki/More_C%2B%2B_Idioms/Clear-and-minimize):
+   ~~~~~~~~~~cpp
+   Value(kObjectType).Swap(d);
+   ~~~~~~~~~~
+   or equivalent, but slightly longer to type:
+   ~~~~~~~~~~cpp
+   d.Swap(Value(kObjectType).Move()); 
+   ~~~~~~~~~~
+
+9. How to insert a document node into another document?
+
+   Let's take the following two DOM trees represented as JSON documents:
+   ~~~~~~~~~~cpp
+   Document person;
+   person.Parse("{\"person\":{\"name\":{\"first\":\"Adam\",\"last\":\"Thomas\"}}}");
+   
+   Document address;
+   address.Parse("{\"address\":{\"city\":\"Moscow\",\"street\":\"Quiet\"}}");
+   ~~~~~~~~~~
+   Let's assume we want to merge them in such way that the whole `address` document becomes a node of the `person`:
+   ~~~~~~~~~~js
+   { "person": {
+      "name": { "first": "Adam", "last": "Thomas" },
+      "address": { "city": "Moscow", "street": "Quiet" }
+      }
+   }
+   ~~~~~~~~~~
+
+   The most important requirement is to take care of document and value life-cycle, as well as consistent memory management using the right allocator during the value transfer.
+   
+   Simple yet most efficient way to achieve that is to modify the `address` definition above to initialize it with allocator of the `person` document, then we just add the root member of the value:
+   ~~~~~~~~~~cpp
+   Document address(&person.GetAllocator());
+   ...
+   person["person"].AddMember("address", address["address"], person.GetAllocator());
+   ~~~~~~~~~~
+Alternatively, if we don't want to explicitly refer to the root value of `address` by name, we can refer to it via iterator:
+   ~~~~~~~~~~cpp
+   auto addressRoot = address.MemberBegin();
+   person["person"].AddMember(addressRoot->name, addressRoot->value, person.GetAllocator());
+   ~~~~~~~~~~
+   
+   Second way is to deep-clone the value from the address document:
+   ~~~~~~~~~~cpp
+   Value addressValue = Value(address["address"], person.GetAllocator());
+   person["person"].AddMember("address", addressValue, person.GetAllocator());
+   ~~~~~~~~~~
+
+## Document/Value (DOM)
+
+1. What is move semantics? Why?
+
+   Instead of copy semantics, move semantics is used in `Value`. That means, when assigning a source value to a target value, the ownership of source value is moved to the target value.
+
+   Since moving is faster than copying, this design decision forces user to aware of the copying overhead.
+
+2. How to copy a value?
+
+   There are two APIs: constructor with allocator, and `CopyFrom()`. See [Deep Copy Value](doc/tutorial.md) for an example.
+
+3. Why do I need to provide the length of string?
+
+   Since C string is null-terminated, the length of string needs to be computed via `strlen()`, with linear runtime complexity. This incurs an unnecessary overhead of many operations, if the user already knows the length of string.
+
+   Also, RapidJSON can handle `\u0000` (null character) within a string. If a string contains null characters, `strlen()` cannot return the true length of it. In such case user must provide the length of string explicitly.
+
+4. Why do I need to provide allocator parameter in many DOM manipulation API?
+
+   Since the APIs are member functions of `Value`, we do not want to save an allocator pointer in every `Value`.
+
+5. Does it convert between numerical types?
+
+   When using `GetInt()`, `GetUint()`, ... conversion may occur. For integer-to-integer conversion, it only converts when it is safe (otherwise it will assert). However, when converting a 64-bit signed/unsigned integer to double, it will convert, but be aware that it may lose precision. A number with fraction, or an integer larger than 64-bit, can only be obtained by `GetDouble()`.
+
+## Reader/Writer (SAX)
+
+1. Why don't we just `printf` a JSON? Why do we need a `Writer`? 
+
+   Most importantly, `Writer` will ensure the output JSON is well-formed. Calling SAX events incorrectly (e.g. `StartObject()` pairing with `EndArray()`) will assert. Besides, `Writer` will escape strings (e.g., `\n`). Finally, the numeric output of `printf()` may not be a valid JSON number, especially in some locales with digit delimiters. And the number-to-string conversion in `Writer` is implemented with very fast algorithms, which outperform `printf()` or `iostream`.
+
+2. Can I pause the parsing process and resume it later?
+
+   This is not directly supported in the current version due to performance consideration. However, if the execution environment supports multi-threading, user can parse a JSON in a separate thread, and pause it by blocking in the input stream.
+
+## Unicode
+
+1. Does it support UTF-8, UTF-16 and other format?
+
+   Yes. It fully supports UTF-8, UTF-16 (LE/BE), UTF-32 (LE/BE) and ASCII.
+
+2. Can it validate the encoding?
+
+   Yes, just pass `kParseValidateEncodingFlag` to `Parse()`. If there is invalid encoding in the stream, it will generate `kParseErrorStringInvalidEncoding` error.
+
+3. What is surrogate pair? Does RapidJSON support it?
+
+   JSON uses UTF-16 encoding when escaping unicode character, e.g. `\u5927` representing Chinese character "big". To handle characters other than those in basic multilingual plane (BMP), UTF-16 encodes those characters with two 16-bit values, which is called UTF-16 surrogate pair. For example, the Emoji character U+1F602 can be encoded as `\uD83D\uDE02` in JSON.
+
+   RapidJSON fully supports parsing/generating UTF-16 surrogates.
+
+4. Can it handle `\u0000` (null character) in JSON string?
+
+   Yes. RapidJSON fully supports null characters in JSON strings. However, users need to be aware of this and use `GetStringLength()` and related APIs to obtain the true length of a string.
+
+5. Can I output `\uxxxx` for all non-ASCII character?
+
+   Yes, using `ASCII<>` as the output encoding template parameter in `Writer` can enforce escaping those characters.
+
+## Stream
+
+1. I have a big JSON file. Should I load the whole file to memory?
+
+   User can use `FileReadStream` to read the file chunk-by-chunk. But for *in situ* parsing, the whole file must be loaded.
+
+2. Can I parse JSON while it is streamed from network?
+
+   Yes. User can implement a custom stream for this. Please refer to the implementation of `FileReadStream`.
+
+3. I don't know what encoding the JSON will be in. How should I handle it?
+
+   You may use `AutoUTFInputStream` which detects the encoding of input stream automatically. However, it will incur some performance overhead.
+
+4. What is BOM? How does RapidJSON handle it?
+
+   A [byte order mark (BOM)](http://en.wikipedia.org/wiki/Byte_order_mark) sometimes resides at the beginning of a file/stream to indicate its UTF encoding type.
+
+   RapidJSON's `EncodedInputStream` can detect/consume BOM. `EncodedOutputStream` can optionally write a BOM. See [Encoded Streams](doc/stream.md) for example.
+
+5. Why is little/big endian relevant?
+
+   The little/big endianness of a stream is an issue for UTF-16 and UTF-32 streams, but not for UTF-8 streams.
+
+## Performance
+
+1. Is RapidJSON really fast?
+
+   Yes. It may be the fastest open source JSON library. There is a [benchmark](https://github.com/miloyip/nativejson-benchmark) for evaluating performance of C/C++ JSON libraries.
+
+2. Why is it fast?
+
+   Many design decisions of RapidJSON are aimed at time/space performance. These may reduce user-friendliness of APIs. Besides, it also employs low-level optimizations (intrinsics, SIMD) and special algorithms (custom double-to-string, string-to-double conversions).
+
+3. What is SIMD? How is it applied in RapidJSON?
+
+   [SIMD](http://en.wikipedia.org/wiki/SIMD) instructions can perform parallel computation in modern CPUs. RapidJSON supports Intel's SSE2/SSE4.2 and ARM's Neon to accelerate whitespace/tabspace/carriage-return/line-feed skipping. This improves performance of parsing indent formatted JSON. Define `RAPIDJSON_SSE2`, `RAPIDJSON_SSE42` or `RAPIDJSON_NEON` macro to enable this feature. However, running the executable on a machine without such instruction set support will make it crash.
+
+4. Does it consume a lot of memory?
+
+   The design of RapidJSON aims at reducing memory footprint.
+
+   In the SAX API, `Reader` consumes memory proportional to maximum depth of JSON tree, plus maximum length of JSON string.
+
+   In the DOM API, each `Value` consumes exactly 16/24 bytes for 32/64-bit architecture respectively. RapidJSON also uses a special memory allocator to minimize overhead of allocations.
+
+5. What is the purpose of being high performance?
+
+   Some applications need to process very large JSON files. Some server-side applications need to process huge amount of JSONs. Being high performance can improve both latency and throughput. In a broad sense, it will also save energy.
+
+## Gossip
+
+1. Who are the developers of RapidJSON?
+
+   Milo Yip ([miloyip](https://github.com/miloyip)) is the original author of RapidJSON. Many contributors from around the world have improved RapidJSON. Philipp A. Hartmann ([pah](https://github.com/pah)) has implemented a lot of improvements, set up automatic testing, and also taken part in many community discussions. Don Ding ([thebusytypist](https://github.com/thebusytypist)) implemented the iterative parser. Andrii Senkovych ([jollyroger](https://github.com/jollyroger)) completed the CMake migration. Kosta ([Kosta-Github](https://github.com/Kosta-Github)) provided a very neat short-string optimization. Thanks to all other contributors and community members as well.
+
+2. Why do you develop RapidJSON?
+
+   It was just a hobby project initially in 2011. Milo Yip is a game programmer and he just knew about JSON at that time and would like to apply JSON in future projects. As JSON seems very simple he would like to write a header-only and fast library.
+
+3. Why there is a long empty period of development?
+
+   It is basically due to personal issues, such as getting new family members. Also, Milo Yip has spent a lot of spare time on translating "Game Engine Architecture" by Jason Gregory into Chinese.
+
+4. Why did the repository move from Google Code to GitHub?
+
+   This is the trend. And GitHub is much more powerful and convenient.
diff --git a/doc/faq.zh-cn.md b/doc/faq.zh-cn.md
new file mode 100644
index 0000000..bdacfce
--- /dev/null
+++ b/doc/faq.zh-cn.md
@@ -0,0 +1,290 @@
+# 常见问题
+
+[TOC]
+
+## 一般问题
+
+1. RapidJSON 是什么?
+
+   RapidJSON 是一个 C++ 库,用于解析及生成 JSON。读者可参考它的所有 [特点](doc/features.zh-cn.md)。
+
+2. 为什么称作 RapidJSON?
+
+   它的灵感来自于 [RapidXML](http://rapidxml.sourceforge.net/),RapidXML 是一个高速的 XML DOM 解析器。
+
+3. RapidJSON 与 RapidXML 相似么?
+
+   RapidJSON 借镜了 RapidXML 的一些设计, 包括原位(*in situ*)解析、只有头文件的库。但两者的 API 是完全不同的。此外 RapidJSON 也提供许多 RapidXML 没有的特点。
+
+4. RapidJSON 是免费的么?
+
+   是的,它在 MIT 许可协议下免费。它可用于商业软件。详情请参看 [license.txt](https://github.com/Tencent/rapidjson/blob/master/license.txt)。
+
+5. RapidJSON 很小么?它有何依赖?
+
+   是的。在 Windows 上,一个解析 JSON 并打印出统计的可执行文件少于 30KB。
+
+   RapidJSON 仅依赖于 C++ 标准库。
+
+6. 怎样安装 RapidJSON?
+
+   见 [安装一节](../readme.zh-cn.md#安装)。
+
+7. RapidJSON 能否运行于我的平台?
+
+   社区已在多个操作系统/编译器/CPU 架构的组合上测试 RapidJSON。但我们无法确保它能运行于你特定的平台上。只需要生成及执行单元测试便能获取答案。
+
+8. RapidJSON 支持 C++03 么?C++11 呢?
+
+   RapidJSON 开始时在 C++03 上实现。后来加入了可选的 C++11 特性支持(如转移构造函数、`noexcept`)。RapidJSON 应该兼容所有遵从 C++03 或 C++11 的编译器。
+
+9. RapidJSON 是否真的用于实际应用?
+
+   是的。它被配置于前台及后台的真实应用中。一个社区成员说 RapidJSON 在他们的系统中每日解析 5 千万个 JSON。
+
+10. RapidJSON 是如何被测试的?
+
+   RapidJSON 包含一组单元测试去执行自动测试。[Travis](https://travis-ci.org/Tencent/rapidjson/)(供 Linux 平台)及 [AppVeyor](https://ci.appveyor.com/project/Tencent/rapidjson/)(供 Windows 平台)会对所有修改进行编译及执行单元测试。在 Linux 下还会使用 Valgrind 去检测内存泄漏。
+
+11. RapidJSON 是否有完整的文档?
+
+   RapidJSON 提供了使用手册及 API 说明文档。
+
+12. 有没有其他替代品?
+
+   有许多替代品。例如 [nativejson-benchmark](https://github.com/miloyip/nativejson-benchmark) 列出了一些开源的 C/C++ JSON 库。[json.org](http://www.json.org/) 也有一个列表。
+
+## JSON
+
+1. 什么是 JSON?
+
+   JSON (JavaScript Object Notation) 是一个轻量的数据交换格式。它使用人类可读的文本格式。更多关于 JSON 的细节可考 [RFC7159](http://www.ietf.org/rfc/rfc7159.txt) 及 [ECMA-404](http://www.ecma-international.org/publications/standards/Ecma-404.htm)。
+
+2. JSON 有什么应用场合?
+
+   JSON 常用于网页应用程序,以传送结构化数据。它也可作为文件格式用于数据持久化。
+
+3. RapidJSON 是否符合 JSON 标准?
+
+   是。RapidJSON 完全符合 [RFC7159](http://www.ietf.org/rfc/rfc7159.txt) 及 [ECMA-404](http://www.ecma-international.org/publications/standards/Ecma-404.htm)。它能处理一些特殊情况,例如支持 JSON 字符串中含有空字符及代理对(surrogate pair)。
+
+4. RapidJSON 是否支持宽松的语法?
+
+   现时不支持。RapidJSON 只支持严格的标准格式。宽松语法现时在这 [issue](https://github.com/Tencent/rapidjson/issues/36) 中进行讨论。
+
+## DOM 与 SAX
+
+1. 什么是 DOM 风格 API?
+
+   Document Object Model(DOM)是一个储存于内存的 JSON 表示方式,用于查询及修改 JSON。
+
+2. 什么是 SAX 风格 API?
+
+   SAX 是一个事件驱动的 API,用于解析及生成 JSON。
+
+3. 我应用 DOM 还是 SAX?
+
+   DOM 易于查询及修改。SAX 则是非常快及省内存的,但通常较难使用。
+
+4. 什么是原位(*in situ*)解析?
+
+   原位解析会把 JSON 字符串直接解码至输入的 JSON 中。这是一个优化,可减少内存消耗及提升性能,但输入的 JSON 会被更改。进一步细节请参考 [原位解析](doc/dom.zh-cn.md) 。
+
+5. 什么时候会产生解析错误?
+
+   当输入的 JSON 包含非法语法,或不能表示一个值(如 Number 太大),或解析器的处理器中断解析过程,解析器都会产生一个错误。详情请参考 [解析错误](doc/dom.zh-cn.md)。
+
+6. 有什么错误信息?
+
+   错误信息存储在 `ParseResult`,它包含错误代号及偏移值(从 JSON 开始至错误处的字符数目)。可以把错误代号翻译为人类可读的错误讯息。
+
+7. 为何不只使用 `double` 去表示 JSON number?
+
+   一些应用需要使用 64 位无号/有号整数。这些整数不能无损地转换成 `double`。因此解析器会检测一个 JSON number 是否能转换至各种整数类型及 `double`。
+
+8. 如何清空并最小化 `document` 或 `value` 的容量?
+
+   调用 `SetXXX()` 方法 - 这些方法会调用析构函数,并重建空的 Object 或 Array:
+
+   ~~~~~~~~~~cpp
+   Document d;
+   ...
+   d.SetObject();  // clear and minimize
+   ~~~~~~~~~~
+
+   另外,也可以参考在 [C++ swap with temporary idiom](https://en.wikibooks.org/wiki/More_C%2B%2B_Idioms/Clear-and-minimize) 中的一种等价的方法:
+   ~~~~~~~~~~cpp
+   Value(kObjectType).Swap(d);
+   ~~~~~~~~~~
+   或者,使用这个稍微长一点的代码也能完成同样的事情:
+   ~~~~~~~~~~cpp
+   d.Swap(Value(kObjectType).Move()); 
+   ~~~~~~~~~~
+
+9. 如何将一个 `document` 节点插入到另一个 `document` 中?
+
+   比如有以下两个 document(DOM):
+   ~~~~~~~~~~cpp
+   Document person;
+   person.Parse("{\"person\":{\"name\":{\"first\":\"Adam\",\"last\":\"Thomas\"}}}");
+   
+   Document address;
+   address.Parse("{\"address\":{\"city\":\"Moscow\",\"street\":\"Quiet\"}}");
+   ~~~~~~~~~~
+   假设我们希望将整个 `address` 插入到 `person` 中,作为其的一个子节点:
+   ~~~~~~~~~~js
+   { "person": {
+      "name": { "first": "Adam", "last": "Thomas" },
+      "address": { "city": "Moscow", "street": "Quiet" }
+      }
+   }
+   ~~~~~~~~~~
+
+   在插入节点的过程中需要注意 `document` 和 `value` 的生命周期并且正确地使用 allocator 进行内存分配和管理。
+
+   一个简单有效的方法就是修改上述 `address` 变量的定义,让其使用 `person` 的 allocator 初始化,然后将其添加到根节点。
+
+   ~~~~~~~~~~cpp
+   Document address(&person.GetAllocator());
+   ...
+   person["person"].AddMember("address", address["address"], person.GetAllocator());
+   ~~~~~~~~~~
+   当然,如果你不想通过显式地写出 `address` 的 key 来得到其值,可以使用迭代器来实现:
+   ~~~~~~~~~~cpp
+   auto addressRoot = address.MemberBegin();
+   person["person"].AddMember(addressRoot->name, addressRoot->value, person.GetAllocator());
+   ~~~~~~~~~~
+   
+   此外,还可以通过深拷贝 address document 来实现:
+   ~~~~~~~~~~cpp
+   Value addressValue = Value(address["address"], person.GetAllocator());
+   person["person"].AddMember("address", addressValue, person.GetAllocator());
+   ~~~~~~~~~~
+
+## Document/Value (DOM)
+
+1. 什么是转移语义?为什么?
+
+   `Value` 不用复制语义,而使用了转移语义。这是指,当把来源值赋值于目标值时,来源值的所有权会转移至目标值。
+
+   由于转移快于复制,此设计决定强迫使用者注意到复制的消耗。
+
+2. 怎样去复制一个值?
+
+   有两个 API 可用:含 allocator 的构造函数,以及 `CopyFrom()`。可参考 [深复制 Value](doc/tutorial.zh-cn.md) 里的用例。
+
+3. 为什么我需要提供字符串的长度?
+
+   由于 C 字符串是空字符结尾的,需要使用 `strlen()` 去计算其长度,这是线性复杂度的操作。若使用者已知字符串的长度,对很多操作来说会造成不必要的消耗。
+
+   此外,RapidJSON 可处理含有 `\u0000`(空字符)的字符串。若一个字符串含有空字符,`strlen()` 便不能返回真正的字符串长度。在这种情况下使用者必须明确地提供字符串长度。
+
+4. 为什么在许多 DOM 操作 API 中要提供分配器作为参数?
+
+   由于这些 API 是 `Value` 的成员函数,我们不希望为每个 `Value` 储存一个分配器指针。
+
+5. 它会转换各种数值类型么?
+
+   当使用 `GetInt()`、`GetUint()` 等 API 时,可能会发生转换。对于整数至整数转换,仅当保证转换安全才会转换(否则会断言失败)。然而,当把一个 64 位有号/无号整数转换至 double 时,它会转换,但有可能会损失精度。含有小数的数字、或大于 64 位的整数,都只能使用 `GetDouble()` 获取其值。
+
+## Reader/Writer (SAX)
+
+1. 为什么不仅仅用 `printf` 输出一个 JSON?为什么需要 `Writer`?
+
+   最重要的是,`Writer` 能确保输出的 JSON 是格式正确的。错误地调用 SAX 事件(如 `StartObject()` 错配 `EndArray()`)会造成断言失败。此外,`Writer` 会把字符串进行转义(如 `\n`)。最后,`printf()` 的数值输出可能并不是一个合法的 JSON number,特别是某些 locale 会有数字分隔符。而且 `Writer` 的数值字符串转换是使用非常快的算法来实现的,胜过 `printf()` 及 `iostream`。
+
+2. 我能否暂停解析过程,并在稍后继续?
+
+   基于性能考虑,目前版本并不直接支持此功能。然而,若执行环境支持多线程,使用者可以在另一线程解析 JSON,并通过阻塞输入流去暂停。
+
+## Unicode
+
+1. 它是否支持 UTF-8、UTF-16 及其他格式?
+
+   是。它完全支持 UTF-8、UTF-16(大端/小端)、UTF-32(大端/小端)及 ASCII。
+
+2. 它能否检测编码的合法性?
+
+   能。只需把 `kParseValidateEncodingFlag` 参考传给 `Parse()`。若发现在输入流中有非法的编码,它就会产生 `kParseErrorStringInvalidEncoding` 错误。
+
+3. 什么是代理对(surrogate pair)?RapidJSON 是否支持?
+
+   JSON 使用 UTF-16 编码去转义 Unicode 字符,例如 `\u5927` 表示中文字“大”。要处理基本多文种平面(basic multilingual plane,BMP)以外的字符时,UTF-16 会把那些字符编码成两个 16 位值,这称为 UTF-16 代理对。例如,绘文字字符 U+1F602 在 JSON 中可被编码成 `\uD83D\uDE02`。
+
+   RapidJSON 完全支持解析及生成 UTF-16 代理对。 
+
+4. 它能否处理 JSON 字符串中的 `\u0000`(空字符)?
+
+   能。RapidJSON 完全支持 JSON 字符串中的空字符。然而,使用者需要注意到这件事,并使用 `GetStringLength()` 及相关 API 去取得字符串真正长度。
+
+5. 能否对所有非 ASCII 字符输出成 `\uxxxx` 形式?
+
+   可以。只要在 `Writer` 中使用 `ASCII<>` 作为输出编码参数,就可以强逼转义那些字符。
+
+## 流
+
+1. 我有一个很大的 JSON 文件。我应否把它整个载入内存中?
+
+   使用者可使用 `FileReadStream` 去逐块读入文件。但若使用于原位解析,必须载入整个文件。
+
+2. 我能否解析一个从网络上串流进来的 JSON?
+
+   可以。使用者可根据 `FileReadStream` 的实现,去实现一个自定义的流。
+
+3. 我不知道一些 JSON 将会使用哪种编码。怎样处理它们?
+
+   你可以使用 `AutoUTFInputStream`,它能自动检测输入流的编码。然而,它会带来一些性能开销。
+
+4. 什么是 BOM?RapidJSON 怎样处理它?
+
+   [字节顺序标记(byte order mark, BOM)](http://en.wikipedia.org/wiki/Byte_order_mark) 有时会出现于文件/流的开始,以表示其 UTF 编码类型。
+
+   RapidJSON 的 `EncodedInputStream` 可检测/跳过 BOM。`EncodedOutputStream` 可选择是否写入 BOM。可参考 [编码流](doc/stream.zh-cn.md) 中的例子。
+
+5. 为什么会涉及大端/小端?
+
+   流的大端/小端是 UTF-16 及 UTF-32 流要处理的问题,而 UTF-8 不需要处理。
+
+## 性能
+
+1. RapidJSON 是否真的快?
+
+   是。它可能是最快的开源 JSON 库。有一个 [评测](https://github.com/miloyip/nativejson-benchmark) 评估 C/C++ JSON 库的性能。
+
+2. 为什么它会快?
+
+   RapidJSON 的许多设计是针对时间/空间性能来设计的,这些决定可能会影响 API 的易用性。此外,它也使用了许多底层优化(内部函数/intrinsic、SIMD)及特别的算法(自定义的 double 至字符串转换、字符串至 double 的转换)。
+
+3. 什么是 SIMD?它如何用于 RapidJSON?
+
+   [SIMD](http://en.wikipedia.org/wiki/SIMD) 指令可以在现代 CPU 中执行并行运算。RapidJSON 支持使用 Intel 的 SSE2/SSE4.2 和 ARM 的 Neon 来加速对空白符、制表符、回车符和换行符的过滤处理。在解析含缩进的 JSON 时,这能提升性能。只要定义名为 `RAPIDJSON_SSE2` ,`RAPIDJSON_SSE42` 或 `RAPIDJSON_NEON` 的宏,就能启动这个功能。然而,若在不支持这些指令集的机器上执行这些可执行文件,会导致崩溃。
+
+4. 它会消耗许多内存么?
+
+   RapidJSON 的设计目标是减低内存占用。
+
+   在 SAX API 中,`Reader` 消耗的内存与 JSON 树深度加上最长 JSON 字符串的长度成正比。
+
+   在 DOM API 中,每个 `Value` 在 32/64 位架构下分别消耗 16/24 字节。RapidJSON 也使用一个特殊的内存分配器去减少分配的额外开销。
+
+5. 高性能的意义何在?
+
+   有些应用程序需要处理非常大的 JSON 文件。而有些后台应用程序需要处理大量的 JSON。达到高性能同时改善延时及吞吐量。更广义来说,这也可以节省能源。
+
+## 八卦
+
+1. 谁是 RapidJSON 的开发者?
+
+   叶劲峰(Milo Yip,[miloyip](https://github.com/miloyip))是 RapidJSON 的原作者。全世界许多贡献者一直在改善 RapidJSON。Philipp A. Hartmann([pah](https://github.com/pah))实现了许多改进,也设置了自动化测试,而且还参与许多社区讨论。丁欧南(Don Ding,[thebusytypist](https://github.com/thebusytypist))实现了迭代式解析器。Andrii Senkovych([jollyroger](https://github.com/jollyroger))完成了向 CMake 的迁移。Kosta([Kosta-Github](https://github.com/Kosta-Github))提供了一个非常灵巧的短字符串优化。也需要感谢其他贡献者及社区成员。
+
+2. 为何你要开发 RapidJSON?
+
+   在 2011 年开始这项目时,它仅是一个兴趣项目。Milo Yip 是一个游戏程序员,他在那时候认识到 JSON 并希望在未来的项目中使用。由于 JSON 好像很简单,他希望写一个仅有头文件并且快速的程序库。
+
+3. 为什么开发中段有一段长期空档?
+
+   主要是个人因素,例如加入新家庭成员。另外,Milo Yip 也花了许多业馀时间去翻译 Jason Gregory 的《Game Engine Architecture》至中文版《游戏引擎架构》。
+
+4. 为什么这个项目从 Google Code 搬到 GitHub?
+
+   这是大势所趋,而且 GitHub 更为强大及方便。
diff --git a/doc/features.md b/doc/features.md
new file mode 100644
index 0000000..0d79e7f
--- /dev/null
+++ b/doc/features.md
@@ -0,0 +1,104 @@
+# Features
+
+## General
+
+* Cross-platform
+ * Compilers: Visual Studio, gcc, clang, etc.
+ * Architectures: x86, x64, ARM, etc.
+ * Operating systems: Windows, Mac OS X, Linux, iOS, Android, etc.
+* Easy installation
+ * Header files only library. Just copy the headers to your project.
+* Self-contained, minimal dependencies
+ * No STL, BOOST, etc.
+ * Only included `<cstdio>`, `<cstdlib>`, `<cstring>`, `<inttypes.h>`, `<new>`, `<stdint.h>`. 
+* Without C++ exception, RTTI
+* High performance
+ * Use template and inline functions to reduce function call overheads.
+ * Internal optimized Grisu2 and floating point parsing implementations.
+ * Optional SSE2/SSE4.2 support.
+
+## Standard compliance
+
+* RapidJSON should be fully RFC4627/ECMA-404 compliant.
+* Support JSON Pointer (RFC6901).
+* Support JSON Schema Draft v4.
+* Support Unicode surrogate.
+* Support null character (`"\u0000"`)
+ * For example, `["Hello\u0000World"]` can be parsed and handled gracefully. There is API for getting/setting lengths of string.
+* Support optional relaxed syntax.
+ * Single line (`// ...`) and multiple line (`/* ... */`) comments (`kParseCommentsFlag`). 
+ * Trailing commas at the end of objects and arrays (`kParseTrailingCommasFlag`).
+ * `NaN`, `Inf`, `Infinity`, `-Inf` and `-Infinity` as `double` values (`kParseNanAndInfFlag`)
+* [NPM compliant](http://github.com/Tencent/rapidjson/blob/master/doc/npm.md).
+
+## Unicode
+
+* Support UTF-8, UTF-16, UTF-32 encodings, including little endian and big endian.
+ * These encodings are used in input/output streams and in-memory representation.
+* Support automatic detection of encodings in input stream.
+* Support transcoding between encodings internally.
+ * For example, you can read a UTF-8 file and let RapidJSON transcode the JSON strings into UTF-16 in the DOM.
+* Support encoding validation internally.
+ * For example, you can read a UTF-8 file, and let RapidJSON check whether all JSON strings are valid UTF-8 byte sequence.
+* Support custom character types.
+ * By default the character types are `char` for UTF8, `wchar_t` for UTF16, `uint32_t` for UTF32.
+* Support custom encodings.
+
+## API styles
+
+* SAX (Simple API for XML) style API
+ * Similar to [SAX](http://en.wikipedia.org/wiki/Simple_API_for_XML), RapidJSON provides an event sequential access parser API (`rapidjson::GenericReader`). It also provides a generator API (`rapidjson::Writer`) which consumes the same set of events.
+* DOM (Document Object Model) style API
+ * Similar to [DOM](http://en.wikipedia.org/wiki/Document_Object_Model) for HTML/XML, RapidJSON can parse JSON into a DOM representation (`rapidjson::GenericDocument`), for easy manipulation, and finally stringify back to JSON if needed.
+ * The DOM style API (`rapidjson::GenericDocument`) is actually implemented with SAX style API (`rapidjson::GenericReader`). SAX is faster but sometimes DOM is easier. Users can pick their choices according to scenarios.
+
+## Parsing
+
+* Recursive (default) and iterative parser
+ * Recursive parser is faster but prone to stack overflow in extreme cases.
+ * Iterative parser use custom stack to keep parsing state.
+* Support *in situ* parsing.
+ * Parse JSON string values in-place at the source JSON, and then the DOM points to addresses of those strings.
+ * Faster than conventional parsing: no allocation for strings, no copy (if string does not contain escapes), cache-friendly.
+* Support 32-bit/64-bit signed/unsigned integer and `double` for JSON number type.
+* Support parsing multiple JSONs in input stream (`kParseStopWhenDoneFlag`).
+* Error Handling
+ * Support comprehensive error code if parsing failed.
+ * Support error message localization.
+
+## DOM (Document)
+
+* RapidJSON checks range of numerical values for conversions.
+* Optimization for string literal
+ * Only store pointer instead of copying
+* Optimization for "short" strings
+ * Store short string in `Value` internally without additional allocation.
+ * For UTF-8 string: maximum 11 characters in 32-bit, 21 characters in 64-bit (13 characters in x86-64).
+* Optionally support `std::string` (define `RAPIDJSON_HAS_STDSTRING=1`)
+
+## Generation
+
+* Support `rapidjson::PrettyWriter` for adding newlines and indentations.
+
+## Stream
+
+* Support `rapidjson::GenericStringBuffer` for storing the output JSON as string.
+* Support `rapidjson::FileReadStream` and `rapidjson::FileWriteStream` for input/output `FILE` object.
+* Support custom streams.
+
+## Memory
+
+* Minimize memory overheads for DOM.
+ * Each JSON value occupies exactly 16/20 bytes for most 32/64-bit machines (excluding text string).
+* Support fast default allocator.
+ * A stack-based allocator (allocates sequentially; individual allocations cannot be freed; suitable for parsing).
+ * User can provide a pre-allocated buffer. (Possible to parse a number of JSONs without any CRT allocation)
+* Support standard CRT(C-runtime) allocator.
+* Support custom allocators.
+
+## Miscellaneous
+
+* Some C++11 support (optional)
+ * Rvalue reference
+ * `noexcept` specifier
+ * Range-based for loop
diff --git a/doc/features.zh-cn.md b/doc/features.zh-cn.md
new file mode 100644
index 0000000..7662cc1
--- /dev/null
+++ b/doc/features.zh-cn.md
@@ -0,0 +1,103 @@
+# 特点
+
+## 总体
+
+* 跨平台
+ * 编译器:Visual Studio、gcc、clang 等
+ * 架构:x86、x64、ARM 等
+ * 操作系统:Windows、Mac OS X、Linux、iOS、Android 等
+* 容易安装
+ * 只有头文件的库。只需把头文件复制至你的项目中。
+* 独立、最小依赖
+ * 不需依赖 STL、BOOST 等。
+ * 只包含 `<cstdio>`, `<cstdlib>`, `<cstring>`, `<inttypes.h>`, `<new>`, `<stdint.h>`。 
+* 没使用 C++ 异常、RTTI
+* 高性能
+ * 使用模版及内联函数去降低函数调用开销。
+ * 内部经优化的 Grisu2 及浮点数解析实现。
+ * 可选的 SSE2/SSE4.2 支持。
+
+## 符合标准
+
+* RapidJSON 应完全符合 RFC4627/ECMA-404 标准。
+* 支持 JSON Pointer (RFC6901).
+* 支持 JSON Schema Draft v4.
+* 支持 Unicode 代理对(surrogate pair)。
+* 支持空字符(`"\u0000"`)。
+ * 例如,可以优雅地解析及处理 `["Hello\u0000World"]`。含读写字符串长度的 API。
+* 支持可选的放宽语法
+ * 单行(`// ...`)及多行(`/* ... */`) 注释 (`kParseCommentsFlag`)。
+ * 在对象和数组结束前含逗号 (`kParseTrailingCommasFlag`)。
+ * `NaN`、`Inf`、`Infinity`、`-Inf` 及 `-Infinity` 作为 `double` 值 (`kParseNanAndInfFlag`)
+* [NPM 兼容](https://github.com/Tencent/rapidjson/blob/master/doc/npm.md).
+
+## Unicode
+
+* 支持 UTF-8、UTF-16、UTF-32 编码,包括小端序和大端序。
+ * 这些编码用于输入输出流,以及内存中的表示。
+* 支持从输入流自动检测编码。
+* 内部支持编码的转换。
+ * 例如,你可以读取一个 UTF-8 文件,让 RapidJSON 把 JSON 字符串转换至 UTF-16 的 DOM。
+* 内部支持编码校验。
+ * 例如,你可以读取一个 UTF-8 文件,让 RapidJSON 检查是否所有 JSON 字符串是合法的 UTF-8 字节序列。
+* 支持自定义的字符类型。
+ * 预设的字符类型是:UTF-8 为 `char`,UTF-16 为 `wchar_t`,UTF32 为 `uint32_t`。
+* 支持自定义的编码。
+
+## API 风格
+
+* SAX(Simple API for XML)风格 API
+ * 类似于 [SAX](http://en.wikipedia.org/wiki/Simple_API_for_XML), RapidJSON 提供一个事件循序访问的解析器 API(`rapidjson::GenericReader`)。RapidJSON 也提供一个生成器 API(`rapidjson::Writer`),可以处理相同的事件集合。
+* DOM(Document Object Model)风格 API
+ * 类似于 HTML/XML 的 [DOM](http://en.wikipedia.org/wiki/Document_Object_Model),RapidJSON 可把 JSON 解析至一个 DOM 表示方式(`rapidjson::GenericDocument`),以方便操作。如有需要,可把 DOM 转换(stringify)回 JSON。
+ * DOM 风格 API(`rapidjson::GenericDocument`)实际上是由 SAX 风格 API(`rapidjson::GenericReader`)实现的。SAX 更快,但有时 DOM 更易用。用户可根据情况作出选择。
+
+## 解析
+
+* 递归式(预设)及迭代式解析器
+ * 递归式解析器较快,但在极端情况下可出现堆栈溢出。
+ * 迭代式解析器使用自定义的堆栈去维持解析状态。
+* 支持原位(*in situ*)解析。
+ * 把 JSON 字符串的值解析至原 JSON 之中,然后让 DOM 指向那些字符串。
+ * 比常规分析更快:不需字符串的内存分配、不需复制(如字符串不含转义符)、缓存友好。
+* 对于 JSON 数字类型,支持 32-bit/64-bit 的有号/无号整数,以及 `double`。
+* 错误处理
+ * 支持详尽的解析错误代号。
+ * 支持本地化错误信息。
+
+## DOM (Document)
+
+* RapidJSON 在类型转换时会检查数值的范围。
+* 字符串字面量的优化
+ * 只储存指针,不作复制
+* 优化“短”字符串
+ * 在 `Value` 内储存短字符串,无需额外分配。
+ * 对 UTF-8 字符串来说,32 位架构下可存储最多 11 字符,64 位下 21 字符(x86-64 下 13 字符)。
+* 可选地支持 `std::string`(定义 `RAPIDJSON_HAS_STDSTRING=1`)
+
+## 生成
+
+* 支持 `rapidjson::PrettyWriter` 去加入换行及缩进。
+
+## 输入输出流
+
+* 支持 `rapidjson::GenericStringBuffer`,把输出的 JSON 储存于字符串内。
+* 支持 `rapidjson::FileReadStream` 及 `rapidjson::FileWriteStream`,使用 `FILE` 对象作输入输出。
+* 支持自定义输入输出流。
+
+## 内存
+
+* 最小化 DOM 的内存开销。
+ * 对大部分 32/64 位机器而言,每个 JSON 值只占 16 或 20 字节(不包含字符串)。
+* 支持快速的预设分配器。
+ * 它是一个堆栈形式的分配器(顺序分配,不容许单独释放,适合解析过程之用)。
+ * 使用者也可提供一个预分配的缓冲区。(有可能达至无需 CRT 分配就能解析多个 JSON)
+* 支持标准 CRT(C-runtime)分配器。
+* 支持自定义分配器。
+
+## 其他
+
+* 一些 C++11 的支持(可选)
+ * 右值引用(rvalue reference)
+ * `noexcept` 修饰符
+ * 范围 for 循环
diff --git a/doc/internals.md b/doc/internals.md
new file mode 100644
index 0000000..706f98c
--- /dev/null
+++ b/doc/internals.md
@@ -0,0 +1,368 @@
+# Internals
+
+This section records some design and implementation details.
+
+[TOC]
+
+# Architecture {#Architecture}
+
+## SAX and DOM
+
+The basic relationships of SAX and DOM is shown in the following UML diagram.
+
+![Architecture UML class diagram](diagram/architecture.png)
+
+The core of the relationship is the `Handler` concept. From the SAX side, `Reader` parses a JSON from a stream and publish events to a `Handler`. `Writer` implements the `Handler` concept to handle the same set of events. From the DOM side, `Document` implements the `Handler` concept to build a DOM according to the events. `Value` supports a `Value::Accept(Handler&)` function, which traverses the DOM to publish events.
+
+With this design, SAX is not dependent on DOM. Even `Reader` and `Writer` have no dependencies between them. This provides flexibility to chain event publisher and handlers. Besides, `Value` does not depend on SAX either. So, in addition to stringifying a DOM to JSON, user may also stringify it to an XML writer, or do anything else.
+
+## Utility Classes
+
+Both SAX and DOM APIs depends on 3 additional concepts: `Allocator`, `Encoding` and `Stream`. Their inheritance hierarchy is shown as below.
+
+![Utility classes UML class diagram](diagram/utilityclass.png)
+
+# Value {#Value}
+
+`Value` (actually a typedef of `GenericValue<UTF8<>>`) is the core of DOM API. This section describes the design of it.
+
+## Data Layout {#DataLayout}
+
+`Value` is a [variant type](http://en.wikipedia.org/wiki/Variant_type). In RapidJSON's context, an instance of `Value` can contain 1 of 6 JSON value types. This is possible by using `union`. Each `Value` contains two members: `union Data data_` and an `unsigned flags_`. The `flags_` indicates the JSON type, and also additional information.
+
+The following tables show the data layout of each type. The 32-bit/64-bit columns indicates the size of the field in bytes.
+
+| Null              |                                  |32-bit|64-bit|
+|-------------------|----------------------------------|:----:|:----:|
+| (unused)          |                                  |4     |8     | 
+| (unused)          |                                  |4     |4     |
+| (unused)          |                                  |4     |4     |
+| `unsigned flags_` | `kNullType kNullFlag`            |4     |4     |
+
+| Bool              |                                                    |32-bit|64-bit|
+|-------------------|----------------------------------------------------|:----:|:----:|
+| (unused)          |                                                    |4     |8     | 
+| (unused)          |                                                    |4     |4     |
+| (unused)          |                                                    |4     |4     |
+| `unsigned flags_` | `kBoolType` (either `kTrueFlag` or `kFalseFlag`) |4     |4     |
+
+| String              |                                     |32-bit|64-bit|
+|---------------------|-------------------------------------|:----:|:----:|
+| `Ch* str`           | Pointer to the string (may own)     |4     |8     | 
+| `SizeType length`   | Length of string                    |4     |4     |
+| (unused)            |                                     |4     |4     |
+| `unsigned flags_`   | `kStringType kStringFlag ...`       |4     |4     |
+
+| Object              |                                     |32-bit|64-bit|
+|---------------------|-------------------------------------|:----:|:----:|
+| `Member* members`   | Pointer to array of members (owned) |4     |8     | 
+| `SizeType size`     | Number of members                   |4     |4     |
+| `SizeType capacity` | Capacity of members                 |4     |4     |
+| `unsigned flags_`   | `kObjectType kObjectFlag`           |4     |4     |
+
+| Array               |                                     |32-bit|64-bit|
+|---------------------|-------------------------------------|:----:|:----:|
+| `Value* values`     | Pointer to array of values (owned)  |4     |8     | 
+| `SizeType size`     | Number of values                    |4     |4     |
+| `SizeType capacity` | Capacity of values                  |4     |4     |
+| `unsigned flags_`   | `kArrayType kArrayFlag`             |4     |4     |
+
+| Number (Int)        |                                     |32-bit|64-bit|
+|---------------------|-------------------------------------|:----:|:----:|
+| `int i`             | 32-bit signed integer               |4     |4     | 
+| (zero padding)      | 0                                   |4     |4     |
+| (unused)            |                                     |4     |8     |
+| `unsigned flags_`   | `kNumberType kNumberFlag kIntFlag kInt64Flag ...` |4     |4     |
+
+| Number (UInt)       |                                     |32-bit|64-bit|
+|---------------------|-------------------------------------|:----:|:----:|
+| `unsigned u`        | 32-bit unsigned integer             |4     |4     | 
+| (zero padding)      | 0                                   |4     |4     |
+| (unused)            |                                     |4     |8     |
+| `unsigned flags_`   | `kNumberType kNumberFlag kUIntFlag kUInt64Flag ...` |4     |4     |
+
+| Number (Int64)      |                                     |32-bit|64-bit|
+|---------------------|-------------------------------------|:----:|:----:|
+| `int64_t i64`       | 64-bit signed integer               |8     |8     | 
+| (unused)            |                                     |4     |8     |
+| `unsigned flags_`   | `kNumberType kNumberFlag kInt64Flag ...`          |4     |4     |
+
+| Number (Uint64)     |                                     |32-bit|64-bit|
+|---------------------|-------------------------------------|:----:|:----:|
+| `uint64_t i64`      | 64-bit unsigned integer             |8     |8     | 
+| (unused)            |                                     |4     |8     |
+| `unsigned flags_`   | `kNumberType kNumberFlag kInt64Flag ...`          |4     |4     |
+
+| Number (Double)     |                                     |32-bit|64-bit|
+|---------------------|-------------------------------------|:----:|:----:|
+| `uint64_t i64`      | Double precision floating-point     |8     |8     | 
+| (unused)            |                                     |4     |8     |
+| `unsigned flags_`   | `kNumberType kNumberFlag kDoubleFlag` |4     |4     |
+
+Here are some notes:
+* To reduce memory consumption for 64-bit architecture, `SizeType` is typedef as `unsigned` instead of `size_t`.
+* Zero padding for 32-bit number may be placed after or before the actual type, according to the endianness. This makes possible for interpreting a 32-bit integer as a 64-bit integer, without any conversion.
+* An `Int` is always an `Int64`, but the converse is not always true.
+
+## Flags {#Flags}
+
+The 32-bit `flags_` contains both JSON type and other additional information. As shown in the above tables, each JSON type contains redundant `kXXXType` and `kXXXFlag`. This design is for optimizing the operation of testing bit-flags (`IsNumber()`) and obtaining a sequential number for each type (`GetType()`).
+
+String has two optional flags. `kCopyFlag` means that the string owns a copy of the string. `kInlineStrFlag` means using [Short-String Optimization](#ShortString).
+
+Number is a bit more complicated. For normal integer values, it can contain `kIntFlag`, `kUintFlag`,  `kInt64Flag` and/or `kUint64Flag`, according to the range of the integer. For numbers with fraction, and integers larger than 64-bit range, they will be stored as `double` with `kDoubleFlag`.
+
+## Short-String Optimization {#ShortString}
+
+ [Kosta](https://github.com/Kosta-Github) provided a very neat short-string optimization. The optimization idea is given as follow. Excluding the `flags_`, a `Value` has 12 or 16 bytes (32-bit or 64-bit) for storing actual data. Instead of storing a pointer to a string, it is possible to store short strings in these space internally. For encoding with 1-byte character type (e.g. `char`), it can store maximum 11 or 15 characters string inside the `Value` type.
+
+| ShortString (Ch=char) |                                   |32-bit|64-bit|
+|---------------------|-------------------------------------|:----:|:----:|
+| `Ch str[MaxChars]`  | String buffer                       |11    |15    | 
+| `Ch invLength`      | MaxChars - Length                   |1     |1     |
+| `unsigned flags_`   | `kStringType kStringFlag ...`       |4     |4     |
+
+A special technique is applied. Instead of storing the length of string directly, it stores (MaxChars - length). This makes it possible to store 11 characters with trailing `\0`.
+
+This optimization can reduce memory usage for copy-string. It can also improve cache-coherence thus improve runtime performance.
+
+# Allocator {#InternalAllocator}
+
+`Allocator` is a concept in RapidJSON:
+~~~cpp
+concept Allocator {
+    static const bool kNeedFree;    //!< Whether this allocator needs to call Free().
+
+    // Allocate a memory block.
+    // \param size of the memory block in bytes.
+    // \returns pointer to the memory block.
+    void* Malloc(size_t size);
+
+    // Resize a memory block.
+    // \param originalPtr The pointer to current memory block. Null pointer is permitted.
+    // \param originalSize The current size in bytes. (Design issue: since some allocator may not book-keep this, explicitly pass to it can save memory.)
+    // \param newSize the new size in bytes.
+    void* Realloc(void* originalPtr, size_t originalSize, size_t newSize);
+
+    // Free a memory block.
+    // \param pointer to the memory block. Null pointer is permitted.
+    static void Free(void *ptr);
+};
+~~~
+
+Note that `Malloc()` and `Realloc()` are member functions but `Free()` is static member function.
+
+## MemoryPoolAllocator {#MemoryPoolAllocator}
+
+`MemoryPoolAllocator` is the default allocator for DOM. It allocates but does not free memory. This is suitable for building a DOM tree.
+
+Internally, it allocates chunks of memory from the base allocator (by default `CrtAllocator`) and stores the chunks as a singly linked list. When user requests an allocation, it allocates memory from the following order:
+
+1. User supplied buffer if it is available. (See [User Buffer section in DOM](doc/dom.md))
+2. If user supplied buffer is full, use the current memory chunk.
+3. If the current block is full, allocate a new block of memory.
+
+# Parsing Optimization {#ParsingOptimization}
+
+## Skip Whitespaces with SIMD {#SkipwhitespaceWithSIMD}
+
+When parsing JSON from a stream, the parser needs to skip 4 whitespace characters:
+
+1. Space (`U+0020`)
+2. Character Tabulation (`U+0009`)
+3. Line Feed (`U+000A`)
+4. Carriage Return (`U+000D`)
+
+A simple implementation will be simply:
+~~~cpp
+void SkipWhitespace(InputStream& s) {
+    while (s.Peek() == ' ' || s.Peek() == '\n' || s.Peek() == '\r' || s.Peek() == '\t')
+        s.Take();
+}
+~~~
+
+However, this requires 4 comparisons and a few branching for each character. This was found to be a hot spot.
+
+To accelerate this process, SIMD was applied to compare 16 characters with 4 white spaces for each iteration. Currently RapidJSON supports SSE2, SSE4.2 and ARM Neon instructions for this. And it is only activated for UTF-8 memory streams, including string stream or *in situ* parsing.
+
+To enable this optimization, need to define `RAPIDJSON_SSE2`, `RAPIDJSON_SSE42` or `RAPIDJSON_NEON` before including `rapidjson.h`. Some compilers can detect the setting, as in `perftest.h`:
+
+~~~cpp
+// __SSE2__ and __SSE4_2__ are recognized by gcc, clang, and the Intel compiler.
+// We use -march=native with gmake to enable -msse2 and -msse4.2, if supported.
+// Likewise, __ARM_NEON is used to detect Neon.
+#if defined(__SSE4_2__)
+#  define RAPIDJSON_SSE42
+#elif defined(__SSE2__)
+#  define RAPIDJSON_SSE2
+#elif defined(__ARM_NEON)
+#  define RAPIDJSON_NEON
+#endif
+~~~
+
+Note that, these are compile-time settings. Running the executable on a machine without such instruction set support will make it crash.
+
+### Page boundary issue
+
+In an early version of RapidJSON, [an issue](https://code.google.com/archive/p/rapidjson/issues/104) reported that the `SkipWhitespace_SIMD()` causes crash very rarely (around 1 in 500,000). After investigation, it is suspected that `_mm_loadu_si128()` accessed bytes after `'\0'`, and across a protected page boundary.
+
+In [Intel® 64 and IA-32 Architectures Optimization Reference Manual
+](http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-optimization-manual.html), section 10.2.1:
+
+> To support algorithms requiring unaligned 128-bit SIMD memory accesses, memory buffer allocation by a caller function should consider adding some pad space so that a callee function can safely use the address pointer safely with unaligned 128-bit SIMD memory operations.
+> The minimal padding size should be the width of the SIMD register that might be used in conjunction with unaligned SIMD memory access.
+
+This is not feasible as RapidJSON should not enforce such requirement.
+
+To fix this issue, the routine currently processes bytes up to the next aligned address. After that, aligned reads are used to perform SIMD processing. Also see [#85](https://github.com/Tencent/rapidjson/issues/85).
+
+## Local Stream Copy {#LocalStreamCopy}
+
+During optimization, it is found that some compilers cannot localize some member data access of streams into local variables or registers. Experimental results show that for some stream types, making a copy of the stream and used it in inner-loop can improve performance. For example, the actual (non-SIMD) implementation of `SkipWhitespace()` is implemented as:
+
+~~~cpp
+template<typename InputStream>
+void SkipWhitespace(InputStream& is) {
+    internal::StreamLocalCopy<InputStream> copy(is);
+    InputStream& s(copy.s);
+
+    while (s.Peek() == ' ' || s.Peek() == '\n' || s.Peek() == '\r' || s.Peek() == '\t')
+        s.Take();
+}
+~~~
+
+Depending on the traits of stream, `StreamLocalCopy` will make (or not make) a copy of the stream object, use it locally and copy the states of stream back to the original stream.
+
+## Parsing to Double {#ParsingDouble}
+
+Parsing string into `double` is difficult. The standard library function `strtod()` can do the job but it is slow. By default, the parsers use normal precision setting. This has a maximum 3 [ULP](http://en.wikipedia.org/wiki/Unit_in_the_last_place) error and is implemented in `internal::StrtodNormalPrecision()`.
+
+When using `kParseFullPrecisionFlag`, the parser calls `internal::StrtodFullPrecision()` instead, and this function actually implements 3 versions of conversion methods.
+1. [Fast-Path](http://www.exploringbinary.com/fast-path-decimal-to-floating-point-conversion/).
+2. Custom DIY-FP implementation as in [double-conversion](https://github.com/floitsch/double-conversion).
+3. Big Integer Method as in (Clinger, William D. How to read floating point numbers accurately. Vol. 25. No. 6. ACM, 1990).
+
+If the first conversion method fails, it will try the second, and so on.
+
+# Generation Optimization {#GenerationOptimization}
+
+## Integer-to-String conversion {#itoa}
+
+The naive algorithm for integer-to-string conversion involves division per each decimal digit. We have implemented various implementations and evaluated them in [itoa-benchmark](https://github.com/miloyip/itoa-benchmark).
+
+Although the SSE2 version is the fastest, the difference is minor compared to the runner-up `branchlut`. And since `branchlut` is a pure C++ implementation, we adopt `branchlut` in RapidJSON.
+
+## Double-to-String conversion {#dtoa}
+
+Originally RapidJSON uses `snprintf(..., ..., "%g")`  to achieve double-to-string conversion. This is not accurate as the default precision is 6. Later we also find that this is slow and there is an alternative.
+
+Google's V8 [double-conversion](https://github.com/floitsch/double-conversion
+) implemented a newer, fast algorithm called Grisu3 (Loitsch, Florian. "Printing floating-point numbers quickly and accurately with integers." ACM Sigplan Notices 45.6 (2010): 233-243.).
+
+However, since it is not header-only, we implemented a header-only version of Grisu2. This algorithm guarantees that the result is always accurate. And in most of cases it produces the shortest (optimal) string representation.
+
+The header-only conversion function has been evaluated in [dtoa-benchmark](https://github.com/miloyip/dtoa-benchmark).
+
+# Parser {#Parser}
+
+## Iterative Parser {#IterativeParser}
+
+The iterative parser is a recursive descent LL(1) parser
+implemented in a non-recursive manner.
+
+### Grammar {#IterativeParserGrammar}
+
+The grammar used for this parser is based on strict JSON syntax:
+~~~~~~~~~~
+S -> array | object
+array -> [ values ]
+object -> { members }
+values -> non-empty-values | ε
+non-empty-values -> value addition-values
+addition-values -> ε | , non-empty-values
+members -> non-empty-members | ε
+non-empty-members -> member addition-members
+addition-members -> ε | , non-empty-members
+member -> STRING : value
+value -> STRING | NUMBER | NULL | BOOLEAN | object | array
+~~~~~~~~~~
+
+Note that left factoring is applied to non-terminals `values` and `members`
+to make the grammar be LL(1).
+
+### Parsing Table {#IterativeParserParsingTable}
+
+Based on the grammar, we can construct the FIRST and FOLLOW set.
+
+The FIRST set of non-terminals is listed below:
+
+|    NON-TERMINAL   |               FIRST              |
+|:-----------------:|:--------------------------------:|
+|       array       |                 [                |
+|       object      |                 {                |
+|       values      | ε STRING NUMBER NULL BOOLEAN { [ |
+|  addition-values  |              ε COMMA             |
+|      members      |             ε STRING             |
+|  addition-members |              ε COMMA             |
+|       member      |              STRING              |
+|       value       |  STRING NUMBER NULL BOOLEAN { [  |
+|         S         |                [ {               |
+| non-empty-members |              STRING              |
+|  non-empty-values |  STRING NUMBER NULL BOOLEAN { [  |
+
+The FOLLOW set is listed below:
+
+|    NON-TERMINAL   |  FOLLOW |
+|:-----------------:|:-------:|
+|         S         |    $    |
+|       array       | , $ } ] |
+|       object      | , $ } ] |
+|       values      |    ]    |
+|  non-empty-values |    ]    |
+|  addition-values  |    ]    |
+|      members      |    }    |
+| non-empty-members |    }    |
+|  addition-members |    }    |
+|       member      |   , }   |
+|       value       |  , } ]  |
+
+Finally the parsing table can be constructed from FIRST and FOLLOW set:
+
+|    NON-TERMINAL   |           [           |           {           |          ,          | : | ] | } |          STRING         |         NUMBER        |          NULL         |        BOOLEAN        |
+|:-----------------:|:---------------------:|:---------------------:|:-------------------:|:-:|:-:|:-:|:-----------------------:|:---------------------:|:---------------------:|:---------------------:|
+|         S         |         array         |         object        |                     |   |   |   |                         |                       |                       |                       |
+|       array       |       [ values ]      |                       |                     |   |   |   |                         |                       |                       |                       |
+|       object      |                       |      { members }      |                     |   |   |   |                         |                       |                       |                       |
+|       values      |    non-empty-values   |    non-empty-values   |                     |   | ε |   |     non-empty-values    |    non-empty-values   |    non-empty-values   |    non-empty-values   |
+|  non-empty-values | value addition-values | value addition-values |                     |   |   |   |  value addition-values  | value addition-values | value addition-values | value addition-values |
+|  addition-values  |                       |                       |  , non-empty-values |   | ε |   |                         |                       |                       |                       |
+|      members      |                       |                       |                     |   |   | ε |    non-empty-members    |                       |                       |                       |
+| non-empty-members |                       |                       |                     |   |   |   | member addition-members |                       |                       |                       |
+|  addition-members |                       |                       | , non-empty-members |   |   | ε |                         |                       |                       |                       |
+|       member      |                       |                       |                     |   |   |   |      STRING : value     |                       |                       |                       |
+|       value       |         array         |         object        |                     |   |   |   |          STRING         |         NUMBER        |          NULL         |        BOOLEAN        |
+
+There is a great [tool](http://hackingoff.com/compilers/predict-first-follow-set) for above grammar analysis.
+
+### Implementation {#IterativeParserImplementation}
+
+Based on the parsing table, a direct(or conventional) implementation
+that pushes the production body in reverse order
+while generating a production could work.
+
+In RapidJSON, several modifications(or adaptations to current design) are made to a direct implementation.
+
+First, the parsing table is encoded in a state machine in RapidJSON.
+States are constructed by the head and body of production.
+State transitions are constructed by production rules.
+Besides, extra states are added for productions involved with `array` and `object`.
+In this way the generation of array values or object members would be a single state transition,
+rather than several pop/push operations in the direct implementation.
+This also makes the estimation of stack size easier.
+
+The state diagram is shown as follows:
+
+![State Diagram](diagram/iterative-parser-states-diagram.png)
+
+Second, the iterative parser also keeps track of array's value count and object's member count
+in its internal stack, which may be different from a conventional implementation.
diff --git a/doc/internals.zh-cn.md b/doc/internals.zh-cn.md
new file mode 100644
index 0000000..ca3d297
--- /dev/null
+++ b/doc/internals.zh-cn.md
@@ -0,0 +1,363 @@
+# 内部架构
+
+本部分记录了一些设计和实现细节。
+
+[TOC]
+
+# 架构 {#Architecture}
+
+## SAX 和 DOM
+
+下面的 UML 图显示了 SAX 和 DOM 的基本关系。
+
+![架构 UML 类图](diagram/architecture.png)
+
+关系的核心是 `Handler` 概念。在 SAX 一边,`Reader` 从流解析 JSON 并将事件发送到 `Handler`。`Writer` 实现了 `Handler` 概念,用于处理相同的事件。在 DOM 一边,`Document` 实现了 `Handler` 概念,用于通过这些事件来构建 DOM。`Value` 支持了 `Value::Accept(Handler&)` 函数,它可以将 DOM 转换为事件进行发送。
+
+在这个设计,SAX 是不依赖于 DOM 的。甚至 `Reader` 和 `Writer` 之间也没有依赖。这提供了连接事件发送器和处理器的灵活性。除此之外,`Value` 也是不依赖于 SAX 的。所以,除了将 DOM 序列化为 JSON 之外,用户也可以将其序列化为 XML,或者做任何其他事情。
+
+## 工具类
+
+SAX 和 DOM API 都依赖于3个额外的概念:`Allocator`、`Encoding` 和 `Stream`。它们的继承层次结构如下图所示。
+
+![工具类 UML 类图](diagram/utilityclass.png)
+
+# 值(Value) {#Value}
+
+`Value` (实际上被定义为 `GenericValue<UTF8<>>`)是 DOM API 的核心。本部分描述了它的设计。
+
+## 数据布局 {#DataLayout}
+
+`Value` 是[可变类型](http://en.wikipedia.org/wiki/Variant_type)。在 RapidJSON 的上下文中,一个 `Value` 的实例可以包含6种 JSON 数据类型之一。通过使用 `union` ,这是可能实现的。每一个 `Value` 包含两个成员:`union Data data_` 和 `unsigned flags_`。`flags_` 表明了 JSON 类型,以及附加的信息。
+
+下表显示了所有类型的数据布局。32位/64位列表明了字段所占用的字节数。
+
+| Null              |                                  | 32位 | 64位 |
+|-------------------|----------------------------------|:----:|:----:|
+| (未使用)        |                                  |4     |8     |
+| (未使用)        |                                  |4     |4     |
+| (未使用)        |                                  |4     |4     |
+| `unsigned flags_` | `kNullType kNullFlag`            |4     |4     |
+
+| Bool              |                                                    | 32位 | 64位 |
+|-------------------|----------------------------------------------------|:----:|:----:|
+| (未使用)        |                                                    |4     |8     |
+| (未使用)        |                                                    |4     |4     |
+| (未使用)        |                                                    |4     |4     |
+| `unsigned flags_` | `kBoolType` (either `kTrueFlag` or `kFalseFlag`)   |4     |4     |
+
+| String              |                                     | 32位 | 64位 |
+|---------------------|-------------------------------------|:----:|:----:|
+| `Ch* str`           | 指向字符串的指针(可能拥有所有权)  |4     |8     |
+| `SizeType length`   | 字符串长度                          |4     |4     |
+| (未使用)          |                                     |4     |4     |
+| `unsigned flags_`   | `kStringType kStringFlag ...`       |4     |4     |
+
+| Object              |                                     | 32位 | 64位 |
+|---------------------|-------------------------------------|:----:|:----:|
+| `Member* members`   | 指向成员数组的指针(拥有所有权)    |4     |8     |
+| `SizeType size`     | 成员数量                            |4     |4     |
+| `SizeType capacity` | 成员容量                            |4     |4     |
+| `unsigned flags_`   | `kObjectType kObjectFlag`           |4     |4     |
+
+| Array               |                                     | 32位 | 64位 |
+|---------------------|-------------------------------------|:----:|:----:|
+| `Value* values`     | 指向值数组的指针(拥有所有权)      |4     |8     |
+| `SizeType size`     | 值数量                              |4     |4     |
+| `SizeType capacity` | 值容量                              |4     |4     |
+| `unsigned flags_`   | `kArrayType kArrayFlag`             |4     |4     |
+
+| Number (Int)        |                                     | 32位 | 64位 |
+|---------------------|-------------------------------------|:----:|:----:|
+| `int i`             | 32位有符号整数                      |4     |4     |
+| (零填充)          | 0                                   |4     |4     |
+| (未使用)          |                                     |4     |8     |
+| `unsigned flags_`   | `kNumberType kNumberFlag kIntFlag kInt64Flag ...` |4     |4     |
+
+| Number (UInt)       |                                     | 32位 | 64位 |
+|---------------------|-------------------------------------|:----:|:----:|
+| `unsigned u`        | 32位无符号整数                      |4     |4     |
+| (零填充)          | 0                                   |4     |4     |
+| (未使用)          |                                     |4     |8     |
+| `unsigned flags_`   | `kNumberType kNumberFlag kUIntFlag kUInt64Flag ...` |4     |4     |
+
+| Number (Int64)      |                                     | 32位 | 64位 |
+|---------------------|-------------------------------------|:----:|:----:|
+| `int64_t i64`       | 64位有符号整数                      |8     |8     |
+| (未使用)          |                                     |4     |8     |
+| `unsigned flags_`   | `kNumberType kNumberFlag kInt64Flag ...`          |4     |4     |
+
+| Number (Uint64)     |                                     | 32位 | 64位 |
+|---------------------|-------------------------------------|:----:|:----:|
+| `uint64_t i64`      | 64位无符号整数                      |8     |8     |
+| (未使用)          |                                     |4     |8     |
+| `unsigned flags_`   | `kNumberType kNumberFlag kInt64Flag ...`          |4     |4     |
+
+| Number (Double)     |                                     | 32位 | 64位 |
+|---------------------|-------------------------------------|:----:|:----:|
+| `uint64_t i64`      | 双精度浮点数                        |8     |8     |
+| (未使用)          |                                     |4     |8     |
+| `unsigned flags_`   |`kNumberType kNumberFlag kDoubleFlag`|4     |4     |
+
+这里有一些需要注意的地方:
+* 为了减少在64位架构上的内存消耗,`SizeType` 被定义为 `unsigned` 而不是 `size_t`。
+* 32位整数的零填充可能被放在实际类型的前面或后面,这依赖于字节序。这使得它可以将32位整数不经过任何转换就可以解释为64位整数。
+* `Int` 永远是 `Int64`,反之不然。
+
+## 标志 {#Flags}
+
+32位的 `flags_` 包含了 JSON 类型和其他信息。如前文中的表所述,每一种 JSON 类型包含了冗余的 `kXXXType` 和 `kXXXFlag`。这个设计是为了优化测试位标志(`IsNumber()`)和获取每一种类型的序列号(`GetType()`)。
+
+字符串有两个可选的标志。`kCopyFlag` 表明这个字符串拥有字符串拷贝的所有权。而 `kInlineStrFlag` 意味着使用了[短字符串优化](#ShortString)。
+
+数字更加复杂一些。对于普通的整数值,它可以包含 `kIntFlag`、`kUintFlag`、 `kInt64Flag` 和/或 `kUint64Flag`,这由整数的范围决定。带有小数或者超过64位所能表达的范围的整数的数字会被存储为带有 `kDoubleFlag` 的 `double`。
+
+## 短字符串优化 {#ShortString}
+
+[Kosta](https://github.com/Kosta-Github) 提供了很棒的短字符串优化。这个优化的思路如下所述。除去 `flags_` ,`Value` 有12或16字节(对于32位或64位)来存储实际的数据。这为在其内部直接存储短字符串而不是存储字符串的指针创造了可能。对于1字节的字符类型(例如 `char`),它可以在 `Value` 类型内部存储至多11或15个字符的字符串。
+
+|ShortString (Ch=char)|                                     | 32位 | 64位 |
+|---------------------|-------------------------------------|:----:|:----:|
+| `Ch str[MaxChars]`  | 字符串缓冲区                        |11    |15    |
+| `Ch invLength`      | MaxChars - Length                   |1     |1     |
+| `unsigned flags_`   | `kStringType kStringFlag ...`       |4     |4     |
+
+这里使用了一项特殊的技术。它存储了 (MaxChars - length) 而不直接存储字符串的长度。这使得存储11个字符并且带有后缀 `\0` 成为可能。
+
+这个优化可以减少字符串拷贝内存占用。它也改善了缓存一致性,并进一步提高了运行时性能。
+
+# 分配器(Allocator) {#InternalAllocator}
+
+`Allocator` 是 RapidJSON 中的概念:
+~~~cpp
+concept Allocator {
+    static const bool kNeedFree;    //!< 表明这个分配器是否需要调用 Free()。
+
+    // 申请内存块。
+    // \param size 内存块的大小,以字节记。
+    // \returns 指向内存块的指针。
+    void* Malloc(size_t size);
+
+    // 调整内存块的大小。
+    // \param originalPtr 当前内存块的指针。空指针是被允许的。
+    // \param originalSize 当前大小,以字节记。(设计问题:因为有些分配器可能不会记录它,显式地传递它可以节约内存。)
+    // \param newSize 新大小,以字节记。
+    void* Realloc(void* originalPtr, size_t originalSize, size_t newSize);
+
+    // 释放内存块。
+    // \param ptr 指向内存块的指针。空指针是被允许的。
+    static void Free(void *ptr);
+};
+~~~
+
+需要注意的是 `Malloc()` 和 `Realloc()` 是成员函数而 `Free()` 是静态成员函数。
+
+## MemoryPoolAllocator {#MemoryPoolAllocator}
+
+`MemoryPoolAllocator` 是 DOM 的默认内存分配器。它只申请内存而不释放内存。这对于构建 DOM 树非常合适。
+
+在它的内部,它从基础的内存分配器申请内存块(默认为 `CrtAllocator`)并将这些内存块存储为单向链表。当用户请求申请内存,它会遵循下列步骤来申请内存:
+
+1. 如果可用,使用用户提供的缓冲区。(见 [User Buffer section in DOM](doc/dom.md))
+2. 如果用户提供的缓冲区已满,使用当前内存块。
+3. 如果当前内存块已满,申请新的内存块。
+
+# 解析优化 {#ParsingOptimization}
+
+## 使用 SIMD 跳过空格 {#SkipwhitespaceWithSIMD}
+
+当从流中解析 JSON 时,解析器需要跳过4种空格字符:
+
+1. 空格 (`U+0020`)
+2. 制表符 (`U+0009`)
+3. 换行 (`U+000A`)
+4. 回车 (`U+000D`)
+
+这是一份简单的实现:
+~~~cpp
+void SkipWhitespace(InputStream& s) {
+    while (s.Peek() == ' ' || s.Peek() == '\n' || s.Peek() == '\r' || s.Peek() == '\t')
+        s.Take();
+}
+~~~
+
+但是,这需要对每个字符进行4次比较以及一些分支。这被发现是一个热点。
+
+为了加速这一处理,RapidJSON 使用 SIMD 来在一次迭代中比较16个字符和4个空格。目前 RapidJSON 支持 SSE2 , SSE4.2 和 ARM Neon 指令。同时它也只会对 UTF-8 内存流启用,包括字符串流或 *原位* 解析。
+
+你可以通过在包含 `rapidjson.h` 之前定义 `RAPIDJSON_SSE2` , `RAPIDJSON_SSE42` 或 `RAPIDJSON_NEON` 来启用这个优化。一些编译器可以检测这个设置,如 `perftest.h`:
+
+~~~cpp
+// __SSE2__ 和 __SSE4_2__ 可被 gcc、clang 和 Intel 编译器识别:
+// 如果支持的话,我们在 gmake 中使用了 -march=native 来启用 -msse2 和 -msse4.2
+// 同样的, __ARM_NEON 被用于识别Neon
+#if defined(__SSE4_2__)
+#  define RAPIDJSON_SSE42
+#elif defined(__SSE2__)
+#  define RAPIDJSON_SSE2
+#elif defined(__ARM_NEON)
+#  define RAPIDJSON_NEON
+#endif
+~~~
+
+需要注意的是,这是编译期的设置。在不支持这些指令的机器上运行可执行文件会使它崩溃。
+
+### 页面对齐问题
+
+在 RapidJSON 的早期版本中,被报告了[一个问题](https://code.google.com/archive/p/rapidjson/issues/104):`SkipWhitespace_SIMD()` 会罕见地导致崩溃(约五十万分之一的几率)。在调查之后,怀疑是 `_mm_loadu_si128()` 访问了 `'\0'` 之后的内存,并越过被保护的页面边界。
+
+在 [Intel® 64 and IA-32 Architectures Optimization Reference Manual
+](http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-optimization-manual.html) 中,章节 10.2.1:
+
+> 为了支持需要非对齐的128位 SIMD 内存访问的算法,调用者的内存缓冲区申请应当考虑添加一些填充空间,这样被调用的函数可以安全地将地址指针用于未对齐的128位 SIMD 内存操作。
+> 在结合非对齐的 SIMD 内存操作中,最小的对齐大小应该等于 SIMD 寄存器的大小。
+
+对于 RapidJSON 来说,这显然是不可行的,因为 RapidJSON 不应当强迫用户进行内存对齐。
+
+为了修复这个问题,当前的代码会先按字节处理直到下一个对齐的地址。在这之后,使用对齐读取来进行 SIMD 处理。见 [#85](https://github.com/Tencent/rapidjson/issues/85)。
+
+## 局部流拷贝 {#LocalStreamCopy}
+
+在优化的过程中,我们发现一些编译器不能将访问流的一些成员数据放入局部变量或者寄存器中。测试结果显示,对于一些流类型,创建流的拷贝并将其用于内层循环中可以改善性能。例如,实际(非 SIMD)的 `SkipWhitespace()` 被实现为:
+
+~~~cpp
+template<typename InputStream>
+void SkipWhitespace(InputStream& is) {
+    internal::StreamLocalCopy<InputStream> copy(is);
+    InputStream& s(copy.s);
+
+    while (s.Peek() == ' ' || s.Peek() == '\n' || s.Peek() == '\r' || s.Peek() == '\t')
+        s.Take();
+}
+~~~
+
+基于流的特征,`StreamLocalCopy` 会创建(或不创建)流对象的拷贝,在局部使用它并将流的状态拷贝回原来的流。
+
+## 解析为双精度浮点数 {#ParsingDouble}
+
+将字符串解析为 `double` 并不简单。标准库函数 `strtod()` 可以胜任这项工作,但它比较缓慢。默认情况下,解析器使用默认的精度设置。这最多有 3[ULP](http://en.wikipedia.org/wiki/Unit_in_the_last_place) 的误差,并实现在 `internal::StrtodNormalPrecision()` 中。
+
+当使用 `kParseFullPrecisionFlag` 时,解析器会改为调用 `internal::StrtodFullPrecision()` ,这个函数会自动调用三个版本的转换。
+1. [Fast-Path](http://www.exploringbinary.com/fast-path-decimal-to-floating-point-conversion/)。
+2. [double-conversion](https://github.com/floitsch/double-conversion) 中的自定义 DIY-FP 实现。
+3. (Clinger, William D. How to read floating point numbers accurately. Vol. 25. No. 6. ACM, 1990) 中的大整数算法。
+
+如果第一个转换方法失败,则尝试使用第二种方法,以此类推。
+
+# 生成优化 {#GenerationOptimization}
+
+## 整数到字符串的转换 {#itoa}
+
+整数到字符串转换的朴素算法需要对每一个十进制位进行一次除法。我们实现了若干版本并在 [itoa-benchmark](https://github.com/miloyip/itoa-benchmark) 中对它们进行了评估。
+
+虽然 SSE2 版本是最快的,但它和第二快的 `branchlut` 差距不大。而且 `branchlut` 是纯C++实现,所以我们在 RapidJSON 中使用了 `branchlut`。
+
+## 双精度浮点数到字符串的转换 {#dtoa}
+
+原来 RapidJSON 使用 `snprintf(..., ..., "%g")` 来进行双精度浮点数到字符串的转换。这是不准确的,因为默认的精度是6。随后我们发现它很缓慢,而且有其它的替代品。
+
+Google 的 V8 [double-conversion](https://github.com/floitsch/double-conversion
+) 实现了更新的、快速的被称为 Grisu3 的算法(Loitsch, Florian. "Printing floating-point numbers quickly and accurately with integers." ACM Sigplan Notices 45.6 (2010): 233-243.)。
+
+然而,这个实现不是仅头文件的,所以我们实现了一个仅头文件的 Grisu2 版本。这个算法保证了结果永远精确。而且在大多数情况下,它会生成最短的(可选)字符串表示。
+
+这个仅头文件的转换函数在 [dtoa-benchmark](https://github.com/miloyip/dtoa-benchmark) 中进行评估。
+
+# 解析器 {#Parser}
+
+## 迭代解析 {#IterativeParser}
+
+迭代解析器是一个以非递归方式实现的递归下降的 LL(1) 解析器。
+
+### 语法 {#IterativeParserGrammar}
+
+解析器使用的语法是基于严格 JSON 语法的:
+~~~~~~~~~~
+S -> array | object
+array -> [ values ]
+object -> { members }
+values -> non-empty-values | ε
+non-empty-values -> value addition-values
+addition-values -> ε | , non-empty-values
+members -> non-empty-members | ε
+non-empty-members -> member addition-members
+addition-members -> ε | , non-empty-members
+member -> STRING : value
+value -> STRING | NUMBER | NULL | BOOLEAN | object | array
+~~~~~~~~~~
+
+注意到左因子被加入了非终结符的 `values` 和 `members` 来保证语法是 LL(1) 的。
+
+### 解析表 {#IterativeParserParsingTable}
+
+基于这份语法,我们可以构造 FIRST 和 FOLLOW 集合。
+
+非终结符的 FIRST 集合如下所示:
+
+|    NON-TERMINAL   |               FIRST              |
+|:-----------------:|:--------------------------------:|
+|       array       |                 [                |
+|       object      |                 {                |
+|       values      | ε STRING NUMBER NULL BOOLEAN { [ |
+|  addition-values  |              ε COMMA             |
+|      members      |             ε STRING             |
+|  addition-members |              ε COMMA             |
+|       member      |              STRING              |
+|       value       |  STRING NUMBER NULL BOOLEAN { [  |
+|         S         |                [ {               |
+| non-empty-members |              STRING              |
+|  non-empty-values |  STRING NUMBER NULL BOOLEAN { [  |
+
+FOLLOW 集合如下所示:
+
+|    NON-TERMINAL   |  FOLLOW |
+|:-----------------:|:-------:|
+|         S         |    $    |
+|       array       | , $ } ] |
+|       object      | , $ } ] |
+|       values      |    ]    |
+|  non-empty-values |    ]    |
+|  addition-values  |    ]    |
+|      members      |    }    |
+| non-empty-members |    }    |
+|  addition-members |    }    |
+|       member      |   , }   |
+|       value       |  , } ]  |
+
+最终可以从 FIRST 和 FOLLOW 集合生成解析表:
+
+|    NON-TERMINAL   |           [           |           {           |          ,          | : | ] | } |          STRING         |         NUMBER        |          NULL         |        BOOLEAN        |
+|:-----------------:|:---------------------:|:---------------------:|:-------------------:|:-:|:-:|:-:|:-----------------------:|:---------------------:|:---------------------:|:---------------------:|
+|         S         |         array         |         object        |                     |   |   |   |                         |                       |                       |                       |
+|       array       |       [ values ]      |                       |                     |   |   |   |                         |                       |                       |                       |
+|       object      |                       |      { members }      |                     |   |   |   |                         |                       |                       |                       |
+|       values      |    non-empty-values   |    non-empty-values   |                     |   | ε |   |     non-empty-values    |    non-empty-values   |    non-empty-values   |    non-empty-values   |
+|  non-empty-values | value addition-values | value addition-values |                     |   |   |   |  value addition-values  | value addition-values | value addition-values | value addition-values |
+|  addition-values  |                       |                       |  , non-empty-values |   | ε |   |                         |                       |                       |                       |
+|      members      |                       |                       |                     |   |   | ε |    non-empty-members    |                       |                       |                       |
+| non-empty-members |                       |                       |                     |   |   |   | member addition-members |                       |                       |                       |
+|  addition-members |                       |                       | , non-empty-members |   |   | ε |                         |                       |                       |                       |
+|       member      |                       |                       |                     |   |   |   |      STRING : value     |                       |                       |                       |
+|       value       |         array         |         object        |                     |   |   |   |          STRING         |         NUMBER        |          NULL         |        BOOLEAN        |
+
+对于上面的语法分析,这里有一个很棒的[工具](http://hackingoff.com/compilers/predict-first-follow-set)。
+
+### 实现 {#IterativeParserImplementation}
+
+基于这份解析表,一个直接的(常规的)将规则反向入栈的实现可以正常工作。
+
+在 RapidJSON 中,对直接的实现进行了一些修改:
+
+首先,在 RapidJSON 中,这份解析表被编码为状态机。
+规则由头部和主体组成。
+状态转换由规则构造。
+除此之外,额外的状态被添加到与 `array` 和 `object` 有关的规则。
+通过这种方式,生成数组值或对象成员可以只用一次状态转移便可完成,
+而不需要在直接的实现中的多次出栈/入栈操作。
+这也使得估计栈的大小更加容易。
+
+状态图如下所示:
+
+![状态图](diagram/iterative-parser-states-diagram.png)
+
+第二,迭代解析器也在内部栈保存了数组的值个数和对象成员的数量,这也与传统的实现不同。
diff --git a/doc/logo/rapidjson.png b/doc/logo/rapidjson.png
new file mode 100644
index 0000000..b3b2f80
--- /dev/null
+++ b/doc/logo/rapidjson.png
Binary files differ
diff --git a/doc/logo/rapidjson.svg b/doc/logo/rapidjson.svg
new file mode 100644
index 0000000..9708d5a
--- /dev/null
+++ b/doc/logo/rapidjson.svg
@@ -0,0 +1,119 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:xlink="http://www.w3.org/1999/xlink"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="217.15039"
+   height="60.831055"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="rapidjson.svg">
+  <defs
+     id="defs4">
+    <linearGradient
+       id="linearGradient3801">
+      <stop
+         style="stop-color:#000000;stop-opacity:1;"
+         offset="0"
+         id="stop3803" />
+      <stop
+         style="stop-color:#000000;stop-opacity:0;"
+         offset="1"
+         id="stop3805" />
+    </linearGradient>
+    <linearGradient
+       inkscape:collect="always"
+       xlink:href="#linearGradient3801"
+       id="linearGradient3807"
+       x1="81.25"
+       y1="52.737183"
+       x2="122.25"
+       y2="52.737183"
+       gradientUnits="userSpaceOnUse"
+       gradientTransform="matrix(1.2378503,0,0,1.1662045,-226.99279,64.427324)" />
+    <linearGradient
+       inkscape:collect="always"
+       xlink:href="#linearGradient3801"
+       id="linearGradient3935"
+       gradientUnits="userSpaceOnUse"
+       gradientTransform="matrix(-1.4768835,0,0,2.2904698,246.48785,81.630301)"
+       x1="81.25"
+       y1="52.737183"
+       x2="115.96579"
+       y2="48.439766" />
+    <linearGradient
+       inkscape:collect="always"
+       xlink:href="#linearGradient3801"
+       id="linearGradient3947"
+       gradientUnits="userSpaceOnUse"
+       gradientTransform="matrix(1.2378503,0,0,1.1662045,-226.99279,-10.072676)"
+       x1="81.25"
+       y1="52.737183"
+       x2="122.25"
+       y2="52.737183" />
+  </defs>
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="2"
+     inkscape:cx="207.8959"
+     inkscape:cy="-3.2283687"
+     inkscape:document-units="px"
+     inkscape:current-layer="layer1"
+     showgrid="false"
+     inkscape:window-width="1920"
+     inkscape:window-height="1137"
+     inkscape:window-x="-8"
+     inkscape:window-y="-8"
+     inkscape:window-maximized="1"
+     fit-margin-top="10"
+     fit-margin-left="10"
+     fit-margin-right="10"
+     fit-margin-bottom="10" />
+  <metadata
+     id="metadata7">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-39.132812,-38.772339)">
+    <text
+       sodipodi:linespacing="125%"
+       id="text3939"
+       y="79.862183"
+       x="147.5"
+       style="font-size:20px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Microsoft JhengHei;-inkscape-font-specification:Microsoft JhengHei"
+       xml:space="preserve"><tspan
+         style="font-size:48px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Inconsolata;-inkscape-font-specification:Inconsolata"
+         y="79.862183"
+         x="147.5"
+         id="tspan3941"
+         sodipodi:role="line"><tspan
+           id="tspan3943"
+           style="font-size:42px;font-style:oblique;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Segoe UI;-inkscape-font-specification:Segoe UI Oblique">Rapid</tspan><tspan
+           id="tspan3945"
+           style="font-weight:bold;-inkscape-font-specification:Inconsolata Bold">JSON</tspan></tspan></text>
+  </g>
+</svg>
diff --git a/doc/misc/DoxygenLayout.xml b/doc/misc/DoxygenLayout.xml
new file mode 100644
index 0000000..b7c9586
--- /dev/null
+++ b/doc/misc/DoxygenLayout.xml
@@ -0,0 +1,194 @@
+<doxygenlayout version="1.0">
+  <!-- Generated by doxygen 1.8.7 -->
+  <!-- Navigation index tabs for HTML output -->
+  <navindex>
+    <tab type="mainpage" visible="yes" title=""/>
+    <tab type="pages" visible="yes" title="" intro=""/>
+    <tab type="modules" visible="yes" title="" intro=""/>
+    <tab type="namespaces" visible="yes" title="">
+      <tab type="namespacelist" visible="yes" title="" intro=""/>
+      <tab type="namespacemembers" visible="yes" title="" intro=""/>
+    </tab>
+    <tab type="classes" visible="yes" title="">
+      <tab type="classlist" visible="yes" title="" intro=""/>
+      <tab type="classindex" visible="$ALPHABETICAL_INDEX" title=""/> 
+      <tab type="hierarchy" visible="yes" title="" intro=""/>
+      <tab type="classmembers" visible="yes" title="" intro=""/>
+    </tab>
+    <tab type="files" visible="yes" title="">
+      <tab type="filelist" visible="yes" title="" intro=""/>
+      <tab type="globals" visible="yes" title="" intro=""/>
+    </tab>
+    <tab type="examples" visible="yes" title="" intro=""/>  
+  </navindex>
+
+  <!-- Layout definition for a class page -->
+  <class>
+    <briefdescription visible="yes"/>
+    <includes visible="$SHOW_INCLUDE_FILES"/>
+    <inheritancegraph visible="$CLASS_GRAPH"/>
+    <collaborationgraph visible="$COLLABORATION_GRAPH"/>
+    <memberdecl>
+      <nestedclasses visible="yes" title=""/>
+      <publictypes title=""/>
+      <services title=""/>
+      <interfaces title=""/>
+      <publicslots title=""/>
+      <signals title=""/>
+      <publicmethods title=""/>
+      <publicstaticmethods title=""/>
+      <publicattributes title=""/>
+      <publicstaticattributes title=""/>
+      <protectedtypes title=""/>
+      <protectedslots title=""/>
+      <protectedmethods title=""/>
+      <protectedstaticmethods title=""/>
+      <protectedattributes title=""/>
+      <protectedstaticattributes title=""/>
+      <packagetypes title=""/>
+      <packagemethods title=""/>
+      <packagestaticmethods title=""/>
+      <packageattributes title=""/>
+      <packagestaticattributes title=""/>
+      <properties title=""/>
+      <events title=""/>
+      <privatetypes title=""/>
+      <privateslots title=""/>
+      <privatemethods title=""/>
+      <privatestaticmethods title=""/>
+      <privateattributes title=""/>
+      <privatestaticattributes title=""/>
+      <friends title=""/>
+      <related title="" subtitle=""/>
+      <membergroups visible="yes"/>
+    </memberdecl>
+    <detaileddescription title=""/>
+    <memberdef>
+      <inlineclasses title=""/>
+      <typedefs title=""/>
+      <enums title=""/>
+      <services title=""/>
+      <interfaces title=""/>
+      <constructors title=""/>
+      <functions title=""/>
+      <related title=""/>
+      <variables title=""/>
+      <properties title=""/>
+      <events title=""/>
+    </memberdef>
+    <allmemberslink visible="yes"/>
+    <usedfiles visible="$SHOW_USED_FILES"/>
+    <authorsection visible="yes"/>
+  </class>
+
+  <!-- Layout definition for a namespace page -->
+  <namespace>
+    <briefdescription visible="yes"/>
+    <memberdecl>
+      <nestednamespaces visible="yes" title=""/>
+      <constantgroups visible="yes" title=""/>
+      <classes visible="yes" title=""/>
+      <typedefs title=""/>
+      <enums title=""/>
+      <functions title=""/>
+      <variables title=""/>
+      <membergroups visible="yes"/>
+    </memberdecl>
+    <detaileddescription title=""/>
+    <memberdef>
+      <inlineclasses title=""/>
+      <typedefs title=""/>
+      <enums title=""/>
+      <functions title=""/>
+      <variables title=""/>
+    </memberdef>
+    <authorsection visible="yes"/>
+  </namespace>
+
+  <!-- Layout definition for a file page -->
+  <file>
+    <briefdescription visible="yes"/>
+    <includes visible="$SHOW_INCLUDE_FILES"/>
+    <includegraph visible="$INCLUDE_GRAPH"/>
+    <includedbygraph visible="$INCLUDED_BY_GRAPH"/>
+    <sourcelink visible="yes"/>
+    <memberdecl>
+      <classes visible="yes" title=""/>
+      <namespaces visible="yes" title=""/>
+      <constantgroups visible="yes" title=""/>
+      <defines title=""/>
+      <typedefs title=""/>
+      <enums title=""/>
+      <functions title=""/>
+      <variables title=""/>
+      <membergroups visible="yes"/>
+    </memberdecl>
+    <detaileddescription title=""/>
+    <memberdef>
+      <inlineclasses title=""/>
+      <defines title=""/>
+      <typedefs title=""/>
+      <enums title=""/>
+      <functions title=""/>
+      <variables title=""/>
+    </memberdef>
+    <authorsection/>
+  </file>
+
+  <!-- Layout definition for a group page -->
+  <group>
+    <briefdescription visible="yes"/>
+    <groupgraph visible="$GROUP_GRAPHS"/>
+    <memberdecl>
+      <nestedgroups visible="yes" title=""/>
+      <dirs visible="yes" title=""/>
+      <files visible="yes" title=""/>
+      <namespaces visible="yes" title=""/>
+      <classes visible="yes" title=""/>
+      <defines title=""/>
+      <typedefs title=""/>
+      <enums title=""/>
+      <enumvalues title=""/>
+      <functions title=""/>
+      <variables title=""/>
+      <signals title=""/>
+      <publicslots title=""/>
+      <protectedslots title=""/>
+      <privateslots title=""/>
+      <events title=""/>
+      <properties title=""/>
+      <friends title=""/>
+      <membergroups visible="yes"/>
+    </memberdecl>
+    <detaileddescription title=""/>
+    <memberdef>
+      <pagedocs/>
+      <inlineclasses title=""/>
+      <defines title=""/>
+      <typedefs title=""/>
+      <enums title=""/>
+      <enumvalues title=""/>
+      <functions title=""/>
+      <variables title=""/>
+      <signals title=""/>
+      <publicslots title=""/>
+      <protectedslots title=""/>
+      <privateslots title=""/>
+      <events title=""/>
+      <properties title=""/>
+      <friends title=""/>
+    </memberdef>
+    <authorsection visible="yes"/>
+  </group>
+
+  <!-- Layout definition for a directory page -->
+  <directory>
+    <briefdescription visible="yes"/>
+    <directorygraph visible="yes"/>
+    <memberdecl>
+      <dirs visible="yes"/>
+      <files visible="yes"/>
+    </memberdecl>
+    <detaileddescription title=""/>
+  </directory>
+</doxygenlayout>
diff --git a/doc/misc/doxygenextra.css b/doc/misc/doxygenextra.css
new file mode 100644
index 0000000..bd67375
--- /dev/null
+++ b/doc/misc/doxygenextra.css
@@ -0,0 +1,274 @@
+body code {
+	margin: 0;
+	border: 1px solid #ddd;
+	background-color: #f8f8f8;
+	border-radius: 3px;
+	padding: 0;
+}
+
+a {
+	color: #4183c4;
+}
+
+a.el {
+	font-weight: normal;
+}
+
+body, table, div, p, dl {
+	color: #333333;
+	font-family: Helvetica, arial, freesans, clean, sans-serif, 'Segoe UI Emoji', 'Segoe UI Symbol';
+	font-size: 15px;
+	font-style: normal;
+	font-variant: normal;
+	font-weight: normal;
+	line-height: 25.5px;
+}
+
+body {
+	background-color: #eee;
+}
+
+div.header {
+	background-image: none;
+	background-color: white;
+	margin: 0px;
+	border: 0px;
+}
+
+div.headertitle {
+	width: 858px;
+	margin: 30px;
+	padding: 0px;
+}
+
+div.toc {
+	background-color: #f8f8f8;
+	border-color: #ddd;
+	margin-right: 10px;
+	margin-left: 20px;
+}
+div.toc h3 {
+	color: #333333;
+	font-family: Helvetica, arial, freesans, clean, sans-serif, 'Segoe UI Emoji', 'Segoe UI Symbol';
+	font-size: 18px;
+	font-style: normal;
+	font-variant: normal;
+	font-weight: normal;
+}
+div.toc li {
+	color: #333333;
+	font-family: Helvetica, arial, freesans, clean, sans-serif, 'Segoe UI Emoji', 'Segoe UI Symbol';
+	font-size: 12px;
+	font-style: normal;
+	font-variant: normal;
+	font-weight: normal;
+}
+
+.title {
+	font-size: 2.5em;
+	line-height: 63.75px;
+	border-bottom: 1px solid #ddd;
+	margin-bottom: 15px;
+	margin-left: 0px;
+	margin-right: 0px;
+	margin-top: 0px;
+}
+
+.summary {
+	float: none !important;
+	width: auto !important;
+	padding-top: 10px;
+	padding-right: 10px !important;
+}
+
+.summary + .headertitle .title {
+	font-size: 1.5em;
+	line-height: 2.0em;
+}
+
+body h1 {
+	font-size: 2em;
+	line-height: 1.7;
+	border-bottom: 1px solid #eee;
+	margin: 1em 0 15px;
+	padding: 0;
+	overflow: hidden;
+}
+
+body h2 {
+	font-size: 1.5em;
+	line-height: 1.7;
+	margin: 1em 0 15px;
+	padding: 0;
+}
+
+pre.fragment {
+	font-family: Consolas, 'Liberation Mono', Menlo, Courier, monospace;
+	font-size: 13px;
+	font-style: normal;
+	font-variant: normal;
+	font-weight: normal;
+	line-height: 19px;
+}
+
+table.doxtable th {
+	background-color: #f8f8f8;
+	color: #333333;
+	font-size: 15px;
+}
+
+table.doxtable td, table.doxtable th {
+	border: 1px solid #ddd;
+}
+
+#doc-content {
+	background-color: #fff;
+	width: 918px;
+	height: auto !important;
+	margin-left: 270px !important;
+}
+
+div.contents {
+	width: 858px;
+	margin: 30px;
+}
+
+div.line {
+	font-family: Consolas, 'Liberation Mono', Menlo, Courier, monospace;
+	font-size: 13px;
+	font-style: normal;
+	font-variant: normal;
+	font-weight: normal;
+	line-height: 19px;	
+}
+
+tt, code, pre {
+	font-family: Consolas, "Liberation Mono", Menlo, Courier, monospace;
+	font-size: 12px;
+}
+
+div.fragment {
+	background-color: #f8f8f8;
+	border: 1px solid #ddd;
+	font-size: 13px;
+	line-height: 19px;
+	overflow: auto;
+	padding: 6px 10px;
+	border-radius: 3px;
+}
+
+#topbanner {
+	position: fixed;
+	margin: 15px;
+	z-index: 101;
+}
+
+#projectname
+{
+	font-family: Helvetica, arial, freesans, clean, sans-serif, 'Segoe UI Emoji', 'Segoe UI Symbol';
+	font-size: 38px;
+	font-weight: bold;
+	line-height: 63.75px;
+	margin: 0px;
+	padding: 2px 0px;
+}
+    
+#projectbrief
+{
+	font-family: Helvetica, arial, freesans, clean, sans-serif, 'Segoe UI Emoji', 'Segoe UI Symbol';
+	font-size: 16px;
+	line-height: 22.4px;
+	margin: 0px 0px 13px 0px;
+	padding: 2px;
+}
+
+/* side bar and search */
+
+#side-nav
+{
+	padding: 10px 0px 20px 20px;
+	border-top: 60px solid #2980b9;
+	background-color: #343131;
+	width: 250px !important;
+	height: 100% !important;
+	position: fixed;
+}
+
+#nav-tree
+{
+	background-color: transparent;
+	background-image: none;
+	height: 100% !important;
+}
+
+#nav-tree .label
+{
+	font-family: Helvetica, arial, freesans, clean, sans-serif, 'Segoe UI Emoji', 'Segoe UI Symbol';
+	line-height: 25.5px;	
+	font-size: 15px;
+}
+
+#nav-tree
+{
+	color: #b3b3b3;
+}
+
+#nav-tree .selected {
+	background-image: none;
+}
+
+#nav-tree a
+{
+	color: #b3b3b3;
+}
+
+#github
+{
+	position: fixed;
+	left: auto;
+	right: auto;
+	width: 250px;
+}
+
+#MSearchBox
+{
+	margin: 20px;
+	left: 40px;
+	right: auto;
+	position: fixed;
+	width: 180px;
+}
+
+#MSearchField
+{
+	width: 121px;
+}
+
+#MSearchResultsWindow
+{
+	left: 45px !important;
+}
+
+#nav-sync
+{
+	display: none;
+}
+
+.ui-resizable .ui-resizable-handle
+{
+	width: 0px;
+}
+
+#nav-path
+{
+	display: none;
+}
+
+/* external link icon */
+div.contents a[href ^= "http"]:after {
+     content: " " url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAVklEQVR4Xn3PgQkAMQhDUXfqTu7kTtkpd5RA8AInfArtQ2iRXFWT2QedAfttj2FsPIOE1eCOlEuoWWjgzYaB/IkeGOrxXhqB+uA9Bfcm0lAZuh+YIeAD+cAqSz4kCMUAAAAASUVORK5CYII=);
+}
+
+.githublogo {
+	content: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAyRpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuNS1jMDIxIDc5LjE1NDkxMSwgMjAxMy8xMC8yOS0xMTo0NzoxNiAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8iIHhtbG5zOnN0UmVmPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VSZWYjIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtcE1NOkRvY3VtZW50SUQ9InhtcC5kaWQ6RERCMUIwOUY4NkNFMTFFM0FBNTJFRTMzNTJEMUJDNDYiIHhtcE1NOkluc3RhbmNlSUQ9InhtcC5paWQ6RERCMUIwOUU4NkNFMTFFM0FBNTJFRTMzNTJEMUJDNDYiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENTNiAoTWFjaW50b3NoKSI+IDx4bXBNTTpEZXJpdmVkRnJvbSBzdFJlZjppbnN0YW5jZUlEPSJ4bXAuaWlkOkU1MTc4QTJBOTlBMDExRTI5QTE1QkMxMDQ2QTg5MDREIiBzdFJlZjpkb2N1bWVudElEPSJ4bXAuZGlkOkU1MTc4QTJCOTlBMDExRTI5QTE1QkMxMDQ2QTg5MDREIi8+IDwvcmRmOkRlc2NyaXB0aW9uPiA8L3JkZjpSREY+IDwveDp4bXBtZXRhPiA8P3hwYWNrZXQgZW5kPSJyIj8+jUqS1wAAApVJREFUeNq0l89rE1EQx3e3gVJoSPzZeNEWPKgHoa0HBak0iHiy/4C3WvDmoZ56qJ7txVsPQu8qlqqHIhRKJZceesmhioQEfxTEtsoSpdJg1u/ABJ7Pmc1m8zLwgWTmzcw3L+/te+tHUeQltONgCkyCi2AEDHLsJ6iBMlgHL8FeoqokoA2j4CloRMmtwTmj7erHBXPgCWhG6a3JNXKdCiDl1cidVbXZkJoXQRi5t5BrxwoY71FzU8S4JuAIqFkJ2+BFSlEh525b/hr3+k/AklDkNsf6wTT4yv46KIMNpsy+iMdMc47HNWxbsgVcUn7FmLAzzoFAWDsBx+wVP6bUpp5ewI+DOeUx0Wd9D8F70BTGNjkWtqnhmT1JQAHcUgZd8Lo3rQb1LAT8eJVUfgGvHQigGp+V2Z0iAUUl8QH47kAA1XioxIo+bRN8OG8F/oBjwv+Z1nJgX5jpdzQDw0LCjsPmrcW7I/iHScCAEDj03FtD8A0EyuChHgg4KTlJQF3wZ7WELppnBX+dBFSVpJsOBWi1qiRgSwnOgoyD5hmuJdkWCVhTgnTvW3AgYIFrSbZGh0UW/Io5Vp+DQoK7o80pztWMemZbgxeNwCNwDbw1fIfgGZjhU6xPaJgBV8BdsMw5cbZoHsenwYFxkZzl83xTSKTiviCAfCsJLysH3POfC8m8NegyGAGfLP/VmGmfSChgXroR0RSWjEFv2J/nG84cuKFMf4sTCZqXuJd4KaXFVjEG3+tw4eXbNK/YC9oXXs3O8NY8y99L4BX
Y5cvLY/Bb2VZ58EOJVcB18DHJq9lRsKr8inyKGVjlmh29mtHs3AHfuhCwy1vXT/Nu2GKQt+UHsGdctyX6eQyNvc+5sfX9Dl7Pe2J/BRgAl2CpwmrsHR0AAAAASUVORK5CYII=);
+}
\ No newline at end of file
diff --git a/doc/misc/footer.html b/doc/misc/footer.html
new file mode 100644
index 0000000..77f1131
--- /dev/null
+++ b/doc/misc/footer.html
@@ -0,0 +1,11 @@
+<!-- HTML footer for doxygen 1.8.7-->
+<!-- start footer part -->
+<!--BEGIN GENERATE_TREEVIEW-->
+<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
+  <ul>
+    $navpath
+  </ul>
+</div>
+<!--END GENERATE_TREEVIEW-->
+</body>
+</html>
diff --git a/doc/misc/header.html b/doc/misc/header.html
new file mode 100644
index 0000000..a89ba46
--- /dev/null
+++ b/doc/misc/header.html
@@ -0,0 +1,24 @@
+<!-- HTML header for doxygen 1.8.7-->
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<meta name="generator" content="Doxygen $doxygenversion"/>
+<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
+<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
+<link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="$relpath^jquery.js"></script>
+<script type="text/javascript" src="$relpath^dynsections.js"></script>
+$treeview
+$search
+$mathjax
+<link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" />
+$extrastylesheet
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="topbanner"><a href="https://github.com/Tencent/rapidjson" title="RapidJSON GitHub"><i class="githublogo"></i></a></div>
+$searchbox
+<!--END TITLEAREA-->
+<!-- end header part -->
diff --git a/doc/npm.md b/doc/npm.md
new file mode 100644
index 0000000..6f4e85a
--- /dev/null
+++ b/doc/npm.md
@@ -0,0 +1,31 @@
+## NPM
+
+# package.json {#package}
+
+~~~~~~~~~~js
+{
+  ...
+  "dependencies": {
+    ...
+    "rapidjson": "git@github.com:Tencent/rapidjson.git"
+  },
+  ...
+  "gypfile": true
+}
+~~~~~~~~~~
+
+# binding.gyp {#binding}
+
+~~~~~~~~~~js
+{
+  ...
+  'targets': [
+    {
+      ...
+      'include_dirs': [
+        '<!(node -e \'require("rapidjson")\')'
+      ]
+    }
+  ]
+}
+~~~~~~~~~~
diff --git a/doc/performance.md b/doc/performance.md
new file mode 100644
index 0000000..6f9e1bf
--- /dev/null
+++ b/doc/performance.md
@@ -0,0 +1,26 @@
+# Performance
+
+There is a [native JSON benchmark collection] [1] which evaluates speed, memory usage and code size of various operations among 37 JSON libraries.
+
+[1]: https://github.com/miloyip/nativejson-benchmark
+
+The old performance article for RapidJSON 0.1 is provided [here](https://code.google.com/p/rapidjson/wiki/Performance).
+
+Additionally, you may refer to the following third-party benchmarks.
+
+## Third-party benchmarks
+
+* [Basic benchmarks for miscellaneous C++ JSON parsers and generators](https://github.com/mloskot/json_benchmark) by Mateusz Loskot (Jun 2013)
+ * [casablanca](https://casablanca.codeplex.com/)
+ * [json_spirit](https://github.com/cierelabs/json_spirit)
+ * [jsoncpp](http://jsoncpp.sourceforge.net/)
+ * [libjson](http://sourceforge.net/projects/libjson/)
+ * [rapidjson](https://github.com/Tencent/rapidjson/)
+ * [QJsonDocument](http://qt-project.org/doc/qt-5.0/qtcore/qjsondocument.html)
+ 
+* [JSON Parser Benchmarking](http://chadaustin.me/2013/01/json-parser-benchmarking/) by Chad Austin (Jan 2013)
+ * [sajson](https://github.com/chadaustin/sajson)
+ * [rapidjson](https://github.com/Tencent/rapidjson/)
+ * [vjson](https://code.google.com/p/vjson/)
+ * [YAJL](http://lloyd.github.com/yajl/)
+ * [Jansson](http://www.digip.org/jansson/)
diff --git a/doc/performance.zh-cn.md b/doc/performance.zh-cn.md
new file mode 100644
index 0000000..2322c9c
--- /dev/null
+++ b/doc/performance.zh-cn.md
@@ -0,0 +1,26 @@
+# 性能
+
+有一个 [native JSON benchmark collection][1] 项目,能评估 37 个 JSON 库在不同操作下的速度、內存用量及代码大小。
+
+[1]: https://github.com/miloyip/nativejson-benchmark
+
+RapidJSON 0.1 版本的性能测试文章位于 [这里](https://code.google.com/p/rapidjson/wiki/Performance).
+
+此外,你也可以参考以下这些第三方的评测。
+
+## 第三方评测
+
+* [Basic benchmarks for miscellaneous C++ JSON parsers and generators](https://github.com/mloskot/json_benchmark) by Mateusz Loskot (Jun 2013)
+ * [casablanca](https://casablanca.codeplex.com/)
+ * [json_spirit](https://github.com/cierelabs/json_spirit)
+ * [jsoncpp](http://jsoncpp.sourceforge.net/)
+ * [libjson](http://sourceforge.net/projects/libjson/)
+ * [rapidjson](https://github.com/Tencent/rapidjson/)
+ * [QJsonDocument](http://qt-project.org/doc/qt-5.0/qtcore/qjsondocument.html)
+ 
+* [JSON Parser Benchmarking](http://chadaustin.me/2013/01/json-parser-benchmarking/) by Chad Austin (Jan 2013)
+ * [sajson](https://github.com/chadaustin/sajson)
+ * [rapidjson](https://github.com/Tencent/rapidjson/)
+ * [vjson](https://code.google.com/p/vjson/)
+ * [YAJL](http://lloyd.github.com/yajl/)
+ * [Jansson](http://www.digip.org/jansson/)
diff --git a/doc/pointer.md b/doc/pointer.md
new file mode 100644
index 0000000..9a0e5ca
--- /dev/null
+++ b/doc/pointer.md
@@ -0,0 +1,234 @@
+# Pointer
+
+(This feature was released in v1.1.0)
+
+JSON Pointer is a standardized ([RFC6901]) way to select a value inside a JSON Document (DOM). This can be analogous to XPath for XML document. However, JSON Pointer is much simpler, and a single JSON Pointer only points to a single value.
+
+Using RapidJSON's implementation of JSON Pointer can simplify some manipulations of the DOM.
+
+[TOC]
+
+# JSON Pointer {#JsonPointer}
+
+A JSON Pointer is a list of zero-to-many tokens, each prefixed by `/`. Each token can be a string or a number. For example, given a JSON:
+~~~javascript
+{
+    "foo" : ["bar", "baz"],
+    "pi" : 3.1416
+}
+~~~
+
+The following JSON Pointers resolve this JSON as:
+
+1. `"/foo"` → `[ "bar", "baz" ]`
+2. `"/foo/0"` → `"bar"`
+3. `"/foo/1"` → `"baz"`
+4. `"/pi"` → `3.1416`
+
+Note that, an empty JSON Pointer `""` (zero token) resolves to the whole JSON.
+
+# Basic Usage {#BasicUsage}
+
+The following example code is self-explanatory.
+
+~~~cpp
+#include "rapidjson/pointer.h"
+
+// ...
+Document d;
+
+// Create DOM by Set()
+Pointer("/project").Set(d, "RapidJSON");
+Pointer("/stars").Set(d, 10);
+
+// { "project" : "RapidJSON", "stars" : 10 }
+
+// Access DOM by Get(). It return nullptr if the value does not exist.
+if (Value* stars = Pointer("/stars").Get(d))
+    stars->SetInt(stars->GetInt() + 1);
+
+// { "project" : "RapidJSON", "stars" : 11 }
+
+// Set() and Create() automatically generate parents if not exist.
+Pointer("/a/b/0").Create(d);
+
+// { "project" : "RapidJSON", "stars" : 11, "a" : { "b" : [ null ] } }
+
+// GetWithDefault() returns reference. And it deep clones the default value.
+Value& hello = Pointer("/hello").GetWithDefault(d, "world");
+
+// { "project" : "RapidJSON", "stars" : 11, "a" : { "b" : [ null ] }, "hello" : "world" }
+
+// Swap() is similar to Set()
+Value x("C++");
+Pointer("/hello").Swap(d, x);
+
+// { "project" : "RapidJSON", "stars" : 11, "a" : { "b" : [ null ] }, "hello" : "C++" }
+// x becomes "world"
+
+// Erase a member or element, return true if the value exists
+bool success = Pointer("/a").Erase(d);
+assert(success);
+
+// { "project" : "RapidJSON", "stars" : 10 }
+~~~
+
+# Helper Functions {#HelperFunctions}
+
+Since object-oriented calling convention may be non-intuitive, RapidJSON also provides helper functions, which just wrap the member functions with free-functions.
+
+The following example does exactly the same as the above one.
+
+~~~cpp
+Document d;
+
+SetValueByPointer(d, "/project", "RapidJSON");
+SetValueByPointer(d, "/stars", 10);
+
+if (Value* stars = GetValueByPointer(d, "/stars"))
+    stars->SetInt(stars->GetInt() + 1);
+
+CreateValueByPointer(d, "/a/b/0");
+
+Value& hello = GetValueByPointerWithDefault(d, "/hello", "world");
+
+Value x("C++");
+SwapValueByPointer(d, "/hello", x);
+
+bool success = EraseValueByPointer(d, "/a");
+assert(success);
+~~~
+
+The conventions are shown here for comparison:
+
+1. `Pointer(source).<Method>(root, ...)`
+2. `<Method>ValueByPointer(root, Pointer(source), ...)`
+3. `<Method>ValueByPointer(root, source, ...)`
+
+# Resolving Pointer {#ResolvingPointer}
+
+`Pointer::Get()` or `GetValueByPointer()` function does not modify the DOM. If the tokens cannot match a value in the DOM, it returns `nullptr`. User can use this to check whether a value exists.
+
+Note that, numerical tokens can represent an array index or member name. The resolving process will match the values according to the types of value.
+
+~~~javascript
+{
+    "0" : 123,
+    "1" : [456]
+}
+~~~
+
+1. `"/0"` → `123`
+2. `"/1/0"` → `456`
+
+The token `"0"` is treated as member name in the first pointer. It is treated as an array index in the second pointer.
+
+The other functions, including `Create()`, `GetWithDefault()`, `Set()` and `Swap()`, will change the DOM. These functions will always succeed. They will create the parent values if they do not exist. If the parent values do not match the tokens, they will also be forced to change their type. Changing the type also means full removal of that DOM subtree.
+
+Parsing the above JSON into `d`, 
+
+~~~cpp
+SetValueByPointer(d, "/1/a", 789); // { "0" : 123, "1" : { "a" : 789 } }
+~~~
+
+## Resolving Minus Sign Token
+
+Besides, [RFC6901] defines a special token `-` (single minus sign), which represents the past-the-end element of an array. `Get()` only treats this token as a member name `"-"`. Yet the other functions can resolve this for arrays, equivalent to calling `Value::PushBack()` on the array.
+
+~~~cpp
+Document d;
+d.Parse("{\"foo\":[123]}");
+SetValueByPointer(d, "/foo/-", 456); // { "foo" : [123, 456] }
+SetValueByPointer(d, "/-", 789);    // { "foo" : [123, 456], "-" : 789 }
+~~~
+
+## Resolving Document and Value
+
+When using `p.Get(root)` or `GetValueByPointer(root, p)`, `root` is a (const) `Value&`. That means, it can be a subtree of the DOM.
+
+The other functions have two groups of signature. One group uses `Document& document` as parameter, another one uses `Value& root`. The first group uses `document.GetAllocator()` for creating values. And the second group needs user to supply an allocator, like the functions in DOM.
+
+All examples above do not require an allocator parameter, because the first parameter is a `Document&`. But if you want to resolve a pointer to a subtree, you need to supply the allocator as in the following example:
+
+~~~cpp
+class Person {
+public:
+    Person() {
+        document_ = new Document();
+        // CreateValueByPointer() here no need allocator
+        SetLocation(CreateValueByPointer(*document_, "/residence"), ...);
+        SetLocation(CreateValueByPointer(*document_, "/office"), ...);
+    };
+
+private:
+    void SetLocation(Value& location, const char* country, const char* addresses[2]) {
+        Value::Allocator& a = document_->GetAllocator();
+        // SetValueByPointer() here need allocator
+        SetValueByPointer(location, "/country", country, a);
+        SetValueByPointer(location, "/address/0", addresses[0], a);
+        SetValueByPointer(location, "/address/1", addresses[1], a);
+    }
+
+    // ...
+
+    Document* document_;
+};
+~~~
+
+`Erase()` or `EraseValueByPointer()` does not need allocator. And they return `true` if the value is erased successfully.
+
+# Error Handling {#ErrorHandling}
+
+A `Pointer` parses a source string in its constructor. If there is parsing error, `Pointer::IsValid()` returns `false`. And you can use `Pointer::GetParseErrorCode()` and `GetParseErrorOffset()` to retrieve the error information.
+
+Note that, all resolving functions assume a valid pointer. Resolving with an invalid pointer causes an assertion failure.
+
+# URI Fragment Representation {#URIFragment}
+
+In addition to the string representation of JSON pointer that we are using till now, [RFC6901] also defines the URI fragment representation of JSON pointer. URI fragment is specified in [RFC3986] "Uniform Resource Identifier (URI): Generic Syntax".
+
+The main differences are that the URI fragment always has a `#` (pound sign) in the beginning, and some characters are encoded by percent-encoding in UTF-8 sequence. For example, the following table shows different C/C++ string literals of different representations.
+
+String Representation | URI Fragment Representation | Pointer Tokens (UTF-8)
+----------------------|-----------------------------|------------------------
+`"/foo/0"`            | `"#/foo/0"`                 | `{"foo", 0}`
+`"/a~1b"`             | `"#/a~1b"`                  | `{"a/b"}`
+`"/m~0n"`             | `"#/m~0n"`                  | `{"m~n"}`
+`"/ "`                | `"#/%20"`                   | `{" "}`
+`"/\0"`               | `"#/%00"`                   | `{"\0"}`
+`"/€"`                | `"#/%E2%82%AC"`             | `{"€"}`
+
+RapidJSON fully supports URI fragment representation. It automatically detects the pound sign during parsing.
+
+# Stringify
+
+You may also stringify a `Pointer` to a string or other output streams. This can be done by:
+
+~~~
+Pointer p(...);
+StringBuffer sb;
+p.Stringify(sb);
+std::cout << sb.GetString() << std::endl;
+~~~
+
+It can also stringify to URI fragment representation by `StringifyUriFragment()`.
+
+# User-Supplied Tokens {#UserSuppliedTokens}
+
+If a pointer will be resolved multiple times, it should be constructed once, and then applied to different DOMs or at different times. This reduces the time and memory allocation of constructing `Pointer` multiple times.
+
+We can go one step further, to completely eliminate the parsing process and dynamic memory allocation, we can establish the token array directly:
+
+~~~cpp
+#define NAME(s) { s, sizeof(s) / sizeof(s[0]) - 1, kPointerInvalidIndex }
+#define INDEX(i) { #i, sizeof(#i) - 1, i }
+
+static const Pointer::Token kTokens[] = { NAME("foo"), INDEX(123) };
+static const Pointer p(kTokens, sizeof(kTokens) / sizeof(kTokens[0]));
+// Equivalent to static const Pointer p("/foo/123");
+~~~
+
+This may be useful for memory constrained systems.
+
+[RFC3986]: https://tools.ietf.org/html/rfc3986
+[RFC6901]: https://tools.ietf.org/html/rfc6901
diff --git a/doc/pointer.zh-cn.md b/doc/pointer.zh-cn.md
new file mode 100644
index 0000000..239569d
--- /dev/null
+++ b/doc/pointer.zh-cn.md
@@ -0,0 +1,234 @@
+# Pointer
+
+(本功能于 v1.1.0 发布)
+
+JSON Pointer 是一个标准化([RFC6901])的方式去选取一个 JSON Document(DOM)中的值。这类似于 XML 的 XPath。然而,JSON Pointer 简单得多,而且每个 JSON Pointer 仅指向单个值。
+
+使用 RapidJSON 的 JSON Pointer 实现能简化一些 DOM 的操作。
+
+[TOC]
+
+# JSON Pointer {#JsonPointer}
+
+一个 JSON Pointer 由一串(零至多个)token 所组成,每个 token 都有 `/` 前缀。每个 token 可以是一个字符串或数字。例如,给定一个 JSON:
+~~~javascript
+{
+    "foo" : ["bar", "baz"],
+    "pi" : 3.1416
+}
+~~~
+
+以下的 JSON Pointer 解析为:
+
+1. `"/foo"` → `[ "bar", "baz" ]`
+2. `"/foo/0"` → `"bar"`
+3. `"/foo/1"` → `"baz"`
+4. `"/pi"` → `3.1416`
+
+要注意,一个空 JSON Pointer `""` (零个 token)解析为整个 JSON。
+
+# 基本使用方法 {#BasicUsage}
+
+以下的代码范例不解自明。
+
+~~~cpp
+#include "rapidjson/pointer.h"
+
+// ...
+Document d;
+
+// 使用 Set() 创建 DOM
+Pointer("/project").Set(d, "RapidJSON");
+Pointer("/stars").Set(d, 10);
+
+// { "project" : "RapidJSON", "stars" : 10 }
+
+// 使用 Get() 访问 DOM。若该值不存在则返回 nullptr。
+if (Value* stars = Pointer("/stars").Get(d))
+    stars->SetInt(stars->GetInt() + 1);
+
+// { "project" : "RapidJSON", "stars" : 11 }
+
+// Set() 和 Create() 自动生成父值(如果它们不存在)。
+Pointer("/a/b/0").Create(d);
+
+// { "project" : "RapidJSON", "stars" : 11, "a" : { "b" : [ null ] } }
+
+// GetWithDefault() 返回引用。若该值不存在则会深拷贝缺省值。
+Value& hello = Pointer("/hello").GetWithDefault(d, "world");
+
+// { "project" : "RapidJSON", "stars" : 11, "a" : { "b" : [ null ] }, "hello" : "world" }
+
+// Swap() 和 Set() 相似
+Value x("C++");
+Pointer("/hello").Swap(d, x);
+
+// { "project" : "RapidJSON", "stars" : 11, "a" : { "b" : [ null ] }, "hello" : "C++" }
+// x 变成 "world"
+
+// 删去一个成员或元素,若值存在返回 true
+bool success = Pointer("/a").Erase(d);
+assert(success);
+
+// { "project" : "RapidJSON", "stars" : 10 }
+~~~
+
+# 辅助函数 {#HelperFunctions}
+
+由于面向对象的调用习惯可能不符直觉,RapidJSON 也提供了一些辅助函数,它们把成员函数包装成自由函数。
+
+以下的例子与上面例子所做的事情完全相同。
+
+~~~cpp
+Document d;
+
+SetValueByPointer(d, "/project", "RapidJSON");
+SetValueByPointer(d, "/stars", 10);
+
+if (Value* stars = GetValueByPointer(d, "/stars"))
+    stars->SetInt(stars->GetInt() + 1);
+
+CreateValueByPointer(d, "/a/b/0");
+
+Value& hello = GetValueByPointerWithDefault(d, "/hello", "world");
+
+Value x("C++");
+SwapValueByPointer(d, "/hello", x);
+
+bool success = EraseValueByPointer(d, "/a");
+assert(success);
+~~~
+
+以下对比 3 种调用方式:
+
+1. `Pointer(source).<Method>(root, ...)`
+2. `<Method>ValueByPointer(root, Pointer(source), ...)`
+3. `<Method>ValueByPointer(root, source, ...)`
+
+# 解析 Pointer {#ResolvingPointer}
+
+`Pointer::Get()` 或 `GetValueByPointer()` 函数并不修改 DOM。若那些 token 不能匹配 DOM 里的值,这些函数便返回 `nullptr`。使用者可利用这个方法来检查一个值是否存在。
+
+注意,数值 token 可表示数组索引或成员名字。解析过程中会按值的类型来匹配。
+
+~~~javascript
+{
+    "0" : 123,
+    "1" : [456]
+}
+~~~
+
+1. `"/0"` → `123`
+2. `"/1/0"` → `456`
+
+Token `"0"` 在第一个 pointer 中被当作成员名字。它在第二个 pointer 中被当作数组索引。
+
+其他函数会改变 DOM,包括 `Create()`、`GetWithDefault()`、`Set()`、`Swap()`。这些函数总是成功的。若一些父值不存在,就会创建它们。若父值类型不匹配 token,也会强行改变其类型。改变类型也意味着完全移除其 DOM 子树的内容。
+
+例如,把上面的 JSON 解译至 `d` 之后,
+
+~~~cpp
+SetValueByPointer(d, "/1/a", 789); // { "0" : 123, "1" : { "a" : 789 } }
+~~~
+
+## 解析负号 token
+
+另外,[RFC6901] 定义了一个特殊 token `-` (单个负号),用于表示数组最后元素的下一个元素。 `Get()` 只会把此 token 当作成员名字 '"-"'。而其他函数则会以此解析数组,等同于对数组调用 `Value::PushBack()` 。
+
+~~~cpp
+Document d;
+d.Parse("{\"foo\":[123]}");
+SetValueByPointer(d, "/foo/-", 456); // { "foo" : [123, 456] }
+SetValueByPointer(d, "/-", 789);    // { "foo" : [123, 456], "-" : 789 }
+~~~
+
+## 解析 Document 及 Value
+
+当使用 `p.Get(root)` 或 `GetValueByPointer(root, p)`,`root` 是一个(常数) `Value&`。这意味着,它也可以是 DOM 里的一个子树。
+
+其他函数有两组签名。一组使用 `Document& document` 作为参数,另一组使用 `Value& root`。第一组使用 `document.GetAllocator()` 去创建值,而第二组则需要使用者提供一个 allocator,如同 DOM 里的函数。
+
+以上例子都不需要 allocator 参数,因为它的第一个参数是 `Document&`。但如果你需要对一个子树进行解析,就需要如下面的例子般提供 allocator:
+
+~~~cpp
+class Person {
+public:
+    Person() {
+        document_ = new Document();
+        // CreateValueByPointer() here no need allocator
+        SetLocation(CreateValueByPointer(*document_, "/residence"), ...);
+        SetLocation(CreateValueByPointer(*document_, "/office"), ...);
+    };
+
+private:
+    void SetLocation(Value& location, const char* country, const char* addresses[2]) {
+        Value::Allocator& a = document_->GetAllocator();
+        // SetValueByPointer() here need allocator
+        SetValueByPointer(location, "/country", country, a);
+        SetValueByPointer(location, "/address/0", addresses[0], a);
+        SetValueByPointer(location, "/address/1", addresses[1], a);
+    }
+
+    // ...
+
+    Document* document_;
+};
+~~~
+
+`Erase()` 或 `EraseValueByPointer()` 不需要 allocator。而且它们成功删除值之后会返回 `true`。
+
+# 错误处理 {#ErrorHandling}
+
+`Pointer` 在其建构函数里会解译源字符串。若有解析错误,`Pointer::IsValid()` 返回 `false`。你可使用 `Pointer::GetParseErrorCode()` 和 `GetParseErrorOffset()` 去获取错误信息。
+
+要注意的是,所有解析函数都假设 pointer 是合法的。对一个非法 pointer 解析会造成断言失败。
+
+# URI 片段表示方式 {#URIFragment}
+
+除了我们一直在使用的字符串方式表示 JSON pointer,[RFC6901] 也定义了一个 JSON Pointer 的 URI 片段(fragment)表示方式。URI 片段是定义于 [RFC3986] "Uniform Resource Identifier (URI): Generic Syntax"。
+
+URI 片段的主要分别是必然以 `#` (pound sign)开头,而一些字符也会以百分比编码成 UTF-8 序列。例如,以下的表展示了不同表示法下的 C/C++ 字符串常数。
+
+字符串表示方式 | URI 片段表示方式 | Pointer Tokens (UTF-8)
+----------------------|-----------------------------|------------------------
+`"/foo/0"`            | `"#/foo/0"`                 | `{"foo", 0}`
+`"/a~1b"`             | `"#/a~1b"`                  | `{"a/b"}`
+`"/m~0n"`             | `"#/m~0n"`                  | `{"m~n"}`
+`"/ "`                | `"#/%20"`                   | `{" "}`
+`"/\0"`               | `"#/%00"`                   | `{"\0"}`
+`"/€"`                | `"#/%E2%82%AC"`             | `{"€"}`
+
+RapidJSON 完全支持 URI 片段表示方式。它在解译时会自动检测 `#` 号。
+
+# 字符串化
+
+你也可以把一个 `Pointer` 字符串化,储存于字符串或其他输出流。例如:
+
+~~~
+Pointer p(...);
+StringBuffer sb;
+p.Stringify(sb);
+std::cout << sb.GetString() << std::endl;
+~~~
+
+使用 `StringifyUriFragment()` 可以把 pointer 字符串化为 URI 片段表示法。
+
+# 使用者提供的 tokens {#UserSuppliedTokens}
+
+若一个 pointer 会用于多次解析,它应该只被创建一次,然后再施于不同的 DOM ,或在不同时间做解析。这样可以避免多次创建 `Pointer`,节省时间和内存分配。
+
+我们甚至可以再更进一步,完全消去解析过程及动态内存分配。我们可以直接生成 token 数组:
+
+~~~cpp
+#define NAME(s) { s, sizeof(s) / sizeof(s[0]) - 1, kPointerInvalidIndex }
+#define INDEX(i) { #i, sizeof(#i) - 1, i }
+
+static const Pointer::Token kTokens[] = { NAME("foo"), INDEX(123) };
+static const Pointer p(kTokens, sizeof(kTokens) / sizeof(kTokens[0]));
+// Equivalent to static const Pointer p("/foo/123");
+~~~
+
+这种做法可能适合内存受限的系统。
+
+[RFC3986]: https://tools.ietf.org/html/rfc3986
+[RFC6901]: https://tools.ietf.org/html/rfc6901
diff --git a/doc/sax.md b/doc/sax.md
new file mode 100644
index 0000000..874361f
--- /dev/null
+++ b/doc/sax.md
@@ -0,0 +1,509 @@
+# SAX
+
+The term "SAX" originated from [Simple API for XML](http://en.wikipedia.org/wiki/Simple_API_for_XML). We borrowed this term for JSON parsing and generation.
+
+In RapidJSON, `Reader` (typedef of `GenericReader<...>`) is the SAX-style parser for JSON, and `Writer` (typedef of `GenericWriter<...>`) is the SAX-style generator for JSON.
+
+[TOC]
+
+# Reader {#Reader}
+
+`Reader` parses a JSON from a stream. While it reads characters from the stream, it analyzes the characters according to the syntax of JSON, and publishes events to a handler.
+
+For example, here is a JSON.
+
+~~~~~~~~~~js
+{
+    "hello": "world",
+    "t": true ,
+    "f": false,
+    "n": null,
+    "i": 123,
+    "pi": 3.1416,
+    "a": [1, 2, 3, 4]
+}
+~~~~~~~~~~
+
+When a `Reader` parses this JSON, it publishes the following events to the handler sequentially:
+
+~~~~~~~~~~
+StartObject()
+Key("hello", 5, true)
+String("world", 5, true)
+Key("t", 1, true)
+Bool(true)
+Key("f", 1, true)
+Bool(false)
+Key("n", 1, true)
+Null()
+Key("i", 1, true)
+Uint(123)
+Key("pi", 2, true)
+Double(3.1416)
+Key("a", 1, true)
+StartArray()
+Uint(1)
+Uint(2)
+Uint(3)
+Uint(4)
+EndArray(4)
+EndObject(7)
+~~~~~~~~~~
+
+These events can be easily matched with the JSON, but some event parameters need further explanation. Let's see the `simplereader` example which produces exactly the same output as above:
+
+~~~~~~~~~~cpp
+#include "rapidjson/reader.h"
+#include <iostream>
+
+using namespace rapidjson;
+using namespace std;
+
+struct MyHandler : public BaseReaderHandler<UTF8<>, MyHandler> {
+    bool Null() { cout << "Null()" << endl; return true; }
+    bool Bool(bool b) { cout << "Bool(" << boolalpha << b << ")" << endl; return true; }
+    bool Int(int i) { cout << "Int(" << i << ")" << endl; return true; }
+    bool Uint(unsigned u) { cout << "Uint(" << u << ")" << endl; return true; }
+    bool Int64(int64_t i) { cout << "Int64(" << i << ")" << endl; return true; }
+    bool Uint64(uint64_t u) { cout << "Uint64(" << u << ")" << endl; return true; }
+    bool Double(double d) { cout << "Double(" << d << ")" << endl; return true; }
+    bool String(const char* str, SizeType length, bool copy) { 
+        cout << "String(" << str << ", " << length << ", " << boolalpha << copy << ")" << endl;
+        return true;
+    }
+    bool StartObject() { cout << "StartObject()" << endl; return true; }
+    bool Key(const char* str, SizeType length, bool copy) { 
+        cout << "Key(" << str << ", " << length << ", " << boolalpha << copy << ")" << endl;
+        return true;
+    }
+    bool EndObject(SizeType memberCount) { cout << "EndObject(" << memberCount << ")" << endl; return true; }
+    bool StartArray() { cout << "StartArray()" << endl; return true; }
+    bool EndArray(SizeType elementCount) { cout << "EndArray(" << elementCount << ")" << endl; return true; }
+};
+
+int main() {
+    const char json[] = " { \"hello\" : \"world\", \"t\" : true , \"f\" : false, \"n\": null, \"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3, 4] } ";
+
+    MyHandler handler;
+    Reader reader;
+    StringStream ss(json);
+    reader.Parse(ss, handler);
+}
+~~~~~~~~~~
+
+Note that RapidJSON uses templates to statically bind the `Reader` type and the handler type, instead of using classes with virtual functions. This paradigm can improve performance by inlining functions.
+
+## Handler {#Handler}
+
+As shown in the previous example, the user needs to implement a handler which consumes the events (via function calls) from the `Reader`. The handler must contain the following member functions.
+
+~~~~~~~~~~cpp
+class Handler {
+    bool Null();
+    bool Bool(bool b);
+    bool Int(int i);
+    bool Uint(unsigned i);
+    bool Int64(int64_t i);
+    bool Uint64(uint64_t i);
+    bool Double(double d);
+    bool RawNumber(const Ch* str, SizeType length, bool copy);
+    bool String(const Ch* str, SizeType length, bool copy);
+    bool StartObject();
+    bool Key(const Ch* str, SizeType length, bool copy);
+    bool EndObject(SizeType memberCount);
+    bool StartArray();
+    bool EndArray(SizeType elementCount);
+};
+~~~~~~~~~~
+
+`Null()` is called when the `Reader` encounters a JSON null value.
+
+`Bool(bool)` is called when the `Reader` encounters a JSON true or false value.
+
+When the `Reader` encounters a JSON number, it chooses a suitable C++ type mapping. And then it calls *one* function out of `Int(int)`, `Uint(unsigned)`, `Int64(int64_t)`, `Uint64(uint64_t)` and `Double(double)`. If `kParseNumbersAsStrings` is enabled, `Reader` will always call `RawNumber()` instead.
+
+`String(const char* str, SizeType length, bool copy)` is called when the `Reader` encounters a string. The first parameter is pointer to the string. The second parameter is the length of the string (excluding the null terminator). Note that RapidJSON supports null character `\0` inside a string. If such situation happens, `strlen(str) < length`. The last `copy` indicates whether the handler needs to make a copy of the string. For normal parsing, `copy = true`. Only when *insitu* parsing is used, `copy = false`. And be aware that the character type depends on the target encoding, which will be explained later.
+
+When the `Reader` encounters the beginning of an object, it calls `StartObject()`. An object in JSON is a set of name-value pairs. If the object contains members it first calls `Key()` for the name of member, and then calls functions depending on the type of the value. These calls of name-value pairs repeat until calling `EndObject(SizeType memberCount)`. Note that the `memberCount` parameter is just an aid for the handler; users who do not need this parameter may ignore it.
+
+Arrays are similar to objects, but simpler. At the beginning of an array, the `Reader` calls `StartArray()`. If there are elements, it calls functions according to the types of the elements. Similarly, in the last call `EndArray(SizeType elementCount)`, the parameter `elementCount` is just an aid for the handler.
+
+Every handler function returns a `bool`. Normally it should return `true`. If the handler encounters an error, it can return `false` to notify the event publisher to stop further processing.
+
+For example, when we parse a JSON with `Reader` and the handler detects that the JSON does not conform to the required schema, the handler can return `false` and let the `Reader` stop further parsing. This will place the `Reader` in an error state, with error code `kParseErrorTermination`.
+
+## GenericReader {#GenericReader}
+
+As mentioned before, `Reader` is a typedef of a template class `GenericReader`:
+
+~~~~~~~~~~cpp
+namespace rapidjson {
+
+template <typename SourceEncoding, typename TargetEncoding, typename Allocator = MemoryPoolAllocator<> >
+class GenericReader {
+    // ...
+};
+
+typedef GenericReader<UTF8<>, UTF8<> > Reader;
+
+} // namespace rapidjson
+~~~~~~~~~~
+
+The `Reader` uses UTF-8 as both source and target encoding. The source encoding means the encoding in the JSON stream. The target encoding means the encoding of the `str` parameter in `String()` calls. For example, to parse a UTF-8 stream and output UTF-16 string events, you can define a reader by:
+
+~~~~~~~~~~cpp
+GenericReader<UTF8<>, UTF16<> > reader;
+~~~~~~~~~~
+
+Note that, the default character type of `UTF16` is `wchar_t`. So this `reader` needs to call `String(const wchar_t*, SizeType, bool)` of the handler.
+
+The third template parameter `Allocator` is the allocator type for internal data structure (actually a stack).
+
+## Parsing {#SaxParsing}
+
+The main function of `Reader` is used to parse JSON. 
+
+~~~~~~~~~~cpp
+template <unsigned parseFlags, typename InputStream, typename Handler>
+bool Parse(InputStream& is, Handler& handler);
+
+// with parseFlags = kDefaultParseFlags
+template <typename InputStream, typename Handler>
+bool Parse(InputStream& is, Handler& handler);
+~~~~~~~~~~
+
+If an error occurs during parsing, it will return `false`. User can also call `bool HasParseError()`, `ParseErrorCode GetParseErrorCode()` and `size_t GetErrorOffset()` to obtain the error states. In fact, `Document` uses these `Reader` functions to obtain parse errors. Please refer to [DOM](doc/dom.md) for details about parse errors.
+
+## Token-by-Token Parsing {#TokenByTokenParsing}
+
+Some users may wish to parse a JSON input stream a single token at a time, instead of immediately parsing an entire document without stopping. To parse JSON this way, instead of calling `Parse`, you can use the `IterativeParse` set of functions:
+
+~~~~~~~~~~cpp
+    void IterativeParseInit();
+	
+    template <unsigned parseFlags, typename InputStream, typename Handler>
+    bool IterativeParseNext(InputStream& is, Handler& handler);
+
+    bool IterativeParseComplete();
+~~~~~~~~~~
+
+Here is an example of iteratively parsing JSON, token by token:
+
+~~~~~~~~~~cpp
+    reader.IterativeParseInit();
+    while (!reader.IterativeParseComplete()) {
+        reader.IterativeParseNext<kParseDefaultFlags>(is, handler);
+		// Your handler has been called once.
+    }
+~~~~~~~~~~
+
+# Writer {#Writer}
+
+`Reader` converts (parses) JSON into events. `Writer` does exactly the opposite. It converts events into JSON. 
+
+`Writer` is very easy to use. If your application only needs to convert some data into JSON, it may be a good choice to use `Writer` directly, instead of building a `Document` and then stringifying it with a `Writer`.
+
+In `simplewriter` example, we do exactly the reverse of `simplereader`.
+
+~~~~~~~~~~cpp
+#include "rapidjson/writer.h"
+#include "rapidjson/stringbuffer.h"
+#include <iostream>
+
+using namespace rapidjson;
+using namespace std;
+
+int main() {
+    StringBuffer s;
+    Writer<StringBuffer> writer(s);
+    
+    writer.StartObject();
+    writer.Key("hello");
+    writer.String("world");
+    writer.Key("t");
+    writer.Bool(true);
+    writer.Key("f");
+    writer.Bool(false);
+    writer.Key("n");
+    writer.Null();
+    writer.Key("i");
+    writer.Uint(123);
+    writer.Key("pi");
+    writer.Double(3.1416);
+    writer.Key("a");
+    writer.StartArray();
+    for (unsigned i = 0; i < 4; i++)
+        writer.Uint(i);
+    writer.EndArray();
+    writer.EndObject();
+
+    cout << s.GetString() << endl;
+}
+~~~~~~~~~~
+
+~~~~~~~~~~
+{"hello":"world","t":true,"f":false,"n":null,"i":123,"pi":3.1416,"a":[0,1,2,3]}
+~~~~~~~~~~
+
+There are two `String()` and `Key()` overloads. One is the same as defined in handler concept with 3 parameters. It can handle string with null characters. Another one is the simpler version used in the above example.
+
+Note that, the example code does not pass any parameters in `EndArray()` and `EndObject()`. A `SizeType` can be passed but it will be simply ignored by `Writer`.
+
+You may doubt that, why not just use `sprintf()` or `std::stringstream` to build a JSON?
+
+There are various reasons:
+1. `Writer` must output a well-formed JSON. If there is incorrect event sequence (e.g. `Int()` just after `StartObject()`), it generates assertion fail in debug mode.
+2. `Writer::String()` can handle string escaping (e.g. converting code point `U+000A` to `\n`) and Unicode transcoding.
+3. `Writer` handles number output consistently.
+4. `Writer` implements the event handler concept. It can be used to handle events from `Reader`, `Document` or other event publisher.
+5. `Writer` can be optimized for different platforms.
+
+Anyway, using `Writer` API is even simpler than generating a JSON by ad hoc methods.
+
+## Template {#WriterTemplate}
+
+`Writer` has a minor design difference to `Reader`. `Writer` is a template class, not a typedef. There is no `GenericWriter`. The following is the declaration.
+
+~~~~~~~~~~cpp
+namespace rapidjson {
+
+template<typename OutputStream, typename SourceEncoding = UTF8<>, typename TargetEncoding = UTF8<>, typename Allocator = CrtAllocator, unsigned writeFlags = kWriteDefaultFlags>
+class Writer {
+public:
+    Writer(OutputStream& os, Allocator* allocator = 0, size_t levelDepth = kDefaultLevelDepth)
+// ...
+};
+
+} // namespace rapidjson
+~~~~~~~~~~
+
+The `OutputStream` template parameter is the type of output stream. It cannot be deduced and must be specified by user.
+
+The `SourceEncoding` template parameter specifies the encoding to be used in `String(const Ch*, ...)`.
+
+The `TargetEncoding` template parameter specifies the encoding in the output stream.
+
+The `Allocator` is the type of allocator, which is used for allocating internal data structure (a stack).
+
+The `writeFlags` are combination of the following bit-flags:
+
+Parse flags                   | Meaning
+------------------------------|-----------------------------------
+`kWriteNoFlags`               | No flag is set.
+`kWriteDefaultFlags`          | Default write flags. It is equal to macro `RAPIDJSON_WRITE_DEFAULT_FLAGS`, which is defined as `kWriteNoFlags`.
+`kWriteValidateEncodingFlag`  | Validate encoding of JSON strings.
+`kWriteNanAndInfFlag`         | Allow writing of `Infinity`, `-Infinity` and `NaN`.
+
+Besides, the constructor of `Writer` has a `levelDepth` parameter. This parameter affects the initial memory allocated for storing information per hierarchy level.
+
+## PrettyWriter {#PrettyWriter}
+
+While the output of `Writer` is the most condensed JSON without white-spaces, suitable for network transfer or storage, it is not easily readable by human.
+
+Therefore, RapidJSON provides a `PrettyWriter`, which adds indentation and line feeds in the output.
+
+The usage of `PrettyWriter` is exactly the same as `Writer`, except that `PrettyWriter` provides a `SetIndent(Ch indentChar, unsigned indentCharCount)` function. The default is 4 spaces.
+
+## Completeness and Reset {#CompletenessReset}
+
+A `Writer` can only output a single JSON, which can be any JSON type at the root. Once the singular event for root (e.g. `String()`), or the last matching `EndObject()` or `EndArray()` event, is handled, the output JSON is well-formed and complete. User can detect this state by calling `Writer::IsComplete()`.
+
+When a JSON is complete, the `Writer` cannot accept any new events. Otherwise the output will be invalid (i.e. having more than one root). To reuse the `Writer` object, user can call `Writer::Reset(OutputStream& os)` to reset all internal states of the `Writer` with a new output stream.
+
+# Techniques {#SaxTechniques}
+
+## Parsing JSON to Custom Data Structure {#CustomDataStructure}
+
+`Document`'s parsing capability is completely based on `Reader`. Actually `Document` is a handler which receives events from a reader to build a DOM during parsing.
+
+Users may use `Reader` to build other data structures directly. This eliminates building of DOM, thus reducing memory and improving performance.
+
+In the following `messagereader` example, `ParseMessages()` parses a JSON which should be an object with key-string pairs.
+
+~~~~~~~~~~cpp
+#include "rapidjson/reader.h"
+#include "rapidjson/error/en.h"
+#include <iostream>
+#include <string>
+#include <map>
+
+using namespace std;
+using namespace rapidjson;
+
+typedef map<string, string> MessageMap;
+
+struct MessageHandler
+    : public BaseReaderHandler<UTF8<>, MessageHandler> {
+    MessageHandler() : state_(kExpectObjectStart) {
+    }
+
+    bool StartObject() {
+        switch (state_) {
+        case kExpectObjectStart:
+            state_ = kExpectNameOrObjectEnd;
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    bool String(const char* str, SizeType length, bool) {
+        switch (state_) {
+        case kExpectNameOrObjectEnd:
+            name_ = string(str, length);
+            state_ = kExpectValue;
+            return true;
+        case kExpectValue:
+            messages_.insert(MessageMap::value_type(name_, string(str, length)));
+            state_ = kExpectNameOrObjectEnd;
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    bool EndObject(SizeType) { return state_ == kExpectNameOrObjectEnd; }
+
+    bool Default() { return false; } // All other events are invalid.
+
+    MessageMap messages_;
+    enum State {
+        kExpectObjectStart,
+        kExpectNameOrObjectEnd,
+        kExpectValue,
+    }state_;
+    std::string name_;
+};
+
+void ParseMessages(const char* json, MessageMap& messages) {
+    Reader reader;
+    MessageHandler handler;
+    StringStream ss(json);
+    if (reader.Parse(ss, handler))
+        messages.swap(handler.messages_);   // Only change it if success.
+    else {
+        ParseErrorCode e = reader.GetParseErrorCode();
+        size_t o = reader.GetErrorOffset();
+        cout << "Error: " << GetParseError_En(e) << endl;
+        cout << " at offset " << o << " near '" << string(json).substr(o, 10) << "...'" << endl;
+    }
+}
+
+int main() {
+    MessageMap messages;
+
+    const char* json1 = "{ \"greeting\" : \"Hello!\", \"farewell\" : \"bye-bye!\" }";
+    cout << json1 << endl;
+    ParseMessages(json1, messages);
+
+    for (MessageMap::const_iterator itr = messages.begin(); itr != messages.end(); ++itr)
+        cout << itr->first << ": " << itr->second << endl;
+
+    cout << endl << "Parse a JSON with invalid schema." << endl;
+    const char* json2 = "{ \"greeting\" : \"Hello!\", \"farewell\" : \"bye-bye!\", \"foo\" : {} }";
+    cout << json2 << endl;
+    ParseMessages(json2, messages);
+
+    return 0;
+}
+~~~~~~~~~~
+
+~~~~~~~~~~
+{ "greeting" : "Hello!", "farewell" : "bye-bye!" }
+farewell: bye-bye!
+greeting: Hello!
+
+Parse a JSON with invalid schema.
+{ "greeting" : "Hello!", "farewell" : "bye-bye!", "foo" : {} }
+Error: Terminate parsing due to Handler error.
+ at offset 59 near '} }...'
+~~~~~~~~~~
+
+The first JSON (`json1`) was successfully parsed into `MessageMap`. Since `MessageMap` is a `std::map`, the printing order is sorted by the key. This order is different from the JSON's order.
+
+In the second JSON (`json2`), `foo`'s value is an empty object. As it is an object, `MessageHandler::StartObject()` will be called. However, at that moment `state_ = kExpectValue`, so that function returns `false` and causes the parsing process to be terminated. The error code is `kParseErrorTermination`.
+
+## Filtering of JSON {#Filtering}
+
+As mentioned earlier, `Writer` can handle the events published by `Reader`. The `condense` example simply sets a `Writer` as the handler of a `Reader`, so it can remove all white-spaces in JSON. The `pretty` example uses the same relationship, but replaces `Writer` with `PrettyWriter`. So `pretty` can be used to reformat a JSON with indentation and line feeds.
+
+Actually, we can add intermediate layer(s) to filter the contents of JSON via these SAX-style APIs. For example, the `capitalize` example capitalizes all strings in a JSON.
+
+~~~~~~~~~~cpp
+#include "rapidjson/reader.h"
+#include "rapidjson/writer.h"
+#include "rapidjson/filereadstream.h"
+#include "rapidjson/filewritestream.h"
+#include "rapidjson/error/en.h"
+#include <vector>
+#include <cctype>
+
+using namespace rapidjson;
+
+template<typename OutputHandler>
+struct CapitalizeFilter {
+    CapitalizeFilter(OutputHandler& out) : out_(out), buffer_() {
+    }
+
+    bool Null() { return out_.Null(); }
+    bool Bool(bool b) { return out_.Bool(b); }
+    bool Int(int i) { return out_.Int(i); }
+    bool Uint(unsigned u) { return out_.Uint(u); }
+    bool Int64(int64_t i) { return out_.Int64(i); }
+    bool Uint64(uint64_t u) { return out_.Uint64(u); }
+    bool Double(double d) { return out_.Double(d); }
+    bool RawNumber(const char* str, SizeType length, bool copy) { return out_.RawNumber(str, length, copy); }
+    bool String(const char* str, SizeType length, bool) { 
+        buffer_.clear();
+        for (SizeType i = 0; i < length; i++)
+            buffer_.push_back(static_cast<char>(std::toupper(static_cast<unsigned char>(str[i]))));
+        return out_.String(&buffer_.front(), length, true); // true = output handler need to copy the string
+    }
+    bool StartObject() { return out_.StartObject(); }
+    bool Key(const char* str, SizeType length, bool copy) { return String(str, length, copy); }
+    bool EndObject(SizeType memberCount) { return out_.EndObject(memberCount); }
+    bool StartArray() { return out_.StartArray(); }
+    bool EndArray(SizeType elementCount) { return out_.EndArray(elementCount); }
+
+    OutputHandler& out_;
+    std::vector<char> buffer_;
+};
+
+int main(int, char*[]) {
+    // Prepare JSON reader and input stream.
+    Reader reader;
+    char readBuffer[65536];
+    FileReadStream is(stdin, readBuffer, sizeof(readBuffer));
+
+    // Prepare JSON writer and output stream.
+    char writeBuffer[65536];
+    FileWriteStream os(stdout, writeBuffer, sizeof(writeBuffer));
+    Writer<FileWriteStream> writer(os);
+
+    // JSON reader parse from the input stream and let writer generate the output.
+    CapitalizeFilter<Writer<FileWriteStream> > filter(writer);
+    if (!reader.Parse(is, filter)) {
+        fprintf(stderr, "\nError(%u): %s\n", (unsigned)reader.GetErrorOffset(), GetParseError_En(reader.GetParseErrorCode()));
+        return 1;
+    }
+
+    return 0;
+}
+~~~~~~~~~~
+
+Note that, it is incorrect to simply capitalize the JSON as a string. For example:
+~~~~~~~~~~
+["Hello\nWorld"]
+~~~~~~~~~~
+
+Simply capitalizing the whole JSON would produce an incorrect escape character:
+~~~~~~~~~~
+["HELLO\NWORLD"]
+~~~~~~~~~~
+
+The correct result by `capitalize`:
+~~~~~~~~~~
+["HELLO\nWORLD"]
+~~~~~~~~~~
+
+More complicated filters can be developed. However, since the SAX-style API can only provide information about a single event at a time, the user may need to keep track of contextual information (e.g. the path from the root value, storage of other related values). Some processing may be easier to implement in DOM than in SAX.
diff --git a/doc/sax.zh-cn.md b/doc/sax.zh-cn.md
new file mode 100644
index 0000000..740c339
--- /dev/null
+++ b/doc/sax.zh-cn.md
@@ -0,0 +1,487 @@
+# SAX
+
+"SAX" 此术语源于 [Simple API for XML](http://en.wikipedia.org/wiki/Simple_API_for_XML)。我们借了此术语去套用在 JSON 的解析及生成。
+
+在 RapidJSON 中,`Reader`(`GenericReader<...>` 的 typedef)是 JSON 的 SAX 风格解析器,而 `Writer`(`GenericWriter<...>` 的 typedef)则是 JSON 的 SAX 风格生成器。
+
+[TOC]
+
+# Reader {#Reader}
+
+`Reader` 从输入流解析一个 JSON。当它从流中读取字符时,它会基于 JSON 的语法去分析字符,并向处理器发送事件。
+
+例如,以下是一个 JSON。
+
+~~~~~~~~~~js
+{
+    "hello": "world",
+    "t": true ,
+    "f": false,
+    "n": null,
+    "i": 123,
+    "pi": 3.1416,
+    "a": [1, 2, 3, 4]
+}
+~~~~~~~~~~
+
+当一个 `Reader` 解析此 JSON 时,它会顺序地向处理器发送以下的事件:
+
+~~~~~~~~~~
+StartObject()
+Key("hello", 5, true)
+String("world", 5, true)
+Key("t", 1, true)
+Bool(true)
+Key("f", 1, true)
+Bool(false)
+Key("n", 1, true)
+Null()
+Key("i", 1, true)
+Uint(123)
+Key("pi", 2, true)
+Double(3.1416)
+Key("a", 1, true)
+StartArray()
+Uint(1)
+Uint(2)
+Uint(3)
+Uint(4)
+EndArray(4)
+EndObject(7)
+~~~~~~~~~~
+
+除了一些事件参数需要再作解释,这些事件可以轻松地与 JSON 对上。我们可以看看 `simplereader` 例子怎样产生和以上完全相同的结果:
+
+~~~~~~~~~~cpp
+#include "rapidjson/reader.h"
+#include <iostream>
+
+using namespace rapidjson;
+using namespace std;
+
+struct MyHandler : public BaseReaderHandler<UTF8<>, MyHandler> {
+    bool Null() { cout << "Null()" << endl; return true; }
+    bool Bool(bool b) { cout << "Bool(" << boolalpha << b << ")" << endl; return true; }
+    bool Int(int i) { cout << "Int(" << i << ")" << endl; return true; }
+    bool Uint(unsigned u) { cout << "Uint(" << u << ")" << endl; return true; }
+    bool Int64(int64_t i) { cout << "Int64(" << i << ")" << endl; return true; }
+    bool Uint64(uint64_t u) { cout << "Uint64(" << u << ")" << endl; return true; }
+    bool Double(double d) { cout << "Double(" << d << ")" << endl; return true; }
+    bool String(const char* str, SizeType length, bool copy) { 
+        cout << "String(" << str << ", " << length << ", " << boolalpha << copy << ")" << endl;
+        return true;
+    }
+    bool StartObject() { cout << "StartObject()" << endl; return true; }
+    bool Key(const char* str, SizeType length, bool copy) { 
+        cout << "Key(" << str << ", " << length << ", " << boolalpha << copy << ")" << endl;
+        return true;
+    }
+    bool EndObject(SizeType memberCount) { cout << "EndObject(" << memberCount << ")" << endl; return true; }
+    bool StartArray() { cout << "StartArray()" << endl; return true; }
+    bool EndArray(SizeType elementCount) { cout << "EndArray(" << elementCount << ")" << endl; return true; }
+};
+
+int main() {
+    const char json[] = " { \"hello\" : \"world\", \"t\" : true , \"f\" : false, \"n\": null, \"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3, 4] } ";
+
+    MyHandler handler;
+    Reader reader;
+    StringStream ss(json);
+    reader.Parse(ss, handler);
+}
+~~~~~~~~~~
+
+注意 RapidJSON 使用模板去静态绑定 `Reader` 类型及处理器的类型,而不是使用含虚函数的类。这个范式可以通过把函数内联而改善性能。
+
+## 处理器 {#Handler}
+
+如前例所示,使用者需要实现一个处理器(handler),用于处理来自 `Reader` 的事件(函数调用)。处理器必须包含以下的成员函数。
+
+~~~~~~~~~~cpp
+class Handler {
+    bool Null();
+    bool Bool(bool b);
+    bool Int(int i);
+    bool Uint(unsigned i);
+    bool Int64(int64_t i);
+    bool Uint64(uint64_t i);
+    bool Double(double d);
+    bool RawNumber(const Ch* str, SizeType length, bool copy);
+    bool String(const Ch* str, SizeType length, bool copy);
+    bool StartObject();
+    bool Key(const Ch* str, SizeType length, bool copy);
+    bool EndObject(SizeType memberCount);
+    bool StartArray();
+    bool EndArray(SizeType elementCount);
+};
+~~~~~~~~~~
+
+当 `Reader` 遇到 JSON null 值时会调用 `Null()`。
+
+当 `Reader` 遇到 JSON true 或 false 值时会调用 `Bool(bool)`。
+
+当 `Reader` 遇到 JSON number,它会选择一个合适的 C++ 类型映射,然后调用 `Int(int)`、`Uint(unsigned)`、`Int64(int64_t)`、`Uint64(uint64_t)` 及 `Double(double)` 的 * 其中之一个 *。 若开启了 `kParseNumbersAsStrings` 选项,`Reader` 便会改为调用 `RawNumber()`。
+
+当 `Reader` 遇到 JSON string,它会调用 `String(const char* str, SizeType length, bool copy)`。第一个参数是字符串的指针。第二个参数是字符串的长度(不包含空终止符号)。注意 RapidJSON 支持字串中含有空字符 `\0`。若出现这种情况,便会有 `strlen(str) < length`。最后的 `copy` 参数表示处理器是否需要复制该字符串。在正常解析时,`copy = true`。仅当使用原位解析时,`copy = false`。此外,还要注意字符的类型与目标编码相关,我们稍后会再谈这一点。
+
+当 `Reader` 遇到 JSON object 的开始之时,它会调用 `StartObject()`。JSON 的 object 是一个键值对(成员)的集合。若 object 包含成员,它会先为成员的名字调用 `Key()`,然后再按值的类型调用函数。它不断调用这些键值对,直至最终调用 `EndObject(SizeType memberCount)`。注意 `memberCount` 参数对处理器来说只是协助性质,使用者可能不需要此参数。
+
+JSON array 与 object 相似,但更简单。在 array 开始时,`Reader` 会调用 `StartArray()`。若 array 含有元素,它会按元素的类型来调用函数。相似地,最后它会调用 `EndArray(SizeType elementCount)`,其中 `elementCount` 参数对处理器来说只是协助性质。
+
+每个处理器函数都返回一个 `bool`。正常它们应返回 `true`。若处理器遇到错误,它可以返回 `false` 去通知事件发送方停止继续处理。
+
+例如,当我们用 `Reader` 解析一个 JSON 时,处理器检测到该 JSON 并不符合所需的 schema,那么处理器可以返回 `false`,令 `Reader` 停止之后的解析工作。而 `Reader` 会进入一个错误状态,并以 `kParseErrorTermination` 错误码标识。
+
+## GenericReader {#GenericReader}
+
+前面提及,`Reader` 是 `GenericReader` 模板类的 typedef:
+
+~~~~~~~~~~cpp
+namespace rapidjson {
+
+template <typename SourceEncoding, typename TargetEncoding, typename Allocator = MemoryPoolAllocator<> >
+class GenericReader {
+    // ...
+};
+
+typedef GenericReader<UTF8<>, UTF8<> > Reader;
+
+} // namespace rapidjson
+~~~~~~~~~~
+
+`Reader` 使用 UTF-8 作为来源及目标编码。来源编码是指 JSON 流的编码。目标编码是指 `String()` 的 `str` 参数所用的编码。例如,要解析一个 UTF-8 流并输出至 UTF-16 string 事件,你需要这么定义一个 reader:
+
+~~~~~~~~~~cpp
+GenericReader<UTF8<>, UTF16<> > reader;
+~~~~~~~~~~
+
+注意到 `UTF16` 的缺省类型是 `wchar_t`。因此这个 `reader` 需要调用处理器的 `String(const wchar_t*, SizeType, bool)`。
+
+第三个模板参数 `Allocator` 是内部数据结构(实际上是一个堆栈)的分配器类型。
+
+## 解析 {#SaxParsing}
+
+`Reader` 的唯一功能就是解析 JSON。 
+
+~~~~~~~~~~cpp
+template <unsigned parseFlags, typename InputStream, typename Handler>
+bool Parse(InputStream& is, Handler& handler);
+
+// 使用 parseFlags = kDefaultParseFlags
+template <typename InputStream, typename Handler>
+bool Parse(InputStream& is, Handler& handler);
+~~~~~~~~~~
+
+若在解析中出现错误,它会返回 `false`。使用者可调用 `bool HasParseError()`, `ParseErrorCode GetParseErrorCode()` 及 `size_t GetErrorOffset()` 获取错误状态。实际上 `Document` 使用这些 `Reader` 函数去获取解析错误。请参考 [DOM](doc/dom.zh-cn.md) 去了解有关解析错误的细节。
+
+# Writer {#Writer}
+
+`Reader` 把 JSON 转换(解析)成为事件。`Writer` 做完全相反的事情。它把事件转换成 JSON。
+
+`Writer` 是非常容易使用的。若你的应用程序只需把一些数据转换成 JSON,可能直接使用 `Writer`,会比建立一个 `Document` 然后用 `Writer` 把它转换成 JSON 更加方便。
+
+在 `simplewriter` 例子里,我们做 `simplereader` 完全相反的事情。
+
+~~~~~~~~~~cpp
+#include "rapidjson/writer.h"
+#include "rapidjson/stringbuffer.h"
+#include <iostream>
+
+using namespace rapidjson;
+using namespace std;
+
+int main() {
+    StringBuffer s;
+    Writer<StringBuffer> writer(s);
+    
+    writer.StartObject();
+    writer.Key("hello");
+    writer.String("world");
+    writer.Key("t");
+    writer.Bool(true);
+    writer.Key("f");
+    writer.Bool(false);
+    writer.Key("n");
+    writer.Null();
+    writer.Key("i");
+    writer.Uint(123);
+    writer.Key("pi");
+    writer.Double(3.1416);
+    writer.Key("a");
+    writer.StartArray();
+    for (unsigned i = 0; i < 4; i++)
+        writer.Uint(i);
+    writer.EndArray();
+    writer.EndObject();
+
+    cout << s.GetString() << endl;
+}
+~~~~~~~~~~
+
+~~~~~~~~~~
+{"hello":"world","t":true,"f":false,"n":null,"i":123,"pi":3.1416,"a":[0,1,2,3]}
+~~~~~~~~~~
+
+`String()` 及 `Key()` 各有两个重载。一个是如处理器 concept 般,有 3 个参数。它能处理含空字符的字符串。另一个是如上中使用的较简单版本。
+
+注意到,例子代码中的 `EndArray()` 及 `EndObject()` 并没有参数。可以传递一个 `SizeType` 的参数,但它会被 `Writer` 忽略。
+
+你可能会怀疑,为什么不使用 `sprintf()` 或 `std::stringstream` 去建立一个 JSON?
+
+这有几个原因:
+1. `Writer` 必然会输出一个结构良好(well-formed)的 JSON。若然有错误的事件次序(如 `Int()` 紧随 `StartObject()` 出现),它会在调试模式中产生断言失败。
+2. `Writer::String()` 可处理字符串转义(如把码点 `U+000A` 转换成 `\n`)及进行 Unicode 转码。
+3. `Writer` 一致地处理 number 的输出。
+4. `Writer` 实现了事件处理器 concept。可用于处理来自 `Reader`、`Document` 或其他事件发生器。
+5. `Writer` 可对不同平台进行优化。
+
+无论如何,使用 `Writer` API 去生成 JSON 甚至乎比这些临时方法更简单。
+
+## 模板 {#WriterTemplate}
+
+`Writer` 与 `Reader` 有少许设计区别。`Writer` 是一个模板类,而不是一个 typedef。 并没有 `GenericWriter`。以下是 `Writer` 的声明。
+
+~~~~~~~~~~cpp
+namespace rapidjson {
+
+template<typename OutputStream, typename SourceEncoding = UTF8<>, typename TargetEncoding = UTF8<>, typename Allocator = CrtAllocator, unsigned writeFlags = kWriteDefaultFlags>
+class Writer {
+public:
+    Writer(OutputStream& os, Allocator* allocator = 0, size_t levelDepth = kDefaultLevelDepth)
+// ...
+};
+
+} // namespace rapidjson
+~~~~~~~~~~
+
+`OutputStream` 模板参数是输出流的类型。它的类型不可以被自动推断,必须由使用者提供。
+
+`SourceEncoding` 模板参数指定了 `String(const Ch*, ...)` 的编码。
+
+`TargetEncoding` 模板参数指定输出流的编码。
+
+`Allocator` 是分配器的类型,用于分配内部数据结构(一个堆栈)。
+
+`writeFlags` 是以下位标志的组合:
+
+写入位标志                     | 意义
+------------------------------|-----------------------------------
+`kWriteNoFlags`               | 没有任何标志。
+`kWriteDefaultFlags`          | 缺省的写入选项。它等于 `RAPIDJSON_WRITE_DEFAULT_FLAGS` 宏,此宏定义为 `kWriteNoFlags`。
+`kWriteValidateEncodingFlag`  | 校验 JSON 字符串的编码。
+`kWriteNanAndInfFlag`         | 容许写入 `Infinity`, `-Infinity` 及 `NaN`。
+
+此外,`Writer` 的构造函数有一 `levelDepth` 参数。存储每层阶信息的初始内存分配量受此参数影响。
+
+## PrettyWriter {#PrettyWriter}
+
+`Writer` 所输出的是没有空格字符的最紧凑 JSON,适合网络传输或储存,但不适合人类阅读。
+
+因此,RapidJSON 提供了一个 `PrettyWriter`,它在输出中加入缩进及换行。
+
+`PrettyWriter` 的用法与 `Writer` 几乎一样,不同之处是 `PrettyWriter` 提供了一个 `SetIndent(Ch indentChar, unsigned indentCharCount)` 函数。缺省的缩进是 4 个空格。
+
+## 完整性及重置 {#CompletenessReset}
+
+一个 `Writer` 只可输出单个 JSON,其根节点可以是任何 JSON 类型。当处理完单个根节点事件(如 `String()`),或匹配的最后 `EndObject()` 或 `EndArray()` 事件,输出的 JSON 是结构完整(well-formed)及完整的。使用者可调用 `Writer::IsComplete()` 去检测完整性。
+
+当 JSON 完整时,`Writer` 不能再接受新的事件。不然其输出便会是不合法的(例如有超过一个根节点)。为了重新利用 `Writer` 对象,使用者可调用 `Writer::Reset(OutputStream& os)` 去重置其所有内部状态及设置新的输出流。
+
+# 技巧 {#SaxTechniques}
+
+## 解析 JSON 至自定义结构 {#CustomDataStructure}
+
+`Document` 的解析功能完全依靠 `Reader`。实际上 `Document` 是一个处理器,在解析 JSON 时接收事件去建立一个 DOM。
+
+使用者可以直接使用 `Reader` 去建立其他数据结构。这消除了建立 DOM 的步骤,从而减少了内存开销并改善性能。
+
+在以下的 `messagereader` 例子中,`ParseMessages()` 解析一个 JSON,该 JSON 应该是一个含键值对的 object。
+
+~~~~~~~~~~cpp
+#include "rapidjson/reader.h"
+#include "rapidjson/error/en.h"
+#include <iostream>
+#include <string>
+#include <map>
+
+using namespace std;
+using namespace rapidjson;
+
+typedef map<string, string> MessageMap;
+
+struct MessageHandler
+    : public BaseReaderHandler<UTF8<>, MessageHandler> {
+    MessageHandler() : state_(kExpectObjectStart) {
+    }
+
+    bool StartObject() {
+        switch (state_) {
+        case kExpectObjectStart:
+            state_ = kExpectNameOrObjectEnd;
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    bool String(const char* str, SizeType length, bool) {
+        switch (state_) {
+        case kExpectNameOrObjectEnd:
+            name_ = string(str, length);
+            state_ = kExpectValue;
+            return true;
+        case kExpectValue:
+            messages_.insert(MessageMap::value_type(name_, string(str, length)));
+            state_ = kExpectNameOrObjectEnd;
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    bool EndObject(SizeType) { return state_ == kExpectNameOrObjectEnd; }
+
+    bool Default() { return false; } // All other events are invalid.
+
+    MessageMap messages_;
+    enum State {
+        kExpectObjectStart,
+        kExpectNameOrObjectEnd,
+        kExpectValue,
+    }state_;
+    std::string name_;
+};
+
+void ParseMessages(const char* json, MessageMap& messages) {
+    Reader reader;
+    MessageHandler handler;
+    StringStream ss(json);
+    if (reader.Parse(ss, handler))
+        messages.swap(handler.messages_);   // Only change it if success.
+    else {
+        ParseErrorCode e = reader.GetParseErrorCode();
+        size_t o = reader.GetErrorOffset();
+        cout << "Error: " << GetParseError_En(e) << endl;
+        cout << " at offset " << o << " near '" << string(json).substr(o, 10) << "...'" << endl;
+    }
+}
+
+int main() {
+    MessageMap messages;
+
+    const char* json1 = "{ \"greeting\" : \"Hello!\", \"farewell\" : \"bye-bye!\" }";
+    cout << json1 << endl;
+    ParseMessages(json1, messages);
+
+    for (MessageMap::const_iterator itr = messages.begin(); itr != messages.end(); ++itr)
+        cout << itr->first << ": " << itr->second << endl;
+
+    cout << endl << "Parse a JSON with invalid schema." << endl;
+    const char* json2 = "{ \"greeting\" : \"Hello!\", \"farewell\" : \"bye-bye!\", \"foo\" : {} }";
+    cout << json2 << endl;
+    ParseMessages(json2, messages);
+
+    return 0;
+}
+~~~~~~~~~~
+
+~~~~~~~~~~
+{ "greeting" : "Hello!", "farewell" : "bye-bye!" }
+farewell: bye-bye!
+greeting: Hello!
+
+Parse a JSON with invalid schema.
+{ "greeting" : "Hello!", "farewell" : "bye-bye!", "foo" : {} }
+Error: Terminate parsing due to Handler error.
+ at offset 59 near '} }...'
+~~~~~~~~~~
+
+第一个 JSON(`json1`)被成功地解析至 `MessageMap`。由于 `MessageMap` 是一个 `std::map`,打印次序按键值排序。此次序与 JSON 中的次序不同。
+
+在第二个 JSON(`json2`)中,`foo` 的值是一个空 object。由于它是一个 object,`MessageHandler::StartObject()` 会被调用。然而,在 `state_ = kExpectValue` 的情况下,该函数会返回 `false`,并导致解析过程终止。错误代码是 `kParseErrorTermination`。
+
+## 过滤 JSON {#Filtering}
+
+如前面提及过,`Writer` 可处理 `Reader` 发出的事件。`example/condense/condense.cpp` 例子简单地设置 `Writer` 作为一个 `Reader` 的处理器,因此它能移除 JSON 中的所有空白字符。`example/pretty/pretty.cpp` 例子使用同样的关系,只是以 `PrettyWriter` 取代 `Writer`。因此 `pretty` 能够重新格式化 JSON,加入缩进及换行。
+
+实际上,我们可以使用 SAX 风格 API 去加入(多个)中间层去过滤 JSON 的内容。例如 `capitalize` 例子可以把所有 JSON string 改为大写。
+
+~~~~~~~~~~cpp
+#include "rapidjson/reader.h"
+#include "rapidjson/writer.h"
+#include "rapidjson/filereadstream.h"
+#include "rapidjson/filewritestream.h"
+#include "rapidjson/error/en.h"
+#include <vector>
+#include <cctype>
+
+using namespace rapidjson;
+
+template<typename OutputHandler>
+struct CapitalizeFilter {
+    CapitalizeFilter(OutputHandler& out) : out_(out), buffer_() {
+    }
+
+    bool Null() { return out_.Null(); }
+    bool Bool(bool b) { return out_.Bool(b); }
+    bool Int(int i) { return out_.Int(i); }
+    bool Uint(unsigned u) { return out_.Uint(u); }
+    bool Int64(int64_t i) { return out_.Int64(i); }
+    bool Uint64(uint64_t u) { return out_.Uint64(u); }
+    bool Double(double d) { return out_.Double(d); }
+    bool RawNumber(const char* str, SizeType length, bool copy) { return out_.RawNumber(str, length, copy); }
+    bool String(const char* str, SizeType length, bool) { 
+        buffer_.clear();
+        for (SizeType i = 0; i < length; i++)
+            buffer_.push_back(static_cast<char>(std::toupper(static_cast<unsigned char>(str[i]))));
+        return out_.String(&buffer_.front(), length, true); // true = output handler need to copy the string
+    }
+    bool StartObject() { return out_.StartObject(); }
+    bool Key(const char* str, SizeType length, bool copy) { return String(str, length, copy); }
+    bool EndObject(SizeType memberCount) { return out_.EndObject(memberCount); }
+    bool StartArray() { return out_.StartArray(); }
+    bool EndArray(SizeType elementCount) { return out_.EndArray(elementCount); }
+
+    OutputHandler& out_;
+    std::vector<char> buffer_;
+};
+
+int main(int, char*[]) {
+    // Prepare JSON reader and input stream.
+    Reader reader;
+    char readBuffer[65536];
+    FileReadStream is(stdin, readBuffer, sizeof(readBuffer));
+
+    // Prepare JSON writer and output stream.
+    char writeBuffer[65536];
+    FileWriteStream os(stdout, writeBuffer, sizeof(writeBuffer));
+    Writer<FileWriteStream> writer(os);
+
+    // JSON reader parse from the input stream and let writer generate the output.
+    CapitalizeFilter<Writer<FileWriteStream> > filter(writer);
+    if (!reader.Parse(is, filter)) {
+        fprintf(stderr, "\nError(%u): %s\n", (unsigned)reader.GetErrorOffset(), GetParseError_En(reader.GetParseErrorCode()));
+        return 1;
+    }
+
+    return 0;
+}
+~~~~~~~~~~
+
+注意到,不可简单地把 JSON 当作字符串去改为大写。例如:
+~~~~~~~~~~
+["Hello\nWorld"]
+~~~~~~~~~~
+
+简单地把整个 JSON 转为大写的话会产生错误的转义符:
+~~~~~~~~~~
+["HELLO\NWORLD"]
+~~~~~~~~~~
+
+而 `capitalize` 就会产生正确的结果:
+~~~~~~~~~~
+["HELLO\nWORLD"]
+~~~~~~~~~~
+
+我们还可以开发更复杂的过滤器。然而,由于 SAX 风格 API 在某一时间点只能提供单一事件的信息,使用者需要自行记录一些上下文信息(例如从根节点起的路径、储存其他相关值)。对于处理某些情况,用 DOM 会比 SAX 更容易实现。
+
diff --git a/doc/schema.md b/doc/schema.md
new file mode 100644
index 0000000..238d7a5
--- /dev/null
+++ b/doc/schema.md
@@ -0,0 +1,505 @@
+# Schema
+
+(This feature was released in v1.1.0)
+
+JSON Schema is a draft standard for describing the format of JSON data. The schema itself is also JSON data. By validating a JSON structure with JSON Schema, your code can safely access the DOM without manually checking types, or whether a key exists, etc. It can also ensure that the serialized JSON conforms to a specified schema.
+
+RapidJSON implemented a JSON Schema validator for [JSON Schema Draft v4](http://json-schema.org/documentation.html). If you are not familiar with JSON Schema, you may refer to [Understanding JSON Schema](http://spacetelescope.github.io/understanding-json-schema/).
+
+[TOC]
+
+# Basic Usage {#Basic}
+
+First of all, you need to parse a JSON Schema into `Document`, and then compile the `Document` into a `SchemaDocument`.
+
+Secondly, construct a `SchemaValidator` with the `SchemaDocument`. It is similar to a `Writer` in the sense of handling SAX events. So, you can use `document.Accept(validator)` to validate a document, and then check the validity.
+
+~~~cpp
+#include "rapidjson/schema.h"
+
+// ...
+
+Document sd;
+if (sd.Parse(schemaJson).HasParseError()) {
+    // the schema is not a valid JSON.
+    // ...       
+}
+SchemaDocument schema(sd); // Compile a Document to SchemaDocument
+// sd is no longer needed here.
+
+Document d;
+if (d.Parse(inputJson).HasParseError()) {
+    // the input is not a valid JSON.
+    // ...       
+}
+
+SchemaValidator validator(schema);
+if (!d.Accept(validator)) {
+    // Input JSON is invalid according to the schema
+    // Output diagnostic information
+    StringBuffer sb;
+    validator.GetInvalidSchemaPointer().StringifyUriFragment(sb);
+    printf("Invalid schema: %s\n", sb.GetString());
+    printf("Invalid keyword: %s\n", validator.GetInvalidSchemaKeyword());
+    sb.Clear();
+    validator.GetInvalidDocumentPointer().StringifyUriFragment(sb);
+    printf("Invalid document: %s\n", sb.GetString());
+}
+~~~
+
+Some notes:
+
+* One `SchemaDocument` can be referenced by multiple `SchemaValidator`s. It will not be modified by `SchemaValidator`s.
+* A `SchemaValidator` may be reused to validate multiple documents. To run it for other documents, call `validator.Reset()` first.
+
+# Validation during parsing/serialization {#Fused}
+
+Unlike most JSON Schema validator implementations, RapidJSON provides a SAX-based schema validator. Therefore, you can parse a JSON from a stream while validating it on the fly. If the validator encounters a JSON value that invalidates the supplied schema, the parsing will be terminated immediately. This design is especially useful for parsing large JSON files.
+
+## DOM parsing {#DOM}
+
+For using DOM in parsing, `Document` needs some preparation and finalizing tasks, in addition to receiving SAX events, thus it needs some work to route the reader, validator and the document. `SchemaValidatingReader` is a helper class that does such work.
+
+~~~cpp
+#include "rapidjson/filereadstream.h"
+
+// ...
+SchemaDocument schema(sd); // Compile a Document to SchemaDocument
+
+// Use reader to parse the JSON
+FILE* fp = fopen("big.json", "r");
+FileReadStream is(fp, buffer, sizeof(buffer));
+
+// Parse JSON from reader, validate the SAX events, and store in d.
+Document d;
+SchemaValidatingReader<kParseDefaultFlags, FileReadStream, UTF8<> > reader(is, schema);
+d.Populate(reader);
+
+if (!reader.GetParseResult()) {
+    // Not a valid JSON
+    // When reader.GetParseResult().Code() == kParseErrorTermination,
+    // it may be terminated by:
+    // (1) the validator found that the JSON is invalid according to schema; or
+    // (2) the input stream has I/O error.
+
+    // Check the validation result
+    if (!reader.IsValid()) {
+        // Input JSON is invalid according to the schema
+        // Output diagnostic information
+        StringBuffer sb;
+        reader.GetInvalidSchemaPointer().StringifyUriFragment(sb);
+        printf("Invalid schema: %s\n", sb.GetString());
+        printf("Invalid keyword: %s\n", reader.GetInvalidSchemaKeyword());
+        sb.Clear();
+        reader.GetInvalidDocumentPointer().StringifyUriFragment(sb);
+        printf("Invalid document: %s\n", sb.GetString());
+    }
+}
+~~~
+
+## SAX parsing {#SAX}
+
+For using SAX in parsing, it is much simpler. If you only need to validate the JSON without further processing, it is simply:
+
+~~~
+SchemaValidator validator(schema);
+Reader reader;
+if (!reader.Parse(stream, validator)) {
+    if (!validator.IsValid()) {
+        // ...    
+    }
+}
+~~~
+
+This is exactly the method used in the [schemavalidator](example/schemavalidator/schemavalidator.cpp) example. The distinct advantage is low memory usage, no matter how big the JSON was (the memory usage depends on the complexity of the schema).
+
+If you need to handle the SAX events further, then you need to use the template class `GenericSchemaValidator` to set the output handler of the validator:
+
+~~~
+MyHandler handler;
+GenericSchemaValidator<SchemaDocument, MyHandler> validator(schema, handler);
+Reader reader;
+if (!reader.Parse(ss, validator)) {
+    if (!validator.IsValid()) {
+        // ...    
+    }
+}
+~~~
+
+## Serialization {#Serialization}
+
+It is also possible to do validation during serializing. This can ensure the result JSON is valid according to the JSON schema.
+
+~~~
+StringBuffer sb;
+Writer<StringBuffer> writer(sb);
+GenericSchemaValidator<SchemaDocument, Writer<StringBuffer> > validator(s, writer);
+if (!d.Accept(validator)) {
+    // Some problem during Accept(), it may be validation or encoding issues.
+    if (!validator.IsValid()) {
+        // ...
+    }
+}
+~~~
+
+Of course, if your application only needs SAX-style serialization, it can simply send SAX events to `SchemaValidator` instead of `Writer`.
+
+# Remote Schema {#Remote}
+
+JSON Schema supports the [`$ref` keyword](http://spacetelescope.github.io/understanding-json-schema/structuring.html), which is a [JSON pointer](doc/pointer.md) referencing a local or remote schema. A local pointer is prefixed with `#`, while a remote pointer is a relative or absolute URI. For example:
+
+~~~js
+{ "$ref": "definitions.json#/address" }
+~~~
+
+As `SchemaDocument` does not know how to resolve such URI, it needs a user-provided `IRemoteSchemaDocumentProvider` instance to do so.
+
+~~~
+class MyRemoteSchemaDocumentProvider : public IRemoteSchemaDocumentProvider {
+public:
+    virtual const SchemaDocument* GetRemoteDocument(const char* uri, SizeType length) {
+        // Resolve the uri and returns a pointer to that schema.
+    }
+};
+
+// ...
+
+MyRemoteSchemaDocumentProvider provider;
+SchemaDocument schema(sd, &provider);
+~~~
+
+# Conformance {#Conformance}
+
+RapidJSON passed 262 out of 263 tests in [JSON Schema Test Suite](https://github.com/json-schema/JSON-Schema-Test-Suite) (Json Schema draft 4).
+
+The failed test is "changed scope ref invalid" of "change resolution scope" in `refRemote.json`. This is because the `id` schema keyword and URI combining function are not implemented.
+
+Besides, the `format` schema keyword for string values is ignored, since it is not required by the specification.
+
+## Regular Expression {#Regex}
+
+The schema keyword `pattern` and `patternProperties` uses regular expression to match the required pattern.
+
+RapidJSON implemented a simple NFA regular expression engine, which is used by default. It supports the following syntax.
+
+|Syntax|Description|
+|------|-----------|
+|`ab`    | Concatenation |
+|<code>a&#124;b</code>   | Alternation |
+|`a?`    | Zero or one |
+|`a*`    | Zero or more |
+|`a+`    | One or more |
+|`a{3}`  | Exactly 3 times |
+|`a{3,}` | At least 3 times |
+|`a{3,5}`| 3 to 5 times |
+|`(ab)`  | Grouping |
+|`^a`    | At the beginning |
+|`a$`    | At the end |
+|`.`     | Any character |
+|`[abc]` | Character classes |
+|`[a-c]` | Character class range |
+|`[a-z0-9_]` | Character class combination |
+|`[^abc]` | Negated character classes |
+|`[^a-c]` | Negated character class range |
+|`[\b]`   | Backspace (U+0008) |
+|<code>\\&#124;</code>, `\\`, ...  | Escape characters |
+|`\f` | Form feed (U+000C) |
+|`\n` | Line feed (U+000A) |
+|`\r` | Carriage return (U+000D) |
+|`\t` | Tab (U+0009) |
+|`\v` | Vertical tab (U+000B) |
+
+For C++11 compiler, it is also possible to use the `std::regex` by defining `RAPIDJSON_SCHEMA_USE_INTERNALREGEX=0` and `RAPIDJSON_SCHEMA_USE_STDREGEX=1`. If your schemas do not need `pattern` and `patternProperties`, you can set both macros to zero to disable this feature, which will reduce some code size.
+
+# Performance {#Performance}
+
+Most C++ JSON libraries do not yet support JSON Schema. So we tried to evaluate the performance of RapidJSON's JSON Schema validator according to [json-schema-benchmark](https://github.com/ebdrup/json-schema-benchmark), which tests 11 JavaScript libraries running on Node.js.
+
+That benchmark runs validations on [JSON Schema Test Suite](https://github.com/json-schema/JSON-Schema-Test-Suite), in which some test suites and tests are excluded. We made the same benchmarking procedure in [`schematest.cpp`](test/perftest/schematest.cpp).
+
+On a Mac Book Pro (2.8 GHz Intel Core i7), the following results are collected.
+
+|Validator|Relative speed|Number of test runs per second|
+|---------|:------------:|:----------------------------:|
+|RapidJSON|155%|30682|
+|[`ajv`](https://github.com/epoberezkin/ajv)|100%|19770 (± 1.31%)|
+|[`is-my-json-valid`](https://github.com/mafintosh/is-my-json-valid)|70%|13835 (± 2.84%)|
+|[`jsen`](https://github.com/bugventure/jsen)|57.7%|11411 (± 1.27%)|
+|[`schemasaurus`](https://github.com/AlexeyGrishin/schemasaurus)|26%|5145 (± 1.62%)|
+|[`themis`](https://github.com/playlyfe/themis)|19.9%|3935 (± 2.69%)|
+|[`z-schema`](https://github.com/zaggino/z-schema)|7%|1388 (± 0.84%)|
+|[`jsck`](https://github.com/pandastrike/jsck#readme)|3.1%|606 (± 2.84%)|
+|[`jsonschema`](https://github.com/tdegrunt/jsonschema#readme)|0.9%|185 (± 1.01%)|
+|[`skeemas`](https://github.com/Prestaul/skeemas#readme)|0.8%|154 (± 0.79%)|
+|tv4|0.5%|93 (± 0.94%)|
+|[`jayschema`](https://github.com/natesilva/jayschema)|0.1%|21 (± 1.14%)|
+
+That is, RapidJSON is about 1.5x faster than the fastest JavaScript library (ajv). And 1400x faster than the slowest one.
+
+# Schema violation reporting {#Reporting}
+
+(Unreleased as of 2017-09-20)
+
+When validating an instance against a JSON Schema,
+it is often desirable to report not only whether the instance is valid,
+but also the ways in which it violates the schema.
+
+The `SchemaValidator` class
+collects errors encountered during validation
+into a JSON `Value`.
+This error object can then be accessed as `validator.GetError()`.
+
+The structure of the error object is subject to change
+in future versions of RapidJSON,
+as there is no standard schema for violations.
+The details below this point are provisional only.
+
+## General provisions {#ReportingGeneral}
+
+Validation of an instance value against a schema
+produces an error value.
+The error value is always an object.
+An empty object `{}` indicates the instance is valid.
+
+* The name of each member
+  corresponds to the JSON Schema keyword that is violated.
+* The value is either an object describing a single violation,
+  or an array of such objects.
+
+Each violation object contains two string-valued members
+named `instanceRef` and `schemaRef`.
+`instanceRef` contains the URI fragment serialization
+of a JSON Pointer to the instance subobject
+in which the violation was detected.
+`schemaRef` contains the URI of the schema
+and the fragment serialization of a JSON Pointer
+to the subschema that was violated.
+
+Individual violation objects can contain other keyword-specific members.
+These are detailed below.
+
+For example, validating this instance:
+
+~~~json
+{"numbers": [1, 2, "3", 4, 5]}
+~~~
+
+against this schema:
+
+~~~json
+{
+  "type": "object",
+  "properties": {
+    "numbers": {"$ref": "numbers.schema.json"}
+  }
+}
+~~~
+
+where `numbers.schema.json` refers
+(via a suitable `IRemoteSchemaDocumentProvider`)
+to this schema:
+
+~~~json
+{
+  "type": "array",
+  "items": {"type": "number"}
+}
+~~~
+
+produces the following error object:
+
+~~~json
+{
+  "type": {
+    "instanceRef": "#/numbers/2",
+    "schemaRef": "numbers.schema.json#/items",
+    "expected": ["number"],
+    "actual": "string"
+  }
+}
+~~~
+
+## Validation keywords for numbers {#Numbers}
+
+### multipleOf {#multipleof}
+
+* `expected`: required number strictly greater than 0.
+  The value of the `multipleOf` keyword specified in the schema.
+* `actual`: required number.
+  The instance value.
+
+### maximum {#maximum}
+
+* `expected`: required number.
+  The value of the `maximum` keyword specified in the schema.
+* `exclusiveMaximum`: optional boolean.
+  This will be true if the schema specified `"exclusiveMaximum": true`,
+  and will be omitted otherwise.
+* `actual`: required number.
+  The instance value.
+
+### minimum {#minimum}
+
+* `expected`: required number.
+  The value of the `minimum` keyword specified in the schema.
+* `exclusiveMinimum`: optional boolean.
+  This will be true if the schema specified `"exclusiveMinimum": true`,
+  and will be omitted otherwise.
+* `actual`: required number.
+  The instance value.
+
+## Validation keywords for strings {#Strings}
+
+### maxLength {#maxLength}
+
+* `expected`: required number greater than or equal to 0.
+  The value of the `maxLength` keyword specified in the schema.
+* `actual`: required string.
+  The instance value.
+
+### minLength {#minLength}
+
+* `expected`: required number greater than or equal to 0.
+  The value of the `minLength` keyword specified in the schema.
+* `actual`: required string.
+  The instance value.
+
+### pattern {#pattern}
+
+* `actual`: required string.
+  The instance value.
+
+(The expected pattern is not reported
+because the internal representation in `SchemaDocument`
+does not store the pattern in original string form.)
+
+## Validation keywords for arrays {#Arrays}
+
+### additionalItems {#additionalItems}
+
+This keyword is reported
+when the value of `items` schema keyword is an array,
+the value of `additionalItems` is `false`,
+and the instance is an array
+with more items than specified in the `items` array.
+
+* `disallowed`: required integer greater than or equal to 0.
+  The index of the first item that has no corresponding schema.
+
+### maxItems and minItems {#maxItems-minItems}
+
+* `expected`: required integer greater than or equal to 0.
+  The value of `maxItems` (respectively, `minItems`)
+  specified in the schema.
+* `actual`: required integer greater than or equal to 0.
+  Number of items in the instance array.
+
+### uniqueItems {#uniqueItems}
+
+* `duplicates`: required array
+  whose items are integers greater than or equal to 0.
+  Indices of items of the instance that are equal.
+
+(RapidJSON only reports the first two equal items,
+for performance reasons.)
+
+## Validation keywords for objects {#Objects}
+
+### maxProperties and minProperties {#maxProperties-minProperties}
+
+* `expected`: required integer greater than or equal to 0.
+  The value of `maxProperties` (respectively, `minProperties`)
+  specified in the schema.
+* `actual`: required integer greater than or equal to 0.
+  Number of properties in the instance object.
+
+### required {#required}
+
+* `missing`: required array of one or more unique strings.
+  The names of properties
+  that are listed in the value of the `required` schema keyword
+  but not present in the instance object.
+
+### additionalProperties {#additionalProperties}
+
+This keyword is reported
+when the schema specifies `additionalProperties: false`
+and the name of a property of the instance is
+neither listed in the `properties` keyword
+nor matches any regular expression in the `patternProperties` keyword.
+
+* `disallowed`: required string.
+  Name of the offending property of the instance.
+
+(For performance reasons,
+RapidJSON only reports the first such property encountered.)
+
+### dependencies {#dependencies}
+
+* `errors`: required object with one or more properties.
+  Names and values of its properties are described below.
+
+Recall that JSON Schema Draft 04 supports
+*schema dependencies*,
+where presence of a named *controlling* property
+requires the instance object to be valid against a subschema,
+and *property dependencies*,
+where presence of a controlling property
+requires other *dependent* properties to be also present.
+
+For a violated schema dependency,
+`errors` will contain a property
+with the name of the controlling property
+and its value will be the error object
+produced by validating the instance object
+against the dependent schema.
+
+For a violated property dependency,
+`errors` will contain a property
+with the name of the controlling property
+and its value will be an array of one or more unique strings
+listing the missing dependent properties.
+
+## Validation keywords for any instance type {#AnyTypes}
+
+### enum {#enum}
+
+This keyword has no additional properties
+beyond `instanceRef` and `schemaRef`.
+
+* The allowed values are not listed
+  because `SchemaDocument` does not store them in original form.
+* The violating value is not reported
+  because it might be unwieldy.
+
+If you need to report these details to your users,
+you can access the necessary information
+by following `instanceRef` and `schemaRef`.
+
+### type {#type}
+
+* `expected`: required array of one or more unique strings,
+  each of which is one of the seven primitive types
+  defined by the JSON Schema Draft 04 Core specification.
+  Lists the types allowed by the `type` schema keyword.
+* `actual`: required string, also one of seven primitive types.
+  The primitive type of the instance.
+
+### allOf, anyOf, and oneOf {#allOf-anyOf-oneOf}
+
+* `errors`: required array of at least one object.
+  There will be as many items as there are subschemas
+  in the `allOf`, `anyOf` or `oneOf` schema keyword, respectively.
+  Each item will be the error value
+  produced by validating the instance
+  against the corresponding subschema.
+
+For `allOf`, at least one error value will be non-empty.
+For `anyOf`, all error values will be non-empty.
+For `oneOf`, either all error values will be non-empty,
+or more than one will be empty.
+
+### not {#not}
+
+This keyword has no additional properties
+apart from `instanceRef` and `schemaRef`.
diff --git a/doc/schema.zh-cn.md b/doc/schema.zh-cn.md
new file mode 100644
index 0000000..c85177f
--- /dev/null
+++ b/doc/schema.zh-cn.md
@@ -0,0 +1,237 @@
+# Schema
+
+(本功能于 v1.1.0 发布)
+
+JSON Schema 是描述 JSON 格式的一个标准草案。一个 schema 本身也是一个 JSON。使用 JSON Schema 去校验 JSON,可以让你的代码安全地访问 DOM,而无须检查类型或键值是否存在等。这也能确保输出的 JSON 是符合指定的 schema。
+
+RapidJSON 实现了一个 [JSON Schema Draft v4](http://json-schema.org/documentation.html) 的校验器。若你不熟悉 JSON Schema,可以参考 [Understanding JSON Schema](http://spacetelescope.github.io/understanding-json-schema/)。
+
+[TOC]
+
+# 基本用法 {#BasicUsage}
+
+首先,你要把 JSON Schema 解析成 `Document`,再把它编译成一个 `SchemaDocument`。
+
+然后,利用该 `SchemaDocument` 创建一个 `SchemaValidator`。它与 `Writer` 相似,都是能够处理 SAX 事件的。因此,你可以用 `document.Accept(validator)` 去校验一个 JSON,然后再获取校验结果。
+
+~~~cpp
+#include "rapidjson/schema.h"
+
+// ...
+
+Document sd;
+if (sd.Parse(schemaJson).HasParseError()) {
+    // 此 schema 不是合法的 JSON
+    // ...       
+}
+SchemaDocument schema(sd); // 把一个 Document 编译至 SchemaDocument
+// 之后不再需要 sd
+
+Document d;
+if (d.Parse(inputJson).HasParseError()) {
+    // 输入不是一个合法的 JSON
+    // ...       
+}
+
+SchemaValidator validator(schema);
+if (!d.Accept(validator)) {
+    // 输入的 JSON 不合乎 schema
+    // 打印诊断信息
+    StringBuffer sb;
+    validator.GetInvalidSchemaPointer().StringifyUriFragment(sb);
+    printf("Invalid schema: %s\n", sb.GetString());
+    printf("Invalid keyword: %s\n", validator.GetInvalidSchemaKeyword());
+    sb.Clear();
+    validator.GetInvalidDocumentPointer().StringifyUriFragment(sb);
+    printf("Invalid document: %s\n", sb.GetString());
+}
+~~~
+
+一些注意点:
+
+* 一个 `SchemaDocument` 能被多个 `SchemaValidator` 引用。它不会被 `SchemaValidator` 修改。
+* 可以重复使用一个 `SchemaValidator` 来校验多个文件。在校验其他文件前,须先调用 `validator.Reset()`。
+
+# 在解析/生成时进行校验 {#ParsingSerialization}
+
+与大部分 JSON Schema 校验器有所不同,RapidJSON 提供了一个基于 SAX 的 schema 校验器实现。因此,你可以在输入流解析 JSON 的同时进行校验。若校验器遇到一个与 schema 不符的值,就会立即终止解析。这设计对于解析大型 JSON 文件时特别有用。
+
+## DOM 解析 {#DomParsing}
+
+在使用 DOM 进行解析时,`Document` 除了接收 SAX 事件外,还需做一些准备及结束工作,因此,为了连接 `Reader`、`SchemaValidator` 和 `Document` 要做多一点事情。`SchemaValidatingReader` 是一个辅助类去做那些工作。
+
+~~~cpp
+#include "rapidjson/filereadstream.h"
+
+// ...
+SchemaDocument schema(sd); // 把一个 Document 编译至 SchemaDocument
+
+// 使用 reader 解析 JSON
+FILE* fp = fopen("big.json", "r");
+FileReadStream is(fp, buffer, sizeof(buffer));
+
+// 用 reader 解析 JSON,校验它的 SAX 事件,并存储至 d
+Document d;
+SchemaValidatingReader<kParseDefaultFlags, FileReadStream, UTF8<> > reader(is, schema);
+d.Populate(reader);
+
+if (!reader.GetParseResult()) {
+    // 不是一个合法的 JSON
+    // 当 reader.GetParseResult().Code() == kParseErrorTermination,
+    // 它可能是被以下原因中止:
+    // (1) 校验器发现 JSON 不合乎 schema;或
+    // (2) 输入流有 I/O 错误。
+
+    // 检查校验结果
+    if (!reader.IsValid()) {
+        // 输入的 JSON 不合乎 schema
+        // 打印诊断信息
+        StringBuffer sb;
+        reader.GetInvalidSchemaPointer().StringifyUriFragment(sb);
+        printf("Invalid schema: %s\n", sb.GetString());
+        printf("Invalid keyword: %s\n", reader.GetInvalidSchemaKeyword());
+        sb.Clear();
+        reader.GetInvalidDocumentPointer().StringifyUriFragment(sb);
+        printf("Invalid document: %s\n", sb.GetString());
+    }
+}
+~~~
+
+## SAX 解析 {#SaxParsing}
+
+使用 SAX 解析时,情况就简单得多。若只需要校验 JSON 而无需进一步处理,那么仅需要:
+
+~~~
+SchemaValidator validator(schema);
+Reader reader;
+if (!reader.Parse(stream, validator)) {
+    if (!validator.IsValid()) {
+        // ...    
+    }
+}
+~~~
+
+这种方式和 [schemavalidator](example/schemavalidator/schemavalidator.cpp) 例子完全相同。这带来的独特优势是,无论 JSON 多巨大,永远维持低内存用量(内存用量只与 Schema 的复杂度相关)。
+
+若你需要进一步处理 SAX 事件,便可使用模板类 `GenericSchemaValidator` 去设置校验器的输出 `Handler`:
+
+~~~
+MyHandler handler;
+GenericSchemaValidator<SchemaDocument, MyHandler> validator(schema, handler);
+Reader reader;
+if (!reader.Parse(ss, validator)) {
+    if (!validator.IsValid()) {
+        // ...    
+    }
+}
+~~~
+
+## 生成 {#Serialization}
+
+我们也可以在生成(serialization)的时候进行校验。这能确保输出的 JSON 符合一个 JSON Schema。
+
+~~~
+StringBuffer sb;
+Writer<StringBuffer> writer(sb);
+GenericSchemaValidator<SchemaDocument, Writer<StringBuffer> > validator(s, writer);
+if (!d.Accept(validator)) {
+    // Some problem during Accept(), it may be validation or encoding issues.
+    if (!validator.IsValid()) {
+        // ...
+    }
+}
+~~~
+
+当然,如果你的应用仅需要 SAX 风格的生成,那么只需要把 SAX 事件由原来发送到 `Writer`,改为发送到 `SchemaValidator`。
+
+# 远程 Schema {#RemoteSchema}
+
+JSON Schema 支持 [`$ref` 关键字](http://spacetelescope.github.io/understanding-json-schema/structuring.html),它是一个 [JSON pointer](doc/pointer.zh-cn.md) 引用至一个本地(local)或远程(remote) schema。本地指针的首字符是 `#`,而远程指针是一个相对或绝对 URI。例如:
+
+~~~js
+{ "$ref": "definitions.json#/address" }
+~~~
+
+由于 `SchemaDocument` 并不知道如何处理那些 URI,它需要使用者提供一个 `IRemoteSchemaDocumentProvider` 的实例去处理。
+
+~~~
+class MyRemoteSchemaDocumentProvider : public IRemoteSchemaDocumentProvider {
+public:
+    virtual const SchemaDocument* GetRemoteDocument(const char* uri, SizeType length) {
+        // Resolve the uri and returns a pointer to that schema.
+    }
+};
+
+// ...
+
+MyRemoteSchemaDocumentProvider provider;
+SchemaDocument schema(sd, &provider);
+~~~
+
+# 标准的符合程度 {#Conformance}
+
+RapidJSON 通过了 [JSON Schema Test Suite](https://github.com/json-schema/JSON-Schema-Test-Suite) (Json Schema draft 4) 中 263 个测试的 262 个。
+
+没通过的测试是 `refRemote.json` 中的 "change resolution scope" - "changed scope ref invalid"。这是由于未实现 `id` schema 关键字及 URI 合并功能。
+
+除此以外,关于字符串类型的 `format` schema 关键字也会被忽略,因为标准中并没需求必须实现。
+
+## 正则表达式 {#RegEx}
+
+`pattern` 及 `patternProperties` 这两个 schema 关键字使用了正则表达式去匹配所需的模式。
+
+RapidJSON 实现了一个简单的 NFA 正则表达式引擎,并预设使用。它支持以下语法。
+
+|语法|描述|
+|------|-----------|
+|`ab`    | 串联 |
+|<code>a&#124;b</code>   | 交替 |
+|`a?`    | 零或一次 |
+|`a*`    | 零或多次 |
+|`a+`    | 一或多次 |
+|`a{3}`  | 刚好 3 次 |
+|`a{3,}` | 至少 3 次 |
+|`a{3,5}`| 3 至 5 次 |
+|`(ab)`  | 分组 |
+|`^a`    | 在开始处 |
+|`a$`    | 在结束处 |
+|`.`     | 任何字符 |
+|`[abc]` | 字符组 |
+|`[a-c]` | 字符组范围 |
+|`[a-z0-9_]` | 字符组组合 |
+|`[^abc]` | 字符组取反 |
+|`[^a-c]` | 字符组范围取反 |
+|`[\b]`   | 退格符 (U+0008) |
+|<code>\\&#124;</code>, `\\`, ...  | 转义字符 |
+|`\f` | 馈页 (U+000C) |
+|`\n` | 馈行 (U+000A) |
+|`\r` | 回车 (U+000D) |
+|`\t` | 制表 (U+0009) |
+|`\v` | 垂直制表 (U+000B) |
+
+对于使用 C++11 编译器的使用者,也可使用 `std::regex`,只需定义 `RAPIDJSON_SCHEMA_USE_INTERNALREGEX=0` 及 `RAPIDJSON_SCHEMA_USE_STDREGEX=1`。若你的 schema 无需使用 `pattern` 或 `patternProperties`,可以把两个宏都设为零,以禁用此功能,这样做可节省一些代码体积。
+
+# 性能 {#Performance}
+
+大部分 C++ JSON 库都未支持 JSON Schema。因此我们尝试按照 [json-schema-benchmark](https://github.com/ebdrup/json-schema-benchmark) 去评估 RapidJSON 的 JSON Schema 校验器。该评测测试了 11 个运行在 node.js 上的 JavaScript 库。
+
+该评测校验 [JSON Schema Test Suite](https://github.com/json-schema/JSON-Schema-Test-Suite) 中的测试,当中排除了一些测试套件及个别测试。我们在 [`schematest.cpp`](test/perftest/schematest.cpp) 实现了相同的评测。
+
+在 MacBook Pro (2.8 GHz Intel Core i7) 上收集到以下结果。
+
+|校验器|相对速度|每秒执行的测试数目|
+|---------|:------------:|:----------------------------:|
+|RapidJSON|155%|30682|
+|[`ajv`](https://github.com/epoberezkin/ajv)|100%|19770 (± 1.31%)|
+|[`is-my-json-valid`](https://github.com/mafintosh/is-my-json-valid)|70%|13835 (± 2.84%)|
+|[`jsen`](https://github.com/bugventure/jsen)|57.7%|11411 (± 1.27%)|
+|[`schemasaurus`](https://github.com/AlexeyGrishin/schemasaurus)|26%|5145 (± 1.62%)|
+|[`themis`](https://github.com/playlyfe/themis)|19.9%|3935 (± 2.69%)|
+|[`z-schema`](https://github.com/zaggino/z-schema)|7%|1388 (± 0.84%)|
+|[`jsck`](https://github.com/pandastrike/jsck#readme)|3.1%|606 (± 2.84%)|
+|[`jsonschema`](https://github.com/tdegrunt/jsonschema#readme)|0.9%|185 (± 1.01%)|
+|[`skeemas`](https://github.com/Prestaul/skeemas#readme)|0.8%|154 (± 0.79%)|
+|tv4|0.5%|93 (± 0.94%)|
+|[`jayschema`](https://github.com/natesilva/jayschema)|0.1%|21 (± 1.14%)|
+
+换言之,RapidJSON 比最快的 JavaScript 库(ajv)快约 1.5x。比最慢的快 1400x。
diff --git a/doc/stream.md b/doc/stream.md
new file mode 100644
index 0000000..d95de14
--- /dev/null
+++ b/doc/stream.md
@@ -0,0 +1,429 @@
+# Stream
+
+In RapidJSON, `rapidjson::Stream` is a concept for reading/writing JSON. Here we first show how to use streams provided. And then see how to create a custom stream.
+
+[TOC]
+
+# Memory Streams {#MemoryStreams}
+
+Memory streams store JSON in memory.
+
+## StringStream (Input) {#StringStream}
+
+`StringStream` is the most basic input stream. It represents a complete, read-only JSON stored in memory. It is defined in `rapidjson/rapidjson.h`.
+
+~~~~~~~~~~cpp
+#include "rapidjson/document.h" // will include "rapidjson/rapidjson.h"
+
+using namespace rapidjson;
+
+// ...
+const char json[] = "[1, 2, 3, 4]";
+StringStream s(json);
+
+Document d;
+d.ParseStream(s);
+~~~~~~~~~~
+
+Since this is very common usage, `Document::Parse(const char*)` is provided to do exactly the same as above:
+
+~~~~~~~~~~cpp
+// ...
+const char json[] = "[1, 2, 3, 4]";
+Document d;
+d.Parse(json);
+~~~~~~~~~~
+
+Note that, `StringStream` is a typedef of `GenericStringStream<UTF8<> >`, so users may use other encodings to represent the character set of the stream.
+
+## StringBuffer (Output) {#StringBuffer}
+
+`StringBuffer` is a simple output stream. It allocates a memory buffer for writing the whole JSON. Use `GetString()` to obtain the buffer.
+
+~~~~~~~~~~cpp
+#include "rapidjson/stringbuffer.h"
+#include <rapidjson/writer.h>
+
+StringBuffer buffer;
+Writer<StringBuffer> writer(buffer);
+d.Accept(writer);
+
+const char* output = buffer.GetString();
+~~~~~~~~~~
+
+When the buffer is full, it will increase the capacity automatically. The default capacity is 256 characters (256 bytes for UTF8, 512 bytes for UTF16, etc.). User can provide an allocator and an initial capacity.
+
+~~~~~~~~~~cpp
+StringBuffer buffer1(0, 1024); // Use its allocator, initial size = 1024
+StringBuffer buffer2(allocator, 1024);
+~~~~~~~~~~
+
+By default, `StringBuffer` will instantiate an internal allocator.
+
+Similarly, `StringBuffer` is a typedef of `GenericStringBuffer<UTF8<> >`.
+
+# File Streams {#FileStreams}
+
+When parsing a JSON from file, you may read the whole JSON into memory and use ``StringStream`` above.
+
+However, if the JSON is big, or memory is limited, you can use `FileReadStream`. It only reads a part of the JSON from file into a buffer, and then lets that part be parsed. If it runs out of characters in the buffer, it will read the next part from file.
+
+## FileReadStream (Input) {#FileReadStream}
+
+`FileReadStream` reads the file via a `FILE` pointer. And the user needs to provide a buffer.
+
+~~~~~~~~~~cpp
+#include "rapidjson/filereadstream.h"
+#include <cstdio>
+
+using namespace rapidjson;
+
+FILE* fp = fopen("big.json", "rb"); // non-Windows use "r"
+
+char readBuffer[65536];
+FileReadStream is(fp, readBuffer, sizeof(readBuffer));
+
+Document d;
+d.ParseStream(is);
+
+fclose(fp);
+~~~~~~~~~~
+
+Different from string streams, `FileReadStream` is a byte stream. It does not handle encodings. If the file is not UTF-8, the byte stream can be wrapped in an `EncodedInputStream`. It will be discussed very soon.
+
+Apart from reading file, user can also use `FileReadStream` to read `stdin`.
+
+## FileWriteStream (Output) {#FileWriteStream}
+
+`FileWriteStream` is buffered output stream. Its usage is very similar to `FileReadStream`.
+
+~~~~~~~~~~cpp
+#include "rapidjson/filewritestream.h"
+#include <rapidjson/writer.h>
+#include <cstdio>
+
+using namespace rapidjson;
+
+Document d;
+d.Parse(json);
+// ...
+
+FILE* fp = fopen("output.json", "wb"); // non-Windows use "w"
+
+char writeBuffer[65536];
+FileWriteStream os(fp, writeBuffer, sizeof(writeBuffer));
+
+Writer<FileWriteStream> writer(os);
+d.Accept(writer);
+
+fclose(fp);
+~~~~~~~~~~
+
+It can also direct the output to `stdout`.
+
+# iostream Wrapper {#iostreamWrapper}
+
+Due to users' requests, RapidJSON provided official wrappers for `std::basic_istream` and `std::basic_ostream`. However, please note that the performance will be much lower than the other streams above.
+
+## IStreamWrapper {#IStreamWrapper}
+
+`IStreamWrapper` wraps any class derived from `std::istream`, such as `std::istringstream`, `std::stringstream`, `std::ifstream`, `std::fstream`, into RapidJSON's input stream.
+
+~~~cpp
+#include <rapidjson/document.h>
+#include <rapidjson/istreamwrapper.h>
+#include <fstream>
+
+using namespace rapidjson;
+using namespace std;
+
+ifstream ifs("test.json");
+IStreamWrapper isw(ifs);
+
+Document d;
+d.ParseStream(isw);
+~~~
+
+For classes derived from `std::wistream`, use `WIStreamWrapper`.
+
+## OStreamWrapper {#OStreamWrapper}
+
+Similarly, `OStreamWrapper` wraps any class derived from `std::ostream`, such as `std::ostringstream`, `std::stringstream`, `std::ofstream`, `std::fstream`, into RapidJSON's output stream.
+
+~~~cpp
+#include <rapidjson/document.h>
+#include <rapidjson/ostreamwrapper.h>
+#include <rapidjson/writer.h>
+#include <fstream>
+
+using namespace rapidjson;
+using namespace std;
+
+Document d;
+d.Parse(json);
+
+// ...
+
+ofstream ofs("output.json");
+OStreamWrapper osw(ofs);
+
+Writer<OStreamWrapper> writer(osw);
+d.Accept(writer);
+~~~
+
+For classes derived from `std::wostream`, use `WOStreamWrapper`.
+
+# Encoded Streams {#EncodedStreams}
+
+Encoded streams do not contain JSON itself, but they wrap byte streams to provide basic encoding/decoding function.
+
+As mentioned above, UTF-8 byte streams can be read directly. However, UTF-16 and UTF-32 have endian issue. To handle endian correctly, it needs to convert bytes into characters (e.g. `wchar_t` for UTF-16) while reading, and characters into bytes while writing.
+
+Besides, it also need to handle [byte order mark (BOM)](http://en.wikipedia.org/wiki/Byte_order_mark). When reading from a byte stream, it is needed to detect or just consume the BOM if exists. When writing to a byte stream, it can optionally write BOM.
+
+If the encoding of stream is known in compile-time, you may use `EncodedInputStream` and `EncodedOutputStream`. If the stream can be UTF-8, UTF-16LE, UTF-16BE, UTF-32LE, UTF-32BE JSON, and it is only known in runtime, you may use `AutoUTFInputStream` and `AutoUTFOutputStream`. These streams are defined in `rapidjson/encodedstream.h`.
+
+Note that, these encoded streams can be applied to streams other than file. For example, you may have a file in memory, or a custom byte stream, be wrapped in encoded streams.
+
+## EncodedInputStream {#EncodedInputStream}
+
+`EncodedInputStream` has two template parameters. The first one is an `Encoding` class, such as `UTF8`, `UTF16LE`, defined in `rapidjson/encodings.h`. The second one is the class of stream to be wrapped.
+
+~~~~~~~~~~cpp
+#include "rapidjson/document.h"
+#include "rapidjson/filereadstream.h"   // FileReadStream
+#include "rapidjson/encodedstream.h"    // EncodedInputStream
+#include <cstdio>
+
+using namespace rapidjson;
+
+FILE* fp = fopen("utf16le.json", "rb"); // non-Windows use "r"
+
+char readBuffer[256];
+FileReadStream bis(fp, readBuffer, sizeof(readBuffer));
+
+EncodedInputStream<UTF16LE<>, FileReadStream> eis(bis);  // wraps bis into eis
+
+Document d; // Document is GenericDocument<UTF8<> > 
+d.ParseStream<0, UTF16LE<> >(eis);  // Parses UTF-16LE file into UTF-8 in memory
+
+fclose(fp);
+~~~~~~~~~~
+
+## EncodedOutputStream {#EncodedOutputStream}
+
+`EncodedOutputStream` is similar but it has a `bool putBOM` parameter in the constructor, controlling whether to write BOM into output byte stream.
+
+~~~~~~~~~~cpp
+#include "rapidjson/filewritestream.h"  // FileWriteStream
+#include "rapidjson/encodedstream.h"    // EncodedOutputStream
+#include <rapidjson/writer.h>
+#include <cstdio>
+
+Document d;         // Document is GenericDocument<UTF8<> > 
+// ...
+
+FILE* fp = fopen("output_utf32le.json", "wb"); // non-Windows use "w"
+
+char writeBuffer[256];
+FileWriteStream bos(fp, writeBuffer, sizeof(writeBuffer));
+
+typedef EncodedOutputStream<UTF32LE<>, FileWriteStream> OutputStream;
+OutputStream eos(bos, true);   // Write BOM
+
+Writer<OutputStream, UTF8<>, UTF32LE<> > writer(eos);
+d.Accept(writer);   // This generates UTF32-LE file from UTF-8 in memory
+
+fclose(fp);
+~~~~~~~~~~
+
+## AutoUTFInputStream {#AutoUTFInputStream}
+
+Sometimes an application may want to handle all supported JSON encodings. `AutoUTFInputStream` will detect the encoding by BOM first. If a BOM is unavailable, it will use characteristics of valid JSON to make the detection. If neither method succeeds, it falls back to the UTF type provided in the constructor.
+
+Since the characters (code units) may be 8-bit, 16-bit or 32-bit, `AutoUTFInputStream` requires a character type which can hold at least 32 bits. We may use `unsigned`, as in the template parameter:
+
+~~~~~~~~~~cpp
+#include "rapidjson/document.h"
+#include "rapidjson/filereadstream.h"   // FileReadStream
+#include "rapidjson/encodedstream.h"    // AutoUTFInputStream
+#include <cstdio>
+
+using namespace rapidjson;
+
+FILE* fp = fopen("any.json", "rb"); // non-Windows use "r"
+
+char readBuffer[256];
+FileReadStream bis(fp, readBuffer, sizeof(readBuffer));
+
+AutoUTFInputStream<unsigned, FileReadStream> eis(bis);  // wraps bis into eis
+
+Document d;         // Document is GenericDocument<UTF8<> > 
+d.ParseStream<0, AutoUTF<unsigned> >(eis); // This parses any UTF file into UTF-8 in memory
+
+fclose(fp);
+~~~~~~~~~~
+
+When specifying the encoding of the stream, use `AutoUTF<CharType>` as in `ParseStream()` above.
+
+You can obtain the type of UTF via `UTFType GetType()`. And check whether a BOM is found by `HasBOM()`.
+
+## AutoUTFOutputStream {#AutoUTFOutputStream}
+
+Similarly, to choose encoding for output during runtime, we can use `AutoUTFOutputStream`. This class is not automatic *per se*. You need to specify the UTF type and whether to write BOM in runtime.
+
+~~~~~~~~~~cpp
+using namespace rapidjson;
+
+void WriteJSONFile(FILE* fp, UTFType type, bool putBOM, const Document& d) {
+    char writeBuffer[256];
+    FileWriteStream bos(fp, writeBuffer, sizeof(writeBuffer));
+
+    typedef AutoUTFOutputStream<unsigned, FileWriteStream> OutputStream;
+    OutputStream eos(bos, type, putBOM);
+    
+    Writer<OutputStream, UTF8<>, AutoUTF<> > writer(eos);
+    d.Accept(writer);
+}
+~~~~~~~~~~
+
+`AutoUTFInputStream` and `AutoUTFOutputStream` are more convenient than `EncodedInputStream` and `EncodedOutputStream`. They just incur a little bit of runtime overhead.
+
+# Custom Stream {#CustomStream}
+
+In addition to memory/file streams, user can create their own stream classes which fits RapidJSON's API. For example, you may create network stream, stream from compressed file, etc.
+
+RapidJSON combines different types using templates. A class containing all required interface can be a stream. The Stream interface is defined in comments of `rapidjson/rapidjson.h`:
+
+~~~~~~~~~~cpp
+concept Stream {
+    typename Ch;    //!< Character type of the stream.
+
+    //! Read the current character from stream without moving the read cursor.
+    Ch Peek() const;
+
+    //! Read the current character from stream and moving the read cursor to next character.
+    Ch Take();
+
+    //! Get the current read cursor.
+    //! \return Number of characters read from start.
+    size_t Tell();
+
+    //! Begin writing operation at the current read pointer.
+    //! \return The begin writer pointer.
+    Ch* PutBegin();
+
+    //! Write a character.
+    void Put(Ch c);
+
+    //! Flush the buffer.
+    void Flush();
+
+    //! End the writing operation.
+    //! \param begin The begin write pointer returned by PutBegin().
+    //! \return Number of characters written.
+    size_t PutEnd(Ch* begin);
+}
+~~~~~~~~~~
+
+For input stream, they must implement `Peek()`, `Take()` and `Tell()`.
+For output stream, they must implement `Put()` and `Flush()`. 
+There are two special interfaces, `PutBegin()` and `PutEnd()`, which are only for *in situ* parsing. Normal streams do not implement them. However, even if the interface is not needed for a particular stream, a dummy implementation is still required, otherwise a compilation error will be generated.
+
+## Example: istream wrapper {#ExampleIStreamWrapper}
+
+The following example is a simple wrapper of `std::istream`, which only implements 3 functions.
+
+~~~~~~~~~~cpp
+class MyIStreamWrapper {
+public:
+    typedef char Ch;
+
+    MyIStreamWrapper(std::istream& is) : is_(is) {
+    }
+
+    Ch Peek() const { // 1
+        int c = is_.peek();
+        return c == std::char_traits<char>::eof() ? '\0' : (Ch)c;
+    }
+
+    Ch Take() { // 2
+        int c = is_.get();
+        return c == std::char_traits<char>::eof() ? '\0' : (Ch)c;
+    }
+
+    size_t Tell() const { return (size_t)is_.tellg(); } // 3
+
+    Ch* PutBegin() { assert(false); return 0; }
+    void Put(Ch) { assert(false); }
+    void Flush() { assert(false); }
+    size_t PutEnd(Ch*) { assert(false); return 0; }
+
+private:
+    MyIStreamWrapper(const MyIStreamWrapper&);
+    MyIStreamWrapper& operator=(const MyIStreamWrapper&);
+
+    std::istream& is_;
+};
+~~~~~~~~~~
+
+User can use it to wrap instances of `std::stringstream`, `std::ifstream`.
+
+~~~~~~~~~~cpp
+const char* json = "[1,2,3,4]";
+std::stringstream ss(json);
+MyIStreamWrapper is(ss);
+
+Document d;
+d.ParseStream(is);
+~~~~~~~~~~
+
+Note that, this implementation may not be as efficient as RapidJSON's memory or file streams, due to internal overheads of the standard library.
+
+## Example: ostream wrapper {#ExampleOStreamWrapper}
+
+The following example is a simple wrapper of `std::ostream`, which only implements 2 functions.
+
+~~~~~~~~~~cpp
+class MyOStreamWrapper {
+public:
+    typedef char Ch;
+
+    MyOStreamWrapper(std::ostream& os) : os_(os) {
+    }
+
+    Ch Peek() const { assert(false); return '\0'; }
+    Ch Take() { assert(false); return '\0'; }
+    size_t Tell() const { assert(false); return 0; }
+
+    Ch* PutBegin() { assert(false); return 0; }
+    void Put(Ch c) { os_.put(c); }                  // 1
+    void Flush() { os_.flush(); }                   // 2
+    size_t PutEnd(Ch*) { assert(false); return 0; }
+
+private:
+    MyOStreamWrapper(const MyOStreamWrapper&);
+    MyOStreamWrapper& operator=(const MyOStreamWrapper&);
+
+    std::ostream& os_;
+};
+~~~~~~~~~~
+
+User can use it to wrap instances of `std::stringstream`, `std::ofstream`.
+
+~~~~~~~~~~cpp
+Document d;
+// ...
+
+std::stringstream ss;
+MyOStreamWrapper os(ss);
+
+Writer<MyOStreamWrapper> writer(os);
+d.Accept(writer);
+~~~~~~~~~~
+
+Note that, this implementation may not be as efficient as RapidJSON's memory or file streams, due to internal overheads of the standard library.
+
+# Summary {#Summary}
+
+This section describes stream classes available in RapidJSON. Memory streams are simple. File stream can reduce the memory required during JSON parsing and generation, if the JSON is stored in file system. Encoded streams converts between byte streams and character streams. Finally, user may create custom streams using a simple interface.
diff --git a/doc/stream.zh-cn.md b/doc/stream.zh-cn.md
new file mode 100644
index 0000000..7f2f356
--- /dev/null
+++ b/doc/stream.zh-cn.md
@@ -0,0 +1,429 @@
+# 流
+
+在 RapidJSON 中,`rapidjson::Stream` 是用於读写 JSON 的概念(概念是指 C++ 的 concept)。在这里我们先介绍如何使用 RapidJSON 提供的各种流。然后再看看如何自行定义流。
+
+[TOC]
+
+# 内存流 {#MemoryStreams}
+
+内存流把 JSON 存储在内存之中。
+
+## StringStream(输入){#StringStream}
+
+`StringStream` 是最基本的输入流,它表示一个完整的、只读的、存储于内存的 JSON。它在 `rapidjson/rapidjson.h` 中定义。
+
+~~~~~~~~~~cpp
+#include "rapidjson/document.h" // 会包含 "rapidjson/rapidjson.h"
+
+using namespace rapidjson;
+
+// ...
+const char json[] = "[1, 2, 3, 4]";
+StringStream s(json);
+
+Document d;
+d.ParseStream(s);
+~~~~~~~~~~
+
+由于这是非常常用的用法,RapidJSON 提供 `Document::Parse(const char*)` 去做完全相同的事情:
+
+~~~~~~~~~~cpp
+// ...
+const char json[] = "[1, 2, 3, 4]";
+Document d;
+d.Parse(json);
+~~~~~~~~~~
+
+需要注意,`StringStream` 是 `GenericStringStream<UTF8<> >` 的 typedef,使用者可用其他编码类去代表流所使用的字符集。
+
+## StringBuffer(输出){#StringBuffer}
+
+`StringBuffer` 是一个简单的输出流。它分配一个内存缓冲区,供写入整个 JSON。可使用 `GetString()` 来获取该缓冲区。
+
+~~~~~~~~~~cpp
+#include "rapidjson/stringbuffer.h"
+#include <rapidjson/writer.h>
+
+StringBuffer buffer;
+Writer<StringBuffer> writer(buffer);
+d.Accept(writer);
+
+const char* output = buffer.GetString();
+~~~~~~~~~~
+
+当缓冲区满溢,它将自动增加容量。缺省容量是 256 个字符(UTF8 是 256 字节,UTF16 是 512 字节等)。使用者能自行提供分配器及初始容量。
+
+~~~~~~~~~~cpp
+StringBuffer buffer1(0, 1024); // 使用它的分配器,初始大小 = 1024
+StringBuffer buffer2(allocator, 1024);
+~~~~~~~~~~
+
+如无设置分配器,`StringBuffer` 会自行实例化一个内部分配器。
+
+相似地,`StringBuffer` 是 `GenericStringBuffer<UTF8<> >` 的 typedef。
+
+# 文件流 {#FileStreams}
+
+当要从文件解析一个 JSON,你可以把整个 JSON 读入内存并使用上述的 `StringStream`。
+
+然而,若 JSON 很大,或是内存有限,你可以改用 `FileReadStream`。它只会从文件读取一部分至缓冲区,然后让那部分被解析。若缓冲区的字符都被读完,它会再从文件读取下一部分。
+
+## FileReadStream(输入) {#FileReadStream}
+
+`FileReadStream` 通过 `FILE` 指针读取文件。使用者需要提供一个缓冲区。
+
+~~~~~~~~~~cpp
+#include "rapidjson/filereadstream.h"
+#include <cstdio>
+
+using namespace rapidjson;
+
+FILE* fp = fopen("big.json", "rb"); // 非 Windows 平台使用 "r"
+
+char readBuffer[65536];
+FileReadStream is(fp, readBuffer, sizeof(readBuffer));
+
+Document d;
+d.ParseStream(is);
+
+fclose(fp);
+~~~~~~~~~~
+
+与 `StringStreams` 不一样,`FileReadStream` 是一个字节流。它不处理编码。若文件并非 UTF-8 编码,可以把字节流用 `EncodedInputStream` 包装。我们很快会讨论这个问题。
+
+除了读取文件,使用者也可以使用 `FileReadStream` 来读取 `stdin`。
+
+## FileWriteStream(输出){#FileWriteStream}
+
+`FileWriteStream` 是一个含缓冲功能的输出流。它的用法与 `FileReadStream` 非常相似。
+
+~~~~~~~~~~cpp
+#include "rapidjson/filewritestream.h"
+#include <rapidjson/writer.h>
+#include <cstdio>
+
+using namespace rapidjson;
+
+Document d;
+d.Parse(json);
+// ...
+
+FILE* fp = fopen("output.json", "wb"); // 非 Windows 平台使用 "w"
+
+char writeBuffer[65536];
+FileWriteStream os(fp, writeBuffer, sizeof(writeBuffer));
+
+Writer<FileWriteStream> writer(os);
+d.Accept(writer);
+
+fclose(fp);
+~~~~~~~~~~
+
+它也可以把输出导向 `stdout`。
+
+# iostream 包装类 {#iostreamWrapper}
+
+基于用户的要求,RapidJSON 提供了正式的 `std::basic_istream` 和 `std::basic_ostream` 包装类。然而,请注意其性能会大大低于以上的其他流。
+
+## IStreamWrapper {#IStreamWrapper}
+
+`IStreamWrapper` 把任何继承自 `std::istream` 的类(如 `std::istringstream`、`std::stringstream`、`std::ifstream`、`std::fstream`)包装成 RapidJSON 的输入流。
+
+~~~cpp
+#include <rapidjson/document.h>
+#include <rapidjson/istreamwrapper.h>
+#include <fstream>
+
+using namespace rapidjson;
+using namespace std;
+
+ifstream ifs("test.json");
+IStreamWrapper isw(ifs);
+
+Document d;
+d.ParseStream(isw);
+~~~
+
+对于继承自 `std::wistream` 的类,则使用 `WIStreamWrapper`。
+
+## OStreamWrapper {#OStreamWrapper}
+
+相似地,`OStreamWrapper` 把任何继承自 `std::ostream` 的类(如 `std::ostringstream`、`std::stringstream`、`std::ofstream`、`std::fstream`)包装成 RapidJSON 的输出流。
+
+~~~cpp
+#include <rapidjson/document.h>
+#include <rapidjson/ostreamwrapper.h>
+#include <rapidjson/writer.h>
+#include <fstream>
+
+using namespace rapidjson;
+using namespace std;
+
+Document d;
+d.Parse(json);
+
+// ...
+
+ofstream ofs("output.json");
+OStreamWrapper osw(ofs);
+
+Writer<OStreamWrapper> writer(osw);
+d.Accept(writer);
+~~~
+
+对于继承自 `std::wostream` 的类,则使用 `WOStreamWrapper`。
+
+# 编码流 {#EncodedStreams}
+
+编码流(encoded streams)本身不存储 JSON,它们是通过包装字节流来提供基本的编码/解码功能。
+
+如上所述,我们可以直接读入 UTF-8 字节流。然而,UTF-16 及 UTF-32 有字节序(endian)问题。要正确地处理字节序,需要在读取时把字节转换成字符(如对 UTF-16 使用 `wchar_t`),以及在写入时把字符转换为字节。
+
+除此以外,我们也需要处理 [字节顺序标记(byte order mark, BOM)](http://en.wikipedia.org/wiki/Byte_order_mark)。当从一个字节流读取时,需要检测 BOM,或者仅仅是把存在的 BOM 消去。当把 JSON 写入字节流时,也可选择写入 BOM。
+
+若一个流的编码在编译期已知,你可使用 `EncodedInputStream` 及 `EncodedOutputStream`。若一个流可能存储 UTF-8、UTF-16LE、UTF-16BE、UTF-32LE、UTF-32BE 的 JSON,并且编码只能在运行时得知,你便可以使用 `AutoUTFInputStream` 及 `AutoUTFOutputStream`。这些流定义在 `rapidjson/encodedstream.h`。
+
+注意到,这些编码流可以施于文件以外的流。例如,你可以用编码流包装内存中的文件或自定义的字节流。
+
+## EncodedInputStream {#EncodedInputStream}
+
+`EncodedInputStream` 含两个模板参数。第一个是 `Encoding` 类型,例如定义于 `rapidjson/encodings.h` 的 `UTF8`、`UTF16LE`。第二个参数是被包装的流的类型。
+
+~~~~~~~~~~cpp
+#include "rapidjson/document.h"
+#include "rapidjson/filereadstream.h"   // FileReadStream
+#include "rapidjson/encodedstream.h"    // EncodedInputStream
+#include <cstdio>
+
+using namespace rapidjson;
+
+FILE* fp = fopen("utf16le.json", "rb"); // 非 Windows 平台使用 "r"
+
+char readBuffer[256];
+FileReadStream bis(fp, readBuffer, sizeof(readBuffer));
+
+EncodedInputStream<UTF16LE<>, FileReadStream> eis(bis);  // 用 eis 包装 bis
+
+Document d; // Document 为 GenericDocument<UTF8<> > 
+d.ParseStream<0, UTF16LE<> >(eis);  // 把 UTF-16LE 文件解析至内存中的 UTF-8
+
+fclose(fp);
+~~~~~~~~~~
+
+## EncodedOutputStream {#EncodedOutputStream}
+
+`EncodedOutputStream` 也是相似的,但它的构造函数有一个 `bool putBOM` 参数,用于控制是否在输出字节流写入 BOM。
+
+~~~~~~~~~~cpp
+#include "rapidjson/filewritestream.h"  // FileWriteStream
+#include "rapidjson/encodedstream.h"    // EncodedOutputStream
+#include <rapidjson/writer.h>
+#include <cstdio>
+
+Document d;         // Document 为 GenericDocument<UTF8<> > 
+// ...
+
+FILE* fp = fopen("output_utf32le.json", "wb"); // 非 Windows 平台使用 "w"
+
+char writeBuffer[256];
+FileWriteStream bos(fp, writeBuffer, sizeof(writeBuffer));
+
+typedef EncodedOutputStream<UTF32LE<>, FileWriteStream> OutputStream;
+OutputStream eos(bos, true);   // 写入 BOM
+
+Writer<OutputStream, UTF32LE<>, UTF8<>> writer(eos);
+d.Accept(writer);   // 这里从内存的 UTF-8 生成 UTF32-LE 文件
+
+fclose(fp);
+~~~~~~~~~~
+
+## AutoUTFInputStream {#AutoUTFInputStream}
+
+有时候,应用软件可能需要处理所有可支持的 JSON 编码。`AutoUTFInputStream` 会先使用 BOM 来检测编码。若 BOM 不存在,它便会使用合法 JSON 的特性来检测。若两种方法都失败,它就会倒退至构造函数提供的 UTF 类型。
+
+由于字符(编码单元/code unit)可能是 8 位、16 位或 32 位,`AutoUTFInputStream` 需要一个能至少储存 32 位的字符类型。我们可以使用 `unsigned` 作为模板参数:
+
+~~~~~~~~~~cpp
+#include "rapidjson/document.h"
+#include "rapidjson/filereadstream.h"   // FileReadStream
+#include "rapidjson/encodedstream.h"    // AutoUTFInputStream
+#include <cstdio>
+
+using namespace rapidjson;
+
+FILE* fp = fopen("any.json", "rb"); // 非 Windows 平台使用 "r"
+
+char readBuffer[256];
+FileReadStream bis(fp, readBuffer, sizeof(readBuffer));
+
+AutoUTFInputStream<unsigned, FileReadStream> eis(bis);  // 用 eis 包装 bis
+
+Document d;         // Document 为 GenericDocument<UTF8<> > 
+d.ParseStream<0, AutoUTF<unsigned> >(eis); // 把任何 UTF 编码的文件解析至内存中的 UTF-8
+
+fclose(fp);
+~~~~~~~~~~
+
+当要指定流的编码,可使用上面例子中 `ParseStream()` 的参数 `AutoUTF<CharType>`。
+
+你可以使用 `UTFType GetType()` 去获取 UTF 类型,并且用 `HasBOM()` 检测输入流是否含有 BOM。
+
+## AutoUTFOutputStream {#AutoUTFOutputStream}
+
+相似地,要在运行时选择输出的编码,我们可使用 `AutoUTFOutputStream`。这个类本身并非「自动」。你需要在运行时指定 UTF 类型,以及是否写入 BOM。
+
+~~~~~~~~~~cpp
+using namespace rapidjson;
+
+void WriteJSONFile(FILE* fp, UTFType type, bool putBOM, const Document& d) {
+    char writeBuffer[256];
+    FileWriteStream bos(fp, writeBuffer, sizeof(writeBuffer));
+
+    typedef AutoUTFOutputStream<unsigned, FileWriteStream> OutputStream;
+    OutputStream eos(bos, type, putBOM);
+    
+    Writer<OutputStream, UTF8<>, AutoUTF<> > writer(eos);
+    d.Accept(writer);
+}
+~~~~~~~~~~
+
+`AutoUTFInputStream`/`AutoUTFOutputStream` 是比 `EncodedInputStream`/`EncodedOutputStream` 方便。但前者会产生一点运行期额外开销。
+
+# 自定义流 {#CustomStream}
+
+除了内存/文件流,使用者可创建自行定义适配 RapidJSON API 的流类。例如,你可以创建网络流、从压缩文件读取的流等等。
+
+RapidJSON 利用模板结合不同的类型。只要一个类包含所有所需的接口,就可以作为一个流。流的接口定义在 `rapidjson/rapidjson.h` 的注释里:
+
+~~~~~~~~~~cpp
+concept Stream {
+    typename Ch;    //!< 流的字符类型
+
+    //! 从流读取当前字符,不移动读取指针(read cursor)
+    Ch Peek() const;
+
+    //! 从流读取当前字符,移动读取指针至下一字符。
+    Ch Take();
+
+    //! 获取读取指针。
+    //! \return 从开始以来所读过的字符数量。
+    size_t Tell();
+
+    //! 从当前读取指针开始写入操作。
+    //! \return 返回开始写入的指针。
+    Ch* PutBegin();
+
+    //! 写入一个字符。
+    void Put(Ch c);
+
+    //! 清空缓冲区。
+    void Flush();
+
+    //! 完成写入操作。
+    //! \param begin PutBegin() 返回的开始写入指针。
+    //! \return 已写入的字符数量。
+    size_t PutEnd(Ch* begin);
+}
+~~~~~~~~~~
+
+输入流必须实现 `Peek()`、`Take()` 及 `Tell()`。
+输出流必须实现 `Put()` 及 `Flush()`。
+`PutBegin()` 及 `PutEnd()` 是特殊的接口,仅用于原位(*in situ*)解析。一般的流不需实现它们。然而,即使接口不需用于某些流,仍然需要提供空实现,否则会产生编译错误。
+
+## 例子:istream 的包装类 {#ExampleIStreamWrapper}
+
+以下的简单例子是 `std::istream` 的包装类,它只需实现 3 个函数。
+
+~~~~~~~~~~cpp
+class MyIStreamWrapper {
+public:
+    typedef char Ch;
+
+    MyIStreamWrapper(std::istream& is) : is_(is) {
+    }
+
+    Ch Peek() const { // 1
+        int c = is_.peek();
+        return c == std::char_traits<char>::eof() ? '\0' : (Ch)c;
+    }
+
+    Ch Take() { // 2
+        int c = is_.get();
+        return c == std::char_traits<char>::eof() ? '\0' : (Ch)c;
+    }
+
+    size_t Tell() const { return (size_t)is_.tellg(); } // 3
+
+    Ch* PutBegin() { assert(false); return 0; }
+    void Put(Ch) { assert(false); }
+    void Flush() { assert(false); }
+    size_t PutEnd(Ch*) { assert(false); return 0; }
+
+private:
+    MyIStreamWrapper(const MyIStreamWrapper&);
+    MyIStreamWrapper& operator=(const MyIStreamWrapper&);
+
+    std::istream& is_;
+};
+~~~~~~~~~~
+
+使用者能用它来包装 `std::stringstream`、`std::ifstream` 的实例。
+
+~~~~~~~~~~cpp
+const char* json = "[1,2,3,4]";
+std::stringstream ss(json);
+MyIStreamWrapper is(ss);
+
+Document d;
+d.ParseStream(is);
+~~~~~~~~~~
+
+但要注意,由于标准库的内部开销问题,此实现的性能可能不如 RapidJSON 的内存/文件流。
+
+## 例子:ostream 的包装类 {#ExampleOStreamWrapper}
+
+以下的例子是 `std::ostream` 的包装类,它只需实现 2 个函数。
+
+~~~~~~~~~~cpp
+class MyOStreamWrapper {
+public:
+    typedef char Ch;
+
+    MyOStreamWrapper(std::ostream& os) : os_(os) {
+    }
+
+    Ch Peek() const { assert(false); return '\0'; }
+    Ch Take() { assert(false); return '\0'; }
+    size_t Tell() const { assert(false); return 0; }
+
+    Ch* PutBegin() { assert(false); return 0; }
+    void Put(Ch c) { os_.put(c); }                  // 1
+    void Flush() { os_.flush(); }                   // 2
+    size_t PutEnd(Ch*) { assert(false); return 0; }
+
+private:
+    MyOStreamWrapper(const MyOStreamWrapper&);
+    MyOStreamWrapper& operator=(const MyOStreamWrapper&);
+
+    std::ostream& os_;
+};
+~~~~~~~~~~
+
+使用者能用它来包装 `std::stringstream`、`std::ofstream` 的实例。
+
+~~~~~~~~~~cpp
+Document d;
+// ...
+
+std::stringstream ss;
+MyOStreamWrapper os(ss);
+
+Writer<MyOStreamWrapper> writer(os);
+d.Accept(writer);
+~~~~~~~~~~
+
+但要注意,由于标准库的内部开销问题,此实现的性能可能不如 RapidJSON 的内存/文件流。
+
+# 总结 {#Summary}
+
+本节描述了 RapidJSON 提供的各种流的类。内存流很简单。若 JSON 存储在文件中,文件流可减少 JSON 解析及生成所需的内存量。编码流在字节流和字符流之间作转换。最后,使用者可使用一个简单接口创建自定义的流。
diff --git a/doc/tutorial.md b/doc/tutorial.md
new file mode 100644
index 0000000..3fa63c9
--- /dev/null
+++ b/doc/tutorial.md
@@ -0,0 +1,536 @@
+# Tutorial
+
+This tutorial introduces the basics of the Document Object Model(DOM) API.
+
+As shown in [Usage at a glance](@ref index), JSON can be parsed into a DOM, and then the DOM can be queried and modified easily, and finally be converted back to JSON.
+
+[TOC]
+
+# Value & Document {#ValueDocument}
+
+Each JSON value is stored in a type called `Value`. A `Document`, representing the DOM, contains the root `Value` of the DOM tree. All public types and functions of RapidJSON are defined in the `rapidjson` namespace.
+
+# Query Value {#QueryValue}
+
+In this section, we will use excerpt of `example/tutorial/tutorial.cpp`.
+
+Assume we have the following JSON stored in a C string (`const char* json`):
+~~~~~~~~~~js
+{
+    "hello": "world",
+    "t": true ,
+    "f": false,
+    "n": null,
+    "i": 123,
+    "pi": 3.1416,
+    "a": [1, 2, 3, 4]
+}
+~~~~~~~~~~
+
+Parse it into a `Document`:
+~~~~~~~~~~cpp
+#include "rapidjson/document.h"
+
+using namespace rapidjson;
+
+// ...
+Document document;
+document.Parse(json);
+~~~~~~~~~~
+
+The JSON is now parsed into `document` as a *DOM tree*:
+
+![DOM in the tutorial](diagram/tutorial.png)
+
+Since the update to RFC 7159, the root of a conforming JSON document can be any JSON value.  In earlier RFC 4627, only objects or arrays were allowed as root values. In this case, the root is an object.
+~~~~~~~~~~cpp
+assert(document.IsObject());
+~~~~~~~~~~
+
+Let's query whether a `"hello"` member exists in the root object. Since a `Value` can contain different types of value, we may need to verify its type and use suitable API to obtain the value. In this example, `"hello"` member associates with a JSON string.
+~~~~~~~~~~cpp
+assert(document.HasMember("hello"));
+assert(document["hello"].IsString());
+printf("hello = %s\n", document["hello"].GetString());
+~~~~~~~~~~
+
+~~~~~~~~~~
+hello = world
+~~~~~~~~~~
+
+JSON true/false values are represented as `bool`.
+~~~~~~~~~~cpp
+assert(document["t"].IsBool());
+printf("t = %s\n", document["t"].GetBool() ? "true" : "false");
+~~~~~~~~~~
+
+~~~~~~~~~~
+t = true
+~~~~~~~~~~
+
+JSON null can be queried with `IsNull()`.
+~~~~~~~~~~cpp
+printf("n = %s\n", document["n"].IsNull() ? "null" : "?");
+~~~~~~~~~~
+
+~~~~~~~~~~
+n = null
+~~~~~~~~~~
+
+JSON number type represents all numeric values. However, C++ needs more specific type for manipulation.
+
+~~~~~~~~~~cpp
+assert(document["i"].IsNumber());
+
+// In this case, IsUint()/IsInt64()/IsUint64() also return true.
+assert(document["i"].IsInt());          
+printf("i = %d\n", document["i"].GetInt());
+// Alternative (int)document["i"]
+
+assert(document["pi"].IsNumber());
+assert(document["pi"].IsDouble());
+printf("pi = %g\n", document["pi"].GetDouble());
+~~~~~~~~~~
+
+~~~~~~~~~~
+i = 123
+pi = 3.1416
+~~~~~~~~~~
+
+JSON array contains a number of elements.
+~~~~~~~~~~cpp
+// Using a reference for consecutive access is handy and faster.
+const Value& a = document["a"];
+assert(a.IsArray());
+for (SizeType i = 0; i < a.Size(); i++) // Uses SizeType instead of size_t
+        printf("a[%d] = %d\n", i, a[i].GetInt());
+~~~~~~~~~~
+
+~~~~~~~~~~
+a[0] = 1
+a[1] = 2
+a[2] = 3
+a[3] = 4
+~~~~~~~~~~
+
+Note that, RapidJSON does not automatically convert values between JSON types. If a value is a string, it is invalid to call `GetInt()`, for example. In debug mode it will fail an assertion. In release mode, the behavior is undefined.
+
+In the following sections we discuss details about querying individual types.
+
+## Query Array {#QueryArray}
+
+By default, `SizeType` is typedef of `unsigned`. In most systems, an array is limited to store up to 2^32-1 elements.
+
+You may access the elements in an array by integer literal, for example, `a[0]`, `a[1]`, `a[2]`.
+
+Array is similar to `std::vector`: instead of using indices, you may also use iterator to access all the elements.
+~~~~~~~~~~cpp
+for (Value::ConstValueIterator itr = a.Begin(); itr != a.End(); ++itr)
+    printf("%d ", itr->GetInt());
+~~~~~~~~~~
+
+And other familiar query functions:
+* `SizeType Capacity() const`
+* `bool Empty() const`
+
+### Range-based For Loop (New in v1.1.0)
+
+When C++11 is enabled, you can use range-based for loop to access all elements in an array.
+
+~~~~~~~~~~cpp
+for (auto& v : a.GetArray())
+    printf("%d ", v.GetInt());
+~~~~~~~~~~
+
+## Query Object {#QueryObject}
+
+Similar to Array, we can access all object members by iterator:
+
+~~~~~~~~~~cpp
+static const char* kTypeNames[] = 
+    { "Null", "False", "True", "Object", "Array", "String", "Number" };
+
+for (Value::ConstMemberIterator itr = document.MemberBegin();
+    itr != document.MemberEnd(); ++itr)
+{
+    printf("Type of member %s is %s\n",
+        itr->name.GetString(), kTypeNames[itr->value.GetType()]);
+}
+~~~~~~~~~~
+
+~~~~~~~~~~
+Type of member hello is String
+Type of member t is True
+Type of member f is False
+Type of member n is Null
+Type of member i is Number
+Type of member pi is Number
+Type of member a is Array
+~~~~~~~~~~
+
+Note that, when `operator[](const char*)` cannot find the member, it will fail an assertion.
+
+If we are unsure whether a member exists, we need to call `HasMember()` before calling `operator[](const char*)`. However, this incurs two lookups. A better way is to call `FindMember()`, which can check the existence of a member and obtain its value at once:
+
+~~~~~~~~~~cpp
+Value::ConstMemberIterator itr = document.FindMember("hello");
+if (itr != document.MemberEnd())
+    printf("%s\n", itr->value.GetString());
+~~~~~~~~~~
+
+### Range-based For Loop (New in v1.1.0)
+
+When C++11 is enabled, you can use range-based for loop to access all members in an object.
+
+~~~~~~~~~~cpp
+for (auto& m : document.GetObject())
+    printf("Type of member %s is %s\n",
+        m.name.GetString(), kTypeNames[m.value.GetType()]);
+~~~~~~~~~~
+
+## Querying Number {#QueryNumber}
+
+JSON provides a single numerical type called Number. Number can be an integer or a real number. RFC 4627 says the range of Number is specified by the parser implementation.
+
+As C++ provides several integer and floating point number types, the DOM tries to handle these with the widest possible range and good performance.
+
+When a Number is parsed, it is stored in the DOM as one of the following types:
+
+Type       | Description
+-----------|---------------------------------------
+`unsigned` | 32-bit unsigned integer
+`int`      | 32-bit signed integer
+`uint64_t` | 64-bit unsigned integer
+`int64_t`  | 64-bit signed integer
+`double`   | 64-bit double precision floating point
+
+When querying a number, you can check whether the number can be obtained as the target type:
+
+Checking          | Obtaining
+------------------|---------------------
+`bool IsNumber()` | N/A
+`bool IsUint()`   | `unsigned GetUint()`
+`bool IsInt()`    | `int GetInt()`
+`bool IsUint64()` | `uint64_t GetUint64()`
+`bool IsInt64()`  | `int64_t GetInt64()`
+`bool IsDouble()` | `double GetDouble()`
+
+Note that, an integer value may be obtained in various ways without conversion. For example, a value `x` containing 123 will make `x.IsInt() == x.IsUint() == x.IsInt64() == x.IsUint64() == true`. But a value `y` containing -3000000000 will only make `y.IsInt64() == true`.
+
+When obtaining the numeric values, `GetDouble()` will convert internal integer representation to a `double`. Note that, `int` and `unsigned` can be safely converted to `double`, but `int64_t` and `uint64_t` may lose precision (since mantissa of `double` is only 52-bits).
+
+## Query String {#QueryString}
+
+In addition to `GetString()`, the `Value` class also contains `GetStringLength()`. Here explains why.
+
+According to RFC 4627, JSON strings can contain Unicode character `U+0000`, which must be escaped as `"\u0000"`. The problem is that, C/C++ often uses null-terminated strings, which treat `'\0'` as the terminator symbol.
+
+To conform RFC 4627, RapidJSON supports string containing `U+0000`. If you need to handle this, you can use `GetStringLength()` to obtain the correct string length.
+
+For example, after parsing a the following JSON to `Document d`:
+
+~~~~~~~~~~js
+{ "s" :  "a\u0000b" }
+~~~~~~~~~~
+The correct length of the value `"a\u0000b"` is 3. But `strlen()` returns 1.
+
+`GetStringLength()` can also improve performance, as user may often need to call `strlen()` for allocating buffer.
+
+Besides, `std::string` also support a constructor:
+
+~~~~~~~~~~cpp
+string(const char* s, size_t count);
+~~~~~~~~~~
+
+which accepts the length of string as parameter. This constructor supports storing null character within the string, and should also provide better performance.
+
+## Comparing values
+
+You can use `==` and `!=` to compare values. Two values are equal if and only if they have the same type and contents. You can also compare values with primitive types. Here is an example.
+
+~~~~~~~~~~cpp
+if (document["hello"] == document["n"]) /*...*/;    // Compare values
+if (document["hello"] == "world") /*...*/;          // Compare value with literal string
+if (document["i"] != 123) /*...*/;                  // Compare with integers
+if (document["pi"] != 3.14) /*...*/;                // Compare with double.
+~~~~~~~~~~
+
+Array/object compares their elements/members in order. They are equal if and only if their whole subtrees are equal.
+
+Note that, currently if an object contains duplicated named member, comparing equality with any object is always `false`.
+
+# Create/Modify Values {#CreateModifyValues}
+
+There are several ways to create values. After a DOM tree is created and/or modified, it can be saved as JSON again using `Writer`.
+
+## Change Value Type {#ChangeValueType}
+When creating a Value or Document by default constructor, its type is Null. To change its type, call `SetXXX()` or assignment operator, for example:
+
+~~~~~~~~~~cpp
+Document d; // Null
+d.SetObject();
+
+Value v;    // Null
+v.SetInt(10);
+v = 10;     // Shortcut, same as above
+~~~~~~~~~~
+
+### Overloaded Constructors
+There are also overloaded constructors for several types:
+
+~~~~~~~~~~cpp
+Value b(true);    // calls Value(bool)
+Value i(-123);    // calls Value(int)
+Value u(123u);    // calls Value(unsigned)
+Value d(1.5);     // calls Value(double)
+~~~~~~~~~~
+
+To create empty object or array, you may use `SetObject()`/`SetArray()` after default constructor, or using the `Value(Type)` in one shot:
+
+~~~~~~~~~~cpp
+Value o(kObjectType);
+Value a(kArrayType);
+~~~~~~~~~~
+
+## Move Semantics {#MoveSemantics}
+
+A very special decision during design of RapidJSON is that, assignment of value does not copy the source value to destination value. Instead, the value from source is moved to the destination. For example,
+
+~~~~~~~~~~cpp
+Value a(123);
+Value b(456);
+b = a;         // a becomes a Null value, b becomes number 123.
+~~~~~~~~~~
+
+![Assignment with move semantics.](diagram/move1.png)
+
+Why? What is the advantage of this semantics?
+
+The simple answer is performance. For fixed size JSON types (Number, True, False, Null), copying them is fast and easy. However, For variable size JSON types (String, Array, Object), copying them will incur a lot of overheads. And these overheads are often unnoticed. Especially when we need to create temporary object, copy it to another variable, and then destruct it.
+
+For example, if normal *copy* semantics was used:
+
+~~~~~~~~~~cpp
+Document d;
+Value o(kObjectType);
+{
+    Value contacts(kArrayType);
+    // adding elements to contacts array.
+    // ...
+    o.AddMember("contacts", contacts, d.GetAllocator());  // deep clone contacts (may be with lots of allocations)
+    // destruct contacts.
+}
+~~~~~~~~~~
+
+![Copy semantics makes a lots of copy operations.](diagram/move2.png)
+
+The object `o` needs to allocate a buffer of same size as contacts, makes a deep clone of it, and then finally contacts is destructed. This will incur a lot of unnecessary allocations/deallocations and memory copying.
+
+There are solutions to prevent actual copying these data, such as reference counting and garbage collection(GC).
+
+To make RapidJSON simple and fast, we chose to use *move* semantics for assignment. It is similar to `std::auto_ptr` which transfer ownership during assignment. Move is much faster and simpler, it just destructs the original value, `memcpy()` the source to destination, and finally sets the source as Null type.
+
+So, with move semantics, the above example becomes:
+
+~~~~~~~~~~cpp
+Document d;
+Value o(kObjectType);
+{
+    Value contacts(kArrayType);
+    // adding elements to contacts array.
+    o.AddMember("contacts", contacts, d.GetAllocator());  // just memcpy() of contacts itself to the value of new member (16 bytes)
+    // contacts became Null here. Its destruction is trivial.
+}
+~~~~~~~~~~
+
+![Move semantics makes no copying.](diagram/move3.png)
+
+This is called move assignment operator in C++11. As RapidJSON supports C++03, it adopts move semantics using assignment operator, and all other modifying functions like `AddMember()`, `PushBack()`.
+
+### Move semantics and temporary values {#TemporaryValues}
+
+Sometimes, it is convenient to construct a Value in place, before passing it to one of the "moving" functions, like `PushBack()` or `AddMember()`.  As temporary objects can't be converted to proper Value references, the convenience function `Move()` is available:
+
+~~~~~~~~~~cpp
+Value a(kArrayType);
+Document::AllocatorType& allocator = document.GetAllocator();
+// a.PushBack(Value(42), allocator);       // will not compile
+a.PushBack(Value().SetInt(42), allocator); // fluent API
+a.PushBack(Value(42).Move(), allocator);   // same as above
+~~~~~~~~~~
+
+## Create String {#CreateString}
+RapidJSON provides two strategies for storing string.
+
+1. copy-string: allocates a buffer, and then copy the source data into it.
+2. const-string: simply store a pointer of string.
+
+Copy-string is always safe because it owns a copy of the data. Const-string can be used for storing a string literal, and for in-situ parsing which will be mentioned in the DOM section.
+
+To make memory allocation customizable, RapidJSON requires users to pass an instance of allocator, whenever an operation may require allocation. This design is needed to prevent storing an allocator (or Document) pointer per Value.
+
+Therefore, when we assign a copy-string, we call this overloaded `SetString()` with allocator:
+
+~~~~~~~~~~cpp
+Document document;
+Value author;
+char buffer[10];
+int len = sprintf(buffer, "%s %s", "Milo", "Yip"); // dynamically created string.
+author.SetString(buffer, len, document.GetAllocator());
+memset(buffer, 0, sizeof(buffer));
+// author.GetString() still contains "Milo Yip" after buffer is destroyed
+~~~~~~~~~~
+
+In this example, we get the allocator from a `Document` instance. This is a common idiom when using RapidJSON. But you may use other instances of allocator.
+
+Besides, the above `SetString()` requires length. This can handle null characters within a string. There is another `SetString()` overloaded function without the length parameter. And it assumes the input is null-terminated and calls a `strlen()`-like function to obtain the length.
+
+Finally, for a string literal or string with a safe life-cycle one can use the const-string version of `SetString()`, which lacks an allocator parameter.  For string literals (or constant character arrays), simply passing the literal as parameter is safe and efficient:
+
+~~~~~~~~~~cpp
+Value s;
+s.SetString("rapidjson");    // can contain null character, length derived at compile time
+s = "rapidjson";             // shortcut, same as above
+~~~~~~~~~~
+
+For a character pointer, RapidJSON requires it to be marked as safe before using it without copying. This can be achieved by using the `StringRef` function:
+
+~~~~~~~~~cpp
+const char * cstr = getenv("USER");
+size_t cstr_len = ...;                 // in case length is available
+Value s;
+// s.SetString(cstr);                  // will not compile
+s.SetString(StringRef(cstr));          // ok, assume safe lifetime, null-terminated
+s = StringRef(cstr);                   // shortcut, same as above
+s.SetString(StringRef(cstr,cstr_len)); // faster, can contain null character
+s = StringRef(cstr,cstr_len);          // shortcut, same as above
+
+~~~~~~~~~
+
+## Modify Array {#ModifyArray}
+Value with array type provides an API similar to `std::vector`.
+
+* `Clear()`
+* `Reserve(SizeType, Allocator&)`
+* `Value& PushBack(Value&, Allocator&)`
+* `template <typename T> GenericValue& PushBack(T, Allocator&)`
+* `Value& PopBack()`
+* `ValueIterator Erase(ConstValueIterator pos)`
+* `ValueIterator Erase(ConstValueIterator first, ConstValueIterator last)`
+
+Note that, `Reserve(...)` and `PushBack(...)` may allocate memory for the array elements, therefore requiring an allocator.
+
+Here is an example of `PushBack()`:
+
+~~~~~~~~~~cpp
+Value a(kArrayType);
+Document::AllocatorType& allocator = document.GetAllocator();
+
+for (int i = 5; i <= 10; i++)
+    a.PushBack(i, allocator);   // allocator is needed for potential realloc().
+
+// Fluent interface
+a.PushBack("Lua", allocator).PushBack("Mio", allocator);
+~~~~~~~~~~
+
+This API differs from STL in that `PushBack()`/`PopBack()` return the array reference itself. This is called _fluent interface_.
+
+If you want to add a non-constant string or a string without sufficient lifetime (see [Create String](#CreateString)) to the array, you need to create a string Value by using the copy-string API.  To avoid the need for an intermediate variable, you can use a [temporary value](#TemporaryValues) in place:
+
+~~~~~~~~~~cpp
+// in-place Value parameter
+contact.PushBack(Value("copy", document.GetAllocator()).Move(), // copy string
+                 document.GetAllocator());
+
+// explicit parameters
+Value val("key", document.GetAllocator()); // copy string
+contact.PushBack(val, document.GetAllocator());
+~~~~~~~~~~
+
+## Modify Object {#ModifyObject}
+The Object class is a collection of key-value pairs (members). Each key must be a string value. To modify an object, either add or remove members. The following API is for adding members:
+
+* `Value& AddMember(Value&, Value&, Allocator& allocator)`
+* `Value& AddMember(StringRefType, Value&, Allocator&)`
+* `template <typename T> Value& AddMember(StringRefType, T value, Allocator&)`
+
+Here is an example.
+
+~~~~~~~~~~cpp
+Value contact(kObjectType);
+contact.AddMember("name", "Milo", document.GetAllocator());
+contact.AddMember("married", true, document.GetAllocator());
+~~~~~~~~~~
+
+The name parameter with `StringRefType` is similar to the interface of the `SetString` function for string values. These overloads are used to avoid the need for copying the `name` string, since constant key names are very common in JSON objects.
+
+If you need to create a name from a non-constant string or a string without sufficient lifetime (see [Create String](#CreateString)), you need to create a string Value by using the copy-string API.  To avoid the need for an intermediate variable, you can use a [temporary value](#TemporaryValues) in place:
+
+~~~~~~~~~~cpp
+// in-place Value parameter
+contact.AddMember(Value("copy", document.GetAllocator()).Move(), // copy string
+                  Value().Move(),                                // null value
+                  document.GetAllocator());
+
+// explicit parameters
+Value key("key", document.GetAllocator()); // copy string name
+Value val(42);                             // some value
+contact.AddMember(key, val, document.GetAllocator());
+~~~~~~~~~~
+
+For removing members, there are several choices: 
+
+* `bool RemoveMember(const Ch* name)`: Remove a member by search its name (linear time complexity).
+* `bool RemoveMember(const Value& name)`: same as above but `name` is a Value.
+* `MemberIterator RemoveMember(MemberIterator)`: Remove a member by iterator (_constant_ time complexity).
+* `MemberIterator EraseMember(MemberIterator)`: similar to the above but it preserves order of members (linear time complexity).
+* `MemberIterator EraseMember(MemberIterator first, MemberIterator last)`: remove a range of members, preserves order (linear time complexity).
+
+`MemberIterator RemoveMember(MemberIterator)` uses a "move-last" trick to achieve constant time complexity. Basically the member at iterator is destructed, and then the last element is moved to that position. So the order of the remaining members are changed.
+
+## Deep Copy Value {#DeepCopyValue}
+If we really need to copy a DOM tree, we can use two APIs for deep copy: constructor with allocator, and `CopyFrom()`.
+
+~~~~~~~~~~cpp
+Document d;
+Document::AllocatorType& a = d.GetAllocator();
+Value v1("foo");
+// Value v2(v1); // not allowed
+
+Value v2(v1, a);                      // make a copy
+assert(v1.IsString());                // v1 untouched
+d.SetArray().PushBack(v1, a).PushBack(v2, a);
+assert(v1.IsNull() && v2.IsNull());   // both moved to d
+
+v2.CopyFrom(d, a);                    // copy whole document to v2
+assert(d.IsArray() && d.Size() == 2); // d untouched
+v1.SetObject().AddMember("array", v2, a);
+d.PushBack(v1, a);
+~~~~~~~~~~
+
+## Swap Values {#SwapValues}
+
+`Swap()` is also provided.
+
+~~~~~~~~~~cpp
+Value a(123);
+Value b("Hello");
+a.Swap(b);
+assert(a.IsString());
+assert(b.IsInt());
+~~~~~~~~~~
+
+Swapping two DOM trees is fast (constant time), despite the complexity of the trees.
+
+# What's next {#WhatsNext}
+
+This tutorial shows the basics of DOM tree query and manipulation. There are several important concepts in RapidJSON:
+
+1. [Streams](doc/stream.md) are channels for reading/writing JSON, which can be an in-memory string, or file stream, etc. User can also create their streams.
+2. [Encoding](doc/encoding.md) defines which character encoding is used in streams and memory. RapidJSON also provide Unicode conversion/validation internally.
+3. [DOM](doc/dom.md)'s basics are already covered in this tutorial. Uncover more advanced features such as *in situ* parsing, other parsing options and advanced usages.
+4. [SAX](doc/sax.md) is the foundation of parsing/generating facility in RapidJSON. Learn how to use `Reader`/`Writer` to implement even faster applications. Also try `PrettyWriter` to format the JSON.
+5. [Performance](doc/performance.md) shows some in-house and third-party benchmarks.
+6. [Internals](doc/internals.md) describes some internal designs and techniques of RapidJSON.
+
+You may also refer to the [FAQ](doc/faq.md), API documentation, examples and unit tests.
diff --git a/doc/tutorial.zh-cn.md b/doc/tutorial.zh-cn.md
new file mode 100644
index 0000000..3bacfb0
--- /dev/null
+++ b/doc/tutorial.zh-cn.md
@@ -0,0 +1,535 @@
+# 教程
+
+本教程简介文件对象模型(Document Object Model, DOM)API。
+
+如 [用法一览](../readme.zh-cn.md#用法一览) 中所示,可以解析一个 JSON 至 DOM,然后就可以轻松查询及修改 DOM,并最终转换回 JSON。
+
+[TOC]
+
+# Value 及 Document {#ValueDocument}
+
+每个 JSON 值都储存为 `Value` 类,而 `Document` 类则表示整个 DOM,它存储了一个 DOM 树的根 `Value`。RapidJSON 的所有公开类型及函数都在 `rapidjson` 命名空间中。
+
+# 查询 Value {#QueryValue}
+
+在本节中,我们会使用到 `example/tutorial/tutorial.cpp` 中的代码片段。
+
+假设我们用 C 语言的字符串储存一个 JSON(`const char* json`):
+~~~~~~~~~~js
+{
+    "hello": "world",
+    "t": true ,
+    "f": false,
+    "n": null,
+    "i": 123,
+    "pi": 3.1416,
+    "a": [1, 2, 3, 4]
+}
+~~~~~~~~~~
+
+把它解析至一个 `Document`:
+~~~~~~~~~~cpp
+#include "rapidjson/document.h"
+
+using namespace rapidjson;
+
+// ...
+Document document;
+document.Parse(json);
+~~~~~~~~~~
+
+那么现在该 JSON 就会被解析至 `document` 中,成为一棵 *DOM 树*:
+
+![教程中的 DOM](diagram/tutorial.png)
+
+自从 RFC 7159 作出更新,合法 JSON 文件的根可以是任何类型的 JSON 值。而在较早的 RFC 4627 中,根值只允许是 Object 或 Array。而在上述例子中,根是一个 Object。
+~~~~~~~~~~cpp
+assert(document.IsObject());
+~~~~~~~~~~
+
+让我们查询一下根 Object 中有没有 `"hello"` 成员。由于一个 `Value` 可包含不同类型的值,我们可能需要验证它的类型,并使用合适的 API 去获取其值。在此例中,`"hello"` 成员关联到一个 JSON String。
+~~~~~~~~~~cpp
+assert(document.HasMember("hello"));
+assert(document["hello"].IsString());
+printf("hello = %s\n", document["hello"].GetString());
+~~~~~~~~~~
+
+~~~~~~~~~~
+world
+~~~~~~~~~~
+
+JSON True/False 值是以 `bool` 表示的。
+~~~~~~~~~~cpp
+assert(document["t"].IsBool());
+printf("t = %s\n", document["t"].GetBool() ? "true" : "false");
+~~~~~~~~~~
+
+~~~~~~~~~~
+true
+~~~~~~~~~~
+
+JSON Null 值可用 `IsNull()` 查询。
+~~~~~~~~~~cpp
+printf("n = %s\n", document["n"].IsNull() ? "null" : "?");
+~~~~~~~~~~
+
+~~~~~~~~~~
+null
+~~~~~~~~~~
+
+JSON Number 类型表示所有数值。然而,C++ 需要使用更专门的类型。
+
+~~~~~~~~~~cpp
+assert(document["i"].IsNumber());
+
+// 在此情况下,IsUint()/IsInt64()/IsUint64() 也会返回 true
+assert(document["i"].IsInt());          
+printf("i = %d\n", document["i"].GetInt());
+// 另一种用法: (int)document["i"]
+
+assert(document["pi"].IsNumber());
+assert(document["pi"].IsDouble());
+printf("pi = %g\n", document["pi"].GetDouble());
+~~~~~~~~~~
+
+~~~~~~~~~~
+i = 123
+pi = 3.1416
+~~~~~~~~~~
+
+JSON Array 包含一些元素。
+~~~~~~~~~~cpp
+// 使用引用来连续访问,方便之余还更高效。
+const Value& a = document["a"];
+assert(a.IsArray());
+for (SizeType i = 0; i < a.Size(); i++) // 使用 SizeType 而不是 size_t
+        printf("a[%d] = %d\n", i, a[i].GetInt());
+~~~~~~~~~~
+
+~~~~~~~~~~
+a[0] = 1
+a[1] = 2
+a[2] = 3
+a[3] = 4
+~~~~~~~~~~
+
+注意,RapidJSON 并不自动转换各种 JSON 类型。例如,对一个 String 的 Value 调用 `GetInt()` 是非法的。在调试模式下,它会被断言失败。在发布模式下,其行为是未定义的。
+
+以下将会讨论有关查询各类型的细节。
+
+## 查询 Array {#QueryArray}
+
+缺省情况下,`SizeType` 是 `unsigned` 的 typedef。在多数系统中,Array 最多能存储 2^32-1 个元素。
+
+你可以用整数字面量访问元素,如 `a[0]`、`a[1]`、`a[2]`。
+
+Array 与 `std::vector` 相似,除了使用索引,也可使用迭代器来访问所有元素。
+~~~~~~~~~~cpp
+for (Value::ConstValueIterator itr = a.Begin(); itr != a.End(); ++itr)
+    printf("%d ", itr->GetInt());
+~~~~~~~~~~
+
+还有一些熟悉的查询函数:
+* `SizeType Capacity() const`
+* `bool Empty() const`
+
+### 范围 for 循环 (v1.1.0 中的新功能)
+
+当使用 C++11 功能时,你可使用范围 for 循环去访问 Array 内的所有元素。
+
+~~~~~~~~~~cpp
+for (auto& v : a.GetArray())
+    printf("%d ", v.GetInt());
+~~~~~~~~~~
+
+## 查询 Object {#QueryObject}
+
+和 Array 相似,我们可以用迭代器去访问所有 Object 成员:
+
+~~~~~~~~~~cpp
+static const char* kTypeNames[] = 
+    { "Null", "False", "True", "Object", "Array", "String", "Number" };
+
+for (Value::ConstMemberIterator itr = document.MemberBegin();
+    itr != document.MemberEnd(); ++itr)
+{
+    printf("Type of member %s is %s\n",
+        itr->name.GetString(), kTypeNames[itr->value.GetType()]);
+}
+~~~~~~~~~~
+
+~~~~~~~~~~
+Type of member hello is String
+Type of member t is True
+Type of member f is False
+Type of member n is Null
+Type of member i is Number
+Type of member pi is Number
+Type of member a is Array
+~~~~~~~~~~
+
+注意,当 `operator[](const char*)` 找不到成员,它会断言失败。
+
+若我们不确定一个成员是否存在,便需要在调用 `operator[](const char*)` 前先调用 `HasMember()`。然而,这会导致两次查找。更好的做法是调用 `FindMember()`,它能同时检查成员是否存在并返回它的 Value:
+
+~~~~~~~~~~cpp
+Value::ConstMemberIterator itr = document.FindMember("hello");
+if (itr != document.MemberEnd())
+    printf("%s\n", itr->value.GetString());
+~~~~~~~~~~
+
+### 范围 for 循环 (v1.1.0 中的新功能)
+
+当使用 C++11 功能时,你可使用范围 for 循环去访问 Object 内的所有成员。
+
+~~~~~~~~~~cpp
+for (auto& m : document.GetObject())
+    printf("Type of member %s is %s\n",
+        m.name.GetString(), kTypeNames[m.value.GetType()]);
+~~~~~~~~~~
+
+## 查询 Number {#QueryNumber}
+
+JSON 只提供一种数值类型──Number。数字可以是整数或实数。RFC 4627 规定数字的范围由解析器指定。
+
+由于 C++ 提供多种整数及浮点数类型,DOM 尝试尽量提供最广的范围及良好性能。
+
+当解析一个 Number 时, 它会被存储在 DOM 之中,成为下列其中一个类型:
+
+类型       | 描述
+-----------|---------------------------------------
+`unsigned` | 32 位无号整数
+`int`      | 32 位有号整数
+`uint64_t` | 64 位无号整数
+`int64_t`  | 64 位有号整数
+`double`   | 64 位双精度浮点数
+
+当查询一个 Number 时, 你可以检查该数字是否能以目标类型来提取:
+
+检查              | 提取
+------------------|---------------------
+`bool IsNumber()` | 不适用
+`bool IsUint()`   | `unsigned GetUint()`
+`bool IsInt()`    | `int GetInt()`
+`bool IsUint64()` | `uint64_t GetUint64()`
+`bool IsInt64()`  | `int64_t GetInt64()`
+`bool IsDouble()` | `double GetDouble()`
+
+注意,一个整数可能用几种类型来提取,而无需转换。例如,一个名为 `x` 的 Value 包含 123,那么 `x.IsInt() == x.IsUint() == x.IsInt64() == x.IsUint64() == true`。但如果一个名为 `y` 的 Value 包含 -3000000000,那么仅会令 `y.IsInt64() == true`。
+
+当要提取 Number 类型,`GetDouble()` 是会把内部整数的表示转换成 `double`。注意 `int` 和 `unsigned` 可以安全地转换至 `double`,但 `int64_t` 及 `uint64_t` 可能会丧失精度(因为 `double` 的尾数只有 52 位)。
+
+## 查询 String {#QueryString}
+
+除了 `GetString()`,`Value` 类也有一个 `GetStringLength()`。这里会解释个中原因。
+
+根据 RFC 4627,JSON String 可包含 Unicode 字符 `U+0000`,在 JSON 中会表示为 `"\u0000"`。问题是,C/C++ 通常使用空字符结尾字符串(null-terminated string),这种字符串把 `'\0'` 作为结束符号。
+
+为了符合 RFC 4627,RapidJSON 支持包含 `U+0000` 的 String。若你需要处理这些 String,便可使用 `GetStringLength()` 去获得正确的字符串长度。
+
+例如,当解析以下的 JSON 至 `Document d` 之后:
+
+~~~~~~~~~~js
+{ "s" :  "a\u0000b" }
+~~~~~~~~~~
+`"a\u0000b"` 值的正确长度应该是 3。但 `strlen()` 会返回 1。
+
+`GetStringLength()` 也可以提高性能,因为用户可能需要调用 `strlen()` 去分配缓冲。
+
+此外,`std::string` 也支持这个构造函数:
+
+~~~~~~~~~~cpp
+string(const char* s, size_t count);
+~~~~~~~~~~
+
+此构造函数接受字符串长度作为参数。它支持在字符串中存储空字符,也应该会有更好的性能。
+
+## 比较两个 Value
+
+你可使用 `==` 及 `!=` 去比较两个 Value。当且仅当两个 Value 的类型及内容相同,它们才当作相等。你也可以比较 Value 和它的原生类型值。以下是一个例子。
+
+~~~~~~~~~~cpp
+if (document["hello"] == document["n"]) /*...*/;    // 比较两个值
+if (document["hello"] == "world") /*...*/;          // 与字符串字面量作比较
+if (document["i"] != 123) /*...*/;                  // 与整数作比较
+if (document["pi"] != 3.14) /*...*/;                // 与 double 作比较
+~~~~~~~~~~
+
+Array/Object 顺序以它们的元素/成员作比较。当且仅当它们的整个子树相等,它们才当作相等。
+
+注意,现时若一个 Object 含有重复命名的成员,它与任何 Object 作比较都总会返回 `false`。
+
+# 创建/修改值 {#CreateModifyValues}
+
+有多种方法去创建值。 当一个 DOM 树被创建或修改后,可使用 `Writer` 再次存储为 JSON。
+
+## 改变 Value 类型 {#ChangeValueType}
+当使用默认构造函数创建一个 Value 或 Document,它的类型便会是 Null。要改变其类型,需调用 `SetXXX()` 或赋值操作,例如:
+
+~~~~~~~~~~cpp
+Document d; // Null
+d.SetObject();
+
+Value v;    // Null
+v.SetInt(10);
+v = 10;     // 简写,和上面的相同
+~~~~~~~~~~
+
+### 构造函数的各个重载
+几个类型也有重载构造函数:
+
+~~~~~~~~~~cpp
+Value b(true);    // 调用 Value(bool)
+Value i(-123);    // 调用 Value(int)
+Value u(123u);    // 调用 Value(unsigned)
+Value d(1.5);     // 调用 Value(double)
+~~~~~~~~~~
+
+要重建空 Object 或 Array,可在默认构造函数后使用 `SetObject()`/`SetArray()`,或一次性使用 `Value(Type)`:
+
+~~~~~~~~~~cpp
+Value o(kObjectType);
+Value a(kArrayType);
+~~~~~~~~~~
+
+## 转移语义(Move Semantics) {#MoveSemantics}
+
+在设计 RapidJSON 时有一个非常特别的决定,就是 Value 赋值并不是把来源 Value 复制至目的 Value,而是把来源 Value 转移(move)至目的 Value。例如:
+
+~~~~~~~~~~cpp
+Value a(123);
+Value b(456);
+b = a;         // a 变成 Null,b 变成数字 123。
+~~~~~~~~~~
+
+![使用移动语义赋值。](diagram/move1.png)
+
+为什么?此语义有何优点?
+
+最简单的答案就是性能。对于固定大小的 JSON 类型(Number、True、False、Null),复制它们是简单快捷。然而,对于可变大小的 JSON 类型(String、Array、Object),复制它们会产生大量开销,而且这些开销常常不被察觉。尤其是当我们需要创建临时 Object,把它复制至另一变量,然后再析构它。
+
+例如,若使用正常*复制*语义:
+
+~~~~~~~~~~cpp
+Value o(kObjectType);
+{
+    Value contacts(kArrayType);
+    // 把元素加进 contacts 数组。
+    // ...
+    o.AddMember("contacts", contacts, d.GetAllocator());  // 深度复制 contacts (可能有大量内存分配)
+    // 析构 contacts。
+}
+~~~~~~~~~~
+
+![复制语义产生大量的复制操作。](diagram/move2.png)
+
+那个 `o` Object 需要分配一个和 contacts 相同大小的缓冲区,对 contacts 做深度复制,并最终要析构 contacts。这样会产生大量无必要的内存分配/释放,以及内存复制。
+
+有一些方案可避免实质地复制这些数据,例如引用计数(reference counting)、垃圾回收(garbage collection, GC)。
+
+为了使 RapidJSON 简单及快速,我们选择了对赋值采用*转移*语义。这方法与 `std::auto_ptr` 相似,都是在赋值时转移拥有权。转移快得多简单得多,只需要析构原来的 Value,把来源 `memcpy()` 至目标,最后把来源设置为 Null 类型。
+
+因此,使用转移语义后,上面的例子变成:
+
+~~~~~~~~~~cpp
+Value o(kObjectType);
+{
+    Value contacts(kArrayType);
+    // adding elements to contacts array.
+    o.AddMember("contacts", contacts, d.GetAllocator());  // 只需 memcpy() contacts 本身至新成员的 Value(16 字节)
+    // contacts 在这里变成 Null。它的析构是平凡的。
+}
+~~~~~~~~~~
+
+![转移语义不需复制。](diagram/move3.png)
+
+在 C++11 中这称为转移赋值操作(move assignment operator)。由于 RapidJSON 支持 C++03,它在赋值操作采用转移语义,其它修改型函数如 `AddMember()`, `PushBack()` 也采用转移语义。
+
+### 转移语义及临时值 {#TemporaryValues}
+
+有时候,我们想直接构造一个 Value 并传递给一个“转移”函数(如 `PushBack()`、`AddMember()`)。由于临时对象是不能转换为正常的 Value 引用,我们加入了一个方便的 `Move()` 函数:
+
+~~~~~~~~~~cpp
+Value a(kArrayType);
+Document::AllocatorType& allocator = document.GetAllocator();
+// a.PushBack(Value(42), allocator);       // 不能通过编译
+a.PushBack(Value().SetInt(42), allocator); // fluent API
+a.PushBack(Value(42).Move(), allocator);   // 和上一行相同
+~~~~~~~~~~
+
+## 创建 String {#CreateString}
+RapidJSON 提供两个 String 的存储策略。
+
+1. copy-string: 分配缓冲区,然后把来源数据复制至它。
+2. const-string: 简单地储存字符串的指针。
+
+Copy-string 总是安全的,因为它拥有数据的克隆。Const-string 可用于存储字符串字面量,以及用于在 DOM 一节中将会提到的 in-situ 解析中。
+
+为了让用户自定义内存分配方式,当一个操作可能需要内存分配时,RapidJSON 要求用户传递一个 allocator 实例作为 API 参数。此设计避免了在每个 Value 存储 allocator(或 document)的指针。
+
+因此,当我们把一个 copy-string 赋值时, 调用含有 allocator 的 `SetString()` 重载函数:
+
+~~~~~~~~~~cpp
+Document document;
+Value author;
+char buffer[10];
+int len = sprintf(buffer, "%s %s", "Milo", "Yip"); // 动态创建的字符串。
+author.SetString(buffer, len, document.GetAllocator());
+memset(buffer, 0, sizeof(buffer));
+// 清空 buffer 后 author.GetString() 仍然包含 "Milo Yip"
+~~~~~~~~~~
+
+在此例子中,我们使用 `Document` 实例的 allocator。这是使用 RapidJSON 时常用的惯用法。但你也可以用其他 allocator 实例。
+
+另外,上面的 `SetString()` 需要长度参数。这个 API 能处理含有空字符的字符串。另一个 `SetString()` 重载函数没有长度参数,它假设输入是空字符结尾的,并会调用类似 `strlen()` 的函数去获取长度。
+
+最后,对于字符串字面量或有安全生命周期的字符串,可以使用 const-string 版本的 `SetString()`,它没有
+allocator 参数。对于字符串字面量(或字符数组常量),只需简单地传递字面量,又安全又高效:
+
+~~~~~~~~~~cpp
+Value s;
+s.SetString("rapidjson");    // 可包含空字符,长度在编译期推导
+s = "rapidjson";             // 上行的缩写
+~~~~~~~~~~
+
+对于字符指针,RapidJSON 需要作一个标记,代表它不复制也是安全的。可以使用 `StringRef` 函数:
+
+~~~~~~~~~cpp
+const char * cstr = getenv("USER");
+size_t cstr_len = ...;                 // 如果有长度
+Value s;
+// s.SetString(cstr);                  // 这不能通过编译
+s.SetString(StringRef(cstr));          // 可以,假设它的生命周期安全,并且是以空字符结尾的
+s = StringRef(cstr);                   // 上行的缩写
+s.SetString(StringRef(cstr, cstr_len));// 更快,可处理空字符
+s = StringRef(cstr, cstr_len);         // 上行的缩写
+
+~~~~~~~~~
+
+## 修改 Array {#ModifyArray}
+Array 类型的 Value 提供与 `std::vector` 相似的 API。
+
+* `Clear()`
+* `Reserve(SizeType, Allocator&)`
+* `Value& PushBack(Value&, Allocator&)`
+* `template <typename T> GenericValue& PushBack(T, Allocator&)`
+* `Value& PopBack()`
+* `ValueIterator Erase(ConstValueIterator pos)`
+* `ValueIterator Erase(ConstValueIterator first, ConstValueIterator last)`
+
+注意,`Reserve(...)` 及 `PushBack(...)` 可能会为数组元素分配内存,所以需要一个 allocator。
+
+以下是 `PushBack()` 的例子:
+
+~~~~~~~~~~cpp
+Value a(kArrayType);
+Document::AllocatorType& allocator = document.GetAllocator();
+
+for (int i = 5; i <= 10; i++)
+    a.PushBack(i, allocator);   // 可能需要调用 realloc() 所以需要 allocator
+
+// 流畅接口(Fluent interface)
+a.PushBack("Lua", allocator).PushBack("Mio", allocator);
+~~~~~~~~~~
+
+与 STL 不一样的是,`PushBack()`/`PopBack()` 返回 Array 本身的引用。这称为流畅接口(_fluent interface_)。
+
+如果你想在 Array 中加入一个非常量字符串,或是一个没有足够生命周期的字符串(见 [Create String](#CreateString)),你需要使用 copy-string API 去创建一个 String。为了避免加入中间变量,可以就地使用一个 [临时值](#TemporaryValues):
+
+~~~~~~~~~~cpp
+// 就地 Value 参数
+contact.PushBack(Value("copy", document.GetAllocator()).Move(), // copy string
+                 document.GetAllocator());
+
+// 显式 Value 参数
+Value val("key", document.GetAllocator()); // copy string
+contact.PushBack(val, document.GetAllocator());
+~~~~~~~~~~
+
+## 修改 Object {#ModifyObject}
+Object 是键值对的集合。每个键必须为 String。要修改 Object,方法是增加或移除成员。以下的 API 用来增加成员:
+
+* `Value& AddMember(Value&, Value&, Allocator& allocator)`
+* `Value& AddMember(StringRefType, Value&, Allocator&)`
+* `template <typename T> Value& AddMember(StringRefType, T value, Allocator&)`
+
+以下是一个例子。
+
+~~~~~~~~~~cpp
+Value contact(kObjectType);
+contact.AddMember("name", "Milo", document.GetAllocator());
+contact.AddMember("married", true, document.GetAllocator());
+~~~~~~~~~~
+
+使用 `StringRefType` 作为 name 参数的重载版本与字符串的 `SetString` 的接口相似。 这些重载是为了避免复制 `name` 字符串,因为 JSON object 中经常会使用常数键名。
+
+如果你需要从非常数字符串或生命周期不足的字符串创建键名(见 [创建 String](#CreateString)),你需要使用 copy-string API。为了避免中间变量,可以就地使用 [临时值](#TemporaryValues):
+
+~~~~~~~~~~cpp
+// 就地 Value 参数
+contact.AddMember(Value("copy", document.GetAllocator()).Move(), // copy string
+                  Value().Move(),                                // null value
+                  document.GetAllocator());
+
+// 显式参数
+Value key("key", document.GetAllocator()); // copy string name
+Value val(42);                             // 某 Value
+contact.AddMember(key, val, document.GetAllocator());
+~~~~~~~~~~
+
+移除成员有几个选择:
+
+* `bool RemoveMember(const Ch* name)`:使用键名来移除成员(线性时间复杂度)。
+* `bool RemoveMember(const Value& name)`:除了 `name` 是一个 Value,和上一行相同。
+* `MemberIterator RemoveMember(MemberIterator)`:使用迭代器移除成员(_常数_时间复杂度)。
+* `MemberIterator EraseMember(MemberIterator)`:和上行相似但维持成员次序(线性时间复杂度)。
+* `MemberIterator EraseMember(MemberIterator first, MemberIterator last)`:移除一个范围内的成员,维持次序(线性时间复杂度)。
+
+`MemberIterator RemoveMember(MemberIterator)` 使用了“转移最后”手法来达成常数时间复杂度。基本上就是析构迭代器位置的成员,然后把最后的成员转移至迭代器位置。因此,成员的次序会被改变。
+
+## 深复制 Value {#DeepCopyValue}
+若我们真的要复制一个 DOM 树,我们可使用两个 APIs 作深复制:含 allocator 的构造函数及 `CopyFrom()`。
+
+~~~~~~~~~~cpp
+Document d;
+Document::AllocatorType& a = d.GetAllocator();
+Value v1("foo");
+// Value v2(v1); // 不容许
+
+Value v2(v1, a);                      // 制造一个克隆
+assert(v1.IsString());                // v1 不变
+d.SetArray().PushBack(v1, a).PushBack(v2, a);
+assert(v1.IsNull() && v2.IsNull());   // 两个都转移至 d
+
+v2.CopyFrom(d, a);                    // 把整个 document 复制至 v2
+assert(d.IsArray() && d.Size() == 2); // d 不变
+v1.SetObject().AddMember("array", v2, a);
+d.PushBack(v1, a);
+~~~~~~~~~~
+
+## 交换 Value {#SwapValues}
+
+RapidJSON 也提供 `Swap()`。
+
+~~~~~~~~~~cpp
+Value a(123);
+Value b("Hello");
+a.Swap(b);
+assert(a.IsString());
+assert(b.IsInt());
+~~~~~~~~~~
+
+无论两棵 DOM 树有多复杂,交换是很快的(常数时间)。
+
+# 下一部分 {#WhatsNext}
+
+本教程展示了如何查询及修改 DOM 树。RapidJSON 还有一些重要概念:
+
+1. [流](doc/stream.zh-cn.md) 是读写 JSON 的通道。流可以是内存字符串、文件流等。用户也可以自定义流。
+2. [编码](doc/encoding.zh-cn.md) 定义在流或内存中使用的字符编码。RapidJSON 也在内部提供 Unicode 转换及校验功能。
+3. [DOM](doc/dom.zh-cn.md) 的基本功能已在本教程里介绍。还有更高级的功能,如原位(*in situ*)解析、其他解析选项及高级用法。
+4. [SAX](doc/sax.zh-cn.md) 是 RapidJSON 解析/生成功能的基础。学习使用 `Reader`/`Writer` 去实现更高性能的应用程序。也可以使用 `PrettyWriter` 去格式化 JSON。
+5. [性能](doc/performance.zh-cn.md) 展示一些我们做的及第三方的性能测试。
+6. [技术内幕](doc/internals.md) 讲述一些 RapidJSON 内部的设计及技术。
+
+你也可以参考 [常见问题](doc/faq.zh-cn.md)、API 文档、例子及单元测试。
diff --git a/docker/debian/Dockerfile b/docker/debian/Dockerfile
new file mode 100644
index 0000000..76f0235
--- /dev/null
+++ b/docker/debian/Dockerfile
@@ -0,0 +1,8 @@
+# BUILD:  docker build -t rapidjson-debian .
+# RUN:    docker run -it -v "$PWD"/../..:/rapidjson rapidjson-debian
+
+FROM debian:jessie
+
+RUN apt-get update && apt-get install -y g++ cmake doxygen valgrind
+
+ENTRYPOINT ["/bin/bash"]
diff --git a/example/CMakeLists.txt b/example/CMakeLists.txt
new file mode 100644
index 0000000..9f53c9a
--- /dev/null
+++ b/example/CMakeLists.txt
@@ -0,0 +1,46 @@
+cmake_minimum_required(VERSION 2.8)
+
+if(POLICY CMP0054)
+  cmake_policy(SET CMP0054 NEW)
+endif()
+
+set(EXAMPLES
+    capitalize
+    condense
+    filterkey
+    filterkeydom
+    jsonx
+    lookaheadparser
+    messagereader
+    parsebyparts
+    pretty
+    prettyauto
+    schemavalidator
+    serialize
+    simpledom
+    simplereader
+    simplepullreader
+    simplewriter
+    sortkeys
+    tutorial)
+    
+include_directories("../include/")
+
+add_definitions(-D__STDC_FORMAT_MACROS)
+set_property(DIRECTORY PROPERTY COMPILE_OPTIONS ${EXTRA_CXX_FLAGS})
+
+if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread")
+endif()
+
+add_executable(archivertest archiver/archiver.cpp archiver/archivertest.cpp)
+
+foreach (example ${EXAMPLES})
+    add_executable(${example} ${example}/${example}.cpp)
+endforeach()
+
+if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+    target_link_libraries(parsebyparts pthread)
+endif()
+
+add_custom_target(examples ALL DEPENDS ${EXAMPLES})
diff --git a/example/archiver/archiver.cpp b/example/archiver/archiver.cpp
new file mode 100644
index 0000000..59ae4c4
--- /dev/null
+++ b/example/archiver/archiver.cpp
@@ -0,0 +1,292 @@
+#include "archiver.h"
+#include <cassert>
+#include <stack>
+#include "rapidjson/document.h"
+#include "rapidjson/prettywriter.h"
+#include "rapidjson/stringbuffer.h"
+
+using namespace rapidjson;
+
+struct JsonReaderStackItem {
+    enum State {
+        BeforeStart,    //!< An object/array is in the stack but it is not yet called by StartObject()/StartArray().
+        Started,        //!< An object/array is called by StartObject()/StartArray().
+        Closed          //!< An array is closed after read all element, but before EndArray().
+    };
+
+    JsonReaderStackItem(const Value* value, State state) : value(value), state(state), index() {}
+
+    const Value* value;
+    State state;
+    SizeType index;   // For array iteration
+};
+
+typedef std::stack<JsonReaderStackItem> JsonReaderStack;
+
+#define DOCUMENT reinterpret_cast<Document*>(mDocument)
+#define STACK (reinterpret_cast<JsonReaderStack*>(mStack))
+#define TOP (STACK->top())
+#define CURRENT (*TOP.value)
+
+JsonReader::JsonReader(const char* json) : mDocument(), mStack(), mError(false) {
+    mDocument = new Document;
+    DOCUMENT->Parse(json);
+    if (DOCUMENT->HasParseError())
+        mError = true;
+    else {
+        mStack = new JsonReaderStack;
+        STACK->push(JsonReaderStackItem(DOCUMENT, JsonReaderStackItem::BeforeStart));
+    }
+}
+
+JsonReader::~JsonReader() {
+    delete DOCUMENT;
+    delete STACK;
+}
+
+// Archive concept
+JsonReader& JsonReader::StartObject() {
+    if (!mError) {
+        if (CURRENT.IsObject() && TOP.state == JsonReaderStackItem::BeforeStart)
+            TOP.state = JsonReaderStackItem::Started;
+        else
+            mError = true;
+    }
+    return *this;
+}
+
+JsonReader& JsonReader::EndObject() {
+    if (!mError) {
+        if (CURRENT.IsObject() && TOP.state == JsonReaderStackItem::Started)
+            Next();
+        else
+            mError = true;
+    }
+    return *this;
+}
+
+JsonReader& JsonReader::Member(const char* name) {
+    if (!mError) {
+        if (CURRENT.IsObject() && TOP.state == JsonReaderStackItem::Started) {
+            Value::ConstMemberIterator memberItr = CURRENT.FindMember(name);
+            if (memberItr != CURRENT.MemberEnd()) 
+                STACK->push(JsonReaderStackItem(&memberItr->value, JsonReaderStackItem::BeforeStart));
+            else
+                mError = true;
+        }
+        else
+            mError = true;
+    }
+    return *this;
+}
+
+bool JsonReader::HasMember(const char* name) const {
+    if (!mError && CURRENT.IsObject() && TOP.state == JsonReaderStackItem::Started)
+        return CURRENT.HasMember(name);
+    return false;
+}
+
+JsonReader& JsonReader::StartArray(size_t* size) {
+    if (!mError) {
+        if (CURRENT.IsArray() && TOP.state == JsonReaderStackItem::BeforeStart) {
+            TOP.state = JsonReaderStackItem::Started;
+            if (size)
+                *size = CURRENT.Size();
+
+            if (!CURRENT.Empty()) {
+                const Value* value = &CURRENT[TOP.index];
+                STACK->push(JsonReaderStackItem(value, JsonReaderStackItem::BeforeStart));
+            }
+            else
+                TOP.state = JsonReaderStackItem::Closed;
+        }
+        else
+            mError = true;
+    }
+    return *this;
+}
+
+JsonReader& JsonReader::EndArray() {
+    if (!mError) {
+        if (CURRENT.IsArray() && TOP.state == JsonReaderStackItem::Closed)
+            Next();
+        else
+            mError = true;
+    }
+    return *this;
+}
+
+JsonReader& JsonReader::operator&(bool& b) {
+    if (!mError) {
+        if (CURRENT.IsBool()) {
+            b = CURRENT.GetBool();
+            Next();
+        }
+        else
+            mError = true;
+    }
+    return *this;
+}
+
+JsonReader& JsonReader::operator&(unsigned& u) {
+    if (!mError) {
+        if (CURRENT.IsUint()) {
+            u = CURRENT.GetUint();
+            Next();
+        }
+        else
+            mError = true;
+    }
+    return *this;
+}
+
+JsonReader& JsonReader::operator&(int& i) {
+    if (!mError) {
+        if (CURRENT.IsInt()) {
+            i = CURRENT.GetInt();
+            Next();
+        }
+        else
+            mError = true;
+    }
+    return *this;
+}
+
+JsonReader& JsonReader::operator&(double& d) {
+    if (!mError) {
+        if (CURRENT.IsNumber()) {
+            d = CURRENT.GetDouble();
+            Next();
+        }
+        else
+            mError = true;
+    }
+    return *this;
+}
+
+JsonReader& JsonReader::operator&(std::string& s) {
+    if (!mError) {
+        if (CURRENT.IsString()) {
+            s = CURRENT.GetString();
+            Next();
+        }
+        else
+            mError = true;
+    }
+    return *this;
+}
+
+JsonReader& JsonReader::SetNull() {
+    // This function is for JsonWriter only.
+    mError = true;
+    return *this;
+}
+
+void JsonReader::Next() {
+    if (!mError) {
+        assert(!STACK->empty());
+        STACK->pop();
+
+        if (!STACK->empty() && CURRENT.IsArray()) {
+            if (TOP.state == JsonReaderStackItem::Started) { // Otherwise means reading array item pass end
+                if (TOP.index < CURRENT.Size() - 1) {
+                    const Value* value = &CURRENT[++TOP.index];
+                    STACK->push(JsonReaderStackItem(value, JsonReaderStackItem::BeforeStart));
+                }
+                else
+                    TOP.state = JsonReaderStackItem::Closed;
+            }
+            else
+                mError = true;
+        }
+    }
+}
+
+#undef DOCUMENT
+#undef STACK
+#undef TOP
+#undef CURRENT
+
+////////////////////////////////////////////////////////////////////////////////
+// JsonWriter
+
+#define WRITER reinterpret_cast<PrettyWriter<StringBuffer>*>(mWriter)
+#define STREAM reinterpret_cast<StringBuffer*>(mStream)
+
+JsonWriter::JsonWriter() : mWriter(), mStream() {
+    mStream = new StringBuffer;
+    mWriter = new PrettyWriter<StringBuffer>(*STREAM);
+}
+
+JsonWriter::~JsonWriter() { 
+    delete WRITER;
+    delete STREAM;
+}
+
+const char* JsonWriter::GetString() const {
+    return STREAM->GetString();
+}
+
+JsonWriter& JsonWriter::StartObject() {
+    WRITER->StartObject();
+    return *this;
+}
+
+JsonWriter& JsonWriter::EndObject() {
+    WRITER->EndObject();
+    return *this;
+}
+
+JsonWriter& JsonWriter::Member(const char* name) {
+    WRITER->String(name, static_cast<SizeType>(strlen(name)));
+    return *this;
+}
+
+bool JsonWriter::HasMember(const char*) const {
+    // This function is for JsonReader only.
+    assert(false);
+    return false;
+}
+
+JsonWriter& JsonWriter::StartArray(size_t*) {
+    WRITER->StartArray();   
+    return *this;
+}
+
+JsonWriter& JsonWriter::EndArray() {
+    WRITER->EndArray();
+    return *this;
+}
+
+JsonWriter& JsonWriter::operator&(bool& b) {
+    WRITER->Bool(b);
+    return *this;
+}
+
+JsonWriter& JsonWriter::operator&(unsigned& u) {
+    WRITER->Uint(u);
+    return *this;
+}
+
+JsonWriter& JsonWriter::operator&(int& i) {
+    WRITER->Int(i);
+    return *this;
+}
+
+JsonWriter& JsonWriter::operator&(double& d) {
+    WRITER->Double(d);
+    return *this;
+}
+
+JsonWriter& JsonWriter::operator&(std::string& s) {
+    WRITER->String(s.c_str(), static_cast<SizeType>(s.size()));
+    return *this;
+}
+
+JsonWriter& JsonWriter::SetNull() {
+    WRITER->Null();
+    return *this;
+}
+
+#undef STREAM
+#undef WRITER
diff --git a/example/archiver/archiver.h b/example/archiver/archiver.h
new file mode 100644
index 0000000..285ca73
--- /dev/null
+++ b/example/archiver/archiver.h
@@ -0,0 +1,145 @@
+#ifndef ARCHIVER_H_
+#define ARCHIVER_H_
+
+#include <cstddef>
+#include <string>
+
+/**
+\class Archiver
+\brief Archiver concept
+
+Archiver can be a reader or writer for serialization or deserialization respectively.
+
+class Archiver {
+public:
+    /// \returns true if the archiver is in normal state. false if it has errors.
+    operator bool() const;
+
+    /// Starts an object
+    Archiver& StartObject();
+    
+    /// After calling StartObject(), assign a member with a name
+    Archiver& Member(const char* name);
+
+    /// After calling StartObject(), check if a member presents
+    bool HasMember(const char* name) const;
+
+    /// Ends an object
+    Archiver& EndObject();
+
+    /// Starts an array
+    /// \param size If Archiver::IsReader is true, the size of array is written.
+    Archiver& StartArray(size_t* size = 0);
+
+    /// Ends an array
+    Archiver& EndArray();
+
+    /// Read/Write primitive types.
+    Archiver& operator&(bool& b);
+    Archiver& operator&(unsigned& u);
+    Archiver& operator&(int& i);
+    Archiver& operator&(double& d);
+    Archiver& operator&(std::string& s);
+
+    /// Write primitive types.
+    Archiver& SetNull();
+
+    //! Whether it is a reader.
+    static const bool IsReader;
+
+    //! Whether it is a writer.
+    static const bool IsWriter;
+};
+*/
+
+/// Represents a JSON reader which implements Archiver concept.
+class JsonReader {
+public:
+    /// Constructor.
+    /**
+        \param json A non-const source json string for in-situ parsing.
+        \note in-situ means the source JSON string will be modified after parsing.
+    */
+    JsonReader(const char* json);
+
+    /// Destructor.
+    ~JsonReader();
+
+    // Archive concept
+
+    operator bool() const { return !mError; }
+
+    JsonReader& StartObject();
+    JsonReader& Member(const char* name);
+    bool HasMember(const char* name) const;
+    JsonReader& EndObject();
+
+    JsonReader& StartArray(size_t* size = 0);
+    JsonReader& EndArray();
+
+    JsonReader& operator&(bool& b);
+    JsonReader& operator&(unsigned& u);
+    JsonReader& operator&(int& i);
+    JsonReader& operator&(double& d);
+    JsonReader& operator&(std::string& s);
+
+    JsonReader& SetNull();
+
+    static const bool IsReader = true;
+    static const bool IsWriter = !IsReader;
+
+private:
+    JsonReader(const JsonReader&);
+    JsonReader& operator=(const JsonReader&);
+
+    void Next();
+
+    // PIMPL
+    void* mDocument;              ///< DOM result of parsing.
+    void* mStack;                 ///< Stack for iterating the DOM
+    bool mError;                  ///< Whether an error has occurred.
+};
+
+class JsonWriter {
+public:
+    /// Constructor.
+    JsonWriter();
+
+    /// Destructor.
+    ~JsonWriter();
+
+    /// Obtains the serialized JSON string.
+    const char* GetString() const;
+
+    // Archive concept
+
+    operator bool() const { return true; }
+
+    JsonWriter& StartObject();
+    JsonWriter& Member(const char* name);
+    bool HasMember(const char* name) const;
+    JsonWriter& EndObject();
+
+    JsonWriter& StartArray(size_t* size = 0);
+    JsonWriter& EndArray();
+
+    JsonWriter& operator&(bool& b);
+    JsonWriter& operator&(unsigned& u);
+    JsonWriter& operator&(int& i);
+    JsonWriter& operator&(double& d);
+    JsonWriter& operator&(std::string& s);
+    JsonWriter& SetNull();
+
+    static const bool IsReader = false;
+    static const bool IsWriter = !IsReader;
+
+private:
+    JsonWriter(const JsonWriter&);
+    JsonWriter& operator=(const JsonWriter&);
+
+    // PIMPL idiom
+    void* mWriter;      ///< JSON writer.
+    void* mStream;      ///< Stream buffer.
+};
+
+#endif // ARCHIVER_H_
diff --git a/example/archiver/archivertest.cpp b/example/archiver/archivertest.cpp
new file mode 100644
index 0000000..417a421
--- /dev/null
+++ b/example/archiver/archivertest.cpp
@@ -0,0 +1,287 @@
+#include "archiver.h"
+#include <iostream>
+#include <vector>
+
+//////////////////////////////////////////////////////////////////////////////
+// Test1: simple object
+
+struct Student {
+    Student() : name(), age(), height(), canSwim() {}
+    Student(const std::string name, unsigned age, double height, bool canSwim) :
+        name(name), age(age), height(height), canSwim(canSwim)
+    {}
+
+    std::string name;
+    unsigned age;
+    double height;
+    bool canSwim;
+};
+
+template <typename Archiver>
+Archiver& operator&(Archiver& ar, Student& s) {
+    ar.StartObject();
+    ar.Member("name") & s.name;
+    ar.Member("age") & s.age;
+    ar.Member("height") & s.height;
+    ar.Member("canSwim") & s.canSwim;
+    return ar.EndObject();
+}
+
+std::ostream& operator<<(std::ostream& os, const Student& s) {
+    return os << s.name << " " << s.age << " " << s.height << " " << s.canSwim;
+}
+
+void test1() {
+    std::string json;
+
+    // Serialize
+    {
+        Student s("Lua", 9, 150.5, true);
+
+        JsonWriter writer;
+        writer & s;
+        json = writer.GetString();
+        std::cout << json << std::endl;
+    }
+
+    // Deserialize
+    {
+        Student s;
+        JsonReader reader(json.c_str());
+        reader & s;
+        std::cout << s << std::endl;
+    }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// Test2: std::vector <=> JSON array
+// 
+// You can map a JSON array to other data structures as well
+
+struct Group {
+    Group() : groupName(), students() {}
+    std::string groupName;
+    std::vector<Student> students;
+};
+
+template <typename Archiver>
+Archiver& operator&(Archiver& ar, Group& g) {
+    ar.StartObject();
+    
+    ar.Member("groupName");
+    ar & g.groupName;
+
+    ar.Member("students");
+    size_t studentCount = g.students.size();
+    ar.StartArray(&studentCount);
+    if (ar.IsReader)
+        g.students.resize(studentCount);
+    for (size_t i = 0; i < studentCount; i++)
+        ar & g.students[i];
+    ar.EndArray();
+
+    return ar.EndObject();
+}
+
+std::ostream& operator<<(std::ostream& os, const Group& g) {
+    os << g.groupName << std::endl;
+    for (std::vector<Student>::const_iterator itr = g.students.begin(); itr != g.students.end(); ++itr)
+        os << *itr << std::endl;
+    return os;
+}
+
+void test2() {
+    std::string json;
+
+    // Serialize
+    {
+        Group g;
+        g.groupName = "Rainbow";
+
+        Student s1("Lua", 9, 150.5, true);
+        Student s2("Mio", 7, 120.0, false);
+        g.students.push_back(s1);
+        g.students.push_back(s2);
+
+        JsonWriter writer;
+        writer & g;
+        json = writer.GetString();
+        std::cout << json << std::endl;
+    }
+
+    // Deserialize
+    {
+        Group g;
+        JsonReader reader(json.c_str());
+        reader & g;
+        std::cout << g << std::endl;
+    }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// Test3: polymorphism & friend
+//
+// Note that friendship is not necessary but makes things simpler.
+
+class Shape {
+public:
+    virtual ~Shape() {}
+    virtual const char* GetType() const = 0;
+    virtual void Print(std::ostream& os) const = 0;
+
+protected:
+    Shape() : x_(), y_() {}
+    Shape(double x, double y) : x_(x), y_(y) {}
+
+    template <typename Archiver>
+    friend Archiver& operator&(Archiver& ar, Shape& s);
+
+    double x_, y_;
+};
+
+template <typename Archiver>
+Archiver& operator&(Archiver& ar, Shape& s) {
+    ar.Member("x") & s.x_;
+    ar.Member("y") & s.y_;
+    return ar;
+}
+
+class Circle : public Shape {
+public:
+    Circle() : radius_() {}
+    Circle(double x, double y, double radius) : Shape(x, y), radius_(radius) {}
+    ~Circle() {}
+
+    const char* GetType() const { return "Circle"; }
+
+    void Print(std::ostream& os) const {
+        os << "Circle (" << x_ << ", " << y_ << ")" << " radius = " << radius_;
+    }
+
+private:
+    template <typename Archiver>
+    friend Archiver& operator&(Archiver& ar, Circle& c);
+
+    double radius_;
+};
+
+template <typename Archiver>
+Archiver& operator&(Archiver& ar, Circle& c) {
+    ar & static_cast<Shape&>(c);
+    ar.Member("radius") & c.radius_;
+    return ar;
+}
+
+class Box : public Shape {
+public:
+    Box() : width_(), height_() {}
+    Box(double x, double y, double width, double height) : Shape(x, y), width_(width), height_(height) {}
+    ~Box() {}
+
+    const char* GetType() const { return "Box"; }
+
+    void Print(std::ostream& os) const {
+        os << "Box (" << x_ << ", " << y_ << ")" << " width = " << width_ << " height = " << height_;
+    }
+
+private:
+    template <typename Archiver>
+    friend Archiver& operator&(Archiver& ar, Box& b);
+
+    double width_, height_;
+};
+
+template <typename Archiver>
+Archiver& operator&(Archiver& ar, Box& b) {
+    ar & static_cast<Shape&>(b);
+    ar.Member("width") & b.width_;
+    ar.Member("height") & b.height_;
+    return ar;
+}
+
+class Canvas {
+public:
+    Canvas() : shapes_() {}
+    ~Canvas() { Clear(); }
+    
+    void Clear() {
+        for (std::vector<Shape*>::iterator itr = shapes_.begin(); itr != shapes_.end(); ++itr)
+            delete *itr;
+    }
+
+    void AddShape(Shape* shape) { shapes_.push_back(shape); }
+    
+    void Print(std::ostream& os) {
+        for (std::vector<Shape*>::iterator itr = shapes_.begin(); itr != shapes_.end(); ++itr) {
+            (*itr)->Print(os);
+            std::cout << std::endl;
+        }
+    }
+
+private:
+    template <typename Archiver>
+    friend Archiver& operator&(Archiver& ar, Canvas& c);
+
+    std::vector<Shape*> shapes_;
+};
+
+template <typename Archiver>
+Archiver& operator&(Archiver& ar, Shape*& shape) {
+    std::string type = ar.IsReader ? "" : shape->GetType();
+    ar.StartObject();
+    ar.Member("type") & type;
+    if (type == "Circle") {
+        if (ar.IsReader) shape = new Circle;
+        ar & static_cast<Circle&>(*shape);
+    }
+    else if (type == "Box") {
+        if (ar.IsReader) shape = new Box;
+        ar & static_cast<Box&>(*shape);
+    }
+    return ar.EndObject();
+}
+
+template <typename Archiver>
+Archiver& operator&(Archiver& ar, Canvas& c) {
+    size_t shapeCount = c.shapes_.size();
+    ar.StartArray(&shapeCount);
+    if (ar.IsReader) {
+        c.Clear();
+        c.shapes_.resize(shapeCount);
+    }
+    for (size_t i = 0; i < shapeCount; i++)
+        ar & c.shapes_[i];
+    return ar.EndArray();
+}
+
+void test3() {
+    std::string json;
+
+    // Serialize
+    {
+        Canvas c;
+        c.AddShape(new Circle(1.0, 2.0, 3.0));
+        c.AddShape(new Box(4.0, 5.0, 6.0, 7.0));
+
+        JsonWriter writer;
+        writer & c;
+        json = writer.GetString();
+        std::cout << json << std::endl;
+    }
+
+    // Deserialize
+    {
+        Canvas c;
+        JsonReader reader(json.c_str());
+        reader & c;
+        c.Print(std::cout);
+    }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+int main() {
+    test1();
+    test2();
+    test3();
+}
diff --git a/example/capitalize/capitalize.cpp b/example/capitalize/capitalize.cpp
new file mode 100644
index 0000000..7da37e9
--- /dev/null
+++ b/example/capitalize/capitalize.cpp
@@ -0,0 +1,67 @@
+// JSON capitalization example
+
+// This example parses JSON from stdin with validation,
+// and re-outputs the JSON content to stdout with all strings capitalized, and without whitespace.
+
+#include "rapidjson/reader.h"
+#include "rapidjson/writer.h"
+#include "rapidjson/filereadstream.h"
+#include "rapidjson/filewritestream.h"
+#include "rapidjson/error/en.h"
+#include <vector>
+#include <cctype>
+
+using namespace rapidjson;
+
+template<typename OutputHandler>
+struct CapitalizeFilter {
+    CapitalizeFilter(OutputHandler& out) : out_(out), buffer_() {}
+
+    bool Null() { return out_.Null(); }
+    bool Bool(bool b) { return out_.Bool(b); }
+    bool Int(int i) { return out_.Int(i); }
+    bool Uint(unsigned u) { return out_.Uint(u); }
+    bool Int64(int64_t i) { return out_.Int64(i); }
+    bool Uint64(uint64_t u) { return out_.Uint64(u); }
+    bool Double(double d) { return out_.Double(d); }
+    bool RawNumber(const char* str, SizeType length, bool copy) { return out_.RawNumber(str, length, copy); }
+    bool String(const char* str, SizeType length, bool) {
+        buffer_.clear();
+        for (SizeType i = 0; i < length; i++)
+            buffer_.push_back(static_cast<char>(std::toupper(str[i])));
+        return out_.String(&buffer_.front(), length, true); // true = output handler need to copy the string
+    }
+    bool StartObject() { return out_.StartObject(); }
+    bool Key(const char* str, SizeType length, bool copy) { return String(str, length, copy); }
+    bool EndObject(SizeType memberCount) { return out_.EndObject(memberCount); }
+    bool StartArray() { return out_.StartArray(); }
+    bool EndArray(SizeType elementCount) { return out_.EndArray(elementCount); }
+
+    OutputHandler& out_;
+    std::vector<char> buffer_;
+
+private:
+    CapitalizeFilter(const CapitalizeFilter&);
+    CapitalizeFilter& operator=(const CapitalizeFilter&);
+};
+
+int main(int, char*[]) {
+    // Prepare JSON reader and input stream.
+    Reader reader;
+    char readBuffer[65536];
+    FileReadStream is(stdin, readBuffer, sizeof(readBuffer));
+
+    // Prepare JSON writer and output stream.
+    char writeBuffer[65536];
+    FileWriteStream os(stdout, writeBuffer, sizeof(writeBuffer));
+    Writer<FileWriteStream> writer(os);
+
+    // JSON reader parse from the input stream and let writer generate the output.
+    CapitalizeFilter<Writer<FileWriteStream> > filter(writer);
+    if (!reader.Parse(is, filter)) {
+        fprintf(stderr, "\nError(%u): %s\n", static_cast<unsigned>(reader.GetErrorOffset()), GetParseError_En(reader.GetParseErrorCode()));
+        return 1;
+    }
+
+    return 0;
+}
diff --git a/example/condense/condense.cpp b/example/condense/condense.cpp
new file mode 100644
index 0000000..46dc350
--- /dev/null
+++ b/example/condense/condense.cpp
@@ -0,0 +1,32 @@
+// JSON condenser example
+
+// This example parses JSON text from stdin with validation,
+// and re-outputs the JSON content to stdout without whitespace.
+
+#include "rapidjson/reader.h"
+#include "rapidjson/writer.h"
+#include "rapidjson/filereadstream.h"
+#include "rapidjson/filewritestream.h"
+#include "rapidjson/error/en.h"
+
+using namespace rapidjson;
+
+int main(int, char*[]) {
+    // Prepare JSON reader and input stream.
+    Reader reader;
+    char readBuffer[65536];
+    FileReadStream is(stdin, readBuffer, sizeof(readBuffer));
+
+    // Prepare JSON writer and output stream.
+    char writeBuffer[65536];
+    FileWriteStream os(stdout, writeBuffer, sizeof(writeBuffer));
+    Writer<FileWriteStream> writer(os);
+
+    // JSON reader parse from the input stream and let writer generate the output.
+    if (!reader.Parse(is, writer)) {
+        fprintf(stderr, "\nError(%u): %s\n", static_cast<unsigned>(reader.GetErrorOffset()), GetParseError_En(reader.GetParseErrorCode()));
+        return 1;
+    }
+
+    return 0;
+}
diff --git a/example/filterkey/filterkey.cpp b/example/filterkey/filterkey.cpp
new file mode 100644
index 0000000..c34a050
--- /dev/null
+++ b/example/filterkey/filterkey.cpp
@@ -0,0 +1,135 @@
+// JSON filterkey example with SAX-style API.
+
+// This example parses JSON text from stdin with validation.
+// During parsing, the specified key will be filtered out using a SAX handler.
+// It re-outputs the JSON content to stdout without whitespace.
+
+#include "rapidjson/reader.h"
+#include "rapidjson/writer.h"
+#include "rapidjson/filereadstream.h"
+#include "rapidjson/filewritestream.h"
+#include "rapidjson/error/en.h"
+#include <stack>
+
+using namespace rapidjson;
+
+// This handler forwards events to an output handler, filtering out the descendant events of the specified key.
+template <typename OutputHandler>
+class FilterKeyHandler {
+public:
+    typedef char Ch;
+
+    FilterKeyHandler(OutputHandler& outputHandler, const Ch* keyString, SizeType keyLength) : 
+        outputHandler_(outputHandler), keyString_(keyString), keyLength_(keyLength), filterValueDepth_(), filteredKeyCount_()
+    {}
+
+    bool Null()             { return filterValueDepth_ > 0 ? EndValue() : outputHandler_.Null()    && EndValue(); }
+    bool Bool(bool b)       { return filterValueDepth_ > 0 ? EndValue() : outputHandler_.Bool(b)   && EndValue(); }
+    bool Int(int i)         { return filterValueDepth_ > 0 ? EndValue() : outputHandler_.Int(i)    && EndValue(); }
+    bool Uint(unsigned u)   { return filterValueDepth_ > 0 ? EndValue() : outputHandler_.Uint(u)   && EndValue(); }
+    bool Int64(int64_t i)   { return filterValueDepth_ > 0 ? EndValue() : outputHandler_.Int64(i)  && EndValue(); }
+    bool Uint64(uint64_t u) { return filterValueDepth_ > 0 ? EndValue() : outputHandler_.Uint64(u) && EndValue(); }
+    bool Double(double d)   { return filterValueDepth_ > 0 ? EndValue() : outputHandler_.Double(d) && EndValue(); }
+    bool RawNumber(const Ch* str, SizeType len, bool copy) { return filterValueDepth_ > 0 ? EndValue() : outputHandler_.RawNumber(str, len, copy) && EndValue(); }
+    bool String   (const Ch* str, SizeType len, bool copy) { return filterValueDepth_ > 0 ? EndValue() : outputHandler_.String   (str, len, copy) && EndValue(); }
+    
+    bool StartObject() { 
+        if (filterValueDepth_ > 0) {
+            filterValueDepth_++;
+            return true;
+        }
+        else {
+            filteredKeyCount_.push(0);
+            return outputHandler_.StartObject();
+        }
+    }
+    
+    bool Key(const Ch* str, SizeType len, bool copy) { 
+        if (filterValueDepth_ > 0) 
+            return true;
+        else if (len == keyLength_ && std::memcmp(str, keyString_, len) == 0) {
+            filterValueDepth_ = 1;
+            return true;
+        }
+        else {
+            ++filteredKeyCount_.top();
+            return outputHandler_.Key(str, len, copy);
+        }
+    }
+
+    bool EndObject(SizeType) {
+        if (filterValueDepth_ > 0) {
+            filterValueDepth_--;
+            return EndValue();
+        }
+        else {
+            // Use our own filtered memberCount
+            SizeType memberCount = filteredKeyCount_.top();
+            filteredKeyCount_.pop();
+            return outputHandler_.EndObject(memberCount) && EndValue();
+        }
+    }
+
+    bool StartArray() {
+        if (filterValueDepth_ > 0) {
+            filterValueDepth_++;
+            return true;
+        }
+        else
+            return outputHandler_.StartArray();
+    }
+
+    bool EndArray(SizeType elementCount) {
+        if (filterValueDepth_ > 0) {
+            filterValueDepth_--;
+            return EndValue();
+        }
+        else
+            return outputHandler_.EndArray(elementCount) && EndValue();
+    }
+
+private:
+    FilterKeyHandler(const FilterKeyHandler&);
+    FilterKeyHandler& operator=(const FilterKeyHandler&);
+
+    bool EndValue() {
+        if (filterValueDepth_ == 1) // Just at the end of value after filtered key
+            filterValueDepth_ = 0;
+        return true;
+    }
+    
+    OutputHandler& outputHandler_;
+    const char* keyString_;
+    const SizeType keyLength_;
+    unsigned filterValueDepth_;
+    std::stack<SizeType> filteredKeyCount_;
+};
+
+int main(int argc, char* argv[]) {
+    if (argc != 2) {
+        fprintf(stderr, "filterkey key < input.json > output.json\n");
+        return 1;
+    }
+
+    // Prepare JSON reader and input stream.
+    Reader reader;
+    char readBuffer[65536];
+    FileReadStream is(stdin, readBuffer, sizeof(readBuffer));
+
+    // Prepare JSON writer and output stream.
+    char writeBuffer[65536];
+    FileWriteStream os(stdout, writeBuffer, sizeof(writeBuffer));
+    Writer<FileWriteStream> writer(os);
+
+    // Prepare Filter
+    FilterKeyHandler<Writer<FileWriteStream> > filter(writer, argv[1], static_cast<SizeType>(strlen(argv[1])));
+
+    // JSON reader parse from the input stream, filter handler filters the events, and forward to writer.
+    // i.e. the events flow is: reader -> filter -> writer
+    if (!reader.Parse(is, filter)) {
+        fprintf(stderr, "\nError(%u): %s\n", static_cast<unsigned>(reader.GetErrorOffset()), GetParseError_En(reader.GetParseErrorCode()));
+        return 1;
+    }
+
+    return 0;
+}
diff --git a/example/filterkeydom/filterkeydom.cpp b/example/filterkeydom/filterkeydom.cpp
new file mode 100644
index 0000000..732cc81
--- /dev/null
+++ b/example/filterkeydom/filterkeydom.cpp
@@ -0,0 +1,170 @@
+// JSON filterkey example which populates filtered SAX events into a Document.
+
+// This example parses JSON text from stdin with validation.
+// During parsing, the specified key will be filtered out using a SAX handler.
+// And finally the filtered events are used to populate a Document.
+// As an example, the document is written to standard output.
+
+#include "rapidjson/document.h"
+#include "rapidjson/writer.h"
+#include "rapidjson/filereadstream.h"
+#include "rapidjson/filewritestream.h"
+#include "rapidjson/error/en.h"
+#include <stack>
+
+using namespace rapidjson;
+
+// This handler forwards events to an output handler, filtering out the descendant events of the specified key.
+template <typename OutputHandler>
+class FilterKeyHandler {
+public:
+    typedef char Ch;
+
+    FilterKeyHandler(OutputHandler& outputHandler, const Ch* keyString, SizeType keyLength) : 
+        outputHandler_(outputHandler), keyString_(keyString), keyLength_(keyLength), filterValueDepth_(), filteredKeyCount_()
+    {}
+
+    bool Null()             { return filterValueDepth_ > 0 ? EndValue() : outputHandler_.Null()    && EndValue(); }
+    bool Bool(bool b)       { return filterValueDepth_ > 0 ? EndValue() : outputHandler_.Bool(b)   && EndValue(); }
+    bool Int(int i)         { return filterValueDepth_ > 0 ? EndValue() : outputHandler_.Int(i)    && EndValue(); }
+    bool Uint(unsigned u)   { return filterValueDepth_ > 0 ? EndValue() : outputHandler_.Uint(u)   && EndValue(); }
+    bool Int64(int64_t i)   { return filterValueDepth_ > 0 ? EndValue() : outputHandler_.Int64(i)  && EndValue(); }
+    bool Uint64(uint64_t u) { return filterValueDepth_ > 0 ? EndValue() : outputHandler_.Uint64(u) && EndValue(); }
+    bool Double(double d)   { return filterValueDepth_ > 0 ? EndValue() : outputHandler_.Double(d) && EndValue(); }
+    bool RawNumber(const Ch* str, SizeType len, bool copy) { return filterValueDepth_ > 0 ? EndValue() : outputHandler_.RawNumber(str, len, copy) && EndValue(); }
+    bool String   (const Ch* str, SizeType len, bool copy) { return filterValueDepth_ > 0 ? EndValue() : outputHandler_.String   (str, len, copy) && EndValue(); }
+    
+    bool StartObject() { 
+        if (filterValueDepth_ > 0) {
+            filterValueDepth_++;
+            return true;
+        }
+        else {
+            filteredKeyCount_.push(0);
+            return outputHandler_.StartObject();
+        }
+    }
+    
+    bool Key(const Ch* str, SizeType len, bool copy) { 
+        if (filterValueDepth_ > 0) 
+            return true;
+        else if (len == keyLength_ && std::memcmp(str, keyString_, len) == 0) {
+            filterValueDepth_ = 1;
+            return true;
+        }
+        else {
+            ++filteredKeyCount_.top();
+            return outputHandler_.Key(str, len, copy);
+        }
+    }
+
+    bool EndObject(SizeType) {
+        if (filterValueDepth_ > 0) {
+            filterValueDepth_--;
+            return EndValue();
+        }
+        else {
+            // Use our own filtered memberCount
+            SizeType memberCount = filteredKeyCount_.top();
+            filteredKeyCount_.pop();
+            return outputHandler_.EndObject(memberCount) && EndValue();
+        }
+    }
+
+    bool StartArray() {
+        if (filterValueDepth_ > 0) {
+            filterValueDepth_++;
+            return true;
+        }
+        else
+            return outputHandler_.StartArray();
+    }
+
+    bool EndArray(SizeType elementCount) {
+        if (filterValueDepth_ > 0) {
+            filterValueDepth_--;
+            return EndValue();
+        }
+        else
+            return outputHandler_.EndArray(elementCount) && EndValue();
+    }
+
+private:
+    FilterKeyHandler(const FilterKeyHandler&);
+    FilterKeyHandler& operator=(const FilterKeyHandler&);
+
+    bool EndValue() {
+        if (filterValueDepth_ == 1) // Just at the end of value after filtered key
+            filterValueDepth_ = 0;
+        return true;
+    }
+
+    OutputHandler& outputHandler_;
+    const char* keyString_;
+    const SizeType keyLength_;
+    unsigned filterValueDepth_;
+    std::stack<SizeType> filteredKeyCount_;
+};
+
+// Implements a generator for Document::Populate()
+template <typename InputStream>
+class FilterKeyReader {
+public:
+    typedef char Ch;
+
+    FilterKeyReader(InputStream& is, const Ch* keyString, SizeType keyLength) : 
+        is_(is), keyString_(keyString), keyLength_(keyLength), parseResult_()
+    {}
+
+    // SAX event flow: reader -> filter -> handler
+    template <typename Handler>
+    bool operator()(Handler& handler) {
+        FilterKeyHandler<Handler> filter(handler, keyString_, keyLength_);
+        Reader reader;
+        parseResult_ = reader.Parse(is_, filter);
+        return parseResult_;
+    }
+
+    const ParseResult& GetParseResult() const { return parseResult_; }
+
+private:
+    FilterKeyReader(const FilterKeyReader&);
+    FilterKeyReader& operator=(const FilterKeyReader&);
+
+    InputStream& is_;
+    const char* keyString_;
+    const SizeType keyLength_;
+    ParseResult parseResult_;
+};
+
+int main(int argc, char* argv[]) {
+    if (argc != 2) {
+        fprintf(stderr, "filterkeydom key < input.json > output.json\n");
+        return 1;
+    }
+
+    // Prepare input stream.
+    char readBuffer[65536];
+    FileReadStream is(stdin, readBuffer, sizeof(readBuffer));
+
+    // Prepare Filter
+    FilterKeyReader<FileReadStream> reader(is, argv[1], static_cast<SizeType>(strlen(argv[1])));
+
+    // Populates the filtered events from reader
+    Document document;
+    document.Populate(reader);
+    ParseResult pr = reader.GetParseResult();
+    if (!pr) {
+        fprintf(stderr, "\nError(%u): %s\n", static_cast<unsigned>(pr.Offset()), GetParseError_En(pr.Code()));
+        return 1;
+    }
+
+    // Prepare JSON writer and output stream.
+    char writeBuffer[65536];
+    FileWriteStream os(stdout, writeBuffer, sizeof(writeBuffer));
+    Writer<FileWriteStream> writer(os);
+
+    // Write the document to standard output
+    document.Accept(writer);
+    return 0;
+}
diff --git a/example/jsonx/jsonx.cpp b/example/jsonx/jsonx.cpp
new file mode 100644
index 0000000..954aa2b
--- /dev/null
+++ b/example/jsonx/jsonx.cpp
@@ -0,0 +1,207 @@
+// JSON to JSONx conversion example, using SAX API.
+// JSONx is an IBM standard format to represent JSON as XML.
+// https://www-01.ibm.com/support/knowledgecenter/SS9H2Y_7.1.0/com.ibm.dp.doc/json_jsonx.html
+// This example parses JSON text from stdin with validation,
+// and converts it to JSONx format on stdout.
+// Need compile with -D__STDC_FORMAT_MACROS for defining PRId64 and PRIu64 macros.
+
+#include "rapidjson/reader.h"
+#include "rapidjson/stringbuffer.h"
+#include "rapidjson/filereadstream.h"
+#include "rapidjson/filewritestream.h"
+#include "rapidjson/error/en.h"
+#include <cstdio>
+
+using namespace rapidjson;
+
+// For simplicity, this example only read/write in UTF-8 encoding
+template <typename OutputStream>
+class JsonxWriter {
+public:
+    JsonxWriter(OutputStream& os) : os_(os), name_(), level_(0), hasName_(false) {
+    }
+
+    bool Null() {
+        return WriteStartElement("null", true);
+    }
+    
+    bool Bool(bool b) {
+        return 
+            WriteStartElement("boolean") &&
+            WriteString(b ? "true" : "false") &&
+            WriteEndElement("boolean");
+    }
+    
+    bool Int(int i) {
+        char buffer[12];
+        return WriteNumberElement(buffer, sprintf(buffer, "%d", i));
+    }
+    
+    bool Uint(unsigned i) {
+        char buffer[11];
+        return WriteNumberElement(buffer, sprintf(buffer, "%u", i));
+    }
+    
+    bool Int64(int64_t i) {
+        char buffer[21];
+        return WriteNumberElement(buffer, sprintf(buffer, "%" PRId64, i));
+    }
+    
+    bool Uint64(uint64_t i) {
+        char buffer[21];
+        return WriteNumberElement(buffer, sprintf(buffer, "%" PRIu64, i));
+    }
+    
+    bool Double(double d) {
+        char buffer[30];
+        return WriteNumberElement(buffer, sprintf(buffer, "%.17g", d));
+    }
+
+    bool RawNumber(const char* str, SizeType length, bool) {
+        return
+            WriteStartElement("number") &&
+            WriteEscapedText(str, length) &&
+            WriteEndElement("number");
+    }
+
+    bool String(const char* str, SizeType length, bool) {
+        return
+            WriteStartElement("string") &&
+            WriteEscapedText(str, length) &&
+            WriteEndElement("string");
+    }
+
+    bool StartObject() {
+        return WriteStartElement("object");
+    }
+
+    bool Key(const char* str, SizeType length, bool) {
+        // backup key to name_
+        name_.Clear();
+        for (SizeType i = 0; i < length; i++)
+            name_.Put(str[i]);
+        hasName_ = true;
+        return true;
+    }
+
+    bool EndObject(SizeType) {
+        return WriteEndElement("object");
+    }
+
+    bool StartArray() {
+        return WriteStartElement("array");
+    }
+
+    bool EndArray(SizeType) {
+        return WriteEndElement("array");
+    }
+
+private:
+    bool WriteString(const char* s) {
+        while (*s)
+            os_.Put(*s++);
+        return true;
+    }
+
+    bool WriteEscapedAttributeValue(const char* s, size_t length) {
+        for (size_t i = 0; i < length; i++) {
+            switch (s[i]) {
+                case '&': WriteString("&amp;"); break;
+                case '<': WriteString("&lt;"); break;
+                case '"': WriteString("&quot;"); break;
+                default: os_.Put(s[i]); break;
+            }
+        }
+        return true;
+    }
+
+    bool WriteEscapedText(const char* s, size_t length) {
+        for (size_t i = 0; i < length; i++) {
+            switch (s[i]) {
+                case '&': WriteString("&amp;"); break;
+                case '<': WriteString("&lt;"); break;
+                default: os_.Put(s[i]); break;
+            }
+        }
+        return true;
+    }
+
+    bool WriteStartElement(const char* type, bool emptyElement = false) {
+        if (level_ == 0)
+            if (!WriteString("<?xml version=\"1.0\" encoding=\"UTF-8\"?>"))
+                return false;
+
+        if (!WriteString("<json:") || !WriteString(type))
+            return false;
+
+        // For root element, need to add declarations
+        if (level_ == 0) {
+            if (!WriteString(
+                " xsi:schemaLocation=\"http://www.datapower.com/schemas/json jsonx.xsd\""
+                " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\""
+                " xmlns:json=\"http://www.ibm.com/xmlns/prod/2009/jsonx\""))
+                return false;
+        }
+
+        if (hasName_) {
+            hasName_ = false;
+            if (!WriteString(" name=\"") ||
+                !WriteEscapedAttributeValue(name_.GetString(), name_.GetSize()) ||
+                !WriteString("\""))
+                return false;
+        }
+
+        if (emptyElement)
+            return WriteString("/>");
+        else {
+            level_++;
+            return WriteString(">");
+        }
+    }
+
+    bool WriteEndElement(const char* type) {
+        if (!WriteString("</json:") ||
+            !WriteString(type) ||
+            !WriteString(">"))
+            return false;
+
+        // For the last end tag, flush the output stream.
+        if (--level_ == 0)
+            os_.Flush();
+
+        return true;
+    }
+
+    bool WriteNumberElement(const char* buffer, int length) {
+        if (!WriteStartElement("number"))
+            return false;
+        for (int j = 0; j < length; j++)
+            os_.Put(buffer[j]);
+        return WriteEndElement("number");
+    }
+
+    OutputStream& os_;
+    StringBuffer name_;
+    unsigned level_;
+    bool hasName_;
+};
+
+int main(int, char*[]) {
+    // Prepare JSON reader and input stream.
+    Reader reader;
+    char readBuffer[65536];
+    FileReadStream is(stdin, readBuffer, sizeof(readBuffer));
+
+    // Prepare JSON writer and output stream.
+    char writeBuffer[65536];
+    FileWriteStream os(stdout, writeBuffer, sizeof(writeBuffer));
+    JsonxWriter<FileWriteStream> writer(os);
+
+    // JSON reader parse from the input stream and let writer generate the output.
+    if (!reader.Parse(is, writer)) {
+        fprintf(stderr, "\nError(%u): %s\n", static_cast<unsigned>(reader.GetErrorOffset()), GetParseError_En(reader.GetParseErrorCode()));
+        return 1;
+    }
+
+    return 0;
+}
diff --git a/example/lookaheadparser/lookaheadparser.cpp b/example/lookaheadparser/lookaheadparser.cpp
new file mode 100644
index 0000000..f627f4d
--- /dev/null
+++ b/example/lookaheadparser/lookaheadparser.cpp
@@ -0,0 +1,350 @@
+#include "rapidjson/reader.h"
+#include "rapidjson/document.h"
+#include <iostream>
+
+RAPIDJSON_DIAG_PUSH
+#ifdef __GNUC__
+RAPIDJSON_DIAG_OFF(effc++)
+#endif
+
+// This example demonstrates JSON token-by-token parsing with an API that is
+// more direct; you don't need to design your logic around a handler object and
+// callbacks. Instead, you retrieve values from the JSON stream by calling
+// GetInt(), GetDouble(), GetString() and GetBool(), traverse into structures
+// by calling EnterObject() and EnterArray(), and skip over unwanted data by
+// calling SkipValue(). When you know your JSON's structure, this can be quite
+// convenient.
+//
+// If you aren't sure of what's next in the JSON data, you can use PeekType() and
+// PeekValue() to look ahead to the next object before reading it.
+//
+// If you call the wrong retrieval method--e.g. GetInt when the next JSON token is
+// not an int, EnterObject or EnterArray when there isn't actually an object or array
+// to read--the stream parsing will end immediately and no more data will be delivered.
+//
+// After calling EnterObject, you retrieve keys via NextObjectKey() and values via
+// the normal getters. When NextObjectKey() returns null, you have exited the
+// object, or you can call SkipObject() to skip to the end of the object
+// immediately. If you fetch the entire object (i.e. NextObjectKey() returned null),
+// you should not call SkipObject().
+//
+// After calling EnterArray(), you must alternate between calling NextArrayValue()
+// to see if the array has more data, and then retrieving values via the normal
+// getters. You can call SkipArray() to skip to the end of the array immediately.
+// If you fetch the entire array (i.e. NextArrayValue() returned false),
+// you should not call SkipArray().
+//
+// This parser uses in-situ strings, so the JSON buffer will be altered during the
+// parse.
+
+using namespace rapidjson;
+
+
+class LookaheadParserHandler {
+public:
+    bool Null() { st_ = kHasNull; v_.SetNull(); return true; }
+    bool Bool(bool b) { st_ = kHasBool; v_.SetBool(b); return true; }
+    bool Int(int i) { st_ = kHasNumber; v_.SetInt(i); return true; }
+    bool Uint(unsigned u) { st_ = kHasNumber; v_.SetUint(u); return true; }
+    bool Int64(int64_t i) { st_ = kHasNumber; v_.SetInt64(i); return true; }
+    bool Uint64(uint64_t u) { st_ = kHasNumber; v_.SetUint64(u); return true; }
+    bool Double(double d) { st_ = kHasNumber; v_.SetDouble(d); return true; }
+    bool RawNumber(const char*, SizeType, bool) { return false; }
+    bool String(const char* str, SizeType length, bool) { st_ = kHasString; v_.SetString(str, length); return true; }
+    bool StartObject() { st_ = kEnteringObject; return true; }
+    bool Key(const char* str, SizeType length, bool) { st_ = kHasKey; v_.SetString(str, length); return true; }
+    bool EndObject(SizeType) { st_ = kExitingObject; return true; }
+    bool StartArray() { st_ = kEnteringArray; return true; }
+    bool EndArray(SizeType) { st_ = kExitingArray; return true; }
+
+protected:
+    LookaheadParserHandler(char* str);
+    void ParseNext();
+
+protected:
+    enum LookaheadParsingState {
+        kInit,
+        kError,
+        kHasNull,
+        kHasBool,
+        kHasNumber,
+        kHasString,
+        kHasKey,
+        kEnteringObject,
+        kExitingObject,
+        kEnteringArray,
+        kExitingArray
+    };
+    
+    Value v_;
+    LookaheadParsingState st_;
+    Reader r_;
+    InsituStringStream ss_;
+    
+    static const int parseFlags = kParseDefaultFlags | kParseInsituFlag;
+};
+
+LookaheadParserHandler::LookaheadParserHandler(char* str) : v_(), st_(kInit), r_(), ss_(str) {
+    r_.IterativeParseInit();
+    ParseNext();
+}
+
+void LookaheadParserHandler::ParseNext() {
+    if (r_.HasParseError()) {
+        st_ = kError;
+        return;
+    }
+    
+    r_.IterativeParseNext<parseFlags>(ss_, *this);
+}
+
+class LookaheadParser : protected LookaheadParserHandler {
+public:
+    LookaheadParser(char* str) : LookaheadParserHandler(str) {}
+    
+    bool EnterObject();
+    bool EnterArray();
+    const char* NextObjectKey();
+    bool NextArrayValue();
+    int GetInt();
+    double GetDouble();
+    const char* GetString();
+    bool GetBool();
+    void GetNull();
+
+    void SkipObject();
+    void SkipArray();
+    void SkipValue();
+    Value* PeekValue();
+    int PeekType(); // returns a rapidjson::Type, or -1 for no value (at end of object/array)
+    
+    bool IsValid() { return st_ != kError; }
+    
+protected:
+    void SkipOut(int depth);
+};
+
+bool LookaheadParser::EnterObject() {
+    if (st_ != kEnteringObject) {
+        st_  = kError;
+        return false;
+    }
+    
+    ParseNext();
+    return true;
+}
+
+bool LookaheadParser::EnterArray() {
+    if (st_ != kEnteringArray) {
+        st_  = kError;
+        return false;
+    }
+    
+    ParseNext();
+    return true;
+}
+
+const char* LookaheadParser::NextObjectKey() {
+    if (st_ == kHasKey) {
+        const char* result = v_.GetString();
+        ParseNext();
+        return result;
+    }
+    
+    if (st_ != kExitingObject) {
+        st_ = kError;
+        return 0;
+    }
+    
+    ParseNext();
+    return 0;
+}
+
+bool LookaheadParser::NextArrayValue() {
+    if (st_ == kExitingArray) {
+        ParseNext();
+        return false;
+    }
+    
+    if (st_ == kError || st_ == kExitingObject || st_ == kHasKey) {
+        st_ = kError;
+        return false;
+    }
+
+    return true;
+}
+
+int LookaheadParser::GetInt() {
+    if (st_ != kHasNumber || !v_.IsInt()) {
+        st_ = kError;
+        return 0;
+    }
+
+    int result = v_.GetInt();
+    ParseNext();
+    return result;
+}
+
+double LookaheadParser::GetDouble() {
+    if (st_ != kHasNumber) {
+        st_  = kError;
+        return 0.;
+    }
+    
+    double result = v_.GetDouble();
+    ParseNext();
+    return result;
+}
+
+bool LookaheadParser::GetBool() {
+    if (st_ != kHasBool) {
+        st_  = kError;
+        return false;
+    }
+    
+    bool result = v_.GetBool();
+    ParseNext();
+    return result;
+}
+
+void LookaheadParser::GetNull() {
+    if (st_ != kHasNull) {
+        st_  = kError;
+        return;
+    }
+
+    ParseNext();
+}
+
+const char* LookaheadParser::GetString() {
+    if (st_ != kHasString) {
+        st_  = kError;
+        return 0;
+    }
+    
+    const char* result = v_.GetString();
+    ParseNext();
+    return result;
+}
+
+void LookaheadParser::SkipOut(int depth) {
+    do {
+        if (st_ == kEnteringArray || st_ == kEnteringObject) {
+            ++depth;
+        }
+        else if (st_ == kExitingArray || st_ == kExitingObject) {
+            --depth;
+        }
+        else if (st_ == kError) {
+            return;
+        }
+
+        ParseNext();
+    }
+    while (depth > 0);
+}
+
+void LookaheadParser::SkipValue() {
+    SkipOut(0);
+}
+
+void LookaheadParser::SkipArray() {
+    SkipOut(1);
+}
+
+void LookaheadParser::SkipObject() {
+    SkipOut(1);
+}
+
+Value* LookaheadParser::PeekValue() {
+    if (st_ >= kHasNull && st_ <= kHasKey) {
+        return &v_;
+    }
+    
+    return 0;
+}
+
+int LookaheadParser::PeekType() {
+    if (st_ >= kHasNull && st_ <= kHasKey) {
+        return v_.GetType();
+    }
+    
+    if (st_ == kEnteringArray) {
+        return kArrayType;
+    }
+    
+    if (st_ == kEnteringObject) {
+        return kObjectType;
+    }
+
+    return -1;
+}
+
+//-------------------------------------------------------------------------
+
+int main() {
+    using namespace std;
+
+    char json[] = " { \"hello\" : \"world\", \"t\" : true , \"f\" : false, \"n\": null,"
+        "\"i\":123, \"pi\": 3.1416, \"a\":[-1, 2, 3, 4, \"array\", []], \"skipArrays\":[1, 2, [[[3]]]], "
+        "\"skipObject\":{ \"i\":0, \"t\":true, \"n\":null, \"d\":123.45 }, "
+        "\"skipNested\":[[[[{\"\":0}, {\"\":[-9.87]}]]], [], []], "
+        "\"skipString\":\"zzz\", \"reachedEnd\":null, \"t\":true }";
+
+    LookaheadParser r(json);
+    
+    RAPIDJSON_ASSERT(r.PeekType() == kObjectType);
+
+    r.EnterObject();
+    while (const char* key = r.NextObjectKey()) {
+        if (0 == strcmp(key, "hello")) {
+            RAPIDJSON_ASSERT(r.PeekType() == kStringType);
+            cout << key << ":" << r.GetString() << endl;
+        }
+        else if (0 == strcmp(key, "t") || 0 == strcmp(key, "f")) {
+            RAPIDJSON_ASSERT(r.PeekType() == kTrueType || r.PeekType() == kFalseType);
+            cout << key << ":" << r.GetBool() << endl;
+            continue;
+        }
+        else if (0 == strcmp(key, "n")) {
+            RAPIDJSON_ASSERT(r.PeekType() == kNullType);
+            r.GetNull();
+            cout << key << endl;
+            continue;
+        }
+        else if (0 == strcmp(key, "pi")) {
+            RAPIDJSON_ASSERT(r.PeekType() == kNumberType);
+            cout << key << ":" << r.GetDouble() << endl;
+            continue;
+        }
+        else if (0 == strcmp(key, "a")) {
+            RAPIDJSON_ASSERT(r.PeekType() == kArrayType);
+            
+            r.EnterArray();
+            
+            cout << key << ":[ ";
+            while (r.NextArrayValue()) {
+                if (r.PeekType() == kNumberType) {
+                    cout << r.GetDouble() << " ";
+                }
+                else if (r.PeekType() == kStringType) {
+                    cout << r.GetString() << " ";
+                }
+                else {
+                    r.SkipArray();
+                    break;
+                }
+            }
+            
+            cout << "]" << endl;
+        }
+        else {
+            cout << key << ":skipped" << endl;
+            r.SkipValue();
+        }
+    }
+    
+    return 0;
+}
+
+RAPIDJSON_DIAG_POP
diff --git a/example/messagereader/messagereader.cpp b/example/messagereader/messagereader.cpp
new file mode 100644
index 0000000..3399bc9
--- /dev/null
+++ b/example/messagereader/messagereader.cpp
@@ -0,0 +1,105 @@
+// Reading a message JSON with Reader (SAX-style API).
+// The JSON should be an object with key-string pairs.
+
+#include "rapidjson/reader.h"
+#include "rapidjson/error/en.h"
+#include <iostream>
+#include <string>
+#include <map>
+
+using namespace std;
+using namespace rapidjson;
+
+typedef map<string, string> MessageMap;
+
+#if defined(__GNUC__)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(effc++)
+#endif
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(switch-enum)
+#endif
+
+struct MessageHandler
+    : public BaseReaderHandler<UTF8<>, MessageHandler> {
+    MessageHandler() : messages_(), state_(kExpectObjectStart), name_() {}
+
+    bool StartObject() {
+        switch (state_) {
+        case kExpectObjectStart:
+            state_ = kExpectNameOrObjectEnd;
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    bool String(const char* str, SizeType length, bool) {
+        switch (state_) {
+        case kExpectNameOrObjectEnd:
+            name_ = string(str, length);
+            state_ = kExpectValue;
+            return true;
+        case kExpectValue:
+            messages_.insert(MessageMap::value_type(name_, string(str, length)));
+            state_ = kExpectNameOrObjectEnd;
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    bool EndObject(SizeType) { return state_ == kExpectNameOrObjectEnd; }
+
+    bool Default() { return false; } // All other events are invalid.
+
+    MessageMap messages_;
+    enum State {
+        kExpectObjectStart,
+        kExpectNameOrObjectEnd,
+        kExpectValue
+    }state_;
+    std::string name_;
+};
+
+#if defined(__GNUC__)
+RAPIDJSON_DIAG_POP
+#endif
+
+#ifdef __clang__
+RAPIDJSON_DIAG_POP
+#endif
+
+static void ParseMessages(const char* json, MessageMap& messages) {
+    Reader reader;
+    MessageHandler handler;
+    StringStream ss(json);
+    if (reader.Parse(ss, handler))
+        messages.swap(handler.messages_);   // Only change it if success.
+    else {
+        ParseErrorCode e = reader.GetParseErrorCode();
+        size_t o = reader.GetErrorOffset();
+        cout << "Error: " << GetParseError_En(e) << endl;;
+        cout << " at offset " << o << " near '" << string(json).substr(o, 10) << "...'" << endl;
+    }
+}
+
+int main() {
+    MessageMap messages;
+
+    const char* json1 = "{ \"greeting\" : \"Hello!\", \"farewell\" : \"bye-bye!\" }";
+    cout << json1 << endl;
+    ParseMessages(json1, messages);
+
+    for (MessageMap::const_iterator itr = messages.begin(); itr != messages.end(); ++itr)
+        cout << itr->first << ": " << itr->second << endl;
+
+    cout << endl << "Parse a JSON with invalid schema." << endl;
+    const char* json2 = "{ \"greeting\" : \"Hello!\", \"farewell\" : \"bye-bye!\", \"foo\" : {} }";
+    cout << json2 << endl;
+    ParseMessages(json2, messages);
+
+    return 0;
+}
diff --git a/example/parsebyparts/parsebyparts.cpp b/example/parsebyparts/parsebyparts.cpp
new file mode 100644
index 0000000..ff73539
--- /dev/null
+++ b/example/parsebyparts/parsebyparts.cpp
@@ -0,0 +1,176 @@
+// Example of parsing JSON to document by parts.
+
+// Using C++11 threads
+// Temporarily disable for clang (older version) due to incompatibility with libstdc++
+#if (__cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1700)) && !defined(__clang__)
+
+#include "rapidjson/document.h"
+#include "rapidjson/error/en.h"
+#include "rapidjson/writer.h"
+#include "rapidjson/ostreamwrapper.h"
+#include <condition_variable>
+#include <iostream>
+#include <mutex>
+#include <thread>
+
+using namespace rapidjson;
+
+template<unsigned parseFlags = kParseDefaultFlags>
+class AsyncDocumentParser {
+public:
+    AsyncDocumentParser(Document& d)
+        : stream_(*this)
+        , d_(d)
+        , parseThread_()
+        , mutex_()
+        , notEmpty_()
+        , finish_()
+        , completed_()
+    {
+        // Create and execute thread after all member variables are initialized.
+        parseThread_ = std::thread(&AsyncDocumentParser::Parse, this);
+    }
+
+    ~AsyncDocumentParser() {
+        if (!parseThread_.joinable())
+            return;
+
+        {        
+            std::unique_lock<std::mutex> lock(mutex_);
+
+            // Wait until the buffer is read up (or parsing is completed)
+            while (!stream_.Empty() && !completed_)
+                finish_.wait(lock);
+
+            // Automatically append '\0' as the terminator in the stream.
+            static const char terminator[] = "";
+            stream_.src_ = terminator;
+            stream_.end_ = terminator + 1;
+            notEmpty_.notify_one(); // unblock the AsyncStringStream
+        }
+
+        parseThread_.join();
+    }
+
+    void ParsePart(const char* buffer, size_t length) {
+        std::unique_lock<std::mutex> lock(mutex_);
+        
+        // Wait until the buffer is read up (or parsing is completed)
+        while (!stream_.Empty() && !completed_)
+            finish_.wait(lock);
+
+        // Stop further parsing if the parsing process is completed.
+        if (completed_)
+            return;
+
+        // Set the buffer to stream and unblock the AsyncStringStream
+        stream_.src_ = buffer;
+        stream_.end_ = buffer + length;
+        notEmpty_.notify_one();
+    }
+
+private:
+    void Parse() {
+        d_.ParseStream<parseFlags>(stream_);
+
+        // The stream may not be fully read, notify finish anyway to unblock ParsePart()
+        std::unique_lock<std::mutex> lock(mutex_);
+        completed_ = true;      // Parsing process is completed
+        finish_.notify_one();   // Unblock ParsePart() or destructor if they are waiting.
+    }
+
+    struct AsyncStringStream {
+        typedef char Ch;
+
+        AsyncStringStream(AsyncDocumentParser& parser) : parser_(parser), src_(), end_(), count_() {}
+
+        char Peek() const {
+            std::unique_lock<std::mutex> lock(parser_.mutex_);
+
+            // If nothing in stream, block to wait.
+            while (Empty())
+                parser_.notEmpty_.wait(lock);
+
+            return *src_;
+        }
+
+        char Take() {
+            std::unique_lock<std::mutex> lock(parser_.mutex_);
+
+            // If nothing in stream, block to wait.
+            while (Empty())
+                parser_.notEmpty_.wait(lock);
+
+            count_++;
+            char c = *src_++;
+
+            // If the whole stream has been read up, notify that the stream is finished.
+            if (Empty())
+                parser_.finish_.notify_one();
+
+            return c;
+        }
+
+        size_t Tell() const { return count_; }
+
+        // Not implemented
+        char* PutBegin() { return 0; }
+        void Put(char) {}
+        void Flush() {}
+        size_t PutEnd(char*) { return 0; }
+
+        bool Empty() const { return src_ == end_; }
+
+        AsyncDocumentParser& parser_;
+        const char* src_;     //!< Current read position.
+        const char* end_;     //!< End of buffer
+        size_t count_;        //!< Number of characters taken so far.
+    };
+
+    AsyncStringStream stream_;
+    Document& d_;
+    std::thread parseThread_;
+    std::mutex mutex_;
+    std::condition_variable notEmpty_;
+    std::condition_variable finish_;
+    bool completed_;
+};
+
+int main() {
+    Document d;
+
+    {
+        AsyncDocumentParser<> parser(d);
+
+        const char json1[] = " { \"hello\" : \"world\", \"t\" : tr";
+        //const char json1[] = " { \"hello\" : \"world\", \"t\" : trX"; // For test parsing error
+        const char json2[] = "ue, \"f\" : false, \"n\": null, \"i\":123, \"pi\": 3.14";
+        const char json3[] = "16, \"a\":[1, 2, 3, 4] } ";
+
+        parser.ParsePart(json1, sizeof(json1) - 1);
+        parser.ParsePart(json2, sizeof(json2) - 1);
+        parser.ParsePart(json3, sizeof(json3) - 1);
+    }
+
+    if (d.HasParseError()) {
+        std::cout << "Error at offset " << d.GetErrorOffset() << ": " << GetParseError_En(d.GetParseError()) << std::endl;
+        return EXIT_FAILURE;
+    }
+    
+    // Stringify the JSON to cout
+    OStreamWrapper os(std::cout);
+    Writer<OStreamWrapper> writer(os);
+    d.Accept(writer);
+    std::cout << std::endl;
+
+    return EXIT_SUCCESS;
+}
+
+#else // Not supporting C++11 
+
+#include <iostream>
+int main() {
+    std::cout << "This example requires C++11 compiler" << std::endl;
+}
+
+#endif
diff --git a/example/pretty/pretty.cpp b/example/pretty/pretty.cpp
new file mode 100644
index 0000000..2feff5d
--- /dev/null
+++ b/example/pretty/pretty.cpp
@@ -0,0 +1,30 @@
+// JSON pretty formatting example
+// This example can only handle UTF-8. For handling other encodings, see prettyauto example.
+
+#include "rapidjson/reader.h"
+#include "rapidjson/prettywriter.h"
+#include "rapidjson/filereadstream.h"
+#include "rapidjson/filewritestream.h"
+#include "rapidjson/error/en.h"
+
+using namespace rapidjson;
+
+int main(int, char*[]) {
+    // Prepare reader and input stream.
+    Reader reader;
+    char readBuffer[65536];
+    FileReadStream is(stdin, readBuffer, sizeof(readBuffer));
+
+    // Prepare writer and output stream.
+    char writeBuffer[65536];
+    FileWriteStream os(stdout, writeBuffer, sizeof(writeBuffer));
+    PrettyWriter<FileWriteStream> writer(os);
+
+    // JSON reader parse from the input stream and let writer generate the output.
+    if (!reader.Parse<kParseValidateEncodingFlag>(is, writer)) {
+        fprintf(stderr, "\nError(%u): %s\n", static_cast<unsigned>(reader.GetErrorOffset()), GetParseError_En(reader.GetParseErrorCode()));
+        return 1;
+    }
+
+    return 0;
+}
diff --git a/example/prettyauto/prettyauto.cpp b/example/prettyauto/prettyauto.cpp
new file mode 100644
index 0000000..1687bae
--- /dev/null
+++ b/example/prettyauto/prettyauto.cpp
@@ -0,0 +1,56 @@
+// JSON pretty formatting example
+// This example can handle UTF-8/UTF-16LE/UTF-16BE/UTF-32LE/UTF-32BE.
+// The input is first converted to UTF-8, then written back in the original encoding with pretty formatting.
+
+#include "rapidjson/reader.h"
+#include "rapidjson/prettywriter.h"
+#include "rapidjson/filereadstream.h"
+#include "rapidjson/filewritestream.h"
+#include "rapidjson/encodedstream.h"    // NEW
+#include "rapidjson/error/en.h"
+#ifdef _WIN32
+#include <fcntl.h>
+#include <io.h>
+#endif
+
+using namespace rapidjson;
+
+int main(int, char*[]) {
+#ifdef _WIN32
+    // Prevent Windows converting between CR+LF and LF
+    _setmode(_fileno(stdin), _O_BINARY);    // NEW
+    _setmode(_fileno(stdout), _O_BINARY);   // NEW
+#endif
+
+    // Prepare reader and input stream.
+    //Reader reader;
+    GenericReader<AutoUTF<unsigned>, UTF8<> > reader;       // CHANGED
+    char readBuffer[65536];
+    FileReadStream is(stdin, readBuffer, sizeof(readBuffer));
+    AutoUTFInputStream<unsigned, FileReadStream> eis(is);   // NEW
+
+    // Prepare writer and output stream.
+    char writeBuffer[65536];
+    FileWriteStream os(stdout, writeBuffer, sizeof(writeBuffer));
+
+#if 1
+    // Use the same Encoding of the input. Also use BOM according to input.
+    typedef AutoUTFOutputStream<unsigned, FileWriteStream> OutputStream;    // NEW
+    OutputStream eos(os, eis.GetType(), eis.HasBOM());                      // NEW
+    PrettyWriter<OutputStream, UTF8<>, AutoUTF<unsigned> > writer(eos);     // CHANGED
+#else
+    // You may also use static bound encoding type, such as output to UTF-16LE with BOM
+    typedef EncodedOutputStream<UTF16LE<>,FileWriteStream> OutputStream;    // NEW
+    OutputStream eos(os, true);                                             // NEW
+    PrettyWriter<OutputStream, UTF8<>, UTF16LE<> > writer(eos);             // CHANGED
+#endif
+
+    // JSON reader parse from the input stream and let writer generate the output.
+    //if (!reader.Parse<kParseValidateEncodingFlag>(is, writer)) {
+    if (!reader.Parse<kParseValidateEncodingFlag>(eis, writer)) {   // CHANGED
+        fprintf(stderr, "\nError(%u): %s\n", static_cast<unsigned>(reader.GetErrorOffset()), GetParseError_En(reader.GetParseErrorCode()));
+        return 1;
+    }
+
+    return 0;
+}
diff --git a/example/schemavalidator/schemavalidator.cpp b/example/schemavalidator/schemavalidator.cpp
new file mode 100644
index 0000000..06bbe4d
--- /dev/null
+++ b/example/schemavalidator/schemavalidator.cpp
@@ -0,0 +1,78 @@
+// Schema Validator example
+
+// The example validates JSON text from stdin with a JSON schema specified in the argument.
+
+#include "rapidjson/error/en.h"
+#include "rapidjson/filereadstream.h"
+#include "rapidjson/schema.h"
+#include "rapidjson/stringbuffer.h"
+#include "rapidjson/prettywriter.h"
+
+using namespace rapidjson;
+
+int main(int argc, char *argv[]) {
+    if (argc != 2) {
+        fprintf(stderr, "Usage: schemavalidator schema.json < input.json\n");
+        return EXIT_FAILURE;
+    }
+
+    // Read a JSON schema from file into Document
+    Document d;
+    char buffer[4096];
+
+    {
+        FILE *fp = fopen(argv[1], "r");
+        if (!fp) {
+            printf("Schema file '%s' not found\n", argv[1]);
+            return -1;
+        }
+        FileReadStream fs(fp, buffer, sizeof(buffer));
+        d.ParseStream(fs);
+        if (d.HasParseError()) {
+            fprintf(stderr, "Schema file '%s' is not a valid JSON\n", argv[1]);
+            fprintf(stderr, "Error(offset %u): %s\n",
+                static_cast<unsigned>(d.GetErrorOffset()),
+                GetParseError_En(d.GetParseError()));
+            fclose(fp);
+            return EXIT_FAILURE;
+        }
+        fclose(fp);
+    }
+    
+    // Then convert the Document into SchemaDocument
+    SchemaDocument sd(d);
+
+    // Use reader to parse the JSON in stdin, and forward SAX events to validator
+    SchemaValidator validator(sd);
+    Reader reader;
+    FileReadStream is(stdin, buffer, sizeof(buffer));
+    if (!reader.Parse(is, validator) && reader.GetParseErrorCode() != kParseErrorTermination) {
+        // A schema validation error causes kParseErrorTermination, which is handled in the next step.
+        fprintf(stderr, "Input is not a valid JSON\n");
+        fprintf(stderr, "Error(offset %u): %s\n",
+            static_cast<unsigned>(reader.GetErrorOffset()),
+            GetParseError_En(reader.GetParseErrorCode()));
+    }
+
+    // Check the validation result
+    if (validator.IsValid()) {
+        printf("Input JSON is valid.\n");
+        return EXIT_SUCCESS;
+    }
+    else {
+        printf("Input JSON is invalid.\n");
+        StringBuffer sb;
+        validator.GetInvalidSchemaPointer().StringifyUriFragment(sb);
+        fprintf(stderr, "Invalid schema: %s\n", sb.GetString());
+        fprintf(stderr, "Invalid keyword: %s\n", validator.GetInvalidSchemaKeyword());
+        sb.Clear();
+        validator.GetInvalidDocumentPointer().StringifyUriFragment(sb);
+        fprintf(stderr, "Invalid document: %s\n", sb.GetString());
+        // Detailed violation report is available as a JSON value
+        sb.Clear();
+        PrettyWriter<StringBuffer> w(sb);
+        validator.GetError().Accept(w);
+        fprintf(stderr, "Error report:\n%s\n", sb.GetString());
+        return EXIT_FAILURE;
+    }
+}
diff --git a/example/serialize/serialize.cpp b/example/serialize/serialize.cpp
new file mode 100644
index 0000000..12d8715
--- /dev/null
+++ b/example/serialize/serialize.cpp
@@ -0,0 +1,173 @@
+// Serialize example
+// This example shows writing JSON string with writer directly.
+
+#include "rapidjson/prettywriter.h" // for stringify JSON
+#include <cstdio>
+#include <string>
+#include <vector>
+
+using namespace rapidjson;
+
+class Person {
+public:
+    Person(const std::string& name, unsigned age) : name_(name), age_(age) {}
+    Person(const Person& rhs) : name_(rhs.name_), age_(rhs.age_) {}
+    virtual ~Person();
+
+    Person& operator=(const Person& rhs) {
+        name_ = rhs.name_;
+        age_ = rhs.age_;
+        return *this;
+    }
+
+protected:
+    template <typename Writer>
+    void Serialize(Writer& writer) const {
+        // This base class just writes out name-value pairs, without wrapping them in an object.
+        writer.String("name");
+#if RAPIDJSON_HAS_STDSTRING
+        writer.String(name_);
+#else
+        writer.String(name_.c_str(), static_cast<SizeType>(name_.length())); // Supplying length of string is faster.
+#endif
+        writer.String("age");
+        writer.Uint(age_);
+    }
+
+private:
+    std::string name_;
+    unsigned age_;
+};
+
+Person::~Person() {
+}
+
+class Education {
+public:
+    Education(const std::string& school, double GPA) : school_(school), GPA_(GPA) {}
+    Education(const Education& rhs) : school_(rhs.school_), GPA_(rhs.GPA_) {}
+
+    template <typename Writer>
+    void Serialize(Writer& writer) const {
+        writer.StartObject();
+        
+        writer.String("school");
+#if RAPIDJSON_HAS_STDSTRING
+        writer.String(school_);
+#else
+        writer.String(school_.c_str(), static_cast<SizeType>(school_.length()));
+#endif
+
+        writer.String("GPA");
+        writer.Double(GPA_);
+
+        writer.EndObject();
+    }
+
+private:
+    std::string school_;
+    double GPA_;
+};
+
+class Dependent : public Person {
+public:
+    Dependent(const std::string& name, unsigned age, Education* education = 0) : Person(name, age), education_(education) {}
+    Dependent(const Dependent& rhs) : Person(rhs), education_(0) { education_ = (rhs.education_ == 0) ? 0 : new Education(*rhs.education_); }
+    virtual ~Dependent();
+
+    Dependent& operator=(const Dependent& rhs) {
+        if (this == &rhs)
+            return *this;
+        delete education_;
+        education_ = (rhs.education_ == 0) ? 0 : new Education(*rhs.education_);
+        return *this;
+    }
+
+    template <typename Writer>
+    void Serialize(Writer& writer) const {
+        writer.StartObject();
+
+        Person::Serialize(writer);
+
+        writer.String("education");
+        if (education_)
+            education_->Serialize(writer);
+        else
+            writer.Null();
+
+        writer.EndObject();
+    }
+
+private:
+
+    Education *education_;
+};
+
+Dependent::~Dependent() {
+    delete education_; 
+}
+
+class Employee : public Person {
+public:
+    Employee(const std::string& name, unsigned age, bool married) : Person(name, age), dependents_(), married_(married) {}
+    Employee(const Employee& rhs) : Person(rhs), dependents_(rhs.dependents_), married_(rhs.married_) {}
+    virtual ~Employee();
+
+    Employee& operator=(const Employee& rhs) {
+        static_cast<Person&>(*this) = rhs;
+        dependents_ = rhs.dependents_;
+        married_ = rhs.married_;
+        return *this;
+    }
+
+    void AddDependent(const Dependent& dependent) {
+        dependents_.push_back(dependent);
+    }
+
+    template <typename Writer>
+    void Serialize(Writer& writer) const {
+        writer.StartObject();
+
+        Person::Serialize(writer);
+
+        writer.String("married");
+        writer.Bool(married_);
+
+        writer.String(("dependents"));
+        writer.StartArray();
+        for (std::vector<Dependent>::const_iterator dependentItr = dependents_.begin(); dependentItr != dependents_.end(); ++dependentItr)
+            dependentItr->Serialize(writer);
+        writer.EndArray();
+
+        writer.EndObject();
+    }
+
+private:
+    std::vector<Dependent> dependents_;
+    bool married_;
+};
+
+Employee::~Employee() {
+}
+
+int main(int, char*[]) {
+    std::vector<Employee> employees;
+
+    employees.push_back(Employee("Milo YIP", 34, true));
+    employees.back().AddDependent(Dependent("Lua YIP", 3, new Education("Happy Kindergarten", 3.5)));
+    employees.back().AddDependent(Dependent("Mio YIP", 1));
+
+    employees.push_back(Employee("Percy TSE", 30, false));
+
+    StringBuffer sb;
+    PrettyWriter<StringBuffer> writer(sb);
+
+    writer.StartArray();
+    for (std::vector<Employee>::const_iterator employeeItr = employees.begin(); employeeItr != employees.end(); ++employeeItr)
+        employeeItr->Serialize(writer);
+    writer.EndArray();
+
+    puts(sb.GetString());
+
+    return 0;
+}
diff --git a/example/simpledom/simpledom.cpp b/example/simpledom/simpledom.cpp
new file mode 100644
index 0000000..8038419
--- /dev/null
+++ b/example/simpledom/simpledom.cpp
@@ -0,0 +1,29 @@
+// JSON simple example
+// This example does not handle errors.
+
+#include "rapidjson/document.h"
+#include "rapidjson/writer.h"
+#include "rapidjson/stringbuffer.h"
+#include <iostream>
+
+using namespace rapidjson;
+
+int main() {
+    // 1. Parse a JSON string into DOM.
+    const char* json = "{\"project\":\"rapidjson\",\"stars\":10}";
+    Document d;
+    d.Parse(json);
+
+    // 2. Modify it by DOM.
+    Value& s = d["stars"];
+    s.SetInt(s.GetInt() + 1);
+
+    // 3. Stringify the DOM
+    StringBuffer buffer;
+    Writer<StringBuffer> writer(buffer);
+    d.Accept(writer);
+
+    // Output {"project":"rapidjson","stars":11}
+    std::cout << buffer.GetString() << std::endl;
+    return 0;
+}
diff --git a/example/simplepullreader/simplepullreader.cpp b/example/simplepullreader/simplepullreader.cpp
new file mode 100644
index 0000000..a4fb116
--- /dev/null
+++ b/example/simplepullreader/simplepullreader.cpp
@@ -0,0 +1,53 @@
+#include "rapidjson/reader.h"
+#include <iostream>
+#include <sstream>
+
+using namespace rapidjson;
+using namespace std;
+
+// If you can require C++11, you could use std::to_string here
+template <typename T> std::string stringify(T x) {
+    std::stringstream ss;
+    ss << x;
+    return ss.str();
+}
+
+struct MyHandler {
+    const char* type;
+    std::string data;
+    
+    MyHandler() : type(), data() {}
+
+    bool Null() { type = "Null"; data.clear(); return true; }
+    bool Bool(bool b) { type = "Bool:"; data = b? "true": "false"; return true; }
+    bool Int(int i) { type = "Int:"; data = stringify(i); return true; }
+    bool Uint(unsigned u) { type = "Uint:"; data = stringify(u); return true; }
+    bool Int64(int64_t i) { type = "Int64:"; data = stringify(i); return true; }
+    bool Uint64(uint64_t u) { type = "Uint64:"; data = stringify(u); return true; }
+    bool Double(double d) { type = "Double:"; data = stringify(d); return true; }
+    bool RawNumber(const char* str, SizeType length, bool) { type = "Number:"; data = std::string(str, length); return true; }
+    bool String(const char* str, SizeType length, bool) { type = "String:"; data = std::string(str, length); return true; }
+    bool StartObject() { type = "StartObject"; data.clear(); return true; }
+    bool Key(const char* str, SizeType length, bool) { type = "Key:"; data = std::string(str, length); return true; }
+    bool EndObject(SizeType memberCount) { type = "EndObject:"; data = stringify(memberCount); return true; }
+    bool StartArray() { type = "StartArray"; data.clear(); return true; }
+    bool EndArray(SizeType elementCount) { type = "EndArray:"; data = stringify(elementCount); return true; }
+private:
+    MyHandler(const MyHandler& noCopyConstruction);
+    MyHandler& operator=(const MyHandler& noAssignment);
+};
+
+int main() {
+    const char json[] = " { \"hello\" : \"world\", \"t\" : true , \"f\" : false, \"n\": null, \"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3, 4] } ";
+
+    MyHandler handler;
+    Reader reader;
+    StringStream ss(json);
+    reader.IterativeParseInit();
+    while (!reader.IterativeParseComplete()) {
+        reader.IterativeParseNext<kParseDefaultFlags>(ss, handler);
+        cout << handler.type << handler.data << endl;
+    }
+
+    return 0;
+}
diff --git a/example/simplereader/simplereader.cpp b/example/simplereader/simplereader.cpp
new file mode 100644
index 0000000..5aae8a1
--- /dev/null
+++ b/example/simplereader/simplereader.cpp
@@ -0,0 +1,42 @@
+#include "rapidjson/reader.h"
+#include <iostream>
+
+using namespace rapidjson;
+using namespace std;
+
+struct MyHandler {
+    bool Null() { cout << "Null()" << endl; return true; }
+    bool Bool(bool b) { cout << "Bool(" << boolalpha << b << ")" << endl; return true; }
+    bool Int(int i) { cout << "Int(" << i << ")" << endl; return true; }
+    bool Uint(unsigned u) { cout << "Uint(" << u << ")" << endl; return true; }
+    bool Int64(int64_t i) { cout << "Int64(" << i << ")" << endl; return true; }
+    bool Uint64(uint64_t u) { cout << "Uint64(" << u << ")" << endl; return true; }
+    bool Double(double d) { cout << "Double(" << d << ")" << endl; return true; }
+    bool RawNumber(const char* str, SizeType length, bool copy) { 
+        cout << "Number(" << str << ", " << length << ", " << boolalpha << copy << ")" << endl;
+        return true;
+    }
+    bool String(const char* str, SizeType length, bool copy) { 
+        cout << "String(" << str << ", " << length << ", " << boolalpha << copy << ")" << endl;
+        return true;
+    }
+    bool StartObject() { cout << "StartObject()" << endl; return true; }
+    bool Key(const char* str, SizeType length, bool copy) {
+        cout << "Key(" << str << ", " << length << ", " << boolalpha << copy << ")" << endl;
+        return true;
+    }
+    bool EndObject(SizeType memberCount) { cout << "EndObject(" << memberCount << ")" << endl; return true; }
+    bool StartArray() { cout << "StartArray()" << endl; return true; }
+    bool EndArray(SizeType elementCount) { cout << "EndArray(" << elementCount << ")" << endl; return true; }
+};
+
+int main() {
+    const char json[] = " { \"hello\" : \"world\", \"t\" : true , \"f\" : false, \"n\": null, \"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3, 4] } ";
+
+    MyHandler handler;
+    Reader reader;
+    StringStream ss(json);
+    reader.Parse(ss, handler);
+
+    return 0;
+}
diff --git a/example/simplewriter/simplewriter.cpp b/example/simplewriter/simplewriter.cpp
new file mode 100644
index 0000000..8d1275c
--- /dev/null
+++ b/example/simplewriter/simplewriter.cpp
@@ -0,0 +1,36 @@
+#include "rapidjson/writer.h"
+#include "rapidjson/stringbuffer.h"
+#include <iostream>
+
+using namespace rapidjson;
+using namespace std;
+
+int main() {
+    StringBuffer s;
+    Writer<StringBuffer> writer(s);
+    
+    writer.StartObject();               // Between StartObject()/EndObject(), 
+    writer.Key("hello");                // output a key,
+    writer.String("world");             // follow by a value.
+    writer.Key("t");
+    writer.Bool(true);
+    writer.Key("f");
+    writer.Bool(false);
+    writer.Key("n");
+    writer.Null();
+    writer.Key("i");
+    writer.Uint(123);
+    writer.Key("pi");
+    writer.Double(3.1416);
+    writer.Key("a");
+    writer.StartArray();                // Between StartArray()/EndArray(),
+    for (unsigned i = 0; i < 4; i++)
+        writer.Uint(i);                 // all values are elements of the array.
+    writer.EndArray();
+    writer.EndObject();
+
+    // {"hello":"world","t":true,"f":false,"n":null,"i":123,"pi":3.1416,"a":[0,1,2,3]}
+    cout << s.GetString() << endl;
+
+    return 0;
+}
diff --git a/example/sortkeys/sortkeys.cpp b/example/sortkeys/sortkeys.cpp
new file mode 100644
index 0000000..c473784
--- /dev/null
+++ b/example/sortkeys/sortkeys.cpp
@@ -0,0 +1,62 @@
+#include "rapidjson/document.h"
+#include "rapidjson/filewritestream.h"
+#include <rapidjson/prettywriter.h>
+
+#include <algorithm>
+#include <iostream>
+
+using namespace rapidjson;
+using namespace std;
+
+static void printIt(const Value &doc) {
+    char writeBuffer[65536];
+    FileWriteStream os(stdout, writeBuffer, sizeof(writeBuffer));
+    PrettyWriter<FileWriteStream> writer(os);
+    doc.Accept(writer);
+    cout << endl;
+}
+
+struct NameComparator {
+    bool operator()(const Value::Member &lhs, const Value::Member &rhs) const {
+        return (strcmp(lhs.name.GetString(), rhs.name.GetString()) < 0);
+    }
+};
+
+int main() {
+    Document d(kObjectType);
+    Document::AllocatorType &allocator = d.GetAllocator();
+
+    d.AddMember("zeta", Value().SetBool(false), allocator);
+    d.AddMember("gama", Value().SetString("test string", allocator), allocator);
+    d.AddMember("delta", Value().SetInt(123), allocator);
+    d.AddMember("alpha", Value(kArrayType).Move(), allocator);
+
+    printIt(d);
+
+/*
+{
+    "zeta": false,
+    "gama": "test string",
+    "delta": 123,
+    "alpha": []
+}
+*/
+
+// C++11 supports std::move() of Value so it always have no problem for std::sort().
+// Some C++03 implementations of std::sort() requires copy constructor which causes compilation error.
+// Needs a sorting function only depends on std::swap() instead.
+#if __cplusplus >= 201103L || !defined(__GLIBCXX__)
+    std::sort(d.MemberBegin(), d.MemberEnd(), NameComparator());
+#endif
+
+    printIt(d);
+
+/*
+{
+  "alpha": [],
+  "delta": 123,
+  "gama": "test string",
+  "zeta": false
+}
+*/
+}
diff --git a/example/tutorial/tutorial.cpp b/example/tutorial/tutorial.cpp
new file mode 100644
index 0000000..c8bfcc1
--- /dev/null
+++ b/example/tutorial/tutorial.cpp
@@ -0,0 +1,151 @@
+// Hello World example
+// This example shows basic usage of DOM-style API.
+
+#include "rapidjson/document.h"     // rapidjson's DOM-style API
+#include "rapidjson/prettywriter.h" // for stringify JSON
+#include <cstdio>
+
+using namespace rapidjson;
+using namespace std;
+
+int main(int, char*[]) {
+    ////////////////////////////////////////////////////////////////////////////
+    // 1. Parse a JSON text string to a document.
+
+    const char json[] = " { \"hello\" : \"world\", \"t\" : true , \"f\" : false, \"n\": null, \"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3, 4] } ";
+    printf("Original JSON:\n %s\n", json);
+
+    Document document;  // Default template parameter uses UTF8 and MemoryPoolAllocator.
+
+#if 0
+    // "normal" parsing, decode strings to new buffers. Can use other input stream via ParseStream().
+    if (document.Parse(json).HasParseError())
+        return 1;
+#else
+    // In-situ parsing, decode strings directly in the source string. Source must be string.
+    char buffer[sizeof(json)];
+    memcpy(buffer, json, sizeof(json));
+    if (document.ParseInsitu(buffer).HasParseError())
+        return 1;
+#endif
+
+    printf("\nParsing to document succeeded.\n");
+
+    ////////////////////////////////////////////////////////////////////////////
+    // 2. Access values in document. 
+
+    printf("\nAccess values in document:\n");
+    assert(document.IsObject());    // Document is a JSON value represents the root of DOM. Root can be either an object or array.
+
+    assert(document.HasMember("hello"));
+    assert(document["hello"].IsString());
+    printf("hello = %s\n", document["hello"].GetString());
+
+    // Since version 0.2, you can use single lookup to check the existing of member and its value:
+    Value::MemberIterator hello = document.FindMember("hello");
+    assert(hello != document.MemberEnd());
+    assert(hello->value.IsString());
+    assert(strcmp("world", hello->value.GetString()) == 0);
+    (void)hello;
+
+    assert(document["t"].IsBool());     // JSON true/false are bool. Can also uses more specific function IsTrue().
+    printf("t = %s\n", document["t"].GetBool() ? "true" : "false");
+
+    assert(document["f"].IsBool());
+    printf("f = %s\n", document["f"].GetBool() ? "true" : "false");
+
+    printf("n = %s\n", document["n"].IsNull() ? "null" : "?");
+
+    assert(document["i"].IsNumber());   // Number is a JSON type, but C++ needs more specific type.
+    assert(document["i"].IsInt());      // In this case, IsUint()/IsInt64()/IsUInt64() also return true.
+    printf("i = %d\n", document["i"].GetInt()); // Alternative (int)document["i"]
+
+    assert(document["pi"].IsNumber());
+    assert(document["pi"].IsDouble());
+    printf("pi = %g\n", document["pi"].GetDouble());
+
+    {
+        const Value& a = document["a"]; // Using a reference for consecutive access is handy and faster.
+        assert(a.IsArray());
+        for (SizeType i = 0; i < a.Size(); i++) // rapidjson uses SizeType instead of size_t.
+            printf("a[%d] = %d\n", i, a[i].GetInt());
+        
+        int y = a[0].GetInt();
+        (void)y;
+
+        // Iterating array with iterators
+        printf("a = ");
+        for (Value::ConstValueIterator itr = a.Begin(); itr != a.End(); ++itr)
+            printf("%d ", itr->GetInt());
+        printf("\n");
+    }
+
+    // Iterating object members
+    static const char* kTypeNames[] = { "Null", "False", "True", "Object", "Array", "String", "Number" };
+    for (Value::ConstMemberIterator itr = document.MemberBegin(); itr != document.MemberEnd(); ++itr)
+        printf("Type of member %s is %s\n", itr->name.GetString(), kTypeNames[itr->value.GetType()]);
+
+    ////////////////////////////////////////////////////////////////////////////
+    // 3. Modify values in document.
+
+    // Change i to a bigger number
+    {
+        uint64_t f20 = 1;   // compute factorial of 20
+        for (uint64_t j = 1; j <= 20; j++)
+            f20 *= j;
+        document["i"] = f20;    // Alternate form: document["i"].SetUint64(f20)
+        assert(!document["i"].IsInt()); // No longer can be cast as int or uint.
+    }
+
+    // Adding values to array.
+    {
+        Value& a = document["a"];   // This time we uses non-const reference.
+        Document::AllocatorType& allocator = document.GetAllocator();
+        for (int i = 5; i <= 10; i++)
+            a.PushBack(i, allocator);   // May look a bit strange, allocator is needed for potentially realloc. We normally uses the document's.
+
+        // Fluent API
+        a.PushBack("Lua", allocator).PushBack("Mio", allocator);
+    }
+
+    // Making string values.
+
+    // This version of SetString() just store the pointer to the string.
+    // So it is for literal and string that exists within value's life-cycle.
+    {
+        document["hello"] = "rapidjson";    // This will invoke strlen()
+        // Faster version:
+        // document["hello"].SetString("rapidjson", 9);
+    }
+
+    // This version of SetString() needs an allocator, which means it will allocate a new buffer and copy the the string into the buffer.
+    Value author;
+    {
+        char buffer2[10];
+        int len = sprintf(buffer2, "%s %s", "Milo", "Yip");  // synthetic example of dynamically created string.
+
+        author.SetString(buffer2, static_cast<SizeType>(len), document.GetAllocator());
+        // Shorter but slower version:
+        // document["hello"].SetString(buffer, document.GetAllocator());
+
+        // Constructor version: 
+        // Value author(buffer, len, document.GetAllocator());
+        // Value author(buffer, document.GetAllocator());
+        memset(buffer2, 0, sizeof(buffer2)); // For demonstration purpose.
+    }
+    // Variable 'buffer' is unusable now but 'author' has already made a copy.
+    document.AddMember("author", author, document.GetAllocator());
+
+    assert(author.IsNull());        // Move semantic for assignment. After this variable is assigned as a member, the variable becomes null.
+
+    ////////////////////////////////////////////////////////////////////////////
+    // 4. Stringify JSON
+
+    printf("\nModified JSON with reformatting:\n");
+    StringBuffer sb;
+    PrettyWriter<StringBuffer> writer(sb);
+    document.Accept(writer);    // Accept() traverses the DOM and generates Handler events.
+    puts(sb.GetString());
+
+    return 0;
+}
diff --git a/include/rapidjson/allocators.h b/include/rapidjson/allocators.h
index 655f4a3..cc67c89 100644
--- a/include/rapidjson/allocators.h
+++ b/include/rapidjson/allocators.h
@@ -52,6 +52,19 @@
 \endcode
 */
 
+
+/*! \def RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY
+    \ingroup RAPIDJSON_CONFIG
+    \brief User-defined kDefaultChunkCapacity definition.
+
+    User can define this as any \c size that is a power of 2.
+*/
+
+#ifndef RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY
+#define RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY (64 * 1024)
+#endif
+
+
 ///////////////////////////////////////////////////////////////////////////////
 // CrtAllocator
 
@@ -248,7 +261,7 @@
             return false;
     }
 
-    static const int kDefaultChunkCapacity = 64 * 1024; //!< Default chunk capacity.
+    static const int kDefaultChunkCapacity = RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY; //!< Default chunk capacity.
 
     //! Chunk header for perpending to each chunk.
     /*! Chunks are stored as a singly linked list.
diff --git a/include/rapidjson/document.h b/include/rapidjson/document.h
index a6acc24..9783fe4 100644
--- a/include/rapidjson/document.h
+++ b/include/rapidjson/document.h
@@ -26,22 +26,17 @@
 #include <limits>
 
 RAPIDJSON_DIAG_PUSH
-#ifdef _MSC_VER
-RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant
-RAPIDJSON_DIAG_OFF(4244) // conversion from kXxxFlags to 'uint16_t', possible loss of data
-#endif
-
 #ifdef __clang__
 RAPIDJSON_DIAG_OFF(padded)
 RAPIDJSON_DIAG_OFF(switch-enum)
 RAPIDJSON_DIAG_OFF(c++98-compat)
+#elif defined(_MSC_VER)
+RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant
+RAPIDJSON_DIAG_OFF(4244) // conversion from kXxxFlags to 'uint16_t', possible loss of data
 #endif
 
 #ifdef __GNUC__
 RAPIDJSON_DIAG_OFF(effc++)
-#if __GNUC__ >= 6
-RAPIDJSON_DIAG_OFF(terminate) // ignore throwing RAPIDJSON_ASSERT in RAPIDJSON_NOEXCEPT functions
-#endif
 #endif // __GNUC__
 
 #ifndef RAPIDJSON_NOMEMBERITERATORCLASS
@@ -71,6 +66,12 @@
 struct GenericMember { 
     GenericValue<Encoding, Allocator> name;     //!< name of member (must be a string)
     GenericValue<Encoding, Allocator> value;    //!< value of member.
+
+    // swap() for std::sort() and other potential use in STL.
+    friend inline void swap(GenericMember& a, GenericMember& b) RAPIDJSON_NOEXCEPT {
+        a.name.Swap(b.name);
+        a.value.Swap(b.value);
+    }
 };
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -204,17 +205,17 @@
 // class-based member iterator implementation disabled, use plain pointers
 
 template <bool Const, typename Encoding, typename Allocator>
-struct GenericMemberIterator;
+class GenericMemberIterator;
 
 //! non-const GenericMemberIterator
 template <typename Encoding, typename Allocator>
-struct GenericMemberIterator<false,Encoding,Allocator> {
+class GenericMemberIterator<false,Encoding,Allocator> {
     //! use plain pointer as iterator type
     typedef GenericMember<Encoding,Allocator>* Iterator;
 };
 //! const GenericMemberIterator
 template <typename Encoding, typename Allocator>
-struct GenericMemberIterator<true,Encoding,Allocator> {
+class GenericMemberIterator<true,Encoding,Allocator> {
     //! use plain const pointer as iterator type
     typedef const GenericMember<Encoding,Allocator>* Iterator;
 };
@@ -627,11 +628,11 @@
         \note Default content for number is zero.
     */
     explicit GenericValue(Type type) RAPIDJSON_NOEXCEPT : data_() {
-        static const uint16_t defaultFlags[7] = {
+        static const uint16_t defaultFlags[] = {
             kNullFlag, kFalseFlag, kTrueFlag, kObjectFlag, kArrayFlag, kShortStringFlag,
             kNumberAnyFlag
         };
-        RAPIDJSON_ASSERT(type >= kNullType && type <= kNumberType);
+        RAPIDJSON_NOEXCEPT_ASSERT(type >= kNullType && type <= kNumberType);
         data_.f.flags = defaultFlags[type];
 
         // Use ShortString to store empty string.
@@ -833,9 +834,10 @@
     /*! \param rhs Source of the assignment. It will become a null value after assignment.
     */
     GenericValue& operator=(GenericValue& rhs) RAPIDJSON_NOEXCEPT {
-        RAPIDJSON_ASSERT(this != &rhs);
-        this->~GenericValue();
-        RawAssign(rhs);
+        if (RAPIDJSON_LIKELY(this != &rhs)) {
+            this->~GenericValue();
+            RawAssign(rhs);
+        }
         return *this;
     }
 
@@ -927,7 +929,7 @@
     //! Equal-to operator
     /*!
         \note If an object contains duplicated named member, comparing equality with any object is always \c false.
-        \note Linear time complexity (number of all values in the subtree and total lengths of all strings).
+        \note Complexity is quadratic in Object's member number and linear for the rest (number of all values in the subtree and total lengths of all strings).
     */
     template <typename SourceAllocator>
     bool operator==(const GenericValue<Encoding, SourceAllocator>& rhs) const {
@@ -1515,7 +1517,7 @@
         MemberIterator pos = MemberBegin() + (first - MemberBegin());
         for (MemberIterator itr = pos; itr != last; ++itr)
             itr->~Member();
-        std::memmove(&*pos, &*last, static_cast<size_t>(MemberEnd() - last) * sizeof(Member));
+        std::memmove(static_cast<void*>(&*pos), &*last, static_cast<size_t>(MemberEnd() - last) * sizeof(Member));
         data_.o.size -= static_cast<SizeType>(last - first);
         return pos;
     }
@@ -1718,8 +1720,8 @@
         RAPIDJSON_ASSERT(last <= End());
         ValueIterator pos = Begin() + (first - Begin());
         for (ValueIterator itr = pos; itr != last; ++itr)
-            itr->~GenericValue();       
-        std::memmove(pos, last, static_cast<size_t>(End() - last) * sizeof(GenericValue));
+            itr->~GenericValue();
+        std::memmove(static_cast<void*>(pos), last, static_cast<size_t>(End() - last) * sizeof(GenericValue));
         data_.a.size -= static_cast<SizeType>(last - first);
         return pos;
     }
@@ -2034,12 +2036,7 @@
         if (count) {
             GenericValue* e = static_cast<GenericValue*>(allocator.Malloc(count * sizeof(GenericValue)));
             SetElementsPointer(e);
-RAPIDJSON_DIAG_PUSH
-#if defined(__GNUC__) && __GNUC__ >= 8
-RAPIDJSON_DIAG_OFF(class-memaccess) // ignore complains from gcc that no trivial copy constructor exists.
-#endif
-            std::memcpy(e, values, count * sizeof(GenericValue));
-RAPIDJSON_DIAG_POP
+            std::memcpy(static_cast<void*>(e), values, count * sizeof(GenericValue));
         }
         else
             SetElementsPointer(0);
@@ -2052,12 +2049,7 @@
         if (count) {
             Member* m = static_cast<Member*>(allocator.Malloc(count * sizeof(Member)));
             SetMembersPointer(m);
-RAPIDJSON_DIAG_PUSH
-#if defined(__GNUC__) && __GNUC__ >= 8
-RAPIDJSON_DIAG_OFF(class-memaccess) // ignore complains from gcc that no trivial copy constructor exists.
-#endif
-            std::memcpy(m, members, count * sizeof(Member));
-RAPIDJSON_DIAG_POP
+            std::memcpy(static_cast<void*>(m), members, count * sizeof(Member));
         }
         else
             SetMembersPointer(0);
diff --git a/include/rapidjson/encodings.h b/include/rapidjson/encodings.h
index 7903e76..0b24467 100644
--- a/include/rapidjson/encodings.h
+++ b/include/rapidjson/encodings.h
@@ -17,7 +17,7 @@
 
 #include "rapidjson.h"
 
-#ifdef _MSC_VER
+#if defined(_MSC_VER) && !defined(__clang__)
 RAPIDJSON_DIAG_PUSH
 RAPIDJSON_DIAG_OFF(4244) // conversion from 'type1' to 'type2', possible loss of data
 RAPIDJSON_DIAG_OFF(4702)  // unreachable code
@@ -144,9 +144,9 @@
 
     template <typename InputStream>
     static bool Decode(InputStream& is, unsigned* codepoint) {
-#define COPY() c = is.Take(); *codepoint = (*codepoint << 6) | (static_cast<unsigned char>(c) & 0x3Fu)
-#define TRANS(mask) result &= ((GetRange(static_cast<unsigned char>(c)) & mask) != 0)
-#define TAIL() COPY(); TRANS(0x70)
+#define RAPIDJSON_COPY() c = is.Take(); *codepoint = (*codepoint << 6) | (static_cast<unsigned char>(c) & 0x3Fu)
+#define RAPIDJSON_TRANS(mask) result &= ((GetRange(static_cast<unsigned char>(c)) & mask) != 0)
+#define RAPIDJSON_TAIL() RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x70)
         typename InputStream::Ch c = is.Take();
         if (!(c & 0x80)) {
             *codepoint = static_cast<unsigned char>(c);
@@ -161,44 +161,44 @@
         }
         bool result = true;
         switch (type) {
-        case 2: TAIL(); return result;
-        case 3: TAIL(); TAIL(); return result;
-        case 4: COPY(); TRANS(0x50); TAIL(); return result;
-        case 5: COPY(); TRANS(0x10); TAIL(); TAIL(); return result;
-        case 6: TAIL(); TAIL(); TAIL(); return result;
-        case 10: COPY(); TRANS(0x20); TAIL(); return result;
-        case 11: COPY(); TRANS(0x60); TAIL(); TAIL(); return result;
+        case 2: RAPIDJSON_TAIL(); return result;
+        case 3: RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result;
+        case 4: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x50); RAPIDJSON_TAIL(); return result;
+        case 5: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x10); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result;
+        case 6: RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result;
+        case 10: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x20); RAPIDJSON_TAIL(); return result;
+        case 11: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x60); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result;
         default: return false;
         }
-#undef COPY
-#undef TRANS
-#undef TAIL
+#undef RAPIDJSON_COPY
+#undef RAPIDJSON_TRANS
+#undef RAPIDJSON_TAIL
     }
 
     template <typename InputStream, typename OutputStream>
     static bool Validate(InputStream& is, OutputStream& os) {
-#define COPY() os.Put(c = is.Take())
-#define TRANS(mask) result &= ((GetRange(static_cast<unsigned char>(c)) & mask) != 0)
-#define TAIL() COPY(); TRANS(0x70)
+#define RAPIDJSON_COPY() os.Put(c = is.Take())
+#define RAPIDJSON_TRANS(mask) result &= ((GetRange(static_cast<unsigned char>(c)) & mask) != 0)
+#define RAPIDJSON_TAIL() RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x70)
         Ch c;
-        COPY();
+        RAPIDJSON_COPY();
         if (!(c & 0x80))
             return true;
 
         bool result = true;
         switch (GetRange(static_cast<unsigned char>(c))) {
-        case 2: TAIL(); return result;
-        case 3: TAIL(); TAIL(); return result;
-        case 4: COPY(); TRANS(0x50); TAIL(); return result;
-        case 5: COPY(); TRANS(0x10); TAIL(); TAIL(); return result;
-        case 6: TAIL(); TAIL(); TAIL(); return result;
-        case 10: COPY(); TRANS(0x20); TAIL(); return result;
-        case 11: COPY(); TRANS(0x60); TAIL(); TAIL(); return result;
+        case 2: RAPIDJSON_TAIL(); return result;
+        case 3: RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result;
+        case 4: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x50); RAPIDJSON_TAIL(); return result;
+        case 5: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x10); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result;
+        case 6: RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result;
+        case 10: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x20); RAPIDJSON_TAIL(); return result;
+        case 11: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x60); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result;
         default: return false;
         }
-#undef COPY
-#undef TRANS
-#undef TAIL
+#undef RAPIDJSON_COPY
+#undef RAPIDJSON_TRANS
+#undef RAPIDJSON_TAIL
     }
 
     static unsigned char GetRange(unsigned char c) {
@@ -709,7 +709,7 @@
 
 RAPIDJSON_NAMESPACE_END
 
-#if defined(__GNUC__) || defined(_MSC_VER)
+#if defined(__GNUC__) || (defined(_MSC_VER) && !defined(__clang__))
 RAPIDJSON_DIAG_POP
 #endif
 
diff --git a/include/rapidjson/filereadstream.h b/include/rapidjson/filereadstream.h
index f1bfb7d..6b34370 100644
--- a/include/rapidjson/filereadstream.h
+++ b/include/rapidjson/filereadstream.h
@@ -59,7 +59,7 @@
 
     // For encoding detection only.
     const Ch* Peek4() const {
-        return (current_ + 4 <= bufferLast_) ? current_ : 0;
+        return (current_ + 4 - !eof_ <= bufferLast_) ? current_ : 0;
     }
 
 private:
diff --git a/include/rapidjson/internal/biginteger.h b/include/rapidjson/internal/biginteger.h
index 9d3e88c..a31c8a8 100644
--- a/include/rapidjson/internal/biginteger.h
+++ b/include/rapidjson/internal/biginteger.h
@@ -17,7 +17,7 @@
 
 #include "../rapidjson.h"
 
-#if defined(_MSC_VER) && defined(_M_AMD64)
+#if defined(_MSC_VER) && !__INTEL_COMPILER && defined(_M_AMD64)
 #include <intrin.h> // for _umul128
 #pragma intrinsic(_umul128)
 #endif
@@ -133,7 +133,7 @@
         RAPIDJSON_ASSERT(count_ + offset <= kCapacity);
 
         if (interShift == 0) {
-            std::memmove(&digits_[count_ - 1 + offset], &digits_[count_ - 1], count_ * sizeof(Type));
+            std::memmove(digits_ + offset, digits_, count_ * sizeof(Type));
             count_ += offset;
         }
         else {
diff --git a/include/rapidjson/internal/diyfp.h b/include/rapidjson/internal/diyfp.h
index 29abf80..b6c2cf5 100644
--- a/include/rapidjson/internal/diyfp.h
+++ b/include/rapidjson/internal/diyfp.h
@@ -1,5 +1,5 @@
 // Tencent is pleased to support the open source community by making RapidJSON available.
-// 
+//
 // Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
 //
 // Licensed under the MIT License (the "License"); you may not use this file except
@@ -7,9 +7,9 @@
 //
 // http://opensource.org/licenses/MIT
 //
-// Unless required by applicable law or agreed to in writing, software distributed 
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
 // specific language governing permissions and limitations under the License.
 
 // This is a C++ header-only implementation of Grisu2 algorithm from the publication:
@@ -20,6 +20,7 @@
 #define RAPIDJSON_DIYFP_H_
 
 #include "../rapidjson.h"
+#include <limits>
 
 #if defined(_MSC_VER) && defined(_M_AMD64) && !defined(__INTEL_COMPILER)
 #include <intrin.h>
@@ -56,7 +57,7 @@
         if (biased_e != 0) {
             f = significand + kDpHiddenBit;
             e = biased_e - kDpExponentBias;
-        } 
+        }
         else {
             f = significand;
             e = kDpMinExponent + 1;
@@ -99,6 +100,7 @@
     }
 
     DiyFp Normalize() const {
+        RAPIDJSON_ASSERT(f != 0); // https://stackoverflow.com/a/26809183/291737
 #if defined(_MSC_VER) && defined(_M_AMD64)
         unsigned long index;
         _BitScanReverse64(&index, f);
@@ -141,7 +143,16 @@
             double d;
             uint64_t u64;
         }u;
-        const uint64_t be = (e == kDpDenormalExponent && (f & kDpHiddenBit) == 0) ? 0 : 
+        RAPIDJSON_ASSERT(f <= kDpHiddenBit + kDpSignificandMask);
+        if (e < kDpDenormalExponent) {
+            // Underflow.
+            return 0.0;
+        }
+        if (e >= kDpMaxExponent) {
+            // Overflow.
+            return std::numeric_limits<double>::infinity();
+        }
+        const uint64_t be = (e == kDpDenormalExponent && (f & kDpHiddenBit) == 0) ? 0 :
             static_cast<uint64_t>(e + kDpExponentBias);
         u.u64 = (f & kDpSignificandMask) | (be << kDpSignificandSize);
         return u.d;
@@ -220,9 +231,10 @@
         641,   667,   694,   720,   747,   774,   800,   827,   853,   880,
         907,   933,   960,   986,  1013,  1039,  1066
     };
+    RAPIDJSON_ASSERT(index < 87);
     return DiyFp(kCachedPowers_F[index], kCachedPowers_E[index]);
 }
-    
+
 inline DiyFp GetCachedPower(int e, int* K) {
 
     //int k = static_cast<int>(ceil((-61 - e) * 0.30102999566398114)) + 374;
@@ -238,10 +250,11 @@
 }
 
 inline DiyFp GetCachedPower10(int exp, int *outExp) {
-     unsigned index = (static_cast<unsigned>(exp) + 348u) / 8u;
-     *outExp = -348 + static_cast<int>(index) * 8;
-     return GetCachedPowerByIndex(index);
- }
+    RAPIDJSON_ASSERT(exp >= -348);
+    unsigned index = static_cast<unsigned>(exp + 348) / 8u;
+    *outExp = -348 + static_cast<int>(index) * 8;
+    return GetCachedPowerByIndex(index);
+}
 
 #ifdef __GNUC__
 RAPIDJSON_DIAG_POP
diff --git a/include/rapidjson/internal/itoa.h b/include/rapidjson/internal/itoa.h
index a39accb..9b1c45c 100644
--- a/include/rapidjson/internal/itoa.h
+++ b/include/rapidjson/internal/itoa.h
@@ -211,9 +211,8 @@
             *buffer++ = cDigitsLut[d3 + 1];
         if (value >= kTen9)
             *buffer++ = cDigitsLut[d4];
-        if (value >= kTen8)
-            *buffer++ = cDigitsLut[d4 + 1];
 
+        *buffer++ = cDigitsLut[d4 + 1];
         *buffer++ = cDigitsLut[d5];
         *buffer++ = cDigitsLut[d5 + 1];
         *buffer++ = cDigitsLut[d6];
diff --git a/include/rapidjson/internal/meta.h b/include/rapidjson/internal/meta.h
index 5a9aaa4..d401edf 100644
--- a/include/rapidjson/internal/meta.h
+++ b/include/rapidjson/internal/meta.h
@@ -21,7 +21,8 @@
 RAPIDJSON_DIAG_PUSH
 RAPIDJSON_DIAG_OFF(effc++)
 #endif
-#if defined(_MSC_VER)
+
+#if defined(_MSC_VER) && !defined(__clang__)
 RAPIDJSON_DIAG_PUSH
 RAPIDJSON_DIAG_OFF(6334)
 #endif
@@ -174,7 +175,11 @@
 RAPIDJSON_NAMESPACE_END
 //@endcond
 
-#if defined(__GNUC__) || defined(_MSC_VER)
+#if defined(_MSC_VER) && !defined(__clang__)
+RAPIDJSON_DIAG_POP
+#endif
+
+#ifdef __GNUC__
 RAPIDJSON_DIAG_POP
 #endif
 
diff --git a/include/rapidjson/internal/regex.h b/include/rapidjson/internal/regex.h
index e1a2faa..16e3559 100644
--- a/include/rapidjson/internal/regex.h
+++ b/include/rapidjson/internal/regex.h
@@ -24,6 +24,9 @@
 RAPIDJSON_DIAG_OFF(padded)
 RAPIDJSON_DIAG_OFF(switch-enum)
 RAPIDJSON_DIAG_OFF(implicit-fallthrough)
+#elif defined(_MSC_VER)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated
 #endif
 
 #ifdef __GNUC__
@@ -34,11 +37,6 @@
 #endif
 #endif
 
-#ifdef _MSC_VER
-RAPIDJSON_DIAG_PUSH
-RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated
-#endif
-
 #ifndef RAPIDJSON_REGEX_VERBOSE
 #define RAPIDJSON_REGEX_VERBOSE 0
 #endif
@@ -120,7 +118,8 @@
     template <typename, typename> friend class GenericRegexSearch;
 
     GenericRegex(const Ch* source, Allocator* allocator = 0) : 
-        states_(allocator, 256), ranges_(allocator, 256), root_(kRegexInvalidState), stateCount_(), rangeCount_(), 
+        ownAllocator_(allocator ? 0 : RAPIDJSON_NEW(Allocator)()), allocator_(allocator ? allocator : ownAllocator_), 
+        states_(allocator_, 256), ranges_(allocator_, 256), root_(kRegexInvalidState), stateCount_(), rangeCount_(), 
         anchorBegin_(), anchorEnd_()
     {
         GenericStringStream<Encoding> ss(source);
@@ -128,7 +127,10 @@
         Parse(ds);
     }
 
-    ~GenericRegex() {}
+    ~GenericRegex()
+    {
+        RAPIDJSON_DELETE(ownAllocator_);
+    }
 
     bool IsValid() const {
         return root_ != kRegexInvalidState;
@@ -190,10 +192,9 @@
 
     template <typename InputStream>
     void Parse(DecodedStream<InputStream, Encoding>& ds) {
-        Allocator allocator;
-        Stack<Allocator> operandStack(&allocator, 256);     // Frag
-        Stack<Allocator> operatorStack(&allocator, 256);    // Operator
-        Stack<Allocator> atomCountStack(&allocator, 256);   // unsigned (Atom per parenthesis)
+        Stack<Allocator> operandStack(allocator_, 256);    // Frag
+        Stack<Allocator> operatorStack(allocator_, 256);   // Operator
+        Stack<Allocator> atomCountStack(allocator_, 256);  // unsigned (Atom per parenthesis)
 
         *atomCountStack.template Push<unsigned>() = 0;
 
@@ -394,8 +395,7 @@
                 }
                 return false;
 
-            default: 
-                RAPIDJSON_ASSERT(op == kOneOrMore);
+            case kOneOrMore:
                 if (operandStack.GetSize() >= sizeof(Frag)) {
                     Frag e = *operandStack.template Pop<Frag>(1);
                     SizeType s = NewState(kRegexInvalidState, e.start, 0);
@@ -404,6 +404,10 @@
                     return true;
                 }
                 return false;
+
+            default: 
+                // syntax error (e.g. unclosed kLeftParenthesis)
+                return false;
         }
     }
 
@@ -584,6 +588,8 @@
         }
     }
 
+    Allocator* ownAllocator_;
+    Allocator* allocator_;
     Stack<Allocator> states_;
     Stack<Allocator> ranges_;
     SizeType root_;
@@ -723,11 +729,11 @@
 } // namespace internal
 RAPIDJSON_NAMESPACE_END
 
-#ifdef __clang__
+#ifdef __GNUC__
 RAPIDJSON_DIAG_POP
 #endif
 
-#ifdef _MSC_VER
+#if defined(__clang__) || defined(_MSC_VER)
 RAPIDJSON_DIAG_POP
 #endif
 
diff --git a/include/rapidjson/internal/stack.h b/include/rapidjson/internal/stack.h
index 89558d0..45dca6a 100644
--- a/include/rapidjson/internal/stack.h
+++ b/include/rapidjson/internal/stack.h
@@ -17,6 +17,7 @@
 
 #include "../allocators.h"
 #include "swap.h"
+#include <cstddef>
 
 #if defined(__clang__)
 RAPIDJSON_DIAG_PUSH
@@ -114,7 +115,7 @@
     template<typename T>
     RAPIDJSON_FORCEINLINE void Reserve(size_t count = 1) {
          // Expand the stack if needed
-        if (RAPIDJSON_UNLIKELY(stackTop_ + sizeof(T) * count > stackEnd_))
+        if (RAPIDJSON_UNLIKELY(static_cast<std::ptrdiff_t>(sizeof(T) * count) > (stackEnd_ - stackTop_)))
             Expand<T>(count);
     }
 
@@ -127,7 +128,7 @@
     template<typename T>
     RAPIDJSON_FORCEINLINE T* PushUnsafe(size_t count = 1) {
         RAPIDJSON_ASSERT(stackTop_);
-        RAPIDJSON_ASSERT(stackTop_ + sizeof(T) * count <= stackEnd_);
+        RAPIDJSON_ASSERT(static_cast<std::ptrdiff_t>(sizeof(T) * count) <= (stackEnd_ - stackTop_));
         T* ret = reinterpret_cast<T*>(stackTop_);
         stackTop_ += sizeof(T) * count;
         return ret;
diff --git a/include/rapidjson/internal/strtod.h b/include/rapidjson/internal/strtod.h
index adf49e3..dfca22b 100644
--- a/include/rapidjson/internal/strtod.h
+++ b/include/rapidjson/internal/strtod.h
@@ -19,6 +19,8 @@
 #include "biginteger.h"
 #include "diyfp.h"
 #include "pow10.h"
+#include <climits>
+#include <limits>
 
 RAPIDJSON_NAMESPACE_BEGIN
 namespace internal {
@@ -126,20 +128,20 @@
 }
 
 // Compute an approximation and see if it is within 1/2 ULP
-inline bool StrtodDiyFp(const char* decimals, size_t length, size_t decimalPosition, int exp, double* result) {
+inline bool StrtodDiyFp(const char* decimals, int dLen, int dExp, double* result) {
     uint64_t significand = 0;
-    size_t i = 0;   // 2^64 - 1 = 18446744073709551615, 1844674407370955161 = 0x1999999999999999    
-    for (; i < length; i++) {
+    int i = 0;   // 2^64 - 1 = 18446744073709551615, 1844674407370955161 = 0x1999999999999999    
+    for (; i < dLen; i++) {
         if (significand  >  RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) ||
             (significand == RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) && decimals[i] > '5'))
             break;
         significand = significand * 10u + static_cast<unsigned>(decimals[i] - '0');
     }
     
-    if (i < length && decimals[i] >= '5') // Rounding
+    if (i < dLen && decimals[i] >= '5') // Rounding
         significand++;
 
-    size_t remaining = length - i;
+    int remaining = dLen - i;
     const int kUlpShift = 3;
     const int kUlp = 1 << kUlpShift;
     int64_t error = (remaining == 0) ? 0 : kUlp / 2;
@@ -148,24 +150,24 @@
     v = v.Normalize();
     error <<= -v.e;
 
-    const int dExp = static_cast<int>(decimalPosition) - static_cast<int>(i) + exp;
+    dExp += remaining;
 
     int actualExp;
     DiyFp cachedPower = GetCachedPower10(dExp, &actualExp);
     if (actualExp != dExp) {
         static const DiyFp kPow10[] = {
-            DiyFp(RAPIDJSON_UINT64_C2(0xa0000000, 00000000), -60),  // 10^1
-            DiyFp(RAPIDJSON_UINT64_C2(0xc8000000, 00000000), -57),  // 10^2
-            DiyFp(RAPIDJSON_UINT64_C2(0xfa000000, 00000000), -54),  // 10^3
-            DiyFp(RAPIDJSON_UINT64_C2(0x9c400000, 00000000), -50),  // 10^4
-            DiyFp(RAPIDJSON_UINT64_C2(0xc3500000, 00000000), -47),  // 10^5
-            DiyFp(RAPIDJSON_UINT64_C2(0xf4240000, 00000000), -44),  // 10^6
-            DiyFp(RAPIDJSON_UINT64_C2(0x98968000, 00000000), -40)   // 10^7
+            DiyFp(RAPIDJSON_UINT64_C2(0xa0000000, 0x00000000), -60),  // 10^1
+            DiyFp(RAPIDJSON_UINT64_C2(0xc8000000, 0x00000000), -57),  // 10^2
+            DiyFp(RAPIDJSON_UINT64_C2(0xfa000000, 0x00000000), -54),  // 10^3
+            DiyFp(RAPIDJSON_UINT64_C2(0x9c400000, 0x00000000), -50),  // 10^4
+            DiyFp(RAPIDJSON_UINT64_C2(0xc3500000, 0x00000000), -47),  // 10^5
+            DiyFp(RAPIDJSON_UINT64_C2(0xf4240000, 0x00000000), -44),  // 10^6
+            DiyFp(RAPIDJSON_UINT64_C2(0x98968000, 0x00000000), -40)   // 10^7
         };
-        int  adjustment = dExp - actualExp - 1;
-        RAPIDJSON_ASSERT(adjustment >= 0 && adjustment < 7);
-        v = v * kPow10[adjustment];
-        if (length + static_cast<unsigned>(adjustment)> 19u) // has more digits than decimal digits in 64-bit
+        int adjustment = dExp - actualExp;
+        RAPIDJSON_ASSERT(adjustment >= 1 && adjustment < 8);
+        v = v * kPow10[adjustment - 1];
+        if (dLen + adjustment > 19) // has more digits than decimal digits in 64-bit
             error += kUlp / 2;
     }
 
@@ -203,9 +205,9 @@
     return halfWay - static_cast<unsigned>(error) >= precisionBits || precisionBits >= halfWay + static_cast<unsigned>(error);
 }
 
-inline double StrtodBigInteger(double approx, const char* decimals, size_t length, size_t decimalPosition, int exp) {
-    const BigInteger dInt(decimals, length);
-    const int dExp = static_cast<int>(decimalPosition) - static_cast<int>(length) + exp;
+inline double StrtodBigInteger(double approx, const char* decimals, int dLen, int dExp) {
+    RAPIDJSON_ASSERT(dLen >= 0);
+    const BigInteger dInt(decimals, static_cast<unsigned>(dLen));
     Double a(approx);
     int cmp = CheckWithinHalfULP(a.Value(), dInt, dExp);
     if (cmp < 0)
@@ -225,42 +227,61 @@
     RAPIDJSON_ASSERT(d >= 0.0);
     RAPIDJSON_ASSERT(length >= 1);
 
-    double result;
+    double result = 0.0;
     if (StrtodFast(d, p, &result))
         return result;
 
+    RAPIDJSON_ASSERT(length <= INT_MAX);
+    int dLen = static_cast<int>(length);
+
+    RAPIDJSON_ASSERT(length >= decimalPosition);
+    RAPIDJSON_ASSERT(length - decimalPosition <= INT_MAX);
+    int dExpAdjust = static_cast<int>(length - decimalPosition);
+
+    RAPIDJSON_ASSERT(exp >= INT_MIN + dExpAdjust);
+    int dExp = exp - dExpAdjust;
+
+    // Make sure length+dExp does not overflow
+    RAPIDJSON_ASSERT(dExp <= INT_MAX - dLen);
+
     // Trim leading zeros
-    while (*decimals == '0' && length > 1) {
-        length--;
+    while (dLen > 0 && *decimals == '0') {
+        dLen--;
         decimals++;
-        decimalPosition--;
     }
 
     // Trim trailing zeros
-    while (decimals[length - 1] == '0' && length > 1) {
-        length--;
-        decimalPosition--;
-        exp++;
+    while (dLen > 0 && decimals[dLen - 1] == '0') {
+        dLen--;
+        dExp++;
+    }
+
+    if (dLen == 0) { // Buffer only contains zeros.
+        return 0.0;
     }
 
     // Trim right-most digits
-    const int kMaxDecimalDigit = 780;
-    if (static_cast<int>(length) > kMaxDecimalDigit) {
-        int delta = (static_cast<int>(length) - kMaxDecimalDigit);
-        exp += delta;
-        decimalPosition -= static_cast<unsigned>(delta);
-        length = kMaxDecimalDigit;
+    const int kMaxDecimalDigit = 767 + 1;
+    if (dLen > kMaxDecimalDigit) {
+        dExp += dLen - kMaxDecimalDigit;
+        dLen = kMaxDecimalDigit;
     }
 
-    // If too small, underflow to zero
-    if (int(length) + exp < -324)
+    // If too small, underflow to zero.
+    // Any x <= 10^-324 is interpreted as zero.
+    if (dLen + dExp <= -324)
         return 0.0;
 
-    if (StrtodDiyFp(decimals, length, decimalPosition, exp, &result))
+    // If too large, overflow to infinity.
+    // Any x >= 10^309 is interpreted as +infinity.
+    if (dLen + dExp > 309)
+        return std::numeric_limits<double>::infinity();
+
+    if (StrtodDiyFp(decimals, dLen, dExp, &result))
         return result;
 
     // Use approximation from StrtodDiyFp and make adjustment with BigInteger comparison
-    return StrtodBigInteger(result, decimals, length, decimalPosition, exp);
+    return StrtodBigInteger(result, decimals, dLen, dExp);
 }
 
 } // namespace internal
diff --git a/include/rapidjson/istreamwrapper.h b/include/rapidjson/istreamwrapper.h
index 8639c8c..c4950b9 100644
--- a/include/rapidjson/istreamwrapper.h
+++ b/include/rapidjson/istreamwrapper.h
@@ -17,13 +17,12 @@
 
 #include "stream.h"
 #include <iosfwd>
+#include <ios>
 
 #ifdef __clang__
 RAPIDJSON_DIAG_PUSH
 RAPIDJSON_DIAG_OFF(padded)
-#endif
-
-#ifdef _MSC_VER
+#elif defined(_MSC_VER)
 RAPIDJSON_DIAG_PUSH
 RAPIDJSON_DIAG_OFF(4351) // new behavior: elements of array 'array' will be default initialized
 #endif
@@ -50,57 +49,71 @@
 class BasicIStreamWrapper {
 public:
     typedef typename StreamType::char_type Ch;
-    BasicIStreamWrapper(StreamType& stream) : stream_(stream), count_(), peekBuffer_() {}
 
-    Ch Peek() const { 
-        typename StreamType::int_type c = stream_.peek();
-        return RAPIDJSON_LIKELY(c != StreamType::traits_type::eof()) ? static_cast<Ch>(c) : static_cast<Ch>('\0');
+    //! Constructor.
+    /*!
+        \param stream stream opened for read.
+    */
+    BasicIStreamWrapper(StreamType &stream) : stream_(stream), buffer_(peekBuffer_), bufferSize_(4), bufferLast_(0), current_(buffer_), readCount_(0), count_(0), eof_(false) { 
+        Read();
     }
 
-    Ch Take() { 
-        typename StreamType::int_type c = stream_.get();
-        if (RAPIDJSON_LIKELY(c != StreamType::traits_type::eof())) {
-            count_++;
-            return static_cast<Ch>(c);
-        }
-        else
-            return '\0';
+    //! Constructor.
+    /*!
+        \param stream stream opened for read.
+        \param buffer user-supplied buffer.
+        \param bufferSize size of buffer in bytes. Must >=4 bytes.
+    */
+    BasicIStreamWrapper(StreamType &stream, char* buffer, size_t bufferSize) : stream_(stream), buffer_(buffer), bufferSize_(bufferSize), bufferLast_(0), current_(buffer_), readCount_(0), count_(0), eof_(false) { 
+        RAPIDJSON_ASSERT(bufferSize >= 4);
+        Read();
     }
 
-    // tellg() may return -1 when failed. So we count by ourself.
-    size_t Tell() const { return count_; }
+    Ch Peek() const { return *current_; }
+    Ch Take() { Ch c = *current_; Read(); return c; }
+    size_t Tell() const { return count_ + static_cast<size_t>(current_ - buffer_); }
 
-    Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
+    // Not implemented
     void Put(Ch) { RAPIDJSON_ASSERT(false); }
-    void Flush() { RAPIDJSON_ASSERT(false); }
+    void Flush() { RAPIDJSON_ASSERT(false); } 
+    Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
     size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
 
     // For encoding detection only.
     const Ch* Peek4() const {
-        RAPIDJSON_ASSERT(sizeof(Ch) == 1); // Only usable for byte stream.
-        int i;
-        bool hasError = false;
-        for (i = 0; i < 4; ++i) {
-            typename StreamType::int_type c = stream_.get();
-            if (c == StreamType::traits_type::eof()) {
-                hasError = true;
-                stream_.clear();
-                break;
-            }
-            peekBuffer_[i] = static_cast<Ch>(c);
-        }
-        for (--i; i >= 0; --i)
-            stream_.putback(peekBuffer_[i]);
-        return !hasError ? peekBuffer_ : 0;
+        return (current_ + 4 - !eof_ <= bufferLast_) ? current_ : 0;
     }
 
 private:
+    BasicIStreamWrapper();
     BasicIStreamWrapper(const BasicIStreamWrapper&);
     BasicIStreamWrapper& operator=(const BasicIStreamWrapper&);
 
-    StreamType& stream_;
-    size_t count_;  //!< Number of characters read. Note:
-    mutable Ch peekBuffer_[4];
+    void Read() {
+        if (current_ < bufferLast_)
+            ++current_;
+        else if (!eof_) {
+            count_ += readCount_;
+            readCount_ = bufferSize_;
+            bufferLast_ = buffer_ + readCount_ - 1;
+            current_ = buffer_;
+
+            if (!stream_.read(buffer_, static_cast<std::streamsize>(bufferSize_))) {
+                readCount_ = static_cast<size_t>(stream_.gcount());
+                *(bufferLast_ = buffer_ + readCount_) = '\0';
+                eof_ = true;
+            }
+        }
+    }
+
+    StreamType &stream_;
+    Ch peekBuffer_[4], *buffer_;
+    size_t bufferSize_;
+    Ch *bufferLast_;
+    Ch *current_;
+    size_t readCount_;
+    size_t count_;  //!< Number of characters read
+    bool eof_;
 };
 
 typedef BasicIStreamWrapper<std::istream> IStreamWrapper;
diff --git a/include/rapidjson/pointer.h b/include/rapidjson/pointer.h
index 8bcb85e..063abab 100644
--- a/include/rapidjson/pointer.h
+++ b/include/rapidjson/pointer.h
@@ -21,9 +21,7 @@
 #ifdef __clang__
 RAPIDJSON_DIAG_PUSH
 RAPIDJSON_DIAG_OFF(switch-enum)
-#endif
-
-#ifdef _MSC_VER
+#elif defined(_MSC_VER)
 RAPIDJSON_DIAG_PUSH
 RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated
 #endif
@@ -202,6 +200,36 @@
         return *this;
     }
 
+    //! Swap the content of this pointer with an other.
+    /*!
+        \param other The pointer to swap with.
+        \note Constant complexity.
+    */
+    GenericPointer& Swap(GenericPointer& other) RAPIDJSON_NOEXCEPT {
+        internal::Swap(allocator_, other.allocator_);
+        internal::Swap(ownAllocator_, other.ownAllocator_);
+        internal::Swap(nameBuffer_, other.nameBuffer_);
+        internal::Swap(tokens_, other.tokens_);
+        internal::Swap(tokenCount_, other.tokenCount_);
+        internal::Swap(parseErrorOffset_, other.parseErrorOffset_);
+        internal::Swap(parseErrorCode_, other.parseErrorCode_);
+        return *this;
+    }
+
+    //! free-standing swap function helper
+    /*!
+        Helper function to enable support for common swap implementation pattern based on \c std::swap:
+        \code
+        void swap(MyClass& a, MyClass& b) {
+            using std::swap;
+            swap(a.pointer, b.pointer);
+            // ...
+        }
+        \endcode
+        \see Swap()
+     */
+    friend inline void swap(GenericPointer& a, GenericPointer& b) RAPIDJSON_NOEXCEPT { a.Swap(b); }
+
     //@}
 
     //!@name Append token
@@ -358,6 +386,33 @@
     */
     bool operator!=(const GenericPointer& rhs) const { return !(*this == rhs); }
 
+    //! Less than operator.
+    /*!
+        \note Invalid pointers are always greater than valid ones.
+    */
+    bool operator<(const GenericPointer& rhs) const {
+        if (!IsValid())
+            return false;
+        if (!rhs.IsValid())
+            return true;
+
+        if (tokenCount_ != rhs.tokenCount_)
+            return tokenCount_ < rhs.tokenCount_;
+
+        for (size_t i = 0; i < tokenCount_; i++) {
+            if (tokens_[i].index != rhs.tokens_[i].index)
+                return tokens_[i].index < rhs.tokens_[i].index;
+
+            if (tokens_[i].length != rhs.tokens_[i].length)
+                return tokens_[i].length < rhs.tokens_[i].length;
+
+            if (int cmp = std::memcmp(tokens_[i].name, rhs.tokens_[i].name, sizeof(Ch) * tokens_[i].length))
+                return cmp < 0;
+        }
+
+        return false;
+    }
+
     //@}
 
     //!@name Stringify
@@ -1352,11 +1407,7 @@
 
 RAPIDJSON_NAMESPACE_END
 
-#ifdef __clang__
-RAPIDJSON_DIAG_POP
-#endif
-
-#ifdef _MSC_VER
+#if defined(__clang__) || defined(_MSC_VER)
 RAPIDJSON_DIAG_POP
 #endif
 
diff --git a/include/rapidjson/prettywriter.h b/include/rapidjson/prettywriter.h
index 95bb6ff..45afb69 100644
--- a/include/rapidjson/prettywriter.h
+++ b/include/rapidjson/prettywriter.h
@@ -92,26 +92,26 @@
     */
     //@{
 
-    bool Null()                 { PrettyPrefix(kNullType);   return Base::WriteNull(); }
-    bool Bool(bool b)           { PrettyPrefix(b ? kTrueType : kFalseType); return Base::WriteBool(b); }
-    bool Int(int i)             { PrettyPrefix(kNumberType); return Base::WriteInt(i); }
-    bool Uint(unsigned u)       { PrettyPrefix(kNumberType); return Base::WriteUint(u); }
-    bool Int64(int64_t i64)     { PrettyPrefix(kNumberType); return Base::WriteInt64(i64); }
-    bool Uint64(uint64_t u64)   { PrettyPrefix(kNumberType); return Base::WriteUint64(u64);  }
-    bool Double(double d)       { PrettyPrefix(kNumberType); return Base::WriteDouble(d); }
+    bool Null()                 { PrettyPrefix(kNullType);   return Base::EndValue(Base::WriteNull()); }
+    bool Bool(bool b)           { PrettyPrefix(b ? kTrueType : kFalseType); return Base::EndValue(Base::WriteBool(b)); }
+    bool Int(int i)             { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteInt(i)); }
+    bool Uint(unsigned u)       { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteUint(u)); }
+    bool Int64(int64_t i64)     { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteInt64(i64)); }
+    bool Uint64(uint64_t u64)   { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteUint64(u64));  }
+    bool Double(double d)       { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteDouble(d)); }
 
     bool RawNumber(const Ch* str, SizeType length, bool copy = false) {
         RAPIDJSON_ASSERT(str != 0);
         (void)copy;
         PrettyPrefix(kNumberType);
-        return Base::WriteString(str, length);
+        return Base::EndValue(Base::WriteString(str, length));
     }
 
     bool String(const Ch* str, SizeType length, bool copy = false) {
         RAPIDJSON_ASSERT(str != 0);
         (void)copy;
         PrettyPrefix(kStringType);
-        return Base::WriteString(str, length);
+        return Base::EndValue(Base::WriteString(str, length));
     }
 
 #if RAPIDJSON_HAS_STDSTRING
@@ -146,7 +146,7 @@
             Base::os_->Put('\n');
             WriteIndent();
         }
-        bool ret = Base::WriteEndObject();
+        bool ret = Base::EndValue(Base::WriteEndObject());
         (void)ret;
         RAPIDJSON_ASSERT(ret == true);
         if (Base::level_stack_.Empty()) // end of json text
@@ -170,7 +170,7 @@
             Base::os_->Put('\n');
             WriteIndent();
         }
-        bool ret = Base::WriteEndArray();
+        bool ret = Base::EndValue(Base::WriteEndArray());
         (void)ret;
         RAPIDJSON_ASSERT(ret == true);
         if (Base::level_stack_.Empty()) // end of json text
@@ -201,7 +201,7 @@
     bool RawValue(const Ch* json, size_t length, Type type) {
         RAPIDJSON_ASSERT(json != 0);
         PrettyPrefix(type);
-        return Base::WriteRawValue(json, length);
+        return Base::EndValue(Base::WriteRawValue(json, length));
     }
 
 protected:
diff --git a/include/rapidjson/rapidjson.h b/include/rapidjson/rapidjson.h
index 98332fa..549936f 100644
--- a/include/rapidjson/rapidjson.h
+++ b/include/rapidjson/rapidjson.h
@@ -269,16 +269,11 @@
 /*! \ingroup RAPIDJSON_CONFIG
     \param x pointer to align
 
-    Some machines require strict data alignment. Currently the default uses 4 bytes
-    alignment on 32-bit platforms and 8 bytes alignment for 64-bit platforms.
+    Some machines require strict data alignment. The default is 8 bytes.
     User can customize by defining the RAPIDJSON_ALIGN function macro.
 */
 #ifndef RAPIDJSON_ALIGN
-#if RAPIDJSON_64BIT == 1
-#define RAPIDJSON_ALIGN(x) (((x) + static_cast<uint64_t>(7u)) & ~static_cast<uint64_t>(7u))
-#else
-#define RAPIDJSON_ALIGN(x) (((x) + 3u) & ~3u)
-#endif
+#define RAPIDJSON_ALIGN(x) (((x) + static_cast<size_t>(7u)) & ~static_cast<size_t>(7u))
 #endif
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -433,7 +428,7 @@
 template <size_t x> struct StaticAssertTest {};
 RAPIDJSON_NAMESPACE_END
 
-#if defined(__GNUC__)
+#if defined(__GNUC__) || defined(__clang__)
 #define RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE __attribute__((unused))
 #else
 #define RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE 
@@ -543,13 +538,14 @@
 #ifndef RAPIDJSON_HAS_CXX11_RVALUE_REFS
 #if defined(__clang__)
 #if __has_feature(cxx_rvalue_references) && \
-    (defined(_LIBCPP_VERSION) || defined(__GLIBCXX__) && __GLIBCXX__ >= 20080306)
+    (defined(_MSC_VER) || defined(_LIBCPP_VERSION) || defined(__GLIBCXX__) && __GLIBCXX__ >= 20080306)
 #define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1
 #else
 #define RAPIDJSON_HAS_CXX11_RVALUE_REFS 0
 #endif
 #elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,3,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \
-      (defined(_MSC_VER) && _MSC_VER >= 1600)
+      (defined(_MSC_VER) && _MSC_VER >= 1600) || \
+      (defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x5140 && defined(__GXX_EXPERIMENTAL_CXX0X__))
 
 #define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1
 #else
@@ -560,8 +556,9 @@
 #ifndef RAPIDJSON_HAS_CXX11_NOEXCEPT
 #if defined(__clang__)
 #define RAPIDJSON_HAS_CXX11_NOEXCEPT __has_feature(cxx_noexcept)
-#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__))
-//    (defined(_MSC_VER) && _MSC_VER >= ????) // not yet supported
+#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \
+    (defined(_MSC_VER) && _MSC_VER >= 1900) || \
+    (defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x5140 && defined(__GXX_EXPERIMENTAL_CXX0X__))
 #define RAPIDJSON_HAS_CXX11_NOEXCEPT 1
 #else
 #define RAPIDJSON_HAS_CXX11_NOEXCEPT 0
@@ -575,14 +572,19 @@
 
 // no automatic detection, yet
 #ifndef RAPIDJSON_HAS_CXX11_TYPETRAITS
+#if (defined(_MSC_VER) && _MSC_VER >= 1700)
+#define RAPIDJSON_HAS_CXX11_TYPETRAITS 1
+#else
 #define RAPIDJSON_HAS_CXX11_TYPETRAITS 0
 #endif
+#endif
 
 #ifndef RAPIDJSON_HAS_CXX11_RANGE_FOR
 #if defined(__clang__)
 #define RAPIDJSON_HAS_CXX11_RANGE_FOR __has_feature(cxx_range_for)
 #elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \
-      (defined(_MSC_VER) && _MSC_VER >= 1700)
+      (defined(_MSC_VER) && _MSC_VER >= 1700) || \
+      (defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x5140 && defined(__GXX_EXPERIMENTAL_CXX0X__))
 #define RAPIDJSON_HAS_CXX11_RANGE_FOR 1
 #else
 #define RAPIDJSON_HAS_CXX11_RANGE_FOR 0
@@ -591,6 +593,32 @@
 
 //!@endcond
 
+//! Assertion (in non-throwing contexts).
+ /*! \ingroup RAPIDJSON_CONFIG
+    Some functions provide a \c noexcept guarantee, if the compiler supports it.
+    In these cases, the \ref RAPIDJSON_ASSERT macro cannot be overridden to
+    throw an exception.  This macro adds a separate customization point for
+    such cases.
+
+    Defaults to C \c assert() (as \ref RAPIDJSON_ASSERT), if \c noexcept is
+    supported, and to \ref RAPIDJSON_ASSERT otherwise.
+ */
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_NOEXCEPT_ASSERT
+
+#ifndef RAPIDJSON_NOEXCEPT_ASSERT
+#ifdef RAPIDJSON_ASSERT_THROWS
+#if RAPIDJSON_HAS_CXX11_NOEXCEPT
+#define RAPIDJSON_NOEXCEPT_ASSERT(x)
+#else
+#define RAPIDJSON_NOEXCEPT_ASSERT(x) RAPIDJSON_ASSERT(x)
+#endif // RAPIDJSON_HAS_CXX11_NOEXCEPT
+#else
+#define RAPIDJSON_NOEXCEPT_ASSERT(x) RAPIDJSON_ASSERT(x)
+#endif // RAPIDJSON_ASSERT_THROWS
+#endif // RAPIDJSON_NOEXCEPT_ASSERT
+
 ///////////////////////////////////////////////////////////////////////////////
 // new/delete
 
diff --git a/include/rapidjson/reader.h b/include/rapidjson/reader.h
index 084efaa..44a6bcd 100644
--- a/include/rapidjson/reader.h
+++ b/include/rapidjson/reader.h
@@ -37,17 +37,15 @@
 #include <arm_neon.h>
 #endif
 
-#ifdef _MSC_VER
-RAPIDJSON_DIAG_PUSH
-RAPIDJSON_DIAG_OFF(4127)  // conditional expression is constant
-RAPIDJSON_DIAG_OFF(4702)  // unreachable code
-#endif
-
 #ifdef __clang__
 RAPIDJSON_DIAG_PUSH
 RAPIDJSON_DIAG_OFF(old-style-cast)
 RAPIDJSON_DIAG_OFF(padded)
 RAPIDJSON_DIAG_OFF(switch-enum)
+#elif defined(_MSC_VER)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(4127)  // conditional expression is constant
+RAPIDJSON_DIAG_OFF(4702)  // unreachable code
 #endif
 
 #ifdef __GNUC__
@@ -608,7 +606,7 @@
         parseResult_.Clear();
         state_ = IterativeParsingStartState;
     }
-    
+
     //! Parse one token from JSON text
     /*! \tparam InputStream Type of input stream, implementing Stream concept
         \tparam Handler Type of handler, implementing Handler concept.
@@ -620,11 +618,11 @@
     bool IterativeParseNext(InputStream& is, Handler& handler) {
         while (RAPIDJSON_LIKELY(is.Peek() != '\0')) {
             SkipWhitespaceAndComments<parseFlags>(is);
-            
+
             Token t = Tokenize(is.Peek());
             IterativeParsingState n = Predict(state_, t);
             IterativeParsingState d = Transit<parseFlags>(state_, t, n, is, handler);
-            
+
             // If we've finished or hit an error...
             if (RAPIDJSON_UNLIKELY(IsIterativeParsingCompleteState(d))) {
                 // Report errors.
@@ -632,11 +630,11 @@
                     HandleError(state_, is);
                     return false;
                 }
-            
+
                 // Transition to the finish state.
                 RAPIDJSON_ASSERT(d == IterativeParsingFinishState);
                 state_ = d;
-                
+
                 // If StopWhenDone is not set...
                 if (!(parseFlags & kParseStopWhenDoneFlag)) {
                     // ... and extra non-whitespace data is found...
@@ -647,11 +645,11 @@
                         return false;
                     }
                 }
-                
+
                 // Success! We are done!
                 return true;
             }
-            
+
             // Transition to the new state.
             state_ = d;
 
@@ -659,7 +657,7 @@
             if (!IsIterativeParsingDelimiterState(n))
                 return true;
         }
-        
+
         // We reached the end of file.
         stack_.Clear();
 
@@ -667,14 +665,14 @@
             HandleError(state_, is);
             return false;
         }
-        
+
         return true;
     }
-    
+
     //! Check if token-by-token parsing JSON text is complete
     /*! \return Whether the JSON has been fully decoded.
      */
-    RAPIDJSON_FORCEINLINE bool IterativeParseComplete() {
+    RAPIDJSON_FORCEINLINE bool IterativeParseComplete() const {
         return IsIterativeParsingCompleteState(state_);
     }
 
@@ -1525,7 +1523,7 @@
                     }
                 }
             }
-            
+
             if (RAPIDJSON_UNLIKELY(!useNanOrInf)) {
                 RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, s.Tell());
             }
@@ -1563,8 +1561,6 @@
         // Force double for big integer
         if (useDouble) {
             while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) {
-                if (RAPIDJSON_UNLIKELY(d >= 1.7976931348623157e307)) // DBL_MAX / 10.0
-                    RAPIDJSON_PARSE_ERROR(kParseErrorNumberTooBig, startOffset);
                 d = d * 10 + (s.TakePush() - '0');
             }
         }
@@ -1634,9 +1630,18 @@
             if (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) {
                 exp = static_cast<int>(s.Take() - '0');
                 if (expMinus) {
+                    // (exp + expFrac) must not underflow int => we're detecting when -exp gets
+                    // dangerously close to INT_MIN (a pessimistic next digit 9 would push it into
+                    // underflow territory):
+                    //
+                    //        -(exp * 10 + 9) + expFrac >= INT_MIN
+                    //   <=>  exp <= (expFrac - INT_MIN - 9) / 10
+                    RAPIDJSON_ASSERT(expFrac <= 0);
+                    int maxExp = (expFrac + 2147483639) / 10;
+
                     while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) {
                         exp = exp * 10 + static_cast<int>(s.Take() - '0');
-                        if (exp >= 214748364) {                         // Issue #313: prevent overflow exponent
+                        if (RAPIDJSON_UNLIKELY(exp > maxExp)) {
                             while (RAPIDJSON_UNLIKELY(s.Peek() >= '0' && s.Peek() <= '9'))  // Consume the rest of exponent
                                 s.Take();
                         }
@@ -1695,6 +1700,13 @@
                else
                    d = internal::StrtodNormalPrecision(d, p);
 
+               // Use > max, instead of == inf, to fix bogus warning -Wfloat-equal
+               if (d > (std::numeric_limits<double>::max)()) {
+                   // Overflow
+                   // TODO: internal::StrtodX should report overflow (or underflow)
+                   RAPIDJSON_PARSE_ERROR(kParseErrorNumberTooBig, startOffset);
+               }
+
                cont = handler.Double(minus ? -d : d);
            }
            else if (useNanOrInf) {
@@ -1757,12 +1769,12 @@
 
         // Single value state
         IterativeParsingValueState,
-        
+
         // Delimiter states (at bottom)
         IterativeParsingElementDelimiterState,
         IterativeParsingMemberDelimiterState,
         IterativeParsingKeyValueDelimiterState,
-        
+
         cIterativeParsingStateCount
     };
 
@@ -1786,7 +1798,7 @@
         kTokenCount
     };
 
-    RAPIDJSON_FORCEINLINE Token Tokenize(Ch c) {
+    RAPIDJSON_FORCEINLINE Token Tokenize(Ch c) const {
 
 //!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
 #define N NumberToken
@@ -1813,7 +1825,7 @@
             return NumberToken;
     }
 
-    RAPIDJSON_FORCEINLINE IterativeParsingState Predict(IterativeParsingState state, Token token) {
+    RAPIDJSON_FORCEINLINE IterativeParsingState Predict(IterativeParsingState state, Token token) const {
         // current state x one lookahead token -> new state
         static const char G[cIterativeParsingStateCount][kTokenCount] = {
             // Finish(sink state)
@@ -2152,46 +2164,46 @@
         }
     }
 
-    RAPIDJSON_FORCEINLINE bool IsIterativeParsingDelimiterState(IterativeParsingState s) {
+    RAPIDJSON_FORCEINLINE bool IsIterativeParsingDelimiterState(IterativeParsingState s) const {
         return s >= IterativeParsingElementDelimiterState;
     }
-    
-    RAPIDJSON_FORCEINLINE bool IsIterativeParsingCompleteState(IterativeParsingState s) {
+
+    RAPIDJSON_FORCEINLINE bool IsIterativeParsingCompleteState(IterativeParsingState s) const {
         return s <= IterativeParsingErrorState;
     }
-    
+
     template <unsigned parseFlags, typename InputStream, typename Handler>
     ParseResult IterativeParse(InputStream& is, Handler& handler) {
         parseResult_.Clear();
         ClearStackOnExit scope(*this);
         IterativeParsingState state = IterativeParsingStartState;
-        
+
         SkipWhitespaceAndComments<parseFlags>(is);
         RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_);
         while (is.Peek() != '\0') {
             Token t = Tokenize(is.Peek());
             IterativeParsingState n = Predict(state, t);
             IterativeParsingState d = Transit<parseFlags>(state, t, n, is, handler);
-            
+
             if (d == IterativeParsingErrorState) {
                 HandleError(state, is);
                 break;
             }
-            
+
             state = d;
-            
+
             // Do not further consume streams if a root JSON has been parsed.
             if ((parseFlags & kParseStopWhenDoneFlag) && state == IterativeParsingFinishState)
                 break;
-            
+
             SkipWhitespaceAndComments<parseFlags>(is);
             RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_);
         }
-        
+
         // Handle the end of file.
         if (state != IterativeParsingFinishState)
             HandleError(state, is);
-        
+
         return parseResult_;
     }
 
@@ -2206,7 +2218,7 @@
 
 RAPIDJSON_NAMESPACE_END
 
-#ifdef __clang__
+#if defined(__clang__) || defined(_MSC_VER)
 RAPIDJSON_DIAG_POP
 #endif
 
@@ -2215,8 +2227,4 @@
 RAPIDJSON_DIAG_POP
 #endif
 
-#ifdef _MSC_VER
-RAPIDJSON_DIAG_POP
-#endif
-
 #endif // RAPIDJSON_READER_H_
diff --git a/include/rapidjson/schema.h b/include/rapidjson/schema.h
index 1a8fb26..26ae947 100644
--- a/include/rapidjson/schema.h
+++ b/include/rapidjson/schema.h
@@ -63,9 +63,7 @@
 RAPIDJSON_DIAG_OFF(exit-time-destructors)
 RAPIDJSON_DIAG_OFF(c++98-compat-pedantic)
 RAPIDJSON_DIAG_OFF(variadic-macros)
-#endif
-
-#ifdef _MSC_VER
+#elif defined(_MSC_VER)
 RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated
 #endif
 
@@ -411,7 +409,7 @@
     Schema(SchemaDocumentType* schemaDocument, const PointerType& p, const ValueType& value, const ValueType& document, AllocatorType* allocator) :
         allocator_(allocator),
         uri_(schemaDocument->GetURI(), *allocator),
-        pointer_(p),
+        pointer_(p, allocator),
         typeless_(schemaDocument->GetTypeless()),
         enum_(),
         enumCount_(),
@@ -442,7 +440,8 @@
         minLength_(0),
         maxLength_(~SizeType(0)),
         exclusiveMinimum_(false),
-        exclusiveMaximum_(false)
+        exclusiveMaximum_(false),
+        defaultValueLength_(0)
     {
         typedef typename SchemaDocumentType::ValueType ValueType;
         typedef typename ValueType::ConstValueIterator ConstValueIterator;
@@ -465,7 +464,7 @@
                 enum_ = static_cast<uint64_t*>(allocator_->Malloc(sizeof(uint64_t) * v->Size()));
                 for (ConstValueIterator itr = v->Begin(); itr != v->End(); ++itr) {
                     typedef Hasher<EncodingType, MemoryPoolAllocator<> > EnumHasherType;
-                    char buffer[256 + 24];
+                    char buffer[256u + 24];
                     MemoryPoolAllocator<> hasherAllocator(buffer, sizeof(buffer));
                     EnumHasherType h(&hasherAllocator, 256);
                     itr->Accept(h);
@@ -637,6 +636,12 @@
         if (const ValueType* v = GetMember(value, GetMultipleOfString()))
             if (v->IsNumber() && v->GetDouble() > 0.0)
                 multipleOf_.CopyFrom(*v, *allocator_);
+
+        // Default
+        if (const ValueType* v = GetMember(value, GetDefaultValueString()))
+            if (v->IsString())
+                defaultValueLength_ = v->GetStringLength();
+
     }
 
     ~Schema() {
@@ -938,7 +943,8 @@
             context.error_handler.StartMissingProperties();
             for (SizeType index = 0; index < propertyCount_; index++)
                 if (properties_[index].required && !context.propertyExist[index])
-                    context.error_handler.AddMissingProperty(properties_[index].name);
+                    if (properties_[index].schema->defaultValueLength_ == 0 )
+                        context.error_handler.AddMissingProperty(properties_[index].name);
             if (context.error_handler.EndMissingProperties())
                 RAPIDJSON_INVALID_KEYWORD_RETURN(GetRequiredString());
         }
@@ -1048,6 +1054,7 @@
     RAPIDJSON_STRING_(ExclusiveMinimum, 'e', 'x', 'c', 'l', 'u', 's', 'i', 'v', 'e', 'M', 'i', 'n', 'i', 'm', 'u', 'm')
     RAPIDJSON_STRING_(ExclusiveMaximum, 'e', 'x', 'c', 'l', 'u', 's', 'i', 'v', 'e', 'M', 'a', 'x', 'i', 'm', 'u', 'm')
     RAPIDJSON_STRING_(MultipleOf, 'm', 'u', 'l', 't', 'i', 'p', 'l', 'e', 'O', 'f')
+    RAPIDJSON_STRING_(DefaultValue, 'd', 'e', 'f', 'a', 'u', 'l', 't')
 
 #undef RAPIDJSON_STRING_
 
@@ -1142,12 +1149,15 @@
 #elif RAPIDJSON_SCHEMA_USE_STDREGEX
     template <typename ValueType>
     RegexType* CreatePattern(const ValueType& value) {
-        if (value.IsString())
+        if (value.IsString()) {
+            RegexType *r = static_cast<RegexType*>(allocator_->Malloc(sizeof(RegexType)));
             try {
-                return new (allocator_->Malloc(sizeof(RegexType))) RegexType(value.GetString(), std::size_t(value.GetStringLength()), std::regex_constants::ECMAScript);
+                return new (r) RegexType(value.GetString(), std::size_t(value.GetStringLength()), std::regex_constants::ECMAScript);
             }
             catch (const std::regex_error&) {
+                AllocatorType::Free(r);
             }
+        }
         return 0;
     }
 
@@ -1428,6 +1438,8 @@
     SValue multipleOf_;
     bool exclusiveMinimum_;
     bool exclusiveMaximum_;
+    
+    SizeType defaultValueLength_;
 };
 
 template<typename Stack, typename Ch>
@@ -1673,6 +1685,7 @@
                                 if (const SchemaType* sc = remoteDocument->GetSchema(pointer)) {
                                     if (schema)
                                         *schema = sc;
+                                    new (schemaMap_.template Push<SchemaEntry>()) SchemaEntry(source, const_cast<SchemaType*>(sc), false, allocator_);
                                     return true;
                                 }
                             }
@@ -1860,7 +1873,12 @@
 
     //! Gets the JSON pointer pointed to the invalid value.
     PointerType GetInvalidDocumentPointer() const {
-        return documentStack_.Empty() ? PointerType() : PointerType(documentStack_.template Bottom<Ch>(), documentStack_.GetSize() / sizeof(Ch));
+        if (documentStack_.Empty()) {
+            return PointerType();
+        }
+        else {
+            return PointerType(documentStack_.template Bottom<Ch>(), documentStack_.GetSize() / sizeof(Ch));
+        }
     }
 
     void NotMultipleOf(int64_t actual, const SValue& expected) {
diff --git a/include/rapidjson/writer.h b/include/rapidjson/writer.h
index a978891..6f5b690 100644
--- a/include/rapidjson/writer.h
+++ b/include/rapidjson/writer.h
@@ -36,16 +36,14 @@
 #include <arm_neon.h>
 #endif
 
-#ifdef _MSC_VER
-RAPIDJSON_DIAG_PUSH
-RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant
-#endif
-
 #ifdef __clang__
 RAPIDJSON_DIAG_PUSH
 RAPIDJSON_DIAG_OFF(padded)
 RAPIDJSON_DIAG_OFF(unreachable-code)
 RAPIDJSON_DIAG_OFF(c++98-compat)
+#elif defined(_MSC_VER)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant
 #endif
 
 RAPIDJSON_NAMESPACE_BEGIN
@@ -462,8 +460,7 @@
         PutReserve(*os_, length);
         GenericStringStream<SourceEncoding> is(json);
         while (RAPIDJSON_LIKELY(is.Tell() < length)) {
-            const Ch c = is.Peek();
-            RAPIDJSON_ASSERT(c != '\0');
+            RAPIDJSON_ASSERT(is.Peek() != '\0');
             if (RAPIDJSON_UNLIKELY(!(writeFlags & kWriteValidateEncodingFlag ? 
                 Transcoder<SourceEncoding, TargetEncoding>::Validate(is, *os_) :
                 Transcoder<SourceEncoding, TargetEncoding>::TranscodeUnsafe(is, *os_))))
@@ -705,11 +702,7 @@
 
 RAPIDJSON_NAMESPACE_END
 
-#ifdef _MSC_VER
-RAPIDJSON_DIAG_POP
-#endif
-
-#ifdef __clang__
+#if defined(_MSC_VER) || defined(__clang__)
 RAPIDJSON_DIAG_POP
 #endif
 
diff --git a/include_dirs.js b/include_dirs.js
new file mode 100644
index 0000000..b373e85
--- /dev/null
+++ b/include_dirs.js
@@ -0,0 +1,2 @@
+var path = require('path');
+console.log(path.join(path.relative('.', __dirname), 'include'));
diff --git a/library.json b/library.json
new file mode 100644
index 0000000..2210fcd
--- /dev/null
+++ b/library.json
@@ -0,0 +1,15 @@
+{
+  "name": "RapidJSON",
+  "version": "1.1.0",
+  "keywords": "json, sax, dom, parser, generator",
+  "description": "A fast JSON parser/generator for C++ with both SAX/DOM style API",
+  "export": {
+    "include": "include"
+  },
+  "examples": "example/*/*.cpp",
+  "repository":
+  {
+    "type": "git",
+    "url": "https://github.com/Tencent/rapidjson"
+  }
+}
diff --git a/license.txt b/license.txt
new file mode 100644
index 0000000..7ccc161
--- /dev/null
+++ b/license.txt
@@ -0,0 +1,57 @@
+Tencent is pleased to support the open source community by making RapidJSON available. 
+ 
+Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.  All rights reserved.
+
+If you have downloaded a copy of the RapidJSON binary from Tencent, please note that the RapidJSON binary is licensed under the MIT License.
+If you have downloaded a copy of the RapidJSON source code from Tencent, please note that RapidJSON source code is licensed under the MIT License, except for the third-party components listed below which are subject to different license terms.  Your integration of RapidJSON into your own projects may require compliance with the MIT License, as well as the other licenses applicable to the third-party components included within RapidJSON. To avoid the problematic JSON license in your own projects, it's sufficient to exclude the bin/jsonchecker/ directory, as it's the only code under the JSON license.
+A copy of the MIT License is included in this file.
+
+Other dependencies and licenses:
+
+Open Source Software Licensed Under the BSD License:
+--------------------------------------------------------------------
+
+The msinttypes r29 
+Copyright (c) 2006-2013 Alexander Chemeris 
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
+* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+* Neither the name of  copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Open Source Software Licensed Under the JSON License:
+--------------------------------------------------------------------
+
+json.org 
+Copyright (c) 2002 JSON.org
+All Rights Reserved.
+
+JSON_checker
+Copyright (c) 2002 JSON.org
+All Rights Reserved.
+
+	
+Terms of the JSON License:
+---------------------------------------------------
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+The Software shall be used for Good, not Evil.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+Terms of the MIT License:
+--------------------------------------------------------------------
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/package.json b/package.json
new file mode 100644
index 0000000..129581a
--- /dev/null
+++ b/package.json
@@ -0,0 +1,24 @@
+{
+  "name": "rapidjson",
+  "version": "1.0.4",
+  "description": "![](doc/logo/rapidjson.png)",
+  "main": "include_dirs.js",
+  "directories": {
+    "doc": "doc",
+    "example": "example",
+    "test": "test"
+  },
+  "scripts": {
+    "test": "echo \"Error: no test specified\" && exit 1"
+  },
+  "repository": {
+    "type": "git",
+    "url": "git+https://github.com/Tencent/rapidjson.git"
+  },
+  "author": "",
+  "license": "ISC",
+  "bugs": {
+    "url": "https://github.com/Tencent/rapidjson/issues"
+  },
+  "homepage": "https://github.com/Tencent/rapidjson#readme"
+}
diff --git a/rapidjson.autopkg b/rapidjson.autopkg
new file mode 100644
index 0000000..fe72030
--- /dev/null
+++ b/rapidjson.autopkg
@@ -0,0 +1,77 @@
+nuget {
+	//Usage:  Write-NuGetPackage rapidjson.autopkg -defines:MYVERSION=1.1.0
+	//Be sure you are running Powershell 3.0 and have the CoApp powershell extensions installed properly.
+	nuspec {
+		id = rapidjson;
+		version : ${MYVERSION};
+		title: "rapidjson";
+		authors: {"https://github.com/Tencent/rapidjson/releases/tag/v1.1.0"};
+		owners: {"@lsantos (github)"};
+		licenseUrl: "https://github.com/Tencent/rapidjson/blob/master/license.txt";
+		projectUrl: "https://github.com/Tencent/rapidjson/";
+		iconUrl: "https://cdn1.iconfinder.com/data/icons/fatcow/32x32/json.png";
+		requireLicenseAcceptance:false;
+		summary: @"A fast JSON parser/generator for C++ with both SAX/DOM style API";
+		
+		// if you need to span several lines you can prefix a string with an @ symbol (exactly like c# does).
+		description: @"Rapidjson is an attempt to create the fastest JSON parser and generator.
+
+              - Small but complete. Supports both SAX and DOM style API. SAX parser only a few hundred lines of code.
+              - Fast. In the order of magnitude of strlen(). Optionally supports SSE2/SSE4.2 for acceleration.
+              - Self-contained. Minimal dependency on standard libraries. No BOOST, not even STL.
+              - Compact. Each JSON value is 16 or 20 bytes for 32 or 64-bit machines respectively (excluding text string storage). With the custom memory allocator, parser allocates memory compactly during parsing.
+              - Full  RFC4627 compliance. Supports UTF-8, UTF-16 and UTF-32.
+              - Support both in-situ parsing (directly decode strings into the source JSON text) and non-destructive parsing (decode strings into new buffers).
+              - Parse number to int/unsigned/int64_t/uint64_t/double depending on input
+              - Support custom memory allocation. Also, the default memory pool allocator can also be supplied with a user buffer (such as a buffer allocated on user's heap or - programme stack) to minimize allocation.
+
+              As the name implies, rapidjson is inspired by rapidxml.";
+		
+		releaseNotes: @"
+Added
+	Add Value::XXXMember(...) overloads for std::string (#335)
+
+Fixed
+	Include rapidjson.h for all internal/error headers.
+	Parsing some numbers incorrectly in full-precision mode (kFullPrecisionParseFlag) (#342)
+	Fix alignment of 64bit platforms (#328)
+	Fix MemoryPoolAllocator::Clear() to clear user-buffer (0691502)
+
+Changed
+	CMakeLists for include as a thirdparty in projects (#334, #337)
+	Change Document::ParseStream() to use stack allocator for Reader (ffbe386)";
+
+		copyright: "Copyright 2015";
+		tags: { native, coapp, JSON, nativepackage };
+		language: en-US;
+	};
+	
+	dependencies {
+		packages : {
+			//TODO:  Add dependencies here in [pkg.name]/[version] form per newline		
+			//zlib/[1.2.8],			
+		};
+	}
+	
+	// the files that go into the content folders
+	files {	
+		#defines {
+			SDK_ROOT 	 = .\;			
+		}
+
+		// grab all the files in the include folder
+		// the folder that contains all the .h files will 
+		// automatically get added to the Includes path.
+		nestedinclude += {
+			#destination = ${d_include}rapidjson;
+			"${SDK_ROOT}include\rapidjson\**\*.h"
+		};
+	};
+	
+	targets {
+		// We're trying to be standard about these sorts of thing. (Will help with config.h later :D)
+		//Defines += HAS_EQCORE;
+		// Fix creating the package with Raggles' fork of CoApp
+		Includes += "$(MSBuildThisFileDirectory)../..${d_include}";
+	};
+}
\ No newline at end of file
diff --git a/readme.md b/readme.md
new file mode 100644
index 0000000..78c9540
--- /dev/null
+++ b/readme.md
@@ -0,0 +1,160 @@
+![RapidJSON logo](doc/logo/rapidjson.png)
+
+![Release version](https://img.shields.io/badge/release-v1.1.0-blue.svg)
+
+## A fast JSON parser/generator for C++ with both SAX/DOM style API
+
+Tencent is pleased to support the open source community by making RapidJSON available.
+
+Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+
+* [RapidJSON GitHub](https://github.com/Tencent/rapidjson/)
+* RapidJSON Documentation
+  * [English](http://rapidjson.org/)
+  * [简体中文](http://rapidjson.org/zh-cn/)
+  * [GitBook](https://www.gitbook.com/book/miloyip/rapidjson/) with downloadable PDF/EPUB/MOBI, without API reference.
+
+## Build status
+
+| [Linux][lin-link] | [Windows][win-link] | [Coveralls][cov-link] |
+| :---------------: | :-----------------: | :-------------------: |
+| ![lin-badge]      | ![win-badge]        | ![cov-badge]          |
+
+[lin-badge]: https://travis-ci.org/Tencent/rapidjson.svg?branch=master "Travis build status"
+[lin-link]:  https://travis-ci.org/Tencent/rapidjson "Travis build status"
+[win-badge]: https://ci.appveyor.com/api/projects/status/l6qulgqahcayidrf/branch/master?svg=true "AppVeyor build status"
+[win-link]:  https://ci.appveyor.com/project/miloyip/rapidjson-0fdqj/branch/master "AppVeyor build status"
+[cov-badge]: https://coveralls.io/repos/Tencent/rapidjson/badge.svg?branch=master "Coveralls coverage"
+[cov-link]:  https://coveralls.io/r/Tencent/rapidjson?branch=master "Coveralls coverage"
+
+## Introduction
+
+RapidJSON is a JSON parser and generator for C++. It was inspired by [RapidXml](http://rapidxml.sourceforge.net/).
+
+* RapidJSON is **small** but **complete**. It supports both SAX and DOM style API. The SAX parser is only a half thousand lines of code.
+
+* RapidJSON is **fast**. Its performance can be comparable to `strlen()`. It also optionally supports SSE2/SSE4.2 for acceleration.
+
+* RapidJSON is **self-contained** and **header-only**. It does not depend on external libraries such as BOOST. It even does not depend on STL.
+
+* RapidJSON is **memory-friendly**. Each JSON value occupies exactly 16 bytes for most 32/64-bit machines (excluding text string). By default it uses a fast memory allocator, and the parser allocates memory compactly during parsing.
+
+* RapidJSON is **Unicode-friendly**. It supports UTF-8, UTF-16, UTF-32 (LE & BE), and their detection, validation and transcoding internally. For example, you can read a UTF-8 file and let RapidJSON transcode the JSON strings into UTF-16 in the DOM. It also supports surrogates and "\u0000" (null character).
+
+More features can be read [here](doc/features.md).
+
+JSON(JavaScript Object Notation) is a light-weight data exchange format. RapidJSON should be in full compliance with RFC7159/ECMA-404, with optional support of relaxed syntax. More information about JSON can be obtained at
+* [Introducing JSON](http://json.org/)
+* [RFC7159: The JavaScript Object Notation (JSON) Data Interchange Format](https://tools.ietf.org/html/rfc7159)
+* [Standard ECMA-404: The JSON Data Interchange Format](https://www.ecma-international.org/publications/standards/Ecma-404.htm)
+
+## Highlights in v1.1 (2016-8-25)
+
+* Added [JSON Pointer](doc/pointer.md)
+* Added [JSON Schema](doc/schema.md)
+* Added [relaxed JSON syntax](doc/dom.md) (comment, trailing comma, NaN/Infinity)
+* Iterating array/object with [C++11 Range-based for loop](doc/tutorial.md)
+* Reduce memory overhead of each `Value` from 24 bytes to 16 bytes in x86-64 architecture.
+
+For other changes please refer to [change log](CHANGELOG.md).
+
+## Compatibility
+
+RapidJSON is cross-platform. Some platform/compiler combinations which have been tested are shown as follows.
+* Visual C++ 2008/2010/2013 on Windows (32/64-bit)
+* GNU C++ 3.8.x on Cygwin
+* Clang 3.4 on Mac OS X (32/64-bit) and iOS
+* Clang 3.4 on Android NDK
+
+Users can build and run the unit tests on their platform/compiler.
+
+## Installation
+
+RapidJSON is a header-only C++ library. Just copy the `include/rapidjson` folder to system or project's include path.
+
+RapidJSON uses following software as its dependencies:
+* [CMake](https://cmake.org/) as a general build tool
+* (optional) [Doxygen](http://www.doxygen.org) to build documentation
+* (optional) [googletest](https://github.com/google/googletest) for unit and performance testing
+
+To generate user documentation and run tests please proceed with the steps below:
+
+1. Execute `git submodule update --init` to get the files of thirdparty submodules (google test).
+2. Create directory called `build` in rapidjson source directory.
+3. Change to `build` directory and run `cmake ..` command to configure your build. Windows users can do the same with cmake-gui application.
+4. On Windows, build the solution found in the build directory. On Linux, run `make` from the build directory.
+
+On successful build you will find compiled test and example binaries in `bin`
+directory. The generated documentation will be available in `doc/html`
+directory of the build tree. To run tests after finished build please run `make
+test` or `ctest` from your build tree. You can get detailed output using `ctest
+-V` command.
+
+It is possible to install library system-wide by running `make install` command
+from the build tree with administrative privileges. This will install all files
+according to system preferences.  Once RapidJSON is installed, it is possible
+to use it from other CMake projects by adding `find_package(RapidJSON)` line to
+your CMakeLists.txt.
+
+## Usage at a glance
+
+This simple example parses a JSON string into a document (DOM), make a simple modification of the DOM, and finally stringify the DOM to a JSON string.
+
+~~~~~~~~~~cpp
+// rapidjson/example/simpledom/simpledom.cpp`
+#include "rapidjson/document.h"
+#include "rapidjson/writer.h"
+#include "rapidjson/stringbuffer.h"
+#include <iostream>
+
+using namespace rapidjson;
+
+int main() {
+    // 1. Parse a JSON string into DOM.
+    const char* json = "{\"project\":\"rapidjson\",\"stars\":10}";
+    Document d;
+    d.Parse(json);
+
+    // 2. Modify it by DOM.
+    Value& s = d["stars"];
+    s.SetInt(s.GetInt() + 1);
+
+    // 3. Stringify the DOM
+    StringBuffer buffer;
+    Writer<StringBuffer> writer(buffer);
+    d.Accept(writer);
+
+    // Output {"project":"rapidjson","stars":11}
+    std::cout << buffer.GetString() << std::endl;
+    return 0;
+}
+~~~~~~~~~~
+
+Note that this example did not handle potential errors.
+
+The following diagram shows the process.
+
+![simpledom](doc/diagram/simpledom.png)
+
+More [examples](https://github.com/Tencent/rapidjson/tree/master/example) are available:
+
+* DOM API
+  * [tutorial](https://github.com/Tencent/rapidjson/blob/master/example/tutorial/tutorial.cpp): Basic usage of DOM API.
+
+* SAX API
+  * [simplereader](https://github.com/Tencent/rapidjson/blob/master/example/simplereader/simplereader.cpp): Dumps all SAX events while parsing a JSON by `Reader`.
+  * [condense](https://github.com/Tencent/rapidjson/blob/master/example/condense/condense.cpp): A command line tool to rewrite a JSON, with all whitespaces removed.
+  * [pretty](https://github.com/Tencent/rapidjson/blob/master/example/pretty/pretty.cpp): A command line tool to rewrite a JSON with indents and newlines by `PrettyWriter`.
+  * [capitalize](https://github.com/Tencent/rapidjson/blob/master/example/capitalize/capitalize.cpp): A command line tool to capitalize strings in JSON.
+  * [messagereader](https://github.com/Tencent/rapidjson/blob/master/example/messagereader/messagereader.cpp): Parse a JSON message with SAX API.
+  * [serialize](https://github.com/Tencent/rapidjson/blob/master/example/serialize/serialize.cpp): Serialize a C++ object into JSON with SAX API.
+  * [jsonx](https://github.com/Tencent/rapidjson/blob/master/example/jsonx/jsonx.cpp): Implements a `JsonxWriter` which stringify SAX events into [JSONx](https://www-01.ibm.com/support/knowledgecenter/SS9H2Y_7.1.0/com.ibm.dp.doc/json_jsonx.html) (a kind of XML) format. The example is a command line tool which converts input JSON into JSONx format.
+
+* Schema
+  * [schemavalidator](https://github.com/Tencent/rapidjson/blob/master/example/schemavalidator/schemavalidator.cpp) : A command line tool to validate a JSON with a JSON schema.
+
+* Advanced
+  * [prettyauto](https://github.com/Tencent/rapidjson/blob/master/example/prettyauto/prettyauto.cpp): A modified version of [pretty](https://github.com/Tencent/rapidjson/blob/master/example/pretty/pretty.cpp) to automatically handle JSON with any UTF encodings.
+  * [parsebyparts](https://github.com/Tencent/rapidjson/blob/master/example/parsebyparts/parsebyparts.cpp): Implements an `AsyncDocumentParser` which can parse JSON in parts, using C++11 thread.
+  * [filterkey](https://github.com/Tencent/rapidjson/blob/master/example/filterkey/filterkey.cpp): A command line tool to remove all values with user-specified key.
+  * [filterkeydom](https://github.com/Tencent/rapidjson/blob/master/example/filterkeydom/filterkeydom.cpp): Same tool as above, but it demonstrates how to use a generator to populate a `Document`.
diff --git a/readme.zh-cn.md b/readme.zh-cn.md
new file mode 100644
index 0000000..ccf1669
--- /dev/null
+++ b/readme.zh-cn.md
@@ -0,0 +1,152 @@
+![RapidJSON logo](doc/logo/rapidjson.png)
+
+![Release version](https://img.shields.io/badge/release-v1.1.0-blue.svg)
+
+## 高效的 C++ JSON 解析/生成器,提供 SAX 及 DOM 风格 API
+
+Tencent is pleased to support the open source community by making RapidJSON available.
+
+Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+
+* [RapidJSON GitHub](https://github.com/Tencent/rapidjson/)
+* RapidJSON 文档
+  * [English](http://rapidjson.org/)
+  * [简体中文](http://rapidjson.org/zh-cn/)
+  * [GitBook](https://www.gitbook.com/book/miloyip/rapidjson/details/zh-cn) 可下载 PDF/EPUB/MOBI,但不含 API 参考手册。
+
+## Build 状态
+
+| [Linux][lin-link] | [Windows][win-link] | [Coveralls][cov-link] |
+| :---------------: | :-----------------: | :-------------------: |
+| ![lin-badge]      | ![win-badge]        | ![cov-badge]          |
+
+[lin-badge]: https://travis-ci.org/Tencent/rapidjson.svg?branch=master "Travis build status"
+[lin-link]:  https://travis-ci.org/Tencent/rapidjson "Travis build status"
+[win-badge]: https://ci.appveyor.com/api/projects/status/l6qulgqahcayidrf/branch/master?svg=true "AppVeyor build status"
+[win-link]:  https://ci.appveyor.com/project/miloyip/rapidjson-0fdqj/branch/master "AppVeyor build status"
+[cov-badge]: https://coveralls.io/repos/Tencent/rapidjson/badge.svg?branch=master "Coveralls coverage"
+[cov-link]:  https://coveralls.io/r/Tencent/rapidjson?branch=master "Coveralls coverage"
+
+## 简介
+
+RapidJSON 是一个 C++ 的 JSON 解析器及生成器。它的灵感来自 [RapidXml](http://rapidxml.sourceforge.net/)。
+
+* RapidJSON 小而全。它同时支持 SAX 和 DOM 风格的 API。SAX 解析器只有约 500 行代码。
+
+* RapidJSON 快。它的性能可与 `strlen()` 相比。可支持 SSE2/SSE4.2 加速。
+
+* RapidJSON 独立。它不依赖于 BOOST 等外部库。它甚至不依赖于 STL。
+
+* RapidJSON 对内存友好。在大部分 32/64 位机器上,每个 JSON 值只占 16 字节(除字符串外)。它预设使用一个快速的内存分配器,令分析器可以紧凑地分配内存。
+
+* RapidJSON 对 Unicode 友好。它支持 UTF-8、UTF-16、UTF-32 (大端序/小端序),并内部支持这些编码的检测、校验及转码。例如,RapidJSON 可以在分析一个 UTF-8 文件至 DOM 时,把当中的 JSON 字符串转码至 UTF-16。它也支持代理对(surrogate pair)及 `"\u0000"`(空字符)。
+
+在 [这里](doc/features.zh-cn.md) 可读取更多特点。
+
+JSON(JavaScript Object Notation)是一个轻量的数据交换格式。RapidJSON 应该完全遵从 RFC7159/ECMA-404,并支持可选的放宽语法。 关于 JSON 的更多信息可参考:
+* [Introducing JSON](http://json.org/)
+* [RFC7159: The JavaScript Object Notation (JSON) Data Interchange Format](https://tools.ietf.org/html/rfc7159)
+* [Standard ECMA-404: The JSON Data Interchange Format](https://www.ecma-international.org/publications/standards/Ecma-404.htm)
+
+## v1.1 中的亮点 (2016-8-25)
+
+* 加入 [JSON Pointer](doc/pointer.zh-cn.md) 功能,可更简单地访问及更改 DOM。
+* 加入 [JSON Schema](doc/schema.zh-cn.md) 功能,可在解析或生成 JSON 时进行校验。
+* 加入 [放宽的 JSON 语法](doc/dom.zh-cn.md) (注释、尾随逗号、NaN/Infinity)
+* 使用 [C++11 范围 for 循环](doc/tutorial.zh-cn.md) 去遍历 array 和 object。
+* 在 x86-64 架构下,缩减每个 `Value` 的内存开销从 24 字节至 16 字节。
+
+其他改动请参考 [change log](CHANGELOG.md).
+
+## 兼容性
+
+RapidJSON 是跨平台的。以下是一些曾测试的平台/编译器组合:
+* Visual C++ 2008/2010/2013 在 Windows (32/64-bit)
+* GNU C++ 3.8.x 在 Cygwin
+* Clang 3.4 在 Mac OS X (32/64-bit) 及 iOS
+* Clang 3.4 在 Android NDK
+
+用户也可以在他们的平台上生成及执行单元测试。
+
+## 安装
+
+RapidJSON 是只有头文件的 C++ 库。只需把 `include/rapidjson` 目录复制至系统或项目的 include 目录中。
+
+RapidJSON 依赖于以下软件:
+* [CMake](https://cmake.org/) 作为通用生成工具
+* (optional) [Doxygen](http://www.doxygen.org) 用于生成文档
+* (optional) [googletest](https://github.com/google/googletest) 用于单元及性能测试
+
+生成测试及例子的步骤:
+
+1. 执行 `git submodule update --init` 去获取 thirdparty submodules (google test)。
+2. 在 rapidjson 目录下,建立一个 `build` 目录。
+3. 在 `build` 目录下执行 `cmake ..` 命令以设置生成。Windows 用户可使用 cmake-gui 应用程序。
+4. 在 Windows 下,编译生成在 build 目录中的 solution。在 Linux 下,于 build 目录运行 `make`。
+
+成功生成后,你会在 `bin` 的目录下找到编译后的测试及例子可执行文件。而生成的文档将位于 build 下的 `doc/html` 目录。要执行测试,请在 build 下执行 `make test` 或 `ctest`。使用 `ctest -V` 命令可获取详细的输出。
+
+我们也可以把程序库安装至全系统中,只要在具管理权限下从 build 目录执行 `make install` 命令。这样会按系统的偏好设置安装所有文件。当安装 RapidJSON 后,其他的 CMake 项目需要使用它时,可以通过在 `CMakeLists.txt` 加入一句 `find_package(RapidJSON)`。
+
+## 用法一览
+
+此简单例子解析一个 JSON 字符串至一个 document (DOM),对 DOM 作出简单修改,最终把 DOM 转换(stringify)至 JSON 字符串。
+
+~~~~~~~~~~cpp
+// rapidjson/example/simpledom/simpledom.cpp
+#include "rapidjson/document.h"
+#include "rapidjson/writer.h"
+#include "rapidjson/stringbuffer.h"
+#include <iostream>
+
+using namespace rapidjson;
+
+int main() {
+    // 1. 把 JSON 解析至 DOM。
+    const char* json = "{\"project\":\"rapidjson\",\"stars\":10}";
+    Document d;
+    d.Parse(json);
+
+    // 2. 利用 DOM 作出修改。
+    Value& s = d["stars"];
+    s.SetInt(s.GetInt() + 1);
+
+    // 3. 把 DOM 转换(stringify)成 JSON。
+    StringBuffer buffer;
+    Writer<StringBuffer> writer(buffer);
+    d.Accept(writer);
+
+    // Output {"project":"rapidjson","stars":11}
+    std::cout << buffer.GetString() << std::endl;
+    return 0;
+}
+~~~~~~~~~~
+
+注意此例子并没有处理潜在错误。
+
+下图展示执行过程。
+
+![simpledom](doc/diagram/simpledom.png)
+
+还有许多 [例子](https://github.com/Tencent/rapidjson/tree/master/example) 可供参考:
+
+* DOM API
+  * [tutorial](https://github.com/Tencent/rapidjson/blob/master/example/tutorial/tutorial.cpp): DOM API 的基本使用方法。
+
+* SAX API
+  * [simplereader](https://github.com/Tencent/rapidjson/blob/master/example/simplereader/simplereader.cpp): 使用 `Reader` 解析 JSON 时,打印所有 SAX 事件。
+  * [condense](https://github.com/Tencent/rapidjson/blob/master/example/condense/condense.cpp): 移除 JSON 中所有空白符的命令行工具。
+  * [pretty](https://github.com/Tencent/rapidjson/blob/master/example/pretty/pretty.cpp): 为 JSON 加入缩进与换行的命令行工具,当中使用了 `PrettyWriter`。
+  * [capitalize](https://github.com/Tencent/rapidjson/blob/master/example/capitalize/capitalize.cpp): 把 JSON 中所有字符串改为大写的命令行工具。
+  * [messagereader](https://github.com/Tencent/rapidjson/blob/master/example/messagereader/messagereader.cpp): 使用 SAX API 去解析一个 JSON 报文。
+  * [serialize](https://github.com/Tencent/rapidjson/blob/master/example/serialize/serialize.cpp): 使用 SAX API 去序列化 C++ 对象,生成 JSON。
+  * [jsonx](https://github.com/Tencent/rapidjson/blob/master/example/jsonx/jsonx.cpp): 实现了一个 `JsonxWriter`,它能把 SAX 事件写成 [JSONx](https://www-01.ibm.com/support/knowledgecenter/SS9H2Y_7.1.0/com.ibm.dp.doc/json_jsonx.html)(一种 XML)格式。这个例子是把 JSON 输入转换成 JSONx 格式的命令行工具。
+
+* Schema API
+  * [schemavalidator](https://github.com/Tencent/rapidjson/blob/master/example/schemavalidator/schemavalidator.cpp): 使用 JSON Schema 去校验 JSON 的命令行工具。
+
+* 进阶
+  * [prettyauto](https://github.com/Tencent/rapidjson/blob/master/example/prettyauto/prettyauto.cpp): [pretty](https://github.com/Tencent/rapidjson/blob/master/example/pretty/pretty.cpp) 的修改版本,可自动处理任何 UTF 编码的 JSON。
+  * [parsebyparts](https://github.com/Tencent/rapidjson/blob/master/example/parsebyparts/parsebyparts.cpp): 这例子中的 `AsyncDocumentParser` 类使用 C++ 线程来逐段解析 JSON。
+  * [filterkey](https://github.com/Tencent/rapidjson/blob/master/example/filterkey/filterkey.cpp): 移除使用者指定的键值的命令行工具。
+  * [filterkeydom](https://github.com/Tencent/rapidjson/blob/master/example/filterkeydom/filterkeydom.cpp): 如上的工具,但展示如何使用生成器(generator)去填充一个 `Document`。
\ No newline at end of file
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
new file mode 100644
index 0000000..11c1b04
--- /dev/null
+++ b/test/CMakeLists.txt
@@ -0,0 +1,20 @@
+find_package(GTestSrc)
+
+IF(GTESTSRC_FOUND)
+    enable_testing()
+
+    if (WIN32 AND (NOT CYGWIN) AND (NOT MINGW))
+        set(gtest_disable_pthreads ON)
+        set(gtest_force_shared_crt ON)
+    endif()
+
+    add_subdirectory(${GTEST_SOURCE_DIR} ${CMAKE_BINARY_DIR}/googletest)
+    include_directories(SYSTEM ${GTEST_INCLUDE_DIR})
+
+    set(TEST_LIBRARIES gtest gtest_main)
+
+    add_custom_target(tests ALL)
+    add_subdirectory(perftest)
+    add_subdirectory(unittest)
+
+ENDIF(GTESTSRC_FOUND)
diff --git a/test/perftest/CMakeLists.txt b/test/perftest/CMakeLists.txt
new file mode 100644
index 0000000..035e544
--- /dev/null
+++ b/test/perftest/CMakeLists.txt
@@ -0,0 +1,28 @@
+set(PERFTEST_SOURCES
+    misctest.cpp
+    perftest.cpp
+    platformtest.cpp
+    rapidjsontest.cpp
+    schematest.cpp)
+
+add_executable(perftest ${PERFTEST_SOURCES})
+target_link_libraries(perftest ${TEST_LIBRARIES})
+
+add_dependencies(tests perftest)
+
+find_program(CCACHE_FOUND ccache)
+if(CCACHE_FOUND)
+    set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache)
+    set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache)
+    if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Qunused-arguments -fcolor-diagnostics")
+    endif()
+endif(CCACHE_FOUND)
+
+set_property(DIRECTORY PROPERTY COMPILE_OPTIONS ${EXTRA_CXX_FLAGS})
+
+IF(NOT (CMAKE_BUILD_TYPE STREQUAL "Debug"))
+add_test(NAME perftest
+    COMMAND ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/perftest
+    WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/bin)
+ENDIF()
diff --git a/test/perftest/misctest.cpp b/test/perftest/misctest.cpp
new file mode 100644
index 0000000..d81062f
--- /dev/null
+++ b/test/perftest/misctest.cpp
@@ -0,0 +1,974 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "perftest.h"
+
+#if TEST_MISC
+
+#define __STDC_FORMAT_MACROS
+#include "rapidjson/stringbuffer.h"
+
+#define protected public
+#include "rapidjson/writer.h"
+#undef private
+
+class Misc : public PerfTest {
+};
+
+// Copyright (c) 2008-2010 Bjoern Hoehrmann <bjoern@hoehrmann.de>
+// See http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ for details.
+
+#define UTF8_ACCEPT 0
+#define UTF8_REJECT 12
+
+static const unsigned char utf8d[] = {
+    // The first part of the table maps bytes to character classes
+    // to reduce the size of the transition table and create bitmasks.
+    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+    1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,  9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+    7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+    8,8,2,2,2,2,2,2,2,2,2,2,2,2,2,2,  2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
+    10,3,3,3,3,3,3,3,3,3,3,3,3,4,3,3, 11,6,6,6,5,8,8,8,8,8,8,8,8,8,8,8,
+
+    // The second part is a transition table that maps a combination
+    // of a state of the automaton and a character class to a state.
+    0,12,24,36,60,96,84,12,12,12,48,72, 12,12,12,12,12,12,12,12,12,12,12,12,
+    12, 0,12,12,12,12,12, 0,12, 0,12,12, 12,24,12,12,12,12,12,24,12,24,12,12,
+    12,12,12,12,12,12,12,24,12,12,12,12, 12,24,12,12,12,12,12,12,12,24,12,12,
+    12,12,12,12,12,12,12,36,12,36,12,12, 12,36,12,12,12,12,12,36,12,36,12,12,
+    12,36,12,12,12,12,12,12,12,12,12,12, 
+};
+
+static unsigned inline decode(unsigned* state, unsigned* codep, unsigned byte) {
+    unsigned type = utf8d[byte];
+
+    *codep = (*state != UTF8_ACCEPT) ?
+        (byte & 0x3fu) | (*codep << 6) :
+    (0xff >> type) & (byte);
+
+    *state = utf8d[256 + *state + type];
+    return *state;
+}
+
+static bool IsUTF8(unsigned char* s) {
+    unsigned codepoint, state = 0;
+
+    while (*s)
+        decode(&state, &codepoint, *s++);
+
+    return state == UTF8_ACCEPT;
+}
+
+TEST_F(Misc, Hoehrmann_IsUTF8) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        EXPECT_TRUE(IsUTF8((unsigned char*)json_));
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// CountDecimalDigit: Count number of decimal places
+
+inline unsigned CountDecimalDigit_naive(unsigned n) {
+    unsigned count = 1;
+    while (n >= 10) {
+        n /= 10;
+        count++;
+    }
+    return count;
+}
+
+inline unsigned CountDecimalDigit_enroll4(unsigned n) {
+    unsigned count = 1;
+    while (n >= 10000) {
+        n /= 10000u;
+        count += 4;
+    }
+    if (n < 10) return count;
+    if (n < 100) return count + 1;
+    if (n < 1000) return count + 2;
+    return count + 3;
+}
+
+inline unsigned CountDecimalDigit64_enroll4(uint64_t n) {
+    unsigned count = 1;
+    while (n >= 10000) {
+        n /= 10000u;
+        count += 4;
+    }
+    if (n < 10) return count;
+    if (n < 100) return count + 1;
+    if (n < 1000) return count + 2;
+    return count + 3;
+}
+
+inline unsigned CountDecimalDigit_fast(unsigned n) {
+    static const uint32_t powers_of_10[] = {
+        0,
+        10,
+        100,
+        1000,
+        10000,
+        100000,
+        1000000,
+        10000000,
+        100000000,
+        1000000000
+    };
+
+#if defined(_M_IX86) || defined(_M_X64)
+    unsigned long i = 0;
+    _BitScanReverse(&i, n | 1);
+    uint32_t t = (i + 1) * 1233 >> 12;
+#elif defined(__GNUC__)
+    uint32_t t = (32 - __builtin_clz(n | 1)) * 1233 >> 12;
+#else
+#error
+#endif
+    return t - (n < powers_of_10[t]) + 1;
+}
+
+inline unsigned CountDecimalDigit64_fast(uint64_t n) {
+    static const uint64_t powers_of_10[] = {
+        0,
+        10,
+        100,
+        1000,
+        10000,
+        100000,
+        1000000,
+        10000000,
+        100000000,
+        1000000000,
+        10000000000,
+        100000000000,
+        1000000000000,
+        10000000000000,
+        100000000000000,
+        1000000000000000,
+        10000000000000000,
+        100000000000000000,
+        1000000000000000000,
+        10000000000000000000U
+    };
+
+#if defined(_M_IX86)
+    uint64_t m = n | 1;
+    unsigned long i = 0;
+    if (_BitScanReverse(&i, m >> 32))
+        i += 32;
+    else
+        _BitScanReverse(&i, m & 0xFFFFFFFF);
+    uint32_t t = (i + 1) * 1233 >> 12;
+#elif defined(_M_X64)
+    unsigned long i = 0;
+    _BitScanReverse64(&i, n | 1);
+    uint32_t t = (i + 1) * 1233 >> 12;
+#elif defined(__GNUC__)
+    uint32_t t = (64 - __builtin_clzll(n | 1)) * 1233 >> 12;
+#else
+#error
+#endif
+
+    return t - (n < powers_of_10[t]) + 1;
+}
+
+#if 0
+// Exhaustive, very slow
+TEST_F(Misc, CountDecimalDigit_Verify) {
+    unsigned i = 0;
+    do {
+        if (i % (65536 * 256) == 0)
+            printf("%u\n", i);
+        ASSERT_EQ(CountDecimalDigit_enroll4(i), CountDecimalDigit_fast(i));
+        i++;
+    } while (i != 0);
+}
+
+static const unsigned kDigits10Trial = 1000000000u;
+TEST_F(Misc, CountDecimalDigit_naive) {
+    unsigned sum = 0;
+    for (unsigned i = 0; i < kDigits10Trial; i++)
+        sum += CountDecimalDigit_naive(i);
+    printf("%u\n", sum);
+}
+
+TEST_F(Misc, CountDecimalDigit_enroll4) {
+    unsigned sum = 0;
+    for (unsigned i = 0; i < kDigits10Trial; i++)
+        sum += CountDecimalDigit_enroll4(i);
+    printf("%u\n", sum);
+}
+
+TEST_F(Misc, CountDecimalDigit_fast) {
+    unsigned sum = 0;
+    for (unsigned i = 0; i < kDigits10Trial; i++)
+        sum += CountDecimalDigit_fast(i);
+    printf("%u\n", sum);
+}
+#endif
+
+TEST_F(Misc, CountDecimalDigit64_VerifyFast) {
+    uint64_t i = 1, j;
+    do {
+        //printf("%" PRIu64 "\n", i);
+        ASSERT_EQ(CountDecimalDigit64_enroll4(i), CountDecimalDigit64_fast(i));
+        j = i;
+        i *= 3;
+    } while (j < i);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// integer-to-string conversion
+
+// https://gist.github.com/anonymous/7179097
+static const int randval[] ={
+     936116,  369532,  453755,  -72860,  209713,  268347,  435278, -360266, -416287, -182064,
+    -644712,  944969,  640463, -366588,  471577,  -69401, -744294, -505829,  923883,  831785,
+    -601136, -636767, -437054,  591718,  100758,  231907, -719038,  973540, -605220,  506659,
+    -871653,  462533,  764843, -919138,  404305, -630931, -288711, -751454, -173726, -718208,
+     432689, -281157,  360737,  659827,   19174, -376450,  769984, -858198,  439127,  734703,
+    -683426,       7,  386135,  186997, -643900, -744422, -604708, -629545,   42313, -933592,
+    -635566,  182308,  439024, -367219,  -73924, -516649,  421935, -470515,  413507,  -78952,
+    -427917, -561158,  737176,   94538,  572322,  405217,  709266, -357278, -908099, -425447,
+     601119,  750712, -862285, -177869,  900102,  384877,  157859, -641680,  503738, -702558,
+     278225,  463290,  268378, -212840,  580090,  347346, -473985, -950968, -114547, -839893,
+    -738032, -789424,  409540,  493495,  432099,  119755,  905004, -174834,  338266,  234298,
+      74641, -965136, -754593,  685273,  466924,  920560,  385062,  796402,  -67229,  994864,
+     376974,  299869, -647540, -128724,  469890, -163167, -547803, -743363,  486463, -621028,
+     612288,   27459, -514224,  126342,  -66612,  803409, -777155, -336453, -284002,  472451,
+     342390, -163630,  908356, -456147, -825607,  268092, -974715,  287227,  227890, -524101,
+     616370, -782456,  922098, -624001, -813690,  171605, -192962,  796151,  707183,  -95696,
+     -23163, -721260,  508892,  430715,  791331,  482048, -996102,  863274,  275406,   -8279,
+    -556239, -902076,  268647, -818565,  260069, -798232, -172924, -566311, -806503, -885992,
+     813969,  -78468,  956632,  304288,  494867, -508784,  381751,  151264,  762953,   76352,
+     594902,  375424,  271700, -743062,  390176,  924237,  772574,  676610,  435752, -153847,
+       3959, -971937, -294181, -538049, -344620, -170136,   19120, -703157,  868152, -657961,
+    -818631,  219015, -872729, -940001, -956570,  880727, -345910,  942913, -942271, -788115,
+     225294,  701108, -517736, -416071,  281940,  488730,  942698,  711494,  838382, -892302,
+    -533028,  103052,  528823,  901515,  949577,  159364,  718227, -241814, -733661, -462928,
+    -495829,  165170,  513580, -629188, -509571, -459083,  198437,   77198, -644612,  811276,
+    -422298, -860842,  -52584,  920369,  686424, -530667, -243476,   49763,  345866, -411960,
+    -114863,  470810, -302860,  683007, -509080,       2, -174981, -772163,  -48697,  447770,
+    -268246,  213268,  269215,   78810, -236340, -639140, -864323,  505113, -986569, -325215,
+     541859,  163070, -819998, -645161, -583336,  573414,  696417, -132375,       3, -294501,
+     320435,  682591,  840008,  351740,  426951,  609354,  898154, -943254,  227321, -859793,
+    -727993,   44137, -497965, -782239,   14955, -746080, -243366,    9837, -233083,  606507,
+    -995864, -615287, -994307,  602715,  770771, -315040,  610860,  446102, -307120,  710728,
+    -590392, -230474, -762625, -637525,  134963, -202700, -766902, -985541,  218163,  682009,
+     926051,  525156,  -61195,  403211, -810098,  245539, -431733,  179998, -806533,  745943,
+     447597,  131973, -187130,  826019,  286107, -937230, -577419,   20254,  681802, -340500,
+     323080,  266283, -667617,  309656,  416386,  611863,  759991, -534257,  523112, -634892,
+    -169913, -204905, -909867, -882185, -944908,  741811, -717675,  967007, -317396,  407230,
+    -412805,  792905,  994873,  744793, -456797,  713493,  355232,  116900, -945199,  880539,
+     342505, -580824, -262273,  982968, -349497, -735488,  311767, -455191,  570918,  389734,
+    -958386,   10262,  -99267,  155481,  304210,  204724,  704367, -144893, -233664, -671441,
+     896849,  408613,  762236,  322697,  981321,  688476,   13663, -970704, -379507,  896412,
+     977084,  348869,  875948,  341348,  318710,  512081,    6163,  669044,  833295,  811883,
+     708756, -802534, -536057,  608413, -389625, -694603,  541106, -110037,  720322, -540581,
+     645420,   32980,   62442,  510157, -981870,  -87093, -325960, -500494, -718291,  -67889,
+     991501,  374804,  769026, -978869,  294747,  714623,  413327, -199164,  671368,  804789,
+    -362507,  798196, -170790, -568895, -869379,   62020, -316693, -837793,  644994,  -39341,
+    -417504, -243068, -957756,   99072,  622234, -739992,  225668,    8863, -505910,   82483,
+    -559244,  241572,    1315,  -36175,  -54990,  376813,     -11,  162647, -688204, -486163,
+     -54934, -197470,  744223, -762707,  732540,  996618,  351561, -445933, -898491,  486531,
+     456151,   15276,  290186, -817110,  -52995,  313046, -452533,  -96267,   94470, -500176,
+    -818026, -398071, -810548, -143325, -819741,    1338, -897676, -101577, -855445,   37309,
+     285742,  953804, -777927, -926962, -811217, -936744, -952245, -802300, -490188, -964953,
+    -552279,  329142, -570048, -505756,  682898, -381089,  -14352,  175138,  152390, -582268,
+    -485137,  717035,  805329,  239572, -730409,  209643, -184403, -385864,  675086,  819648,
+     629058, -527109, -488666, -171981,  532788,  552441,  174666,  984921,  766514,  758787,
+     716309,  338801, -978004, -412163,  876079, -734212,  789557, -160491, -522719,   56644,
+       -991, -286038,  -53983,  663740,  809812,  919889, -717502, -137704,  220511,  184396,
+    -825740, -588447,  430870,  124309,  135956,  558662, -307087, -788055, -451328,  812260,
+     931601,  324347, -482989, -117858, -278861,  189068, -172774,  929057,  293787,  198161,
+    -342386,  -47173,  906555, -759955,  -12779,  777604,  -97869,  899320,  927486,  -25284,
+    -848550,  259450, -485856,  -17820,      88,  171400,  235492, -326783, -340793,  886886,
+     112428, -246280,    5979,  648444, -114982,  991013,  -56489,   -9497,  419706,  632820,
+    -341664,  393926, -848977,  -22538,  257307,  773731, -905319,  491153,  734883, -868212,
+    -951053,  644458, -580758,  764735,  584316,  297077,   28852, -397710, -953669,  201772,
+     879050, -198237, -588468,  448102, -116837,  770007, -231812,  642906, -582166, -885828,
+          9,  305082, -996577,  303559,   75008, -772956, -447960,  599825, -295552,  870739,
+    -386278, -950300,  485359, -457081,  629461, -850276,  550496, -451755, -620841,  -11766,
+    -950137,  832337,   28711, -273398, -507197,   91921, -271360, -705991, -753220, -388968,
+     967945,  340434, -320883, -662793, -554617, -574568,  477946,   -6148, -129519,  689217,
+     920020, -656315, -974523, -212525,   80921, -612532,  645096,  545655,  655713, -591631,
+    -307385, -816688, -618823, -113713,  526430,  673063,  735916, -809095, -850417,  639004,
+     432281, -388185,  270708,  860146,  -39902, -786157, -258180, -246169, -966720, -264957,
+     548072, -306010,  -57367, -635665,  933824,   70553, -989936, -488741,   72411, -452509,
+     529831,  956277,  449019, -577850, -360986, -803418,   48833,  296073,  203430,  609591,
+     715483,  470964,  658106, -718254,  -96424,  790163,  334739,  181070, -373578,       5,
+    -435088,  329841,  330939, -256602,  394355,  912412,  231910,  927278, -661933,  788539,
+    -769664, -893274,  -96856,  298205,  901043, -608122, -527430,  183618, -553963,  -35246,
+    -393924,  948832, -483198,  594501,   35460, -407007,   93494, -336881, -634072,  984205,
+    -812161,  944664,  -31062,  753872,  823933,  -69566,   50445,  290147,   85134,   34706,
+     551902,  405202, -991246,  -84642,  154341,  316432, -695101, -651588,   -5030,  137564,
+    -294665,  332541,  528307,  -90572, -344923,  523766, -758498, -968047,  339028,  494578,
+     593129, -725773,   31834, -718406, -208638,  159665,   -2043,  673344, -442767,   75816,
+     755442,  769257, -158730, -410272,  691688,  589550, -878398, -184121,  460679,  346312,
+     294163, -544602,  653308,  254167, -276979,   52073, -892684,  887653,  -41222,  983065,
+     -68258, -408799,  -99069, -674069, -863635,  -32890,  622757, -743862,   40872,   -4837,
+    -967228,  522370, -903951, -818669,  524459,  514702,  925801,   20007, -299229,  579348,
+     626021,  430089,  348139, -562692, -607728, -130606, -928451, -424793, -458647, -448892,
+    -312230,  143337,  109746,  880042, -339658, -785614,  938995,  540916,  118429,  661351,
+    -402967,  404729,  -40918, -976535,  743230,  713110,  440182, -381314, -499252,   74613,
+     193652,  912717,  491323,  583633,  324691,  459397,  281253,  195540,   -2764, -888651,
+     892449,  132663, -478373, -430002, -314551,  527826,  247165,  557966,  554778,  481531,
+    -946634,  431685, -769059, -348371,  174046,  184597, -354867,  584422,  227390, -850397,
+    -542924, -849093, -737769,  325359,  736314,  269101,  767940,  674809,   81413, -447458,
+     445076,  189072,  906218,  502688, -718476, -863827, -731381,  100660,  623249,  710008,
+     572060,  922203,  685740,   55096,  263394, -243695, -353910, -516788,  388471,  455165,
+     844103, -643772,  363976,  268875, -899450,  104470,  104029, -238874, -274659,  732969,
+    -676443,  953291, -916289, -861849, -242344,  958083, -479593, -970395,  799831,  277841,
+    -243236, -283462, -201510,  166263, -259105, -575706,  878926,  891064,  895297,  655262,
+     -34807, -809833,  -89281,  342585,  554920,       1,  902141, -333425,  139703,  852318,
+    -618438,  329498, -932596, -692836, -513372,  733656, -523411,   85779,  500478, -682697,
+    -502836,  138776,  156341, -420037, -557964, -556378,  710993,  -50383, -877159,  916334,
+     132996,  583516, -603392, -111615,  -12288, -780214,  476780,  123327,  137607,  519956,
+     745837,   17358, -158581,  -53490
+};
+static const size_t randvalCount = sizeof(randval) / sizeof(randval[0]);
+static const size_t kItoaTrialCount = 10000;
+
+static const char digits[201] =
+"0001020304050607080910111213141516171819"
+"2021222324252627282930313233343536373839"
+"4041424344454647484950515253545556575859"
+"6061626364656667686970717273747576777879"
+"8081828384858687888990919293949596979899";
+
+// Prevent code being optimized out
+//#define OUTPUT_LENGTH(length) printf("", length)
+#define OUTPUT_LENGTH(length) printf("%u\n", (unsigned)length)
+
+template<typename OutputStream>
+class Writer1 {
+public:
+    Writer1() : os_() {}
+    Writer1(OutputStream& os) : os_(&os) {}
+
+    void Reset(OutputStream& os) {
+        os_ = &os;
+    }
+
+    bool WriteInt(int i) {
+        if (i < 0) {
+            os_->Put('-');
+            i = -i;
+        }
+        return WriteUint((unsigned)i);
+    }
+
+    bool WriteUint(unsigned u) {
+        char buffer[10];
+        char *p = buffer;
+        do {
+            *p++ = char(u % 10) + '0';
+            u /= 10;
+        } while (u > 0);
+
+        do {
+            --p;
+            os_->Put(*p);
+        } while (p != buffer);
+        return true;
+    }
+
+    bool WriteInt64(int64_t i64) {
+        if (i64 < 0) {
+            os_->Put('-');
+            i64 = -i64;
+        }
+        WriteUint64((uint64_t)i64);
+        return true;
+    }
+
+    bool WriteUint64(uint64_t u64) {
+        char buffer[20];
+        char *p = buffer;
+        do {
+            *p++ = char(u64 % 10) + '0';
+            u64 /= 10;
+        } while (u64 > 0);
+
+        do {
+            --p;
+            os_->Put(*p);
+        } while (p != buffer);
+        return true;
+    }
+
+private:
+    OutputStream* os_;
+};
+
+template<>
+bool Writer1<rapidjson::StringBuffer>::WriteUint(unsigned u) {
+    char buffer[10];
+    char* p = buffer;
+    do {
+        *p++ = char(u % 10) + '0';
+        u /= 10;
+    } while (u > 0);
+
+    char* d = os_->Push(p - buffer);
+    do {
+        --p;
+        *d++ = *p;
+    } while (p != buffer);
+    return true;
+}
+
+// Using digits LUT to reduce division/modulo
+template<typename OutputStream>
+class Writer2 {
+public:
+    Writer2() : os_() {}
+    Writer2(OutputStream& os) : os_(&os) {}
+
+    void Reset(OutputStream& os) {
+        os_ = &os;
+    }
+
+    bool WriteInt(int i) {
+        if (i < 0) {
+            os_->Put('-');
+            i = -i;
+        }
+        return WriteUint((unsigned)i);
+    }
+
+    bool WriteUint(unsigned u) {
+        char buffer[10];
+        char* p = buffer;
+        while (u >= 100) {
+            const unsigned i = (u % 100) << 1;
+            u /= 100;
+            *p++ = digits[i + 1];
+            *p++ = digits[i];
+        }
+        if (u < 10)
+            *p++ = char(u) + '0';
+        else {
+            const unsigned i = u << 1;
+            *p++ = digits[i + 1];
+            *p++ = digits[i];
+        }
+
+        do {
+            --p;
+            os_->Put(*p);
+        } while (p != buffer);
+        return true;
+    }
+
+    bool WriteInt64(int64_t i64) {
+        if (i64 < 0) {
+            os_->Put('-');
+            i64 = -i64;
+        }
+        WriteUint64((uint64_t)i64);
+        return true;
+    }
+
+    bool WriteUint64(uint64_t u64) {
+        char buffer[20];
+        char* p = buffer;
+        while (u64 >= 100) {
+            const unsigned i = static_cast<unsigned>(u64 % 100) << 1;
+            u64 /= 100;
+            *p++ = digits[i + 1];
+            *p++ = digits[i];
+        }
+        if (u64 < 10)
+            *p++ = char(u64) + '0';
+        else {
+            const unsigned i = static_cast<unsigned>(u64) << 1;
+            *p++ = digits[i + 1];
+            *p++ = digits[i];
+        }
+
+        do {
+            --p;
+            os_->Put(*p);
+        } while (p != buffer);
+        return true;
+    }
+
+private:
+    OutputStream* os_;
+};
+
+// First pass to count digits
+template<typename OutputStream>
+class Writer3 {
+public:
+    Writer3() : os_() {}
+    Writer3(OutputStream& os) : os_(&os) {}
+
+    void Reset(OutputStream& os) {
+        os_ = &os;
+    }
+
+    bool WriteInt(int i) {
+        if (i < 0) {
+            os_->Put('-');
+            i = -i;
+        }
+        return WriteUint((unsigned)i);
+    }
+
+    bool WriteUint(unsigned u) {
+        char buffer[10];
+        char *p = buffer;
+        do {
+            *p++ = char(u % 10) + '0';
+            u /= 10;
+        } while (u > 0);
+
+        do {
+            --p;
+            os_->Put(*p);
+        } while (p != buffer);
+        return true;
+    }
+
+    bool WriteInt64(int64_t i64) {
+        if (i64 < 0) {
+            os_->Put('-');
+            i64 = -i64;
+        }
+        WriteUint64((uint64_t)i64);
+        return true;
+    }
+
+    bool WriteUint64(uint64_t u64) {
+        char buffer[20];
+        char *p = buffer;
+        do {
+            *p++ = char(u64 % 10) + '0';
+            u64 /= 10;
+        } while (u64 > 0);
+
+        do {
+            --p;
+            os_->Put(*p);
+        } while (p != buffer);
+        return true;
+    }
+
+private:
+    void WriteUintReverse(char* d, unsigned u) {
+        do {
+            *--d = char(u % 10) + '0';
+            u /= 10;
+        } while (u > 0);
+    }
+
+    void WriteUint64Reverse(char* d, uint64_t u) {
+        do {
+            *--d = char(u % 10) + '0';
+            u /= 10;
+        } while (u > 0);
+    }
+
+    OutputStream* os_;
+};
+
+template<>
+inline bool Writer3<rapidjson::StringBuffer>::WriteUint(unsigned u) {
+    unsigned digit = CountDecimalDigit_fast(u);
+    WriteUintReverse(os_->Push(digit) + digit, u);
+    return true;
+}
+
+template<>
+inline bool Writer3<rapidjson::InsituStringStream>::WriteUint(unsigned u) {
+    unsigned digit = CountDecimalDigit_fast(u);
+    WriteUintReverse(os_->Push(digit) + digit, u);
+    return true;
+}
+
+template<>
+inline bool Writer3<rapidjson::StringBuffer>::WriteUint64(uint64_t u) {
+    unsigned digit = CountDecimalDigit64_fast(u);
+    WriteUint64Reverse(os_->Push(digit) + digit, u);
+    return true;
+}
+
+template<>
+inline bool Writer3<rapidjson::InsituStringStream>::WriteUint64(uint64_t u) {
+    unsigned digit = CountDecimalDigit64_fast(u);
+    WriteUint64Reverse(os_->Push(digit) + digit, u);
+    return true;
+}
+
+// Using digits LUT to reduce division/modulo, two passes
+template<typename OutputStream>
+class Writer4 {
+public:
+    Writer4() : os_() {}
+    Writer4(OutputStream& os) : os_(&os) {}
+
+    void Reset(OutputStream& os) {
+        os_ = &os;
+    }
+
+    bool WriteInt(int i) {
+        if (i < 0) {
+            os_->Put('-');
+            i = -i;
+        }
+        return WriteUint((unsigned)i);
+    }
+
+    bool WriteUint(unsigned u) {
+        char buffer[10];
+        char* p = buffer;
+        while (u >= 100) {
+            const unsigned i = (u % 100) << 1;
+            u /= 100;
+            *p++ = digits[i + 1];
+            *p++ = digits[i];
+        }
+        if (u < 10)
+            *p++ = char(u) + '0';
+        else {
+            const unsigned i = u << 1;
+            *p++ = digits[i + 1];
+            *p++ = digits[i];
+        }
+
+        do {
+            --p;
+            os_->Put(*p);
+        } while (p != buffer);
+        return true;
+    }
+
+    bool WriteInt64(int64_t i64) {
+        if (i64 < 0) {
+            os_->Put('-');
+            i64 = -i64;
+        }
+        WriteUint64((uint64_t)i64);
+        return true;
+    }
+
+    bool WriteUint64(uint64_t u64) {
+        char buffer[20];
+        char* p = buffer;
+        while (u64 >= 100) {
+            const unsigned i = static_cast<unsigned>(u64 % 100) << 1;
+            u64 /= 100;
+            *p++ = digits[i + 1];
+            *p++ = digits[i];
+        }
+        if (u64 < 10)
+            *p++ = char(u64) + '0';
+        else {
+            const unsigned i = static_cast<unsigned>(u64) << 1;
+            *p++ = digits[i + 1];
+            *p++ = digits[i];
+        }
+
+        do {
+            --p;
+            os_->Put(*p);
+        } while (p != buffer);
+        return true;
+    }
+
+private:
+    void WriteUintReverse(char* d, unsigned u) {
+        while (u >= 100) {
+            const unsigned i = (u % 100) << 1;
+            u /= 100;
+            *--d = digits[i + 1];
+            *--d = digits[i];
+        }
+        if (u < 10) {
+            *--d = char(u) + '0';
+        }
+        else {
+            const unsigned i = u << 1;
+            *--d = digits[i + 1];
+            *--d = digits[i];
+        }
+    }
+
+    void WriteUint64Reverse(char* d, uint64_t u) {
+        while (u >= 100) {
+            const unsigned i = (u % 100) << 1;
+            u /= 100;
+            *--d = digits[i + 1];
+            *--d = digits[i];
+        }
+        if (u < 10) {
+            *--d = char(u) + '0';
+        }
+        else {
+            const unsigned i = u << 1;
+            *--d = digits[i + 1];
+            *--d = digits[i];
+        }
+    }
+
+    OutputStream* os_;
+};
+
+template<>
+inline bool Writer4<rapidjson::StringBuffer>::WriteUint(unsigned u) {
+    unsigned digit = CountDecimalDigit_fast(u);
+    WriteUintReverse(os_->Push(digit) + digit, u);
+    return true;
+}
+
+template<>
+inline bool Writer4<rapidjson::InsituStringStream>::WriteUint(unsigned u) {
+    unsigned digit = CountDecimalDigit_fast(u);
+    WriteUintReverse(os_->Push(digit) + digit, u);
+    return true;
+}
+
+template<>
+inline bool Writer4<rapidjson::StringBuffer>::WriteUint64(uint64_t u) {
+    unsigned digit = CountDecimalDigit64_fast(u);
+    WriteUint64Reverse(os_->Push(digit) + digit, u);
+    return true;
+}
+
+template<>
+inline bool Writer4<rapidjson::InsituStringStream>::WriteUint64(uint64_t u) {
+    unsigned digit = CountDecimalDigit64_fast(u);
+    WriteUint64Reverse(os_->Push(digit) + digit, u);
+    return true;
+}
+
+template <typename Writer>
+void itoa_Writer_StringBufferVerify() {
+    rapidjson::StringBuffer sb;
+    Writer writer(sb);
+    for (size_t j = 0; j < randvalCount; j++) {
+        char buffer[32];
+        sprintf(buffer, "%d", randval[j]);
+        writer.WriteInt(randval[j]);
+        ASSERT_STREQ(buffer, sb.GetString());
+        sb.Clear();
+    }
+}
+
+template <typename Writer>
+void itoa_Writer_InsituStringStreamVerify() {
+    Writer writer;
+    for (size_t j = 0; j < randvalCount; j++) {
+        char buffer[32];
+        sprintf(buffer, "%d", randval[j]);
+        char buffer2[32];
+        rapidjson::InsituStringStream ss(buffer2);
+        writer.Reset(ss);
+        char* begin = ss.PutBegin();
+        writer.WriteInt(randval[j]);
+        ss.Put('\0');
+        ss.PutEnd(begin);
+        ASSERT_STREQ(buffer, buffer2);
+    }
+}
+
+template <typename Writer>
+void itoa_Writer_StringBuffer() {
+    size_t length = 0;
+
+    rapidjson::StringBuffer sb;
+    Writer writer(sb);
+
+    for (size_t i = 0; i < kItoaTrialCount; i++) {
+        for (size_t j = 0; j < randvalCount; j++) {
+            writer.WriteInt(randval[j]);
+            length += sb.GetSize();
+            sb.Clear();
+        }
+    }
+    OUTPUT_LENGTH(length);
+}
+
+template <typename Writer>
+void itoa_Writer_InsituStringStream() {
+    size_t length = 0;
+
+    char buffer[32];
+    Writer writer;
+    for (size_t i = 0; i < kItoaTrialCount; i++) {
+        for (size_t j = 0; j < randvalCount; j++) {
+            rapidjson::InsituStringStream ss(buffer);
+            writer.Reset(ss);
+            char* begin = ss.PutBegin();
+            writer.WriteInt(randval[j]);
+            length += ss.PutEnd(begin);
+        }
+    }
+    OUTPUT_LENGTH(length);
+};
+
+template <typename Writer>
+void itoa64_Writer_StringBufferVerify() {
+    rapidjson::StringBuffer sb;
+    Writer writer(sb);
+    for (size_t j = 0; j < randvalCount; j++) {
+        char buffer[32];
+        int64_t x = randval[j] * randval[j];
+        sprintf(buffer, "%" PRIi64, x);
+        writer.WriteInt64(x);
+        ASSERT_STREQ(buffer, sb.GetString());
+        sb.Clear();
+    }
+}
+
+template <typename Writer>
+void itoa64_Writer_InsituStringStreamVerify() {
+    Writer writer;
+    for (size_t j = 0; j < randvalCount; j++) {
+        char buffer[32];
+        int64_t x = randval[j] * randval[j];
+        sprintf(buffer, "%" PRIi64, x);
+        char buffer2[32];
+        rapidjson::InsituStringStream ss(buffer2);
+        writer.Reset(ss);
+        char* begin = ss.PutBegin();
+        writer.WriteInt64(x);
+        ss.Put('\0');
+        ss.PutEnd(begin);
+        ASSERT_STREQ(buffer, buffer2);
+    }
+}
+
+template <typename Writer>
+void itoa64_Writer_StringBuffer() {
+    size_t length = 0;
+
+    rapidjson::StringBuffer sb;
+    Writer writer(sb);
+
+    for (size_t i = 0; i < kItoaTrialCount; i++) {
+        for (size_t j = 0; j < randvalCount; j++) {
+            writer.WriteInt64(randval[j] * randval[j]);
+            length += sb.GetSize();
+            sb.Clear();
+        }
+    }
+    OUTPUT_LENGTH(length);
+}
+
+template <typename Writer>
+void itoa64_Writer_InsituStringStream() {
+    size_t length = 0;
+
+    char buffer[32];
+    Writer writer;
+    for (size_t i = 0; i < kItoaTrialCount; i++) {
+        for (size_t j = 0; j < randvalCount; j++) {
+            rapidjson::InsituStringStream ss(buffer);
+            writer.Reset(ss);
+            char* begin = ss.PutBegin();
+            writer.WriteInt64(randval[j] * randval[j]);
+            length += ss.PutEnd(begin);
+        }
+    }
+    OUTPUT_LENGTH(length);
+};
+
+// Full specializations for InsituStringStream to prevent memory copying
+// (normally we will not use InsituStringStream for writing, just for testing)
+
+namespace rapidjson {
+
+template<>
+bool rapidjson::Writer<InsituStringStream>::WriteInt(int i) {
+    char *buffer = os_->Push(11);
+    const char* end = internal::i32toa(i, buffer);
+    os_->Pop(11 - (end - buffer));
+    return true;
+}
+
+template<>
+bool Writer<InsituStringStream>::WriteUint(unsigned u) {
+    char *buffer = os_->Push(10);
+    const char* end = internal::u32toa(u, buffer);
+    os_->Pop(10 - (end - buffer));
+    return true;
+}
+
+template<>
+bool Writer<InsituStringStream>::WriteInt64(int64_t i64) {
+    char *buffer = os_->Push(21);
+    const char* end = internal::i64toa(i64, buffer);
+    os_->Pop(21 - (end - buffer));
+    return true;
+}
+
+template<>
+bool Writer<InsituStringStream>::WriteUint64(uint64_t u) {
+    char *buffer = os_->Push(20);
+    const char* end = internal::u64toa(u, buffer);
+    os_->Pop(20 - (end - buffer));
+    return true;
+}
+
+} // namespace rapidjson
+
+TEST_F(Misc, itoa_Writer_StringBufferVerify) { itoa_Writer_StringBufferVerify<rapidjson::Writer<rapidjson::StringBuffer> >(); }
+TEST_F(Misc, itoa_Writer1_StringBufferVerify) { itoa_Writer_StringBufferVerify<Writer1<rapidjson::StringBuffer> >(); }
+TEST_F(Misc, itoa_Writer2_StringBufferVerify) { itoa_Writer_StringBufferVerify<Writer2<rapidjson::StringBuffer> >(); }
+TEST_F(Misc, itoa_Writer3_StringBufferVerify) { itoa_Writer_StringBufferVerify<Writer3<rapidjson::StringBuffer> >(); }
+TEST_F(Misc, itoa_Writer4_StringBufferVerify) { itoa_Writer_StringBufferVerify<Writer4<rapidjson::StringBuffer> >(); }
+TEST_F(Misc, itoa_Writer_InsituStringStreamVerify) { itoa_Writer_InsituStringStreamVerify<rapidjson::Writer<rapidjson::InsituStringStream> >(); }
+TEST_F(Misc, itoa_Writer1_InsituStringStreamVerify) { itoa_Writer_InsituStringStreamVerify<Writer1<rapidjson::InsituStringStream> >(); }
+TEST_F(Misc, itoa_Writer2_InsituStringStreamVerify) { itoa_Writer_InsituStringStreamVerify<Writer2<rapidjson::InsituStringStream> >(); }
+TEST_F(Misc, itoa_Writer3_InsituStringStreamVerify) { itoa_Writer_InsituStringStreamVerify<Writer3<rapidjson::InsituStringStream> >(); }
+TEST_F(Misc, itoa_Writer4_InsituStringStreamVerify) { itoa_Writer_InsituStringStreamVerify<Writer4<rapidjson::InsituStringStream> >(); }
+TEST_F(Misc, itoa_Writer_StringBuffer) { itoa_Writer_StringBuffer<rapidjson::Writer<rapidjson::StringBuffer> >(); }
+TEST_F(Misc, itoa_Writer1_StringBuffer) { itoa_Writer_StringBuffer<Writer1<rapidjson::StringBuffer> >(); }
+TEST_F(Misc, itoa_Writer2_StringBuffer) { itoa_Writer_StringBuffer<Writer2<rapidjson::StringBuffer> >(); }
+TEST_F(Misc, itoa_Writer3_StringBuffer) { itoa_Writer_StringBuffer<Writer3<rapidjson::StringBuffer> >(); }
+TEST_F(Misc, itoa_Writer4_StringBuffer) { itoa_Writer_StringBuffer<Writer4<rapidjson::StringBuffer> >(); }
+TEST_F(Misc, itoa_Writer_InsituStringStream) { itoa_Writer_InsituStringStream<rapidjson::Writer<rapidjson::InsituStringStream> >(); }
+TEST_F(Misc, itoa_Writer1_InsituStringStream) { itoa_Writer_InsituStringStream<Writer1<rapidjson::InsituStringStream> >(); }
+TEST_F(Misc, itoa_Writer2_InsituStringStream) { itoa_Writer_InsituStringStream<Writer2<rapidjson::InsituStringStream> >(); }
+TEST_F(Misc, itoa_Writer3_InsituStringStream) { itoa_Writer_InsituStringStream<Writer3<rapidjson::InsituStringStream> >(); }
+TEST_F(Misc, itoa_Writer4_InsituStringStream) { itoa_Writer_InsituStringStream<Writer4<rapidjson::InsituStringStream> >(); }
+
+TEST_F(Misc, itoa64_Writer_StringBufferVerify) { itoa64_Writer_StringBufferVerify<rapidjson::Writer<rapidjson::StringBuffer> >(); }
+TEST_F(Misc, itoa64_Writer1_StringBufferVerify) { itoa64_Writer_StringBufferVerify<Writer1<rapidjson::StringBuffer> >(); }
+TEST_F(Misc, itoa64_Writer2_StringBufferVerify) { itoa64_Writer_StringBufferVerify<Writer2<rapidjson::StringBuffer> >(); }
+TEST_F(Misc, itoa64_Writer3_StringBufferVerify) { itoa64_Writer_StringBufferVerify<Writer3<rapidjson::StringBuffer> >(); }
+TEST_F(Misc, itoa64_Writer4_StringBufferVerify) { itoa64_Writer_StringBufferVerify<Writer4<rapidjson::StringBuffer> >(); }
+TEST_F(Misc, itoa64_Writer_InsituStringStreamVerify) { itoa64_Writer_InsituStringStreamVerify<rapidjson::Writer<rapidjson::InsituStringStream> >(); }
+TEST_F(Misc, itoa64_Writer1_InsituStringStreamVerify) { itoa64_Writer_InsituStringStreamVerify<Writer1<rapidjson::InsituStringStream> >(); }
+TEST_F(Misc, itoa64_Writer2_InsituStringStreamVerify) { itoa64_Writer_InsituStringStreamVerify<Writer2<rapidjson::InsituStringStream> >(); }
+TEST_F(Misc, itoa64_Writer3_InsituStringStreamVerify) { itoa64_Writer_InsituStringStreamVerify<Writer3<rapidjson::InsituStringStream> >(); }
+TEST_F(Misc, itoa64_Writer4_InsituStringStreamVerify) { itoa64_Writer_InsituStringStreamVerify<Writer4<rapidjson::InsituStringStream> >(); }
+TEST_F(Misc, itoa64_Writer_StringBuffer) { itoa64_Writer_StringBuffer<rapidjson::Writer<rapidjson::StringBuffer> >(); }
+TEST_F(Misc, itoa64_Writer1_StringBuffer) { itoa64_Writer_StringBuffer<Writer1<rapidjson::StringBuffer> >(); }
+TEST_F(Misc, itoa64_Writer2_StringBuffer) { itoa64_Writer_StringBuffer<Writer2<rapidjson::StringBuffer> >(); }
+TEST_F(Misc, itoa64_Writer3_StringBuffer) { itoa64_Writer_StringBuffer<Writer3<rapidjson::StringBuffer> >(); }
+TEST_F(Misc, itoa64_Writer4_StringBuffer) { itoa64_Writer_StringBuffer<Writer4<rapidjson::StringBuffer> >(); }
+TEST_F(Misc, itoa64_Writer_InsituStringStream) { itoa64_Writer_InsituStringStream<rapidjson::Writer<rapidjson::InsituStringStream> >(); }
+TEST_F(Misc, itoa64_Writer1_InsituStringStream) { itoa64_Writer_InsituStringStream<Writer1<rapidjson::InsituStringStream> >(); }
+TEST_F(Misc, itoa64_Writer2_InsituStringStream) { itoa64_Writer_InsituStringStream<Writer2<rapidjson::InsituStringStream> >(); }
+TEST_F(Misc, itoa64_Writer3_InsituStringStream) { itoa64_Writer_InsituStringStream<Writer3<rapidjson::InsituStringStream> >(); }
+TEST_F(Misc, itoa64_Writer4_InsituStringStream) { itoa64_Writer_InsituStringStream<Writer4<rapidjson::InsituStringStream> >(); }
+
+#endif // TEST_MISC
diff --git a/test/perftest/perftest.cpp b/test/perftest/perftest.cpp
new file mode 100644
index 0000000..4e79f1f
--- /dev/null
+++ b/test/perftest/perftest.cpp
@@ -0,0 +1,24 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "perftest.h"
+
+int main(int argc, char **argv) {
+#if _MSC_VER
+    _CrtSetDbgFlag ( _CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF );
+    //void *testWhetherMemoryLeakDetectionWorks = malloc(1);
+#endif
+    ::testing::InitGoogleTest(&argc, argv);
+    return RUN_ALL_TESTS();
+}
diff --git a/test/perftest/perftest.h b/test/perftest/perftest.h
new file mode 100644
index 0000000..953f95d
--- /dev/null
+++ b/test/perftest/perftest.h
@@ -0,0 +1,185 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#ifndef PERFTEST_H_
+#define PERFTEST_H_
+
+#define TEST_RAPIDJSON  1
+#define TEST_PLATFORM   0
+#define TEST_MISC       0
+
+#define TEST_VERSION_CODE(x,y,z) \
+  (((x)*100000) + ((y)*100) + (z))
+
+// __SSE2__ and __SSE4_2__ are recognized by gcc, clang, and the Intel compiler.
+// We use -march=native with gmake to enable -msse2 and -msse4.2, if supported.
+// Likewise, __ARM_NEON is used to detect Neon.
+#if defined(__SSE4_2__)
+#  define RAPIDJSON_SSE42
+#elif defined(__SSE2__)
+#  define RAPIDJSON_SSE2
+#elif defined(__ARM_NEON)
+#  define RAPIDJSON_NEON
+#endif
+
+#define RAPIDJSON_HAS_STDSTRING 1
+
+////////////////////////////////////////////////////////////////////////////////
+// Google Test
+
+#ifdef __cplusplus
+
+// gtest indirectly includes inttypes.h without defining __STDC_CONSTANT_MACROS.
+#ifndef __STDC_CONSTANT_MACROS
+#  define __STDC_CONSTANT_MACROS 1 // required by C++ standard
+#endif
+
+#if defined(__clang__) || defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2))
+#if defined(__clang__) || (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
+#pragma GCC diagnostic push
+#endif
+#pragma GCC diagnostic ignored "-Weffc++"
+#endif
+
+#include "gtest/gtest.h"
+
+#if defined(__clang__) || defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
+#pragma GCC diagnostic pop
+#endif
+
+#ifdef _MSC_VER
+#define _CRTDBG_MAP_ALLOC
+#include <crtdbg.h>
+#pragma warning(disable : 4996) // 'function': was declared deprecated
+#endif
+
+//! Base class for all performance tests
+class PerfTest : public ::testing::Test {
+public:
+    PerfTest() : filename_(), json_(), length_(), whitespace_(), whitespace_length_() {}
+
+    virtual void SetUp() {
+        {
+            const char *paths[] = {
+                "data/sample.json",
+                "bin/data/sample.json",
+                "../bin/data/sample.json",
+                "../../bin/data/sample.json",
+                "../../../bin/data/sample.json"
+            };
+
+            FILE *fp = 0;
+            for (size_t i = 0; i < sizeof(paths) / sizeof(paths[0]); i++) {
+                fp = fopen(filename_ = paths[i], "rb");
+                if (fp)
+                    break;
+            }
+            ASSERT_TRUE(fp != 0);
+
+            fseek(fp, 0, SEEK_END);
+            length_ = (size_t)ftell(fp);
+            fseek(fp, 0, SEEK_SET);
+            json_ = (char*)malloc(length_ + 1);
+            ASSERT_EQ(length_, fread(json_, 1, length_, fp));
+            json_[length_] = '\0';
+            fclose(fp);
+        }
+
+        // whitespace test
+        {
+            whitespace_length_ = 1024 * 1024;
+            whitespace_ = (char *)malloc(whitespace_length_  + 4);
+            char *p = whitespace_;
+            for (size_t i = 0; i < whitespace_length_; i += 4) {
+                *p++ = ' ';
+                *p++ = '\n';
+                *p++ = '\r';
+                *p++ = '\t';
+            }
+            *p++ = '[';
+            *p++ = '0';
+            *p++ = ']';
+            *p++ = '\0';
+        }
+
+        // types test
+        {
+            const char *typespaths[] = {
+                "data/types",
+                "bin/types",
+                "../bin/types",
+                "../../bin/types/",
+                "../../../bin/types"
+            };
+
+            const char* typesfilenames[] = {
+                "booleans.json",
+                "floats.json",
+                "guids.json",
+                "integers.json",
+                "mixed.json",
+                "nulls.json",
+                "paragraphs.json"
+            };
+
+            for (size_t j = 0; j < sizeof(typesfilenames) / sizeof(typesfilenames[0]); j++) {
+                types_[j] = 0;
+                for (size_t i = 0; i < sizeof(typespaths) / sizeof(typespaths[0]); i++) {
+                    char filename[256];
+                    sprintf(filename, "%s/%s", typespaths[i], typesfilenames[j]);
+                    if (FILE* fp = fopen(filename, "rb")) {
+                        fseek(fp, 0, SEEK_END);
+                        typesLength_[j] = (size_t)ftell(fp);
+                        fseek(fp, 0, SEEK_SET);
+                        types_[j] = (char*)malloc(typesLength_[j] + 1);
+                        ASSERT_EQ(typesLength_[j], fread(types_[j], 1, typesLength_[j], fp));
+                        types_[j][typesLength_[j]] = '\0';
+                        fclose(fp);
+                        break;
+                    }
+                }
+            }
+        }
+    }
+
+    virtual void TearDown() {
+        free(json_);
+        free(whitespace_);
+        json_ = 0;
+        whitespace_ = 0;
+        for (size_t i = 0; i < 7; i++) {
+            free(types_[i]);
+            types_[i] = 0;
+        }
+    }
+
+private:
+    PerfTest(const PerfTest&);
+    PerfTest& operator=(const PerfTest&);
+
+protected:
+    const char* filename_;
+    char *json_;
+    size_t length_;
+    char *whitespace_;
+    size_t whitespace_length_;
+    char *types_[7];
+    size_t typesLength_[7];
+
+    static const size_t kTrialCount = 1000;
+};
+
+#endif // __cplusplus
+
+#endif // PERFTEST_H_
diff --git a/test/perftest/platformtest.cpp b/test/perftest/platformtest.cpp
new file mode 100644
index 0000000..9b9c246
--- /dev/null
+++ b/test/perftest/platformtest.cpp
@@ -0,0 +1,166 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "perftest.h"
+
+// This file measures the baseline performance characteristics of the platform (compiler/OS/CPU).
+
+#if TEST_PLATFORM
+
+#include <cmath>
+#include <fcntl.h>
+
+// Windows
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+// UNIX
+#if defined(unix) || defined(__unix__) || defined(__unix)
+#include <unistd.h>
+#ifdef _POSIX_MAPPED_FILES
+#include <sys/mman.h>
+#endif
+#endif
+
+class Platform : public PerfTest {
+public:
+    virtual void SetUp() {
+        PerfTest::SetUp();
+
+        // temp buffer for testing
+        temp_ = (char *)malloc(length_ + 1);
+        memcpy(temp_, json_, length_);
+        checkSum_ = CheckSum();
+    }
+
+    char CheckSum() {
+        char c = 0;
+        for (size_t i = 0; i < length_; ++i)
+            c += temp_[i];
+        return c;
+    }
+
+    virtual void TearDown() {
+        PerfTest::TearDown();
+        free(temp_);
+    }
+
+protected:
+    char *temp_;
+    char checkSum_;
+};
+
+TEST_F(Platform, CheckSum) {
+    for (int i = 0; i < kTrialCount; i++)
+        EXPECT_EQ(checkSum_, CheckSum());
+}
+
+TEST_F(Platform, strlen) {
+    for (int i = 0; i < kTrialCount; i++) {
+        size_t l = strlen(json_);
+        EXPECT_EQ(length_, l);
+    }
+}
+
+TEST_F(Platform, memcmp) {
+    for (int i = 0; i < kTrialCount; i++) {
+        EXPECT_EQ(0u, memcmp(temp_, json_, length_));
+    }
+}
+
+TEST_F(Platform, pow) {
+    double sum = 0;
+    for (int i = 0; i < kTrialCount * kTrialCount; i++)
+        sum += pow(10.0, i & 255);
+    EXPECT_GT(sum, 0.0);
+}
+
+TEST_F(Platform, Whitespace_strlen) {
+    for (int i = 0; i < kTrialCount; i++) {
+        size_t l = strlen(whitespace_);
+        EXPECT_GT(l, whitespace_length_);
+    }       
+}
+
+TEST_F(Platform, Whitespace_strspn) {
+    for (int i = 0; i < kTrialCount; i++) {
+        size_t l = strspn(whitespace_, " \n\r\t");
+        EXPECT_EQ(whitespace_length_, l);
+    }       
+}
+
+TEST_F(Platform, fread) {
+    for (int i = 0; i < kTrialCount; i++) {
+        FILE *fp = fopen(filename_, "rb");
+        ASSERT_EQ(length_, fread(temp_, 1, length_, fp));
+        EXPECT_EQ(checkSum_, CheckSum());
+        fclose(fp);
+    }
+}
+
+#ifdef _MSC_VER
+TEST_F(Platform, read) {
+    for (int i = 0; i < kTrialCount; i++) {
+        int fd = _open(filename_, _O_BINARY | _O_RDONLY);
+        ASSERT_NE(-1, fd);
+        ASSERT_EQ(length_, _read(fd, temp_, length_));
+        EXPECT_EQ(checkSum_, CheckSum());
+        _close(fd);
+    }
+}
+#else
+TEST_F(Platform, read) {
+    for (int i = 0; i < kTrialCount; i++) {
+        int fd = open(filename_, O_RDONLY);
+        ASSERT_NE(-1, fd);
+        ASSERT_EQ(length_, read(fd, temp_, length_));
+        EXPECT_EQ(checkSum_, CheckSum());
+        close(fd);
+    }
+}
+#endif
+
+#ifdef _WIN32
+TEST_F(Platform, MapViewOfFile) {
+    for (int i = 0; i < kTrialCount; i++) {
+        HANDLE file = CreateFile(filename_, GENERIC_READ, 0, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
+        ASSERT_NE(INVALID_HANDLE_VALUE, file);
+        HANDLE mapObject = CreateFileMapping(file, NULL, PAGE_READONLY, 0, length_, NULL);
+        ASSERT_NE(INVALID_HANDLE_VALUE, mapObject);
+        void *p = MapViewOfFile(mapObject, FILE_MAP_READ, 0, 0, length_);
+        ASSERT_TRUE(p != NULL);
+        EXPECT_EQ(checkSum_, CheckSum());
+        ASSERT_TRUE(UnmapViewOfFile(p) == TRUE);
+        ASSERT_TRUE(CloseHandle(mapObject) == TRUE);
+        ASSERT_TRUE(CloseHandle(file) == TRUE);
+    }
+}
+#endif
+
+#ifdef _POSIX_MAPPED_FILES
+TEST_F(Platform, mmap) {
+    for (int i = 0; i < kTrialCount; i++) {
+        int fd = open(filename_, O_RDONLY);
+        ASSERT_NE(-1, fd);
+        void *p = mmap(NULL, length_, PROT_READ, MAP_PRIVATE, fd, 0);
+        ASSERT_TRUE(p != NULL);
+        EXPECT_EQ(checkSum_, CheckSum());
+        munmap(p, length_);
+        close(fd);
+    }
+}
+#endif
+
+#endif // TEST_PLATFORM
diff --git a/test/perftest/rapidjsontest.cpp b/test/perftest/rapidjsontest.cpp
new file mode 100644
index 0000000..9492cc5
--- /dev/null
+++ b/test/perftest/rapidjsontest.cpp
@@ -0,0 +1,546 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "perftest.h"
+
+#if TEST_RAPIDJSON
+
+#include "rapidjson/rapidjson.h"
+#include "rapidjson/document.h"
+#include "rapidjson/prettywriter.h"
+#include "rapidjson/stringbuffer.h"
+#include "rapidjson/filereadstream.h"
+#include "rapidjson/istreamwrapper.h"
+#include "rapidjson/encodedstream.h"
+#include "rapidjson/memorystream.h"
+
+#include <fstream>
+
+#ifdef RAPIDJSON_SSE2
+#define SIMD_SUFFIX(name) name##_SSE2
+#elif defined(RAPIDJSON_SSE42)
+#define SIMD_SUFFIX(name) name##_SSE42
+#elif defined(RAPIDJSON_NEON)
+#define SIMD_SUFFIX(name) name##_NEON
+#else
+#define SIMD_SUFFIX(name) name
+#endif
+
+using namespace rapidjson;
+
+class RapidJson : public PerfTest {
+public:
+    RapidJson() : temp_(), doc_() {}
+
+    virtual void SetUp() {
+        PerfTest::SetUp();
+
+        // temp buffer for insitu parsing.
+        temp_ = (char *)malloc(length_ + 1);
+
+        // Parse as a document
+        EXPECT_FALSE(doc_.Parse(json_).HasParseError());
+
+        for (size_t i = 0; i < 7; i++)
+            EXPECT_FALSE(typesDoc_[i].Parse(types_[i]).HasParseError());
+    }
+
+    virtual void TearDown() {
+        PerfTest::TearDown();
+        free(temp_);
+    }
+
+private:
+    RapidJson(const RapidJson&);
+    RapidJson& operator=(const RapidJson&);
+
+protected:
+    char *temp_;
+    Document doc_;
+    Document typesDoc_[7];
+};
+
+TEST_F(RapidJson, SIMD_SUFFIX(ReaderParseInsitu_DummyHandler)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        memcpy(temp_, json_, length_ + 1);
+        InsituStringStream s(temp_);
+        BaseReaderHandler<> h;
+        Reader reader;
+        EXPECT_TRUE(reader.Parse<kParseInsituFlag>(s, h));
+    }
+}
+
+TEST_F(RapidJson, SIMD_SUFFIX(ReaderParseInsitu_DummyHandler_ValidateEncoding)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        memcpy(temp_, json_, length_ + 1);
+        InsituStringStream s(temp_);
+        BaseReaderHandler<> h;
+        Reader reader;
+        EXPECT_TRUE(reader.Parse<kParseInsituFlag | kParseValidateEncodingFlag>(s, h));
+    }
+}
+
+TEST_F(RapidJson, SIMD_SUFFIX(ReaderParse_DummyHandler)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        StringStream s(json_);
+        BaseReaderHandler<> h;
+        Reader reader;
+        EXPECT_TRUE(reader.Parse(s, h));
+    }
+}
+
+#define TEST_TYPED(index, Name)\
+TEST_F(RapidJson, SIMD_SUFFIX(ReaderParse_DummyHandler_##Name)) {\
+    for (size_t i = 0; i < kTrialCount * 10; i++) {\
+        StringStream s(types_[index]);\
+        BaseReaderHandler<> h;\
+        Reader reader;\
+        EXPECT_TRUE(reader.Parse(s, h));\
+    }\
+}\
+TEST_F(RapidJson, SIMD_SUFFIX(ReaderParseInsitu_DummyHandler_##Name)) {\
+    for (size_t i = 0; i < kTrialCount * 10; i++) {\
+        memcpy(temp_, types_[index], typesLength_[index] + 1);\
+        InsituStringStream s(temp_);\
+        BaseReaderHandler<> h;\
+        Reader reader;\
+        EXPECT_TRUE(reader.Parse<kParseInsituFlag>(s, h));\
+    }\
+}
+
+TEST_TYPED(0, Booleans)
+TEST_TYPED(1, Floats)
+TEST_TYPED(2, Guids)
+TEST_TYPED(3, Integers)
+TEST_TYPED(4, Mixed)
+TEST_TYPED(5, Nulls)
+TEST_TYPED(6, Paragraphs)
+
+#undef TEST_TYPED
+
+TEST_F(RapidJson, SIMD_SUFFIX(ReaderParse_DummyHandler_FullPrecision)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        StringStream s(json_);
+        BaseReaderHandler<> h;
+        Reader reader;
+        EXPECT_TRUE(reader.Parse<kParseFullPrecisionFlag>(s, h));
+    }
+}
+
+TEST_F(RapidJson, SIMD_SUFFIX(ReaderParseIterative_DummyHandler)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        StringStream s(json_);
+        BaseReaderHandler<> h;
+        Reader reader;
+        EXPECT_TRUE(reader.Parse<kParseIterativeFlag>(s, h));
+    }
+}
+
+TEST_F(RapidJson, SIMD_SUFFIX(ReaderParseIterativeInsitu_DummyHandler)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        memcpy(temp_, json_, length_ + 1);
+        InsituStringStream s(temp_);
+        BaseReaderHandler<> h;
+        Reader reader;
+        EXPECT_TRUE(reader.Parse<kParseIterativeFlag|kParseInsituFlag>(s, h));
+    }
+}
+
+TEST_F(RapidJson, SIMD_SUFFIX(ReaderParseIterativePull_DummyHandler)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        StringStream s(json_);
+        BaseReaderHandler<> h;
+        Reader reader;
+        reader.IterativeParseInit();
+        while (!reader.IterativeParseComplete()) {
+            if (!reader.IterativeParseNext<kParseDefaultFlags>(s, h))
+                break;
+        }
+        EXPECT_FALSE(reader.HasParseError());
+    }
+}
+
+TEST_F(RapidJson, SIMD_SUFFIX(ReaderParseIterativePullInsitu_DummyHandler)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        memcpy(temp_, json_, length_ + 1);
+        InsituStringStream s(temp_);
+        BaseReaderHandler<> h;
+        Reader reader;
+        reader.IterativeParseInit();
+        while (!reader.IterativeParseComplete()) {
+            if (!reader.IterativeParseNext<kParseDefaultFlags|kParseInsituFlag>(s, h))
+                break;
+        }
+        EXPECT_FALSE(reader.HasParseError());
+    }
+}
+
+TEST_F(RapidJson, SIMD_SUFFIX(ReaderParse_DummyHandler_ValidateEncoding)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        StringStream s(json_);
+        BaseReaderHandler<> h;
+        Reader reader;
+        EXPECT_TRUE(reader.Parse<kParseValidateEncodingFlag>(s, h));
+    }
+}
+
+TEST_F(RapidJson, SIMD_SUFFIX(DocumentParseInsitu_MemoryPoolAllocator)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        memcpy(temp_, json_, length_ + 1);
+        Document doc;
+        doc.ParseInsitu(temp_);
+        ASSERT_TRUE(doc.IsObject());
+    }
+}
+
+TEST_F(RapidJson, SIMD_SUFFIX(DocumentParseIterativeInsitu_MemoryPoolAllocator)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        memcpy(temp_, json_, length_ + 1);
+        Document doc;
+        doc.ParseInsitu<kParseIterativeFlag>(temp_);
+        ASSERT_TRUE(doc.IsObject());
+    }
+}
+
+TEST_F(RapidJson, SIMD_SUFFIX(DocumentParse_MemoryPoolAllocator)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        Document doc;
+        doc.Parse(json_);
+        ASSERT_TRUE(doc.IsObject());
+    }
+}
+
+TEST_F(RapidJson, SIMD_SUFFIX(DocumentParseLength_MemoryPoolAllocator)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        Document doc;
+        doc.Parse(json_, length_);
+        ASSERT_TRUE(doc.IsObject());
+    }
+}
+
+#if RAPIDJSON_HAS_STDSTRING
+TEST_F(RapidJson, SIMD_SUFFIX(DocumentParseStdString_MemoryPoolAllocator)) {
+    const std::string s(json_, length_);
+    for (size_t i = 0; i < kTrialCount; i++) {
+        Document doc;
+        doc.Parse(s);
+        ASSERT_TRUE(doc.IsObject());
+    }
+}
+#endif
+
+TEST_F(RapidJson, SIMD_SUFFIX(DocumentParseIterative_MemoryPoolAllocator)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        Document doc;
+        doc.Parse<kParseIterativeFlag>(json_);
+        ASSERT_TRUE(doc.IsObject());
+    }
+}
+
+TEST_F(RapidJson, SIMD_SUFFIX(DocumentParse_CrtAllocator)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        memcpy(temp_, json_, length_ + 1);
+        GenericDocument<UTF8<>, CrtAllocator> doc;
+        doc.Parse(temp_);
+        ASSERT_TRUE(doc.IsObject());
+    }
+}
+
+TEST_F(RapidJson, SIMD_SUFFIX(DocumentParseEncodedInputStream_MemoryStream)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        MemoryStream ms(json_, length_);
+        EncodedInputStream<UTF8<>, MemoryStream> is(ms);
+        Document doc;
+        doc.ParseStream<0, UTF8<> >(is);
+        ASSERT_TRUE(doc.IsObject());
+    }
+}
+
+TEST_F(RapidJson, SIMD_SUFFIX(DocumentParseAutoUTFInputStream_MemoryStream)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        MemoryStream ms(json_, length_);
+        AutoUTFInputStream<unsigned, MemoryStream> is(ms);
+        Document doc;
+        doc.ParseStream<0, AutoUTF<unsigned> >(is);
+        ASSERT_TRUE(doc.IsObject());
+    }
+}
+
+template<typename T>
+size_t Traverse(const T& value) {
+    size_t count = 1;
+    switch(value.GetType()) {
+        case kObjectType:
+            for (typename T::ConstMemberIterator itr = value.MemberBegin(); itr != value.MemberEnd(); ++itr) {
+                count++;    // name
+                count += Traverse(itr->value);
+            }
+            break;
+
+        case kArrayType:
+            for (typename T::ConstValueIterator itr = value.Begin(); itr != value.End(); ++itr)
+                count += Traverse(*itr);
+            break;
+
+        default:
+            // Do nothing.
+            break;
+    }
+    return count;
+}
+
+TEST_F(RapidJson, DocumentTraverse) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        size_t count = Traverse(doc_);
+        EXPECT_EQ(4339u, count);
+        //if (i == 0)
+        //  std::cout << count << std::endl;
+    }
+}
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(effc++)
+#endif
+
+struct ValueCounter : public BaseReaderHandler<> {
+    ValueCounter() : count_(1) {}   // root
+
+    bool EndObject(SizeType memberCount) { count_ += memberCount * 2; return true; }
+    bool EndArray(SizeType elementCount) { count_ += elementCount; return true; }
+
+    SizeType count_;
+};
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_POP
+#endif
+
+TEST_F(RapidJson, DocumentAccept) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        ValueCounter counter;
+        doc_.Accept(counter);
+        EXPECT_EQ(4339u, counter.count_);
+    }
+}
+
+struct NullStream {
+    typedef char Ch;
+
+    NullStream() /*: length_(0)*/ {}
+    void Put(Ch) { /*++length_;*/ }
+    void Flush() {}
+    //size_t length_;
+};
+
+TEST_F(RapidJson, Writer_NullStream) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        NullStream s;
+        Writer<NullStream> writer(s);
+        doc_.Accept(writer);
+        //if (i == 0)
+        //  std::cout << s.length_ << std::endl;
+    }
+}
+
+TEST_F(RapidJson, SIMD_SUFFIX(Writer_StringBuffer)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        StringBuffer s(0, 1024 * 1024);
+        Writer<StringBuffer> writer(s);
+        doc_.Accept(writer);
+        const char* str = s.GetString();
+        (void)str;
+        //if (i == 0)
+        //  std::cout << strlen(str) << std::endl;
+    }
+}
+
+#define TEST_TYPED(index, Name)\
+TEST_F(RapidJson, SIMD_SUFFIX(Writer_StringBuffer_##Name)) {\
+    for (size_t i = 0; i < kTrialCount * 10; i++) {\
+        StringBuffer s(0, 1024 * 1024);\
+        Writer<StringBuffer> writer(s);\
+        typesDoc_[index].Accept(writer);\
+        const char* str = s.GetString();\
+        (void)str;\
+    }\
+}
+
+TEST_TYPED(0, Booleans)
+TEST_TYPED(1, Floats)
+TEST_TYPED(2, Guids)
+TEST_TYPED(3, Integers)
+TEST_TYPED(4, Mixed)
+TEST_TYPED(5, Nulls)
+TEST_TYPED(6, Paragraphs)
+
+#undef TEST_TYPED
+
+TEST_F(RapidJson, SIMD_SUFFIX(PrettyWriter_StringBuffer)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        StringBuffer s(0, 2048 * 1024);
+        PrettyWriter<StringBuffer> writer(s);
+        writer.SetIndent(' ', 1);
+        doc_.Accept(writer);
+        const char* str = s.GetString();
+        (void)str;
+        //if (i == 0)
+        //  std::cout << strlen(str) << std::endl;
+    }
+}
+
+TEST_F(RapidJson, internal_Pow10) {
+    double sum = 0;
+    for (size_t i = 0; i < kTrialCount * kTrialCount; i++)
+        sum += internal::Pow10(int(i & 255));
+    EXPECT_GT(sum, 0.0);
+}
+
+TEST_F(RapidJson, SkipWhitespace_Basic) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        rapidjson::StringStream s(whitespace_);
+        while (s.Peek() == ' ' || s.Peek() == '\n' || s.Peek() == '\r' || s.Peek() == '\t')
+            s.Take();
+        ASSERT_EQ('[', s.Peek());
+    }
+}
+
+TEST_F(RapidJson, SIMD_SUFFIX(SkipWhitespace)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        rapidjson::StringStream s(whitespace_);
+        rapidjson::SkipWhitespace(s);
+        ASSERT_EQ('[', s.Peek());
+    }
+}
+
+TEST_F(RapidJson, SkipWhitespace_strspn) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        const char* s = whitespace_ + std::strspn(whitespace_, " \t\r\n");
+        ASSERT_EQ('[', *s);
+    }
+}
+
+TEST_F(RapidJson, UTF8_Validate) {
+    NullStream os;
+
+    for (size_t i = 0; i < kTrialCount; i++) {
+        StringStream is(json_);
+        bool result = true;
+        while (is.Peek() != '\0')
+            result &= UTF8<>::Validate(is, os);
+        EXPECT_TRUE(result);
+    }
+}
+
+TEST_F(RapidJson, FileReadStream) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        FILE *fp = fopen(filename_, "rb");
+        char buffer[65536];
+        FileReadStream s(fp, buffer, sizeof(buffer));
+        while (s.Take() != '\0')
+            ;
+        fclose(fp);
+    }
+}
+
+TEST_F(RapidJson, SIMD_SUFFIX(ReaderParse_DummyHandler_FileReadStream)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        FILE *fp = fopen(filename_, "rb");
+        char buffer[65536];
+        FileReadStream s(fp, buffer, sizeof(buffer));
+        BaseReaderHandler<> h;
+        Reader reader;
+        reader.Parse(s, h);
+        fclose(fp);
+    }
+}
+
+TEST_F(RapidJson, IStreamWrapper) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        std::ifstream is(filename_, std::ios::in | std::ios::binary);
+        char buffer[65536];
+        IStreamWrapper isw(is, buffer, sizeof(buffer));
+        while (isw.Take() != '\0')
+            ;
+        is.close();
+    }
+}
+
+TEST_F(RapidJson, IStreamWrapper_Unbuffered) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        std::ifstream is(filename_, std::ios::in | std::ios::binary);
+        IStreamWrapper isw(is);
+        while (isw.Take() != '\0')
+            ;
+        is.close();
+    }
+}
+
+TEST_F(RapidJson, IStreamWrapper_Setbuffered) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        std::ifstream is;
+        char buffer[65536];
+        is.rdbuf()->pubsetbuf(buffer, sizeof(buffer));
+        is.open(filename_, std::ios::in | std::ios::binary);
+        IStreamWrapper isw(is);
+        while (isw.Take() != '\0')
+            ;
+        is.close();
+    }
+}
+
+TEST_F(RapidJson, SIMD_SUFFIX(ReaderParse_DummyHandler_IStreamWrapper)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        std::ifstream is(filename_, std::ios::in | std::ios::binary);
+        char buffer[65536];
+        IStreamWrapper isw(is, buffer, sizeof(buffer));
+        BaseReaderHandler<> h;
+        Reader reader;
+        reader.Parse(isw, h);
+        is.close();
+    }
+}
+
+TEST_F(RapidJson, SIMD_SUFFIX(ReaderParse_DummyHandler_IStreamWrapper_Unbuffered)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        std::ifstream is(filename_, std::ios::in | std::ios::binary);
+        IStreamWrapper isw(is);
+        BaseReaderHandler<> h;
+        Reader reader;
+        reader.Parse(isw, h);
+        is.close();
+    }
+}
+
+TEST_F(RapidJson, SIMD_SUFFIX(ReaderParse_DummyHandler_IStreamWrapper_Setbuffered)) {
+    for (size_t i = 0; i < kTrialCount; i++) {
+        std::ifstream is;
+        char buffer[65536];
+        is.rdbuf()->pubsetbuf(buffer, sizeof(buffer));
+        is.open(filename_, std::ios::in | std::ios::binary);
+        IStreamWrapper isw(is);
+        BaseReaderHandler<> h;
+        Reader reader;
+        reader.Parse(isw, h);
+        is.close();
+    }
+}
+
+TEST_F(RapidJson, StringBuffer) {
+    StringBuffer sb;
+    for (int i = 0; i < 32 * 1024 * 1024; i++)
+        sb.Put(i & 0x7f);
+}
+
+#endif // TEST_RAPIDJSON
diff --git a/test/perftest/schematest.cpp b/test/perftest/schematest.cpp
new file mode 100644
index 0000000..7d27344
--- /dev/null
+++ b/test/perftest/schematest.cpp
@@ -0,0 +1,223 @@
+#include "perftest.h"
+
+#if TEST_RAPIDJSON
+
+#include "rapidjson/schema.h"
+#include <ctime>
+#include <string>
+#include <vector>
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))  // fully parenthesized so it is safe in any expression context
+
+using namespace rapidjson;
+
+RAPIDJSON_DIAG_PUSH
+#if defined(__GNUC__) && __GNUC__ >= 7
+RAPIDJSON_DIAG_OFF(format-overflow)
+#endif
+
+template <typename Allocator>
+static char* ReadFile(const char* filename, Allocator& allocator) {
+    const char *paths[] = {
+        "",
+        "bin/",
+        "../bin/",
+        "../../bin/",
+        "../../../bin/"
+    };
+    char buffer[1024];
+    FILE *fp = 0;
+    for (size_t i = 0; i < sizeof(paths) / sizeof(paths[0]); i++) {
+        sprintf(buffer, "%s%s", paths[i], filename);
+        fp = fopen(buffer, "rb");
+        if (fp)
+            break;
+    }
+
+    if (!fp)
+        return 0;
+
+    fseek(fp, 0, SEEK_END);
+    size_t length = static_cast<size_t>(ftell(fp));
+    fseek(fp, 0, SEEK_SET);
+    char* json = reinterpret_cast<char*>(allocator.Malloc(length + 1));
+    size_t readLength = fread(json, 1, length, fp);
+    json[readLength] = '\0';
+    fclose(fp);
+    return json;
+}
+
+RAPIDJSON_DIAG_POP
+
+class Schema : public PerfTest {
+public:
+    Schema() {}
+
+    virtual void SetUp() {
+        PerfTest::SetUp();
+
+        const char* filenames[] = {
+            "additionalItems.json",
+            "additionalProperties.json",
+            "allOf.json",
+            "anyOf.json",
+            "default.json",
+            "definitions.json",
+            "dependencies.json",
+            "enum.json",
+            "items.json",
+            "maximum.json",
+            "maxItems.json",
+            "maxLength.json",
+            "maxProperties.json",
+            "minimum.json",
+            "minItems.json",
+            "minLength.json",
+            "minProperties.json",
+            "multipleOf.json",
+            "not.json",
+            "oneOf.json",
+            "pattern.json",
+            "patternProperties.json",
+            "properties.json",
+            "ref.json",
+            "refRemote.json",
+            "required.json",
+            "type.json",
+            "uniqueItems.json"
+        };
+
+        char jsonBuffer[65536];
+        MemoryPoolAllocator<> jsonAllocator(jsonBuffer, sizeof(jsonBuffer));
+
+        for (size_t i = 0; i < ARRAY_SIZE(filenames); i++) {
+            char filename[FILENAME_MAX];
+            sprintf(filename, "jsonschema/tests/draft4/%s", filenames[i]);
+            char* json = ReadFile(filename, jsonAllocator);
+            if (!json) {
+                printf("json test suite file %s not found", filename);
+                return;
+            }
+
+            Document d;
+            d.Parse(json);
+            if (d.HasParseError()) {
+                printf("json test suite file %s has parse error", filename);
+                return;
+            }
+
+            for (Value::ConstValueIterator schemaItr = d.Begin(); schemaItr != d.End(); ++schemaItr) {
+                std::string schemaDescription = (*schemaItr)["description"].GetString();
+                if (IsExcludeTestSuite(schemaDescription))
+                    continue;
+
+                TestSuite* ts = new TestSuite;
+                ts->schema = new SchemaDocument((*schemaItr)["schema"]);
+
+                const Value& tests = (*schemaItr)["tests"];
+                for (Value::ConstValueIterator testItr = tests.Begin(); testItr != tests.End(); ++testItr) {
+                    if (IsExcludeTest(schemaDescription + ", " + (*testItr)["description"].GetString()))
+                        continue;
+
+                    Document* d2 = new Document;
+                    d2->CopyFrom((*testItr)["data"], d2->GetAllocator());
+                    ts->tests.push_back(d2);
+                }
+                testSuites.push_back(ts);
+            }
+        }
+    }
+
+    virtual void TearDown() {
+        PerfTest::TearDown();
+        for (TestSuiteList::const_iterator itr = testSuites.begin(); itr != testSuites.end(); ++itr)
+            delete *itr;
+        testSuites.clear();
+    }
+
+private:
+    // Using the same exclusions as in https://github.com/json-schema/JSON-Schema-Test-Suite
+    static bool IsExcludeTestSuite(const std::string& description) {
+        const char* excludeTestSuites[] = {
+            // lots of validators fail these tests
+            "remote ref",
+            "remote ref, containing refs itself",
+            "fragment within remote ref",
+            "ref within remote ref",
+            "change resolution scope",
+            // these below were added to get jsck in the benchmarks
+            "uniqueItems validation",
+            "valid definition",
+            "invalid definition"
+        };
+
+        for (size_t i = 0; i < ARRAY_SIZE(excludeTestSuites); i++)
+            if (excludeTestSuites[i] == description)
+                return true;
+        return false;
+    }
+
+    // Using the same exclusions as in https://github.com/json-schema/JSON-Schema-Test-Suite
+    static bool IsExcludeTest(const std::string& description) {
+        const char* excludeTests[] = {
+            //lots of validators fail these
+            "invalid definition, invalid definition schema",
+            "maxLength validation, two supplementary Unicode code points is long enough",
+            "minLength validation, one supplementary Unicode code point is not long enough",
+            //this is to get tv4 in the benchmarks
+            "heterogeneous enum validation, something else is invalid"
+        };
+
+        for (size_t i = 0; i < ARRAY_SIZE(excludeTests); i++)
+            if (excludeTests[i] == description)
+                return true;
+        return false;
+    }
+
+    Schema(const Schema&);
+    Schema& operator=(const Schema&);
+
+protected:
+    typedef std::vector<Document*> DocumentList;
+
+    struct TestSuite {
+        TestSuite() : schema() {}
+        ~TestSuite() {
+            delete schema;
+            for (DocumentList::iterator itr = tests.begin(); itr != tests.end(); ++itr)
+                delete *itr;
+        }
+        SchemaDocument* schema;
+        DocumentList tests;
+    };
+
+    typedef std::vector<TestSuite* > TestSuiteList;
+    TestSuiteList testSuites;
+};
+
+TEST_F(Schema, TestSuite) {
+    char validatorBuffer[65536];
+    MemoryPoolAllocator<> validatorAllocator(validatorBuffer, sizeof(validatorBuffer));
+
+    const int trialCount = 100000;
+    int testCount = 0;
+    clock_t start = clock();
+    for (int i = 0; i < trialCount; i++) {
+        for (TestSuiteList::const_iterator itr = testSuites.begin(); itr != testSuites.end(); ++itr) {
+            const TestSuite& ts = **itr;
+            GenericSchemaValidator<SchemaDocument, BaseReaderHandler<UTF8<> >, MemoryPoolAllocator<> >  validator(*ts.schema, &validatorAllocator);
+            for (DocumentList::const_iterator testItr = ts.tests.begin(); testItr != ts.tests.end(); ++testItr) {
+                validator.Reset();
+                (*testItr)->Accept(validator);
+                testCount++;
+            }
+            validatorAllocator.Clear();
+        }
+    }
+    clock_t end = clock();
+    double duration = double(end - start) / CLOCKS_PER_SEC;
+    printf("%d trials in %f s -> %f trials per sec\n", trialCount, duration, trialCount / duration);
+    printf("%d tests per trial\n", testCount / trialCount);
+}
+
+#endif
diff --git a/test/unittest/CMakeLists.txt b/test/unittest/CMakeLists.txt
new file mode 100644
index 0000000..072b7b1
--- /dev/null
+++ b/test/unittest/CMakeLists.txt
@@ -0,0 +1,92 @@
+include(CheckCXXCompilerFlag)
+
+set(UNITTEST_SOURCES
+	allocatorstest.cpp
+    bigintegertest.cpp
+	cursorstreamwrappertest.cpp
+    documenttest.cpp
+    dtoatest.cpp
+    encodedstreamtest.cpp
+    encodingstest.cpp
+    fwdtest.cpp
+    filestreamtest.cpp
+    itoatest.cpp
+    istreamwrappertest.cpp
+    jsoncheckertest.cpp
+    namespacetest.cpp
+    pointertest.cpp
+    prettywritertest.cpp
+    ostreamwrappertest.cpp
+    readertest.cpp
+    regextest.cpp
+	schematest.cpp
+	simdtest.cpp
+    strfunctest.cpp
+    stringbuffertest.cpp
+    strtodtest.cpp
+    unittest.cpp
+    valuetest.cpp
+    writertest.cpp)
+
+find_program(CCACHE_FOUND ccache)
+if(CCACHE_FOUND)
+    set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache)
+    set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache)
+    if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Qunused-arguments -fcolor-diagnostics")
+		endif()
+endif(CCACHE_FOUND)
+
+set_property(DIRECTORY PROPERTY COMPILE_OPTIONS ${EXTRA_CXX_FLAGS})
+
+if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+    # If the user is running a newer version of Clang that includes the
+    # -Wdouble-promotion, we will ignore that warning.
+    if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 3.7)
+        CHECK_CXX_COMPILER_FLAG("-Wno-double-promotion" HAS_NO_DOUBLE_PROMOTION)
+        if (HAS_NO_DOUBLE_PROMOTION)
+            set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-double-promotion")
+        endif()
+    endif()
+elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
+    # Force to always compile with /W4
+    if(CMAKE_CXX_FLAGS MATCHES "/W[0-4]")
+        string(REGEX REPLACE "/W[0-4]" "/W4" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+    else()
+        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4")
+    endif()
+
+    # Force to always compile with /WX
+    if(CMAKE_CXX_FLAGS MATCHES "/WX-")
+        string(REGEX REPLACE "/WX-" "/WX" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+    else()
+        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /WX")
+    endif()
+endif()
+
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DRAPIDJSON_HAS_STDSTRING=1")
+
+add_library(namespacetest STATIC namespacetest.cpp)
+
+add_executable(unittest ${UNITTEST_SOURCES})
+target_link_libraries(unittest ${TEST_LIBRARIES} namespacetest)
+
+add_dependencies(tests unittest)
+
+add_test(NAME unittest
+    COMMAND ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/unittest
+    WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/bin)
+
+if(NOT MSVC)
+    # Not running SIMD.* unit test cases for Valgrind
+    add_test(NAME valgrind_unittest
+        COMMAND valgrind --suppressions=${CMAKE_SOURCE_DIR}/test/valgrind.supp --leak-check=full --error-exitcode=1 ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/unittest --gtest_filter=-SIMD.*
+        WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/bin)
+
+    if(CMAKE_BUILD_TYPE STREQUAL "Debug")
+        add_test(NAME symbol_check
+        COMMAND sh -c "objdump -t -C libnamespacetest.a | grep rapidjson ; test $? -ne 0"
+        WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+    endif(CMAKE_BUILD_TYPE STREQUAL "Debug")
+
+endif(NOT MSVC)
diff --git a/test/unittest/allocatorstest.cpp b/test/unittest/allocatorstest.cpp
new file mode 100644
index 0000000..2202c11
--- /dev/null
+++ b/test/unittest/allocatorstest.cpp
@@ -0,0 +1,100 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+
+#include "rapidjson/allocators.h"
+
+using namespace rapidjson;
+
+template <typename Allocator>
+void TestAllocator(Allocator& a) {
+    EXPECT_TRUE(a.Malloc(0) == 0);
+
+    uint8_t* p = static_cast<uint8_t*>(a.Malloc(100));
+    EXPECT_TRUE(p != 0);
+    for (size_t i = 0; i < 100; i++)
+        p[i] = static_cast<uint8_t>(i);
+
+    // Expand
+    uint8_t* q = static_cast<uint8_t*>(a.Realloc(p, 100, 200));
+    EXPECT_TRUE(q != 0);
+    for (size_t i = 0; i < 100; i++)
+        EXPECT_EQ(i, q[i]);
+    for (size_t i = 100; i < 200; i++)
+        q[i] = static_cast<uint8_t>(i);
+
+    // Shrink
+    uint8_t *r = static_cast<uint8_t*>(a.Realloc(q, 200, 150));
+    EXPECT_TRUE(r != 0);
+    for (size_t i = 0; i < 150; i++)
+        EXPECT_EQ(i, r[i]);
+
+    Allocator::Free(r);
+
+    // Realloc to zero size
+    EXPECT_TRUE(a.Realloc(a.Malloc(1), 1, 0) == 0);
+}
+
+TEST(Allocator, CrtAllocator) {
+    CrtAllocator a;
+    TestAllocator(a);
+}
+
+TEST(Allocator, MemoryPoolAllocator) {
+    MemoryPoolAllocator<> a;
+    TestAllocator(a);
+
+    for (size_t i = 1; i < 1000; i++) {
+        EXPECT_TRUE(a.Malloc(i) != 0);
+        EXPECT_LE(a.Size(), a.Capacity());
+    }
+}
+
+TEST(Allocator, Alignment) {
+    if (sizeof(size_t) >= 8) {
+        EXPECT_EQ(RAPIDJSON_UINT64_C2(0x00000000, 0x00000000), RAPIDJSON_ALIGN(0));
+        for (uint64_t i = 1; i < 8; i++) {
+            EXPECT_EQ(RAPIDJSON_UINT64_C2(0x00000000, 0x00000008), RAPIDJSON_ALIGN(i));
+            EXPECT_EQ(RAPIDJSON_UINT64_C2(0x00000000, 0x00000010), RAPIDJSON_ALIGN(RAPIDJSON_UINT64_C2(0x00000000, 0x00000008) + i));
+            EXPECT_EQ(RAPIDJSON_UINT64_C2(0x00000001, 0x00000000), RAPIDJSON_ALIGN(RAPIDJSON_UINT64_C2(0x00000000, 0xFFFFFFF8) + i));
+            EXPECT_EQ(RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0xFFFFFFF8), RAPIDJSON_ALIGN(RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0xFFFFFFF0) + i));
+        }
+    }
+
+    EXPECT_EQ(0u, RAPIDJSON_ALIGN(0u));
+    for (uint32_t i = 1; i < 8; i++) {
+        EXPECT_EQ(8u, RAPIDJSON_ALIGN(i));
+        EXPECT_EQ(0xFFFFFFF8u, RAPIDJSON_ALIGN(0xFFFFFFF0u + i));
+    }
+}
+
+TEST(Allocator, Issue399) {
+    MemoryPoolAllocator<> a;
+    void* p = a.Malloc(100);
+    void* q = a.Realloc(p, 100, 200);
+    EXPECT_EQ(p, q);
+
+    // exhaustive testing
+    for (size_t j = 1; j < 32; j++) {
+        a.Clear();
+        a.Malloc(j); // some unaligned size
+        p = a.Malloc(1);
+        for (size_t i = 1; i < 1024; i++) {
+            q = a.Realloc(p, i, i + 1);
+            EXPECT_EQ(p, q);
+            p = q;
+        }
+    }
+}
diff --git a/test/unittest/bigintegertest.cpp b/test/unittest/bigintegertest.cpp
new file mode 100644
index 0000000..6e9d4c6
--- /dev/null
+++ b/test/unittest/bigintegertest.cpp
@@ -0,0 +1,138 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+
+#include "rapidjson/internal/biginteger.h"
+
+using namespace rapidjson::internal;
+
+#define BIGINTEGER_LITERAL(s) BigInteger(s, sizeof(s) - 1)
+
+static const BigInteger kZero(0);
+static const BigInteger kOne(1);
+static const BigInteger kUint64Max = BIGINTEGER_LITERAL("18446744073709551615");
+static const BigInteger kTwo64 = BIGINTEGER_LITERAL("18446744073709551616");
+
+TEST(BigInteger, Constructor) {
+    EXPECT_TRUE(kZero.IsZero());
+    EXPECT_TRUE(kZero == kZero);
+    EXPECT_TRUE(kZero == BIGINTEGER_LITERAL("0"));
+    EXPECT_TRUE(kZero == BIGINTEGER_LITERAL("00"));
+
+    const BigInteger a(123);
+    EXPECT_TRUE(a == a);
+    EXPECT_TRUE(a == BIGINTEGER_LITERAL("123"));
+    EXPECT_TRUE(a == BIGINTEGER_LITERAL("0123"));
+
+    EXPECT_EQ(2u, kTwo64.GetCount());
+    EXPECT_EQ(0u, kTwo64.GetDigit(0));
+    EXPECT_EQ(1u, kTwo64.GetDigit(1));
+}
+
+TEST(BigInteger, AddUint64) {
+    BigInteger a = kZero;
+    a += 0u;
+    EXPECT_TRUE(kZero == a);
+
+    a += 1u;
+    EXPECT_TRUE(kOne == a);
+
+    a += 1u;
+    EXPECT_TRUE(BigInteger(2) == a);
+
+    EXPECT_TRUE(BigInteger(RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0xFFFFFFFF)) == kUint64Max);
+    BigInteger b = kUint64Max;
+    b += 1u;
+    EXPECT_TRUE(kTwo64 == b);
+    b += RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0xFFFFFFFF);
+    EXPECT_TRUE(BIGINTEGER_LITERAL("36893488147419103231") == b);
+}
+
+TEST(BigInteger, MultiplyUint64) {
+    BigInteger a = kZero;
+    a *= static_cast <uint64_t>(0);
+    EXPECT_TRUE(kZero == a);
+    a *= static_cast <uint64_t>(123);
+    EXPECT_TRUE(kZero == a);
+
+    BigInteger b = kOne;
+    b *= static_cast<uint64_t>(1);
+    EXPECT_TRUE(kOne == b);
+    b *= static_cast<uint64_t>(0);
+    EXPECT_TRUE(kZero == b);
+
+    BigInteger c(123);
+    c *= static_cast<uint64_t>(456u);
+    EXPECT_TRUE(BigInteger(123u * 456u) == c);
+    c *= RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0xFFFFFFFF);
+    EXPECT_TRUE(BIGINTEGER_LITERAL("1034640981606221330982120") == c);
+    c *= RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0xFFFFFFFF);
+    EXPECT_TRUE(BIGINTEGER_LITERAL("19085757395861596536664473018420572782123800") == c);
+}
+
+TEST(BigInteger, MultiplyUint32) {
+    BigInteger a = kZero;
+    a *= static_cast <uint32_t>(0);
+    EXPECT_TRUE(kZero == a);
+    a *= static_cast <uint32_t>(123);
+    EXPECT_TRUE(kZero == a);
+
+    BigInteger b = kOne;
+    b *= static_cast<uint32_t>(1);
+    EXPECT_TRUE(kOne == b);
+    b *= static_cast<uint32_t>(0);
+    EXPECT_TRUE(kZero == b);
+
+    BigInteger c(123);
+    c *= static_cast<uint32_t>(456u);
+    EXPECT_TRUE(BigInteger(123u * 456u) == c);
+    c *= 0xFFFFFFFFu;
+    EXPECT_TRUE(BIGINTEGER_LITERAL("240896125641960") == c);
+    c *= 0xFFFFFFFFu;
+    EXPECT_TRUE(BIGINTEGER_LITERAL("1034640981124429079698200") == c);
+}
+
+TEST(BigInteger, LeftShift) {
+    BigInteger a = kZero;
+    a <<= 1;
+    EXPECT_TRUE(kZero == a);
+    a <<= 64;
+    EXPECT_TRUE(kZero == a);
+
+    a = BigInteger(123);
+    a <<= 0;
+    EXPECT_TRUE(BigInteger(123) == a);
+    a <<= 1;
+    EXPECT_TRUE(BigInteger(246) == a);
+    a <<= 64;
+    EXPECT_TRUE(BIGINTEGER_LITERAL("4537899042132549697536") == a);
+    a <<= 99;
+    EXPECT_TRUE(BIGINTEGER_LITERAL("2876235222267216943024851750785644982682875244576768") == a);
+
+    a = 1;
+    a <<= 64; // a.count_ != 1
+    a <<= 256; // interShift == 0
+    EXPECT_TRUE(BIGINTEGER_LITERAL("2135987035920910082395021706169552114602704522356652769947041607822219725780640550022962086936576") == a);
+}
+
+TEST(BigInteger, Compare) {
+    EXPECT_EQ(0, kZero.Compare(kZero));
+    EXPECT_EQ(1, kOne.Compare(kZero));
+    EXPECT_EQ(-1, kZero.Compare(kOne));
+    EXPECT_EQ(0, kUint64Max.Compare(kUint64Max));
+    EXPECT_EQ(0, kTwo64.Compare(kTwo64));
+    EXPECT_EQ(-1, kUint64Max.Compare(kTwo64));
+    EXPECT_EQ(1, kTwo64.Compare(kUint64Max));
+}
diff --git a/test/unittest/cursorstreamwrappertest.cpp b/test/unittest/cursorstreamwrappertest.cpp
new file mode 100644
index 0000000..2ce2810
--- /dev/null
+++ b/test/unittest/cursorstreamwrappertest.cpp
@@ -0,0 +1,115 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+#include "rapidjson/document.h"
+#include "rapidjson/cursorstreamwrapper.h"
+
+using namespace rapidjson;
+
+// static const char json[] = "{\"string\"\n\n:\"my string\",\"array\"\n:[\"1\", \"2\", \"3\"]}";
+
+bool testJson(const char *json, size_t &line, size_t &col) {
+    StringStream ss(json);
+    CursorStreamWrapper<StringStream> csw(ss);
+    Document document;
+    document.ParseStream(csw);
+    bool ret = document.HasParseError();
+    if (ret) {
+        col = csw.GetColumn();
+        line = csw.GetLine();
+    }
+    return ret;
+}
+
+TEST(CursorStreamWrapper, MissingFirstBracket) {
+    const char json[] = "\"string\"\n\n:\"my string\",\"array\"\n:[\"1\", \"2\", \"3\"]}";
+    size_t col, line;
+    bool ret = testJson(json, line, col);
+    EXPECT_TRUE(ret);
+    EXPECT_EQ(line, 3u);
+    EXPECT_EQ(col, 0u);
+}
+
+TEST(CursorStreamWrapper, MissingQuotes) {
+    const char json[] = "{\"string\n\n:\"my string\",\"array\"\n:[\"1\", \"2\", \"3\"]}";
+    size_t col, line;
+    bool ret = testJson(json, line, col);
+    EXPECT_TRUE(ret);
+    EXPECT_EQ(line, 1u);
+    EXPECT_EQ(col, 8u);
+}
+
+TEST(CursorStreamWrapper, MissingColon) {
+    const char json[] = "{\"string\"\n\n\"my string\",\"array\"\n:[\"1\", \"2\", \"3\"]}";
+    size_t col, line;
+    bool ret = testJson(json, line, col);
+    EXPECT_TRUE(ret);
+    EXPECT_EQ(line, 3u);
+    EXPECT_EQ(col, 0u);
+}
+
+TEST(CursorStreamWrapper, MissingSecondQuotes) {
+    const char json[] = "{\"string\"\n\n:my string\",\"array\"\n:[\"1\", \"2\", \"3\"]}";
+    size_t col, line;
+    bool ret = testJson(json, line, col);
+    EXPECT_TRUE(ret);
+    EXPECT_EQ(line, 3u);
+    EXPECT_EQ(col, 1u);
+}
+
+TEST(CursorStreamWrapper, MissingComma) {
+    const char json[] = "{\"string\"\n\n:\"my string\"\"array\"\n:[\"1\", \"2\", \"3\"]}";
+    size_t col, line;
+    bool ret = testJson(json, line, col);
+    EXPECT_TRUE(ret);
+    EXPECT_EQ(line, 3u);
+    EXPECT_EQ(col, 12u);
+}
+
+TEST(CursorStreamWrapper, MissingArrayBracket) {
+    const char json[] = "{\"string\"\n\n:\"my string\",\"array\"\n:\"1\", \"2\", \"3\"]}";
+    size_t col, line;
+    bool ret = testJson(json, line, col);
+    EXPECT_TRUE(ret);
+    EXPECT_EQ(line, 4u);
+    EXPECT_EQ(col, 9u);
+}
+
+TEST(CursorStreamWrapper, MissingArrayComma) {
+    const char json[] = "{\"string\"\n\n:\"my string\",\"array\"\n:[\"1\" \"2\", \"3\"]}";
+    size_t col, line;
+    bool ret = testJson(json, line, col);
+    EXPECT_TRUE(ret);
+    EXPECT_EQ(line, 4u);
+    EXPECT_EQ(col, 6u);
+}
+
+TEST(CursorStreamWrapper, MissingLastArrayBracket) {
+    const char json8[] = "{\"string\"\n\n:\"my string\",\"array\"\n:[\"1\", \"2\", \"3\"}";
+    size_t col, line;
+    bool ret = testJson(json8, line, col);
+    EXPECT_TRUE(ret);
+    EXPECT_EQ(line, 4u);
+    EXPECT_EQ(col, 15u);
+}
+
+TEST(CursorStreamWrapper, MissingLastBracket) {
+    const char json9[] = "{\"string\"\n\n:\"my string\",\"array\"\n:[\"1\", \"2\", \"3\"]";
+    size_t col, line;
+    bool ret = testJson(json9, line, col);
+    EXPECT_TRUE(ret);
+    EXPECT_EQ(line, 4u);
+    EXPECT_EQ(col, 16u);
+}
diff --git a/test/unittest/documenttest.cpp b/test/unittest/documenttest.cpp
new file mode 100644
index 0000000..5429802
--- /dev/null
+++ b/test/unittest/documenttest.cpp
@@ -0,0 +1,672 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+#include "rapidjson/document.h"
+#include "rapidjson/writer.h"
+#include "rapidjson/filereadstream.h"
+#include "rapidjson/encodedstream.h"
+#include "rapidjson/stringbuffer.h"
+#include <sstream>
+#include <algorithm>
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(c++98-compat)
+RAPIDJSON_DIAG_OFF(missing-variable-declarations)
+#endif
+
+using namespace rapidjson;
+
+template <typename DocumentType>
+void ParseCheck(DocumentType& doc) {
+    typedef typename DocumentType::ValueType ValueType;
+
+    EXPECT_FALSE(doc.HasParseError());
+    if (doc.HasParseError())
+        printf("Error: %d at %zu\n", static_cast<int>(doc.GetParseError()), doc.GetErrorOffset());
+    EXPECT_TRUE(static_cast<ParseResult>(doc));
+
+    EXPECT_TRUE(doc.IsObject());
+
+    EXPECT_TRUE(doc.HasMember("hello"));
+    const ValueType& hello = doc["hello"];
+    EXPECT_TRUE(hello.IsString());
+    EXPECT_STREQ("world", hello.GetString());
+
+    EXPECT_TRUE(doc.HasMember("t"));
+    const ValueType& t = doc["t"];
+    EXPECT_TRUE(t.IsTrue());
+
+    EXPECT_TRUE(doc.HasMember("f"));
+    const ValueType& f = doc["f"];
+    EXPECT_TRUE(f.IsFalse());
+
+    EXPECT_TRUE(doc.HasMember("n"));
+    const ValueType& n = doc["n"];
+    EXPECT_TRUE(n.IsNull());
+
+    EXPECT_TRUE(doc.HasMember("i"));
+    const ValueType& i = doc["i"];
+    EXPECT_TRUE(i.IsNumber());
+    EXPECT_EQ(123, i.GetInt());
+
+    EXPECT_TRUE(doc.HasMember("pi"));
+    const ValueType& pi = doc["pi"];
+    EXPECT_TRUE(pi.IsNumber());
+    EXPECT_DOUBLE_EQ(3.1416, pi.GetDouble());
+
+    EXPECT_TRUE(doc.HasMember("a"));
+    const ValueType& a = doc["a"];
+    EXPECT_TRUE(a.IsArray());
+    EXPECT_EQ(4u, a.Size());
+    for (SizeType j = 0; j < 4; j++)
+        EXPECT_EQ(j + 1, a[j].GetUint());
+}
+
+template <typename Allocator, typename StackAllocator>
+void ParseTest() {
+    typedef GenericDocument<UTF8<>, Allocator, StackAllocator> DocumentType;
+    DocumentType doc;
+
+    const char* json = " { \"hello\" : \"world\", \"t\" : true , \"f\" : false, \"n\": null, \"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3, 4] } ";
+
+    doc.Parse(json);
+    ParseCheck(doc);
+
+    doc.SetNull();
+    StringStream s(json);
+    doc.template ParseStream<0>(s);
+    ParseCheck(doc);
+
+    doc.SetNull();
+    char *buffer = strdup(json);
+    doc.ParseInsitu(buffer);
+    ParseCheck(doc);
+    free(buffer);
+
+    // Parse(const Ch*, size_t)
+    size_t length = strlen(json);
+    buffer = reinterpret_cast<char*>(malloc(length * 2));
+    memcpy(buffer, json, length);
+    memset(buffer + length, 'X', length);
+#if RAPIDJSON_HAS_STDSTRING
+    std::string s2(buffer, length); // backup buffer
+#endif
+    doc.SetNull();
+    doc.Parse(buffer, length);
+    free(buffer);
+    ParseCheck(doc);
+
+#if RAPIDJSON_HAS_STDSTRING
+    // Parse(std::string)
+    doc.SetNull();
+    doc.Parse(s2);
+    ParseCheck(doc);
+#endif
+}
+
+TEST(Document, Parse) {
+    ParseTest<MemoryPoolAllocator<>, CrtAllocator>();
+    ParseTest<MemoryPoolAllocator<>, MemoryPoolAllocator<> >();
+    ParseTest<CrtAllocator, MemoryPoolAllocator<> >();
+    ParseTest<CrtAllocator, CrtAllocator>();
+}
+
+TEST(Document, UnchangedOnParseError) {
+    Document doc;
+    doc.SetArray().PushBack(0, doc.GetAllocator());
+
+    ParseResult noError;
+    EXPECT_TRUE(noError);
+
+    ParseResult err = doc.Parse("{]");
+    EXPECT_TRUE(doc.HasParseError());
+    EXPECT_NE(err, noError);
+    EXPECT_NE(err.Code(), noError);
+    EXPECT_NE(noError, doc.GetParseError());
+    EXPECT_EQ(err.Code(), doc.GetParseError());
+    EXPECT_EQ(err.Offset(), doc.GetErrorOffset());
+    EXPECT_TRUE(doc.IsArray());
+    EXPECT_EQ(doc.Size(), 1u);
+
+    err = doc.Parse("{}");
+    EXPECT_FALSE(doc.HasParseError());
+    EXPECT_FALSE(err.IsError());
+    EXPECT_TRUE(err);
+    EXPECT_EQ(err, noError);
+    EXPECT_EQ(err.Code(), noError);
+    EXPECT_EQ(err.Code(), doc.GetParseError());
+    EXPECT_EQ(err.Offset(), doc.GetErrorOffset());
+    EXPECT_TRUE(doc.IsObject());
+    EXPECT_EQ(doc.MemberCount(), 0u);
+}
+
+static FILE* OpenEncodedFile(const char* filename) {
+    const char *paths[] = {
+        "encodings",
+        "bin/encodings",
+        "../bin/encodings",
+        "../../bin/encodings",
+        "../../../bin/encodings"
+    };
+    char buffer[1024];
+    for (size_t i = 0; i < sizeof(paths) / sizeof(paths[0]); i++) {
+        sprintf(buffer, "%s/%s", paths[i], filename);
+        FILE *fp = fopen(buffer, "rb");
+        if (fp)
+            return fp;
+    }
+    return 0;
+}
+
+TEST(Document, Parse_Encoding) {
+    const char* json = " { \"hello\" : \"world\", \"t\" : true , \"f\" : false, \"n\": null, \"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3, 4] } ";
+
+    typedef GenericDocument<UTF16<> > DocumentType;
+    DocumentType doc;
+    
+    // Parse<unsigned, SourceEncoding>(const SourceEncoding::Ch*)
+    // doc.Parse<kParseDefaultFlags, UTF8<> >(json);
+    // EXPECT_FALSE(doc.HasParseError());
+    // EXPECT_EQ(0, StrCmp(doc[L"hello"].GetString(), L"world"));
+
+    // Parse<unsigned, SourceEncoding>(const SourceEncoding::Ch*, size_t)
+    size_t length = strlen(json);
+    char* buffer = reinterpret_cast<char*>(malloc(length * 2));
+    memcpy(buffer, json, length);
+    memset(buffer + length, 'X', length);
+#if RAPIDJSON_HAS_STDSTRING
+    std::string s2(buffer, length); // backup buffer
+#endif
+    doc.SetNull();
+    doc.Parse<kParseDefaultFlags, UTF8<> >(buffer, length);
+    free(buffer);
+    EXPECT_FALSE(doc.HasParseError());
+    if (doc.HasParseError())
+        printf("Error: %d at %zu\n", static_cast<int>(doc.GetParseError()), doc.GetErrorOffset());
+    EXPECT_EQ(0, StrCmp(doc[L"hello"].GetString(), L"world"));
+
+#if RAPIDJSON_HAS_STDSTRING
+    // Parse<unsigned, SourceEncoding>(std::string)
+    doc.SetNull();
+
+#if defined(_MSC_VER) && _MSC_VER < 1800
+    doc.Parse<kParseDefaultFlags, UTF8<> >(s2.c_str()); // VS2010 or below cannot handle templated function overloading. Use const char* instead.
+#else
+    doc.Parse<kParseDefaultFlags, UTF8<> >(s2);
+#endif
+    EXPECT_FALSE(doc.HasParseError());
+    EXPECT_EQ(0, StrCmp(doc[L"hello"].GetString(), L"world"));
+#endif
+}
+
+TEST(Document, ParseStream_EncodedInputStream) {
+    // UTF8 -> UTF16
+    FILE* fp = OpenEncodedFile("utf8.json");
+    char buffer[256];
+    FileReadStream bis(fp, buffer, sizeof(buffer));
+    EncodedInputStream<UTF8<>, FileReadStream> eis(bis);
+
+    GenericDocument<UTF16<> > d;
+    d.ParseStream<0, UTF8<> >(eis);
+    EXPECT_FALSE(d.HasParseError());
+
+    fclose(fp);
+
+    wchar_t expected[] = L"I can eat glass and it doesn't hurt me.";
+    GenericValue<UTF16<> >& v = d[L"en"];
+    EXPECT_TRUE(v.IsString());
+    EXPECT_EQ(sizeof(expected) / sizeof(wchar_t) - 1, v.GetStringLength());
+    EXPECT_EQ(0, StrCmp(expected, v.GetString()));
+
+    // UTF16 -> UTF8 in memory
+    StringBuffer bos;
+    typedef EncodedOutputStream<UTF8<>, StringBuffer> OutputStream;
+    OutputStream eos(bos, false);   // Not writing BOM
+    {
+        Writer<OutputStream, UTF16<>, UTF8<> > writer(eos);
+        d.Accept(writer);
+    }
+
+    // Condense the original file and compare.
+    fp = OpenEncodedFile("utf8.json");
+    FileReadStream is(fp, buffer, sizeof(buffer));
+    Reader reader;
+    StringBuffer bos2;
+    Writer<StringBuffer> writer2(bos2);
+    reader.Parse(is, writer2);
+    fclose(fp);
+
+    EXPECT_EQ(bos.GetSize(), bos2.GetSize());
+    EXPECT_EQ(0, memcmp(bos.GetString(), bos2.GetString(), bos2.GetSize()));
+}
+
+TEST(Document, ParseStream_AutoUTFInputStream) {
+    // Any -> UTF8
+    FILE* fp = OpenEncodedFile("utf32be.json");
+    char buffer[256];
+    FileReadStream bis(fp, buffer, sizeof(buffer));
+    AutoUTFInputStream<unsigned, FileReadStream> eis(bis);
+
+    Document d;
+    d.ParseStream<0, AutoUTF<unsigned> >(eis);
+    EXPECT_FALSE(d.HasParseError());
+
+    fclose(fp);
+
+    char expected[] = "I can eat glass and it doesn't hurt me.";
+    Value& v = d["en"];
+    EXPECT_TRUE(v.IsString());
+    EXPECT_EQ(sizeof(expected) - 1, v.GetStringLength());
+    EXPECT_EQ(0, StrCmp(expected, v.GetString()));
+
+    // UTF8 -> UTF8 in memory
+    StringBuffer bos;
+    Writer<StringBuffer> writer(bos);
+    d.Accept(writer);
+
+    // Condense the original file and compare.
+    fp = OpenEncodedFile("utf8.json");
+    FileReadStream is(fp, buffer, sizeof(buffer));
+    Reader reader;
+    StringBuffer bos2;
+    Writer<StringBuffer> writer2(bos2);
+    reader.Parse(is, writer2);
+    fclose(fp);
+
+    EXPECT_EQ(bos.GetSize(), bos2.GetSize());
+    EXPECT_EQ(0, memcmp(bos.GetString(), bos2.GetString(), bos2.GetSize()));
+}
+
+TEST(Document, Swap) {
+    Document d1;
+    Document::AllocatorType& a = d1.GetAllocator();
+
+    d1.SetArray().PushBack(1, a).PushBack(2, a);
+
+    Value o;
+    o.SetObject().AddMember("a", 1, a);
+
+    // Swap between Document and Value
+    d1.Swap(o);
+    EXPECT_TRUE(d1.IsObject());
+    EXPECT_TRUE(o.IsArray());
+
+    d1.Swap(o);
+    EXPECT_TRUE(d1.IsArray());
+    EXPECT_TRUE(o.IsObject());
+
+    o.Swap(d1);
+    EXPECT_TRUE(d1.IsObject());
+    EXPECT_TRUE(o.IsArray());
+
+    // Swap between Document and Document
+    Document d2;
+    d2.SetArray().PushBack(3, a);
+    d1.Swap(d2);
+    EXPECT_TRUE(d1.IsArray());
+    EXPECT_TRUE(d2.IsObject());
+    EXPECT_EQ(&d2.GetAllocator(), &a);
+
+    // reset value
+    Value().Swap(d1);
+    EXPECT_TRUE(d1.IsNull());
+
+    // reset document, including allocator
+    Document().Swap(d2);
+    EXPECT_TRUE(d2.IsNull());
+    EXPECT_NE(&d2.GetAllocator(), &a);
+
+    // testing std::swap compatibility
+    d1.SetBool(true);
+    using std::swap;
+    swap(d1, d2);
+    EXPECT_TRUE(d1.IsNull());
+    EXPECT_TRUE(d2.IsTrue());
+
+    swap(o, d2);
+    EXPECT_TRUE(o.IsTrue());
+    EXPECT_TRUE(d2.IsArray());
+}
+
+
+// This should be slow due to assignment in inner-loop.
+struct OutputStringStream : public std::ostringstream {
+    typedef char Ch;
+
+    virtual ~OutputStringStream();
+
+    void Put(char c) {
+        put(c);
+    }
+    void Flush() {}
+};
+
+OutputStringStream::~OutputStringStream() {}
+
+TEST(Document, AcceptWriter) {
+    Document doc;
+    doc.Parse(" { \"hello\" : \"world\", \"t\" : true , \"f\" : false, \"n\": null, \"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3, 4] } ");
+
+    OutputStringStream os;
+    Writer<OutputStringStream> writer(os);
+    doc.Accept(writer);
+
+    EXPECT_EQ("{\"hello\":\"world\",\"t\":true,\"f\":false,\"n\":null,\"i\":123,\"pi\":3.1416,\"a\":[1,2,3,4]}", os.str());
+}
+
+TEST(Document, UserBuffer) {
+    typedef GenericDocument<UTF8<>, MemoryPoolAllocator<>, MemoryPoolAllocator<> > DocumentType;
+    char valueBuffer[4096];
+    char parseBuffer[1024];
+    MemoryPoolAllocator<> valueAllocator(valueBuffer, sizeof(valueBuffer));
+    MemoryPoolAllocator<> parseAllocator(parseBuffer, sizeof(parseBuffer));
+    DocumentType doc(&valueAllocator, sizeof(parseBuffer) / 2, &parseAllocator);
+    doc.Parse(" { \"hello\" : \"world\", \"t\" : true , \"f\" : false, \"n\": null, \"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3, 4] } ");
+    EXPECT_FALSE(doc.HasParseError());
+    EXPECT_LE(valueAllocator.Size(), sizeof(valueBuffer));
+    EXPECT_LE(parseAllocator.Size(), sizeof(parseBuffer));
+
+    // Cover MemoryPoolAllocator::Capacity()
+    EXPECT_LE(valueAllocator.Size(), valueAllocator.Capacity());
+    EXPECT_LE(parseAllocator.Size(), parseAllocator.Capacity());
+}
+
+// Issue 226: Value of string type should not point to NULL
+TEST(Document, AssertAcceptInvalidNameType) {
+    Document doc;
+    doc.SetObject();
+    doc.AddMember("a", 0, doc.GetAllocator());
+    doc.FindMember("a")->name.SetNull(); // Change name to non-string type.
+
+    OutputStringStream os;
+    Writer<OutputStringStream> writer(os);
+    ASSERT_THROW(doc.Accept(writer), AssertException);
+}
+
+// Issue 44:    SetStringRaw doesn't work with wchar_t
+TEST(Document, UTF16_Document) {
+    GenericDocument< UTF16<> > json;
+    json.Parse<kParseValidateEncodingFlag>(L"[{\"created_at\":\"Wed Oct 30 17:13:20 +0000 2012\"}]");
+
+    ASSERT_TRUE(json.IsArray());
+    GenericValue< UTF16<> >& v = json[0];
+    ASSERT_TRUE(v.IsObject());
+
+    GenericValue< UTF16<> >& s = v[L"created_at"];
+    ASSERT_TRUE(s.IsString());
+
+    EXPECT_EQ(0, memcmp(L"Wed Oct 30 17:13:20 +0000 2012", s.GetString(), (s.GetStringLength() + 1) * sizeof(wchar_t)));
+}
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+
+#if 0 // Many old compilers do not support these. Turn it off temporarily.
+
+#include <type_traits>
+
+TEST(Document, Traits) {
+    static_assert(std::is_constructible<Document>::value, "");
+    static_assert(std::is_default_constructible<Document>::value, "");
+#ifndef _MSC_VER
+    static_assert(!std::is_copy_constructible<Document>::value, "");
+#endif
+    static_assert(std::is_move_constructible<Document>::value, "");
+
+    static_assert(!std::is_nothrow_constructible<Document>::value, "");
+    static_assert(!std::is_nothrow_default_constructible<Document>::value, "");
+#ifndef _MSC_VER
+    static_assert(!std::is_nothrow_copy_constructible<Document>::value, "");
+    static_assert(std::is_nothrow_move_constructible<Document>::value, "");
+#endif
+
+    static_assert(std::is_assignable<Document,Document>::value, "");
+#ifndef _MSC_VER
+  static_assert(!std::is_copy_assignable<Document>::value, "");
+#endif
+    static_assert(std::is_move_assignable<Document>::value, "");
+
+#ifndef _MSC_VER
+    static_assert(std::is_nothrow_assignable<Document, Document>::value, "");
+#endif
+    static_assert(!std::is_nothrow_copy_assignable<Document>::value, "");
+#ifndef _MSC_VER
+    static_assert(std::is_nothrow_move_assignable<Document>::value, "");
+#endif
+
+    static_assert( std::is_destructible<Document>::value, "");
+#ifndef _MSC_VER
+    static_assert(std::is_nothrow_destructible<Document>::value, "");
+#endif
+}
+
+#endif
+
+template <typename Allocator>
+struct DocumentMove: public ::testing::Test {
+};
+
+typedef ::testing::Types< CrtAllocator, MemoryPoolAllocator<> > MoveAllocatorTypes;
+TYPED_TEST_CASE(DocumentMove, MoveAllocatorTypes);
+
+TYPED_TEST(DocumentMove, MoveConstructor) {
+    typedef TypeParam Allocator;
+    typedef GenericDocument<UTF8<>, Allocator> D;
+    Allocator allocator;
+
+    D a(&allocator);
+    a.Parse("[\"one\", \"two\", \"three\"]");
+    EXPECT_FALSE(a.HasParseError());
+    EXPECT_TRUE(a.IsArray());
+    EXPECT_EQ(3u, a.Size());
+    EXPECT_EQ(&a.GetAllocator(), &allocator);
+
+    // Document b(a); // does not compile (!is_copy_constructible)
+    D b(std::move(a));
+    EXPECT_TRUE(a.IsNull());
+    EXPECT_TRUE(b.IsArray());
+    EXPECT_EQ(3u, b.Size());
+    EXPECT_THROW(a.GetAllocator(), AssertException);
+    EXPECT_EQ(&b.GetAllocator(), &allocator);
+
+    b.Parse("{\"Foo\": \"Bar\", \"Baz\": 42}");
+    EXPECT_FALSE(b.HasParseError());
+    EXPECT_TRUE(b.IsObject());
+    EXPECT_EQ(2u, b.MemberCount());
+
+    // Document c = a; // does not compile (!is_copy_constructible)
+    D c = std::move(b);
+    EXPECT_TRUE(b.IsNull());
+    EXPECT_TRUE(c.IsObject());
+    EXPECT_EQ(2u, c.MemberCount());
+    EXPECT_THROW(b.GetAllocator(), AssertException);
+    EXPECT_EQ(&c.GetAllocator(), &allocator);
+}
+
+TYPED_TEST(DocumentMove, MoveConstructorParseError) {
+    typedef TypeParam Allocator;
+    typedef GenericDocument<UTF8<>, Allocator> D;
+
+    ParseResult noError;
+    D a;
+    a.Parse("{ 4 = 4]");
+    ParseResult error(a.GetParseError(), a.GetErrorOffset());
+    EXPECT_TRUE(a.HasParseError());
+    EXPECT_NE(error, noError);
+    EXPECT_NE(error.Code(), noError);
+    EXPECT_NE(error.Code(), noError.Code());
+    EXPECT_NE(error.Offset(), noError.Offset());
+
+    D b(std::move(a));
+    EXPECT_FALSE(a.HasParseError());
+    EXPECT_TRUE(b.HasParseError());
+    EXPECT_EQ(a.GetParseError(), noError);
+    EXPECT_EQ(a.GetParseError(), noError.Code());
+    EXPECT_EQ(a.GetErrorOffset(), noError.Offset());
+    EXPECT_EQ(b.GetParseError(), error);
+    EXPECT_EQ(b.GetParseError(), error.Code());
+    EXPECT_EQ(b.GetErrorOffset(), error.Offset());
+
+    D c(std::move(b));
+    EXPECT_FALSE(b.HasParseError());
+    EXPECT_TRUE(c.HasParseError());
+    EXPECT_EQ(b.GetParseError(), noError.Code());
+    EXPECT_EQ(c.GetParseError(), error.Code());
+    EXPECT_EQ(b.GetErrorOffset(), noError.Offset());
+    EXPECT_EQ(c.GetErrorOffset(), error.Offset());
+}
+
+// This test does not properly use parsing, just for testing.
+// It must call ClearStack() explicitly to prevent memory leak.
+// But here we cannot as ClearStack() is private.
+#if 0
+TYPED_TEST(DocumentMove, MoveConstructorStack) {
+    typedef TypeParam Allocator;
+    typedef UTF8<> Encoding;
+    typedef GenericDocument<Encoding, Allocator> Document;
+
+    Document a;
+    size_t defaultCapacity = a.GetStackCapacity();
+
+    // Trick Document into getting GetStackCapacity() to return non-zero
+    typedef GenericReader<Encoding, Encoding, Allocator> Reader;
+    Reader reader(&a.GetAllocator());
+    GenericStringStream<Encoding> is("[\"one\", \"two\", \"three\"]");
+    reader.template Parse<kParseDefaultFlags>(is, a);
+    size_t capacity = a.GetStackCapacity();
+    EXPECT_GT(capacity, 0u);
+
+    Document b(std::move(a));
+    EXPECT_EQ(a.GetStackCapacity(), defaultCapacity);
+    EXPECT_EQ(b.GetStackCapacity(), capacity);
+
+    Document c = std::move(b);
+    EXPECT_EQ(b.GetStackCapacity(), defaultCapacity);
+    EXPECT_EQ(c.GetStackCapacity(), capacity);
+}
+#endif
+
+TYPED_TEST(DocumentMove, MoveAssignment) {
+    typedef TypeParam Allocator;
+    typedef GenericDocument<UTF8<>, Allocator> D;
+    Allocator allocator;
+
+    D a(&allocator);
+    a.Parse("[\"one\", \"two\", \"three\"]");
+    EXPECT_FALSE(a.HasParseError());
+    EXPECT_TRUE(a.IsArray());
+    EXPECT_EQ(3u, a.Size());
+    EXPECT_EQ(&a.GetAllocator(), &allocator);
+
+    // Document b; b = a; // does not compile (!is_copy_assignable)
+    D b;
+    b = std::move(a);
+    EXPECT_TRUE(a.IsNull());
+    EXPECT_TRUE(b.IsArray());
+    EXPECT_EQ(3u, b.Size());
+    EXPECT_THROW(a.GetAllocator(), AssertException);
+    EXPECT_EQ(&b.GetAllocator(), &allocator);
+
+    b.Parse("{\"Foo\": \"Bar\", \"Baz\": 42}");
+    EXPECT_FALSE(b.HasParseError());
+    EXPECT_TRUE(b.IsObject());
+    EXPECT_EQ(2u, b.MemberCount());
+
+    // Document c; c = a; // does not compile (see static_assert)
+    D c;
+    c = std::move(b);
+    EXPECT_TRUE(b.IsNull());
+    EXPECT_TRUE(c.IsObject());
+    EXPECT_EQ(2u, c.MemberCount());
+    EXPECT_THROW(b.GetAllocator(), AssertException);
+    EXPECT_EQ(&c.GetAllocator(), &allocator);
+}
+
+TYPED_TEST(DocumentMove, MoveAssignmentParseError) {
+    typedef TypeParam Allocator;
+    typedef GenericDocument<UTF8<>, Allocator> D;
+
+    ParseResult noError;
+    D a;
+    a.Parse("{ 4 = 4]");
+    ParseResult error(a.GetParseError(), a.GetErrorOffset());
+    EXPECT_TRUE(a.HasParseError());
+    EXPECT_NE(error.Code(), noError.Code());
+    EXPECT_NE(error.Offset(), noError.Offset());
+
+    D b;
+    b = std::move(a);
+    EXPECT_FALSE(a.HasParseError());
+    EXPECT_TRUE(b.HasParseError());
+    EXPECT_EQ(a.GetParseError(), noError.Code());
+    EXPECT_EQ(b.GetParseError(), error.Code());
+    EXPECT_EQ(a.GetErrorOffset(), noError.Offset());
+    EXPECT_EQ(b.GetErrorOffset(), error.Offset());
+
+    D c;
+    c = std::move(b);
+    EXPECT_FALSE(b.HasParseError());
+    EXPECT_TRUE(c.HasParseError());
+    EXPECT_EQ(b.GetParseError(), noError.Code());
+    EXPECT_EQ(c.GetParseError(), error.Code());
+    EXPECT_EQ(b.GetErrorOffset(), noError.Offset());
+    EXPECT_EQ(c.GetErrorOffset(), error.Offset());
+}
+
+// This test does not properly use parsing, just for testing.
+// It must call ClearStack() explicitly to prevent memory leak.
+// But here we cannot as ClearStack() is private.
+#if 0
+TYPED_TEST(DocumentMove, MoveAssignmentStack) {
+    typedef TypeParam Allocator;
+    typedef UTF8<> Encoding;
+    typedef GenericDocument<Encoding, Allocator> D;
+
+    D a;
+    size_t defaultCapacity = a.GetStackCapacity();
+
+    // Trick Document into getting GetStackCapacity() to return non-zero
+    typedef GenericReader<Encoding, Encoding, Allocator> Reader;
+    Reader reader(&a.GetAllocator());
+    GenericStringStream<Encoding> is("[\"one\", \"two\", \"three\"]");
+    reader.template Parse<kParseDefaultFlags>(is, a);
+    size_t capacity = a.GetStackCapacity();
+    EXPECT_GT(capacity, 0u);
+
+    D b;
+    b = std::move(a);
+    EXPECT_EQ(a.GetStackCapacity(), defaultCapacity);
+    EXPECT_EQ(b.GetStackCapacity(), capacity);
+
+    D c;
+    c = std::move(b);
+    EXPECT_EQ(b.GetStackCapacity(), defaultCapacity);
+    EXPECT_EQ(c.GetStackCapacity(), capacity);
+}
+#endif
+
+#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS
+
+// Issue 22: Memory corruption via operator=
+// Fixed by making unimplemented assignment operator private.
+//TEST(Document, Assignment) {
+//  Document d1;
+//  Document d2;
+//  d1 = d2;
+//}
+
+#ifdef __clang__
+RAPIDJSON_DIAG_POP
+#endif
diff --git a/test/unittest/dtoatest.cpp b/test/unittest/dtoatest.cpp
new file mode 100644
index 0000000..afd76eb
--- /dev/null
+++ b/test/unittest/dtoatest.cpp
@@ -0,0 +1,98 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+#include "rapidjson/internal/dtoa.h"
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(type-limits)
+#endif
+
+using namespace rapidjson::internal;
+
+TEST(dtoa, normal) {
+    char buffer[30];
+
+#define TEST_DTOA(d, a)\
+    *dtoa(d, buffer) = '\0';\
+    EXPECT_STREQ(a, buffer)
+
+    TEST_DTOA(0.0, "0.0");
+    TEST_DTOA(-0.0, "-0.0");
+    TEST_DTOA(1.0, "1.0");
+    TEST_DTOA(-1.0, "-1.0");
+    TEST_DTOA(1.2345, "1.2345");
+    TEST_DTOA(1.2345678, "1.2345678");
+    TEST_DTOA(0.123456789012, "0.123456789012");
+    TEST_DTOA(1234567.8, "1234567.8");
+    TEST_DTOA(-79.39773355813419, "-79.39773355813419");
+    TEST_DTOA(0.000001, "0.000001");
+    TEST_DTOA(0.0000001, "1e-7");
+    TEST_DTOA(1e30, "1e30");
+    TEST_DTOA(1.234567890123456e30, "1.234567890123456e30");
+    TEST_DTOA(5e-324, "5e-324"); // Min subnormal positive double
+    TEST_DTOA(2.225073858507201e-308, "2.225073858507201e-308"); // Max subnormal positive double
+    TEST_DTOA(2.2250738585072014e-308, "2.2250738585072014e-308"); // Min normal positive double
+    TEST_DTOA(1.7976931348623157e308, "1.7976931348623157e308"); // Max double
+
+#undef TEST_DTOA
+}
+
+TEST(dtoa, maxDecimalPlaces) {
+    char buffer[30];
+
+#define TEST_DTOA(m, d, a)\
+    *dtoa(d, buffer, m) = '\0';\
+    EXPECT_STREQ(a, buffer)
+
+    TEST_DTOA(3, 0.0, "0.0");
+    TEST_DTOA(1, 0.0, "0.0");
+    TEST_DTOA(3, -0.0, "-0.0");
+    TEST_DTOA(3, 1.0, "1.0");
+    TEST_DTOA(3, -1.0, "-1.0");
+    TEST_DTOA(3, 1.2345, "1.234");
+    TEST_DTOA(2, 1.2345, "1.23");
+    TEST_DTOA(1, 1.2345, "1.2");
+    TEST_DTOA(3, 1.2345678, "1.234");
+    TEST_DTOA(3, 1.0001, "1.0");
+    TEST_DTOA(2, 1.0001, "1.0");
+    TEST_DTOA(1, 1.0001, "1.0");
+    TEST_DTOA(3, 0.123456789012, "0.123");
+    TEST_DTOA(2, 0.123456789012, "0.12");
+    TEST_DTOA(1, 0.123456789012, "0.1");
+    TEST_DTOA(4, 0.0001, "0.0001");
+    TEST_DTOA(3, 0.0001, "0.0");
+    TEST_DTOA(2, 0.0001, "0.0");
+    TEST_DTOA(1, 0.0001, "0.0");
+    TEST_DTOA(3, 1234567.8, "1234567.8");
+    TEST_DTOA(3, 1e30, "1e30");
+    TEST_DTOA(3, 5e-324, "0.0"); // Min subnormal positive double
+    TEST_DTOA(3, 2.225073858507201e-308, "0.0"); // Max subnormal positive double
+    TEST_DTOA(3, 2.2250738585072014e-308, "0.0"); // Min normal positive double
+    TEST_DTOA(3, 1.7976931348623157e308, "1.7976931348623157e308"); // Max double
+    TEST_DTOA(5, -0.14000000000000001, "-0.14");
+    TEST_DTOA(4, -0.14000000000000001, "-0.14");
+    TEST_DTOA(3, -0.14000000000000001, "-0.14");
+    TEST_DTOA(3, -0.10000000000000001, "-0.1");
+    TEST_DTOA(2, -0.10000000000000001, "-0.1");
+    TEST_DTOA(1, -0.10000000000000001, "-0.1");
+
+#undef TEST_DTOA
+}
+
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_POP
+#endif
diff --git a/test/unittest/encodedstreamtest.cpp b/test/unittest/encodedstreamtest.cpp
new file mode 100644
index 0000000..bc234d3
--- /dev/null
+++ b/test/unittest/encodedstreamtest.cpp
@@ -0,0 +1,313 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+#include "rapidjson/filereadstream.h"
+#include "rapidjson/filewritestream.h"
+#include "rapidjson/encodedstream.h"
+#include "rapidjson/stringbuffer.h"
+#include "rapidjson/memorystream.h"
+#include "rapidjson/memorybuffer.h"
+
+using namespace rapidjson;
+
+class EncodedStreamTest : public ::testing::Test {
+public:
+    EncodedStreamTest() : json_(), length_() {}
+    virtual ~EncodedStreamTest();
+
+    virtual void SetUp() {
+        json_ = ReadFile("utf8.json", true, &length_);
+    }
+
+    virtual void TearDown() {
+        free(json_);
+        json_ = 0;
+    }
+
+private:
+    EncodedStreamTest(const EncodedStreamTest&);
+    EncodedStreamTest& operator=(const EncodedStreamTest&);
+    
+protected:
+    static FILE* Open(const char* filename) {
+        const char *paths[] = {
+            "encodings",
+            "bin/encodings",
+            "../bin/encodings",
+            "../../bin/encodings",
+            "../../../bin/encodings"
+        };
+        char buffer[1024];
+        for (size_t i = 0; i < sizeof(paths) / sizeof(paths[0]); i++) {
+            sprintf(buffer, "%s/%s", paths[i], filename);
+            FILE *fp = fopen(buffer, "rb");
+            if (fp)
+                return fp;
+        }
+        return 0;
+    }
+
+    static char *ReadFile(const char* filename, bool appendPath, size_t* outLength) {
+        FILE *fp = appendPath ? Open(filename) : fopen(filename, "rb");
+
+        if (!fp) {
+            *outLength = 0;
+            return 0;
+        }
+
+        fseek(fp, 0, SEEK_END);
+        *outLength = static_cast<size_t>(ftell(fp));
+        fseek(fp, 0, SEEK_SET);
+        char* buffer = static_cast<char*>(malloc(*outLength + 1));
+        size_t readLength = fread(buffer, 1, *outLength, fp);
+        buffer[readLength] = '\0';
+        fclose(fp);
+        return buffer;
+    }
+
+    template <typename FileEncoding, typename MemoryEncoding>
+    void TestEncodedInputStream(const char* filename) {
+        // Test FileReadStream
+        {
+            char buffer[16];
+            FILE *fp = Open(filename);
+            ASSERT_TRUE(fp != 0);
+            FileReadStream fs(fp, buffer, sizeof(buffer));
+            EncodedInputStream<FileEncoding, FileReadStream> eis(fs);
+            StringStream s(json_);
+
+            while (eis.Peek() != '\0') {
+                unsigned expected, actual;
+                EXPECT_TRUE(UTF8<>::Decode(s, &expected));
+                EXPECT_TRUE(MemoryEncoding::Decode(eis, &actual));
+                EXPECT_EQ(expected, actual);
+            }
+            EXPECT_EQ('\0', s.Peek());
+            fclose(fp);
+        }
+
+        // Test MemoryStream
+        {
+            size_t size;
+            char* data = ReadFile(filename, true, &size);
+            MemoryStream ms(data, size);
+            EncodedInputStream<FileEncoding, MemoryStream> eis(ms);
+            StringStream s(json_);
+
+            while (eis.Peek() != '\0') {
+                unsigned expected, actual;
+                EXPECT_TRUE(UTF8<>::Decode(s, &expected));
+                EXPECT_TRUE(MemoryEncoding::Decode(eis, &actual));
+                EXPECT_EQ(expected, actual);
+            }
+            EXPECT_EQ('\0', s.Peek());
+            free(data);
+            EXPECT_EQ(size, eis.Tell());
+        }
+    }
+
+    void TestAutoUTFInputStream(const char *filename, bool expectHasBOM) {
+        // Test FileReadStream
+        {
+            char buffer[16];
+            FILE *fp = Open(filename);
+            ASSERT_TRUE(fp != 0);
+            FileReadStream fs(fp, buffer, sizeof(buffer));
+            AutoUTFInputStream<unsigned, FileReadStream> eis(fs);
+            EXPECT_EQ(expectHasBOM, eis.HasBOM());
+            StringStream s(json_);
+            while (eis.Peek() != '\0') {
+                unsigned expected, actual;
+                EXPECT_TRUE(UTF8<>::Decode(s, &expected));
+                EXPECT_TRUE(AutoUTF<unsigned>::Decode(eis, &actual));
+                EXPECT_EQ(expected, actual);
+            }
+            EXPECT_EQ('\0', s.Peek());
+            fclose(fp);
+        }
+
+        // Test MemoryStream
+        {
+            size_t size;
+            char* data = ReadFile(filename, true, &size);
+            MemoryStream ms(data, size);
+            AutoUTFInputStream<unsigned, MemoryStream> eis(ms);
+            EXPECT_EQ(expectHasBOM, eis.HasBOM());
+            StringStream s(json_);
+
+            while (eis.Peek() != '\0') {
+                unsigned expected, actual;
+                EXPECT_TRUE(UTF8<>::Decode(s, &expected));
+                EXPECT_TRUE(AutoUTF<unsigned>::Decode(eis, &actual));
+                EXPECT_EQ(expected, actual);
+            }
+            EXPECT_EQ('\0', s.Peek());
+            free(data);
+            EXPECT_EQ(size, eis.Tell());
+        }
+    }
+
+    template <typename FileEncoding, typename MemoryEncoding>
+    void TestEncodedOutputStream(const char* expectedFilename, bool putBOM) {
+        // Test FileWriteStream
+        {
+            char filename[L_tmpnam];
+            FILE* fp = TempFile(filename);
+            char buffer[16];
+            FileWriteStream os(fp, buffer, sizeof(buffer));
+            EncodedOutputStream<FileEncoding, FileWriteStream> eos(os, putBOM);
+            StringStream s(json_);
+            while (s.Peek() != '\0') {
+                bool success = Transcoder<UTF8<>, MemoryEncoding>::Transcode(s, eos);
+                EXPECT_TRUE(success);
+            }
+            eos.Flush();
+            fclose(fp);
+            EXPECT_TRUE(CompareFile(filename, expectedFilename));
+            remove(filename);
+        }
+
+        // Test MemoryBuffer
+        {
+            MemoryBuffer mb;
+            EncodedOutputStream<FileEncoding, MemoryBuffer> eos(mb, putBOM);
+            StringStream s(json_);
+            while (s.Peek() != '\0') {
+                bool success = Transcoder<UTF8<>, MemoryEncoding>::Transcode(s, eos);
+                EXPECT_TRUE(success);
+            }
+            eos.Flush();
+            EXPECT_TRUE(CompareBufferFile(mb.GetBuffer(), mb.GetSize(), expectedFilename));
+        }
+    }
+
+    void TestAutoUTFOutputStream(UTFType type, bool putBOM, const char *expectedFilename) {
+        // Test FileWriteStream
+        {
+            char filename[L_tmpnam];
+            FILE* fp = TempFile(filename);
+
+            char buffer[16];
+            FileWriteStream os(fp, buffer, sizeof(buffer));
+            AutoUTFOutputStream<unsigned, FileWriteStream> eos(os, type, putBOM);
+            StringStream s(json_);
+            while (s.Peek() != '\0') {
+                bool success = Transcoder<UTF8<>, AutoUTF<unsigned> >::Transcode(s, eos);
+                EXPECT_TRUE(success);
+            }
+            eos.Flush();
+            fclose(fp);
+            EXPECT_TRUE(CompareFile(filename, expectedFilename));
+            remove(filename);
+        }
+
+        // Test MemoryBuffer
+        {
+            MemoryBuffer mb;
+            AutoUTFOutputStream<unsigned, MemoryBuffer> eos(mb, type, putBOM);
+            StringStream s(json_);
+            while (s.Peek() != '\0') {
+                bool success = Transcoder<UTF8<>, AutoUTF<unsigned> >::Transcode(s, eos);
+                EXPECT_TRUE(success);
+            }
+            eos.Flush();
+            EXPECT_TRUE(CompareBufferFile(mb.GetBuffer(), mb.GetSize(), expectedFilename));
+        }
+    }
+
+    bool CompareFile(const char* filename, const char* expectedFilename) {
+        size_t actualLength, expectedLength;
+        char* actualBuffer = ReadFile(filename, false, &actualLength);
+        char* expectedBuffer = ReadFile(expectedFilename, true, &expectedLength);
+        bool ret = (expectedLength == actualLength) && memcmp(expectedBuffer, actualBuffer, actualLength) == 0;
+        free(actualBuffer);
+        free(expectedBuffer);
+        return ret;
+    }
+
+    bool CompareBufferFile(const char* actualBuffer, size_t actualLength, const char* expectedFilename) {
+        size_t expectedLength;
+        char* expectedBuffer = ReadFile(expectedFilename, true, &expectedLength);
+        bool ret = (expectedLength == actualLength) && memcmp(expectedBuffer, actualBuffer, actualLength) == 0;
+        free(expectedBuffer);
+        return ret;
+    }
+
+    char *json_;
+    size_t length_;
+};
+
+EncodedStreamTest::~EncodedStreamTest() {}
+
+TEST_F(EncodedStreamTest, EncodedInputStream) {
+    TestEncodedInputStream<UTF8<>,    UTF8<>  >("utf8.json");
+    TestEncodedInputStream<UTF8<>,    UTF8<>  >("utf8bom.json");
+    TestEncodedInputStream<UTF16LE<>, UTF16<> >("utf16le.json");
+    TestEncodedInputStream<UTF16LE<>, UTF16<> >("utf16lebom.json");
+    TestEncodedInputStream<UTF16BE<>, UTF16<> >("utf16be.json");
+    TestEncodedInputStream<UTF16BE<>, UTF16<> >("utf16bebom.json");
+    TestEncodedInputStream<UTF32LE<>, UTF32<> >("utf32le.json");
+    TestEncodedInputStream<UTF32LE<>, UTF32<> >("utf32lebom.json");
+    TestEncodedInputStream<UTF32BE<>, UTF32<> >("utf32be.json");
+    TestEncodedInputStream<UTF32BE<>, UTF32<> >("utf32bebom.json");
+}
+
+TEST_F(EncodedStreamTest, AutoUTFInputStream) {
+    TestAutoUTFInputStream("utf8.json",      false);
+    TestAutoUTFInputStream("utf8bom.json",   true);
+    TestAutoUTFInputStream("utf16le.json",   false);
+    TestAutoUTFInputStream("utf16lebom.json",true);
+    TestAutoUTFInputStream("utf16be.json",   false);
+    TestAutoUTFInputStream("utf16bebom.json",true);
+    TestAutoUTFInputStream("utf32le.json",   false);
+    TestAutoUTFInputStream("utf32lebom.json",true);
+    TestAutoUTFInputStream("utf32be.json",   false);
+    TestAutoUTFInputStream("utf32bebom.json", true);
+
+    {
+        // Auto detection fail, use user defined UTF type
+        const char json[] = "{ }";
+        MemoryStream ms(json, sizeof(json));
+        AutoUTFInputStream<unsigned, MemoryStream> eis(ms, kUTF8);
+        EXPECT_FALSE(eis.HasBOM());
+        EXPECT_EQ(kUTF8, eis.GetType());
+    }
+}
+
+TEST_F(EncodedStreamTest, EncodedOutputStream) {
+    TestEncodedOutputStream<UTF8<>,     UTF8<>  >("utf8.json",      false);
+    TestEncodedOutputStream<UTF8<>,     UTF8<>  >("utf8bom.json",   true);
+    TestEncodedOutputStream<UTF16LE<>,  UTF16<> >("utf16le.json",   false);
+    TestEncodedOutputStream<UTF16LE<>,  UTF16<> >("utf16lebom.json",true);
+    TestEncodedOutputStream<UTF16BE<>,  UTF16<> >("utf16be.json",   false);
+    TestEncodedOutputStream<UTF16BE<>,  UTF16<> >("utf16bebom.json",true);
+    TestEncodedOutputStream<UTF32LE<>,  UTF32<> >("utf32le.json",   false);
+    TestEncodedOutputStream<UTF32LE<>,  UTF32<> >("utf32lebom.json",true);
+    TestEncodedOutputStream<UTF32BE<>,  UTF32<> >("utf32be.json",   false);
+    TestEncodedOutputStream<UTF32BE<>,  UTF32<> >("utf32bebom.json",true);
+}
+
+TEST_F(EncodedStreamTest, AutoUTFOutputStream) {
+    TestAutoUTFOutputStream(kUTF8,      false,  "utf8.json");
+    TestAutoUTFOutputStream(kUTF8,      true,   "utf8bom.json");
+    TestAutoUTFOutputStream(kUTF16LE,   false,  "utf16le.json");
+    TestAutoUTFOutputStream(kUTF16LE,   true,   "utf16lebom.json");
+    TestAutoUTFOutputStream(kUTF16BE,   false,  "utf16be.json");
+    TestAutoUTFOutputStream(kUTF16BE,   true,   "utf16bebom.json");
+    TestAutoUTFOutputStream(kUTF32LE,   false,  "utf32le.json");
+    TestAutoUTFOutputStream(kUTF32LE,   true,   "utf32lebom.json");
+    TestAutoUTFOutputStream(kUTF32BE,   false,  "utf32be.json");
+    TestAutoUTFOutputStream(kUTF32BE,   true,   "utf32bebom.json");
+}
diff --git a/test/unittest/encodingstest.cpp b/test/unittest/encodingstest.cpp
new file mode 100644
index 0000000..82cf777
--- /dev/null
+++ b/test/unittest/encodingstest.cpp
@@ -0,0 +1,451 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+#include "rapidjson/filereadstream.h"
+#include "rapidjson/filewritestream.h"
+#include "rapidjson/encodedstream.h"
+#include "rapidjson/stringbuffer.h"
+
+using namespace rapidjson;
+
+// Verification of encoders/decoders with Hoehrmann's UTF8 decoder
+
+// http://www.unicode.org/Public/UNIDATA/Blocks.txt
+static const unsigned kCodepointRanges[] = {
+    0x0000,     0x007F,     // Basic Latin
+    0x0080,     0x00FF,     // Latin-1 Supplement
+    0x0100,     0x017F,     // Latin Extended-A
+    0x0180,     0x024F,     // Latin Extended-B
+    0x0250,     0x02AF,     // IPA Extensions
+    0x02B0,     0x02FF,     // Spacing Modifier Letters
+    0x0300,     0x036F,     // Combining Diacritical Marks
+    0x0370,     0x03FF,     // Greek and Coptic
+    0x0400,     0x04FF,     // Cyrillic
+    0x0500,     0x052F,     // Cyrillic Supplement
+    0x0530,     0x058F,     // Armenian
+    0x0590,     0x05FF,     // Hebrew
+    0x0600,     0x06FF,     // Arabic
+    0x0700,     0x074F,     // Syriac
+    0x0750,     0x077F,     // Arabic Supplement
+    0x0780,     0x07BF,     // Thaana
+    0x07C0,     0x07FF,     // NKo
+    0x0800,     0x083F,     // Samaritan
+    0x0840,     0x085F,     // Mandaic
+    0x0900,     0x097F,     // Devanagari
+    0x0980,     0x09FF,     // Bengali
+    0x0A00,     0x0A7F,     // Gurmukhi
+    0x0A80,     0x0AFF,     // Gujarati
+    0x0B00,     0x0B7F,     // Oriya
+    0x0B80,     0x0BFF,     // Tamil
+    0x0C00,     0x0C7F,     // Telugu
+    0x0C80,     0x0CFF,     // Kannada
+    0x0D00,     0x0D7F,     // Malayalam
+    0x0D80,     0x0DFF,     // Sinhala
+    0x0E00,     0x0E7F,     // Thai
+    0x0E80,     0x0EFF,     // Lao
+    0x0F00,     0x0FFF,     // Tibetan
+    0x1000,     0x109F,     // Myanmar
+    0x10A0,     0x10FF,     // Georgian
+    0x1100,     0x11FF,     // Hangul Jamo
+    0x1200,     0x137F,     // Ethiopic
+    0x1380,     0x139F,     // Ethiopic Supplement
+    0x13A0,     0x13FF,     // Cherokee
+    0x1400,     0x167F,     // Unified Canadian Aboriginal Syllabics
+    0x1680,     0x169F,     // Ogham
+    0x16A0,     0x16FF,     // Runic
+    0x1700,     0x171F,     // Tagalog
+    0x1720,     0x173F,     // Hanunoo
+    0x1740,     0x175F,     // Buhid
+    0x1760,     0x177F,     // Tagbanwa
+    0x1780,     0x17FF,     // Khmer
+    0x1800,     0x18AF,     // Mongolian
+    0x18B0,     0x18FF,     // Unified Canadian Aboriginal Syllabics Extended
+    0x1900,     0x194F,     // Limbu
+    0x1950,     0x197F,     // Tai Le
+    0x1980,     0x19DF,     // New Tai Lue
+    0x19E0,     0x19FF,     // Khmer Symbols
+    0x1A00,     0x1A1F,     // Buginese
+    0x1A20,     0x1AAF,     // Tai Tham
+    0x1B00,     0x1B7F,     // Balinese
+    0x1B80,     0x1BBF,     // Sundanese
+    0x1BC0,     0x1BFF,     // Batak
+    0x1C00,     0x1C4F,     // Lepcha
+    0x1C50,     0x1C7F,     // Ol Chiki
+    0x1CD0,     0x1CFF,     // Vedic Extensions
+    0x1D00,     0x1D7F,     // Phonetic Extensions
+    0x1D80,     0x1DBF,     // Phonetic Extensions Supplement
+    0x1DC0,     0x1DFF,     // Combining Diacritical Marks Supplement
+    0x1E00,     0x1EFF,     // Latin Extended Additional
+    0x1F00,     0x1FFF,     // Greek Extended
+    0x2000,     0x206F,     // General Punctuation
+    0x2070,     0x209F,     // Superscripts and Subscripts
+    0x20A0,     0x20CF,     // Currency Symbols
+    0x20D0,     0x20FF,     // Combining Diacritical Marks for Symbols
+    0x2100,     0x214F,     // Letterlike Symbols
+    0x2150,     0x218F,     // Number Forms
+    0x2190,     0x21FF,     // Arrows
+    0x2200,     0x22FF,     // Mathematical Operators
+    0x2300,     0x23FF,     // Miscellaneous Technical
+    0x2400,     0x243F,     // Control Pictures
+    0x2440,     0x245F,     // Optical Character Recognition
+    0x2460,     0x24FF,     // Enclosed Alphanumerics
+    0x2500,     0x257F,     // Box Drawing
+    0x2580,     0x259F,     // Block Elements
+    0x25A0,     0x25FF,     // Geometric Shapes
+    0x2600,     0x26FF,     // Miscellaneous Symbols
+    0x2700,     0x27BF,     // Dingbats
+    0x27C0,     0x27EF,     // Miscellaneous Mathematical Symbols-A
+    0x27F0,     0x27FF,     // Supplemental Arrows-A
+    0x2800,     0x28FF,     // Braille Patterns
+    0x2900,     0x297F,     // Supplemental Arrows-B
+    0x2980,     0x29FF,     // Miscellaneous Mathematical Symbols-B
+    0x2A00,     0x2AFF,     // Supplemental Mathematical Operators
+    0x2B00,     0x2BFF,     // Miscellaneous Symbols and Arrows
+    0x2C00,     0x2C5F,     // Glagolitic
+    0x2C60,     0x2C7F,     // Latin Extended-C
+    0x2C80,     0x2CFF,     // Coptic
+    0x2D00,     0x2D2F,     // Georgian Supplement
+    0x2D30,     0x2D7F,     // Tifinagh
+    0x2D80,     0x2DDF,     // Ethiopic Extended
+    0x2DE0,     0x2DFF,     // Cyrillic Extended-A
+    0x2E00,     0x2E7F,     // Supplemental Punctuation
+    0x2E80,     0x2EFF,     // CJK Radicals Supplement
+    0x2F00,     0x2FDF,     // Kangxi Radicals
+    0x2FF0,     0x2FFF,     // Ideographic Description Characters
+    0x3000,     0x303F,     // CJK Symbols and Punctuation
+    0x3040,     0x309F,     // Hiragana
+    0x30A0,     0x30FF,     // Katakana
+    0x3100,     0x312F,     // Bopomofo
+    0x3130,     0x318F,     // Hangul Compatibility Jamo
+    0x3190,     0x319F,     // Kanbun
+    0x31A0,     0x31BF,     // Bopomofo Extended
+    0x31C0,     0x31EF,     // CJK Strokes
+    0x31F0,     0x31FF,     // Katakana Phonetic Extensions
+    0x3200,     0x32FF,     // Enclosed CJK Letters and Months
+    0x3300,     0x33FF,     // CJK Compatibility
+    0x3400,     0x4DBF,     // CJK Unified Ideographs Extension A
+    0x4DC0,     0x4DFF,     // Yijing Hexagram Symbols
+    0x4E00,     0x9FFF,     // CJK Unified Ideographs
+    0xA000,     0xA48F,     // Yi Syllables
+    0xA490,     0xA4CF,     // Yi Radicals
+    0xA4D0,     0xA4FF,     // Lisu
+    0xA500,     0xA63F,     // Vai
+    0xA640,     0xA69F,     // Cyrillic Extended-B
+    0xA6A0,     0xA6FF,     // Bamum
+    0xA700,     0xA71F,     // Modifier Tone Letters
+    0xA720,     0xA7FF,     // Latin Extended-D
+    0xA800,     0xA82F,     // Syloti Nagri
+    0xA830,     0xA83F,     // Common Indic Number Forms
+    0xA840,     0xA87F,     // Phags-pa
+    0xA880,     0xA8DF,     // Saurashtra
+    0xA8E0,     0xA8FF,     // Devanagari Extended
+    0xA900,     0xA92F,     // Kayah Li
+    0xA930,     0xA95F,     // Rejang
+    0xA960,     0xA97F,     // Hangul Jamo Extended-A
+    0xA980,     0xA9DF,     // Javanese
+    0xAA00,     0xAA5F,     // Cham
+    0xAA60,     0xAA7F,     // Myanmar Extended-A
+    0xAA80,     0xAADF,     // Tai Viet
+    0xAB00,     0xAB2F,     // Ethiopic Extended-A
+    0xABC0,     0xABFF,     // Meetei Mayek
+    0xAC00,     0xD7AF,     // Hangul Syllables
+    0xD7B0,     0xD7FF,     // Hangul Jamo Extended-B
+    //0xD800,       0xDB7F,     // High Surrogates
+    //0xDB80,       0xDBFF,     // High Private Use Surrogates
+    //0xDC00,       0xDFFF,     // Low Surrogates
+    0xE000,     0xF8FF,     // Private Use Area
+    0xF900,     0xFAFF,     // CJK Compatibility Ideographs
+    0xFB00,     0xFB4F,     // Alphabetic Presentation Forms
+    0xFB50,     0xFDFF,     // Arabic Presentation Forms-A
+    0xFE00,     0xFE0F,     // Variation Selectors
+    0xFE10,     0xFE1F,     // Vertical Forms
+    0xFE20,     0xFE2F,     // Combining Half Marks
+    0xFE30,     0xFE4F,     // CJK Compatibility Forms
+    0xFE50,     0xFE6F,     // Small Form Variants
+    0xFE70,     0xFEFF,     // Arabic Presentation Forms-B
+    0xFF00,     0xFFEF,     // Halfwidth and Fullwidth Forms
+    0xFFF0,     0xFFFF,     // Specials
+    0x10000,    0x1007F,    // Linear B Syllabary
+    0x10080,    0x100FF,    // Linear B Ideograms
+    0x10100,    0x1013F,    // Aegean Numbers
+    0x10140,    0x1018F,    // Ancient Greek Numbers
+    0x10190,    0x101CF,    // Ancient Symbols
+    0x101D0,    0x101FF,    // Phaistos Disc
+    0x10280,    0x1029F,    // Lycian
+    0x102A0,    0x102DF,    // Carian
+    0x10300,    0x1032F,    // Old Italic
+    0x10330,    0x1034F,    // Gothic
+    0x10380,    0x1039F,    // Ugaritic
+    0x103A0,    0x103DF,    // Old Persian
+    0x10400,    0x1044F,    // Deseret
+    0x10450,    0x1047F,    // Shavian
+    0x10480,    0x104AF,    // Osmanya
+    0x10800,    0x1083F,    // Cypriot Syllabary
+    0x10840,    0x1085F,    // Imperial Aramaic
+    0x10900,    0x1091F,    // Phoenician
+    0x10920,    0x1093F,    // Lydian
+    0x10A00,    0x10A5F,    // Kharoshthi
+    0x10A60,    0x10A7F,    // Old South Arabian
+    0x10B00,    0x10B3F,    // Avestan
+    0x10B40,    0x10B5F,    // Inscriptional Parthian
+    0x10B60,    0x10B7F,    // Inscriptional Pahlavi
+    0x10C00,    0x10C4F,    // Old Turkic
+    0x10E60,    0x10E7F,    // Rumi Numeral Symbols
+    0x11000,    0x1107F,    // Brahmi
+    0x11080,    0x110CF,    // Kaithi
+    0x12000,    0x123FF,    // Cuneiform
+    0x12400,    0x1247F,    // Cuneiform Numbers and Punctuation
+    0x13000,    0x1342F,    // Egyptian Hieroglyphs
+    0x16800,    0x16A3F,    // Bamum Supplement
+    0x1B000,    0x1B0FF,    // Kana Supplement
+    0x1D000,    0x1D0FF,    // Byzantine Musical Symbols
+    0x1D100,    0x1D1FF,    // Musical Symbols
+    0x1D200,    0x1D24F,    // Ancient Greek Musical Notation
+    0x1D300,    0x1D35F,    // Tai Xuan Jing Symbols
+    0x1D360,    0x1D37F,    // Counting Rod Numerals
+    0x1D400,    0x1D7FF,    // Mathematical Alphanumeric Symbols
+    0x1F000,    0x1F02F,    // Mahjong Tiles
+    0x1F030,    0x1F09F,    // Domino Tiles
+    0x1F0A0,    0x1F0FF,    // Playing Cards
+    0x1F100,    0x1F1FF,    // Enclosed Alphanumeric Supplement
+    0x1F200,    0x1F2FF,    // Enclosed Ideographic Supplement
+    0x1F300,    0x1F5FF,    // Miscellaneous Symbols And Pictographs
+    0x1F600,    0x1F64F,    // Emoticons
+    0x1F680,    0x1F6FF,    // Transport And Map Symbols
+    0x1F700,    0x1F77F,    // Alchemical Symbols
+    0x20000,    0x2A6DF,    // CJK Unified Ideographs Extension B
+    0x2A700,    0x2B73F,    // CJK Unified Ideographs Extension C
+    0x2B740,    0x2B81F,    // CJK Unified Ideographs Extension D
+    0x2F800,    0x2FA1F,    // CJK Compatibility Ideographs Supplement
+    0xE0000,    0xE007F,    // Tags
+    0xE0100,    0xE01EF,    // Variation Selectors Supplement
+    0xF0000,    0xFFFFF,    // Supplementary Private Use Area-A
+    0x100000,   0x10FFFF,   // Supplementary Private Use Area-B
+    0xFFFFFFFF
+};
+
+// Copyright (c) 2008-2010 Bjoern Hoehrmann <bjoern@hoehrmann.de>
+// See http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ for details.
+
+#define UTF8_ACCEPT 0u
+
+static const unsigned char utf8d[] = {
+    // The first part of the table maps bytes to character classes, which
+    // reduces the size of the transition table and creates bitmasks.
+    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+    1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,  9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+    7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+    8,8,2,2,2,2,2,2,2,2,2,2,2,2,2,2,  2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
+    10,3,3,3,3,3,3,3,3,3,3,3,3,4,3,3, 11,6,6,6,5,8,8,8,8,8,8,8,8,8,8,8,
+
+    // The second part is a transition table that maps a combination
+    // of a state of the automaton and a character class to a state.
+    0,12,24,36,60,96,84,12,12,12,48,72, 12,12,12,12,12,12,12,12,12,12,12,12,
+    12, 0,12,12,12,12,12, 0,12, 0,12,12, 12,24,12,12,12,12,12,24,12,24,12,12,
+    12,12,12,12,12,12,12,24,12,12,12,12, 12,24,12,12,12,12,12,12,12,24,12,12,
+    12,12,12,12,12,12,12,36,12,36,12,12, 12,36,12,12,12,12,12,36,12,36,12,12,
+    12,36,12,12,12,12,12,12,12,12,12,12, 
+};
+
+static unsigned inline decode(unsigned* state, unsigned* codep, unsigned byte) {
+    unsigned type = utf8d[byte];
+
+    *codep = (*state != UTF8_ACCEPT) ?
+        (byte & 0x3fu) | (*codep << 6) :
+    (0xffu >> type) & (byte);
+
+    *state = utf8d[256 + *state + type];
+    return *state;
+}
+
+//static bool IsUTF8(unsigned char* s) {
+//  unsigned codepoint, state = 0;
+//
+//  while (*s)
+//      decode(&state, &codepoint, *s++);
+//
+//  return state == UTF8_ACCEPT;
+//}
+
+TEST(EncodingsTest, UTF8) {
+    StringBuffer os, os2;
+    for (const unsigned* range = kCodepointRanges; *range != 0xFFFFFFFF; range += 2) {
+        for (unsigned codepoint = range[0]; codepoint <= range[1]; ++codepoint) {
+            os.Clear();
+            UTF8<>::Encode(os, codepoint);
+            const char* encodedStr = os.GetString();
+
+            // Decode with Hoehrmann
+            {
+                unsigned decodedCodepoint = 0;
+                unsigned state = 0;
+
+                unsigned decodedCount = 0;
+                for (const char* s = encodedStr; *s; ++s)
+                    if (!decode(&state, &decodedCodepoint, static_cast<unsigned char>(*s))) {
+                        EXPECT_EQ(codepoint, decodedCodepoint);
+                        decodedCount++;
+                    }
+
+                if (*encodedStr) {                  // This decoder cannot handle U+0000
+                    EXPECT_EQ(1u, decodedCount);    // Should only contain one code point
+                }
+
+                EXPECT_EQ(UTF8_ACCEPT, state);
+                if (UTF8_ACCEPT != state)
+                    std::cout << std::hex << codepoint << " " << decodedCodepoint << std::endl;
+            }
+
+            // Decode
+            {
+                StringStream is(encodedStr);
+                unsigned decodedCodepoint;
+                bool result = UTF8<>::Decode(is, &decodedCodepoint);
+                EXPECT_TRUE(result);
+                EXPECT_EQ(codepoint, decodedCodepoint);
+                if (!result || codepoint != decodedCodepoint)
+                    std::cout << std::hex << codepoint << " " << decodedCodepoint << std::endl;
+            }
+
+            // Validate
+            {
+                StringStream is(encodedStr);
+                os2.Clear();
+                bool result = UTF8<>::Validate(is, os2);
+                EXPECT_TRUE(result);
+                EXPECT_EQ(0, StrCmp(encodedStr, os2.GetString()));
+            }
+        }
+    }
+}
+
+TEST(EncodingsTest, UTF16) {
+    GenericStringBuffer<UTF16<> > os, os2;
+    GenericStringBuffer<UTF8<> > utf8os;
+    for (const unsigned* range = kCodepointRanges; *range != 0xFFFFFFFF; range += 2) {
+        for (unsigned codepoint = range[0]; codepoint <= range[1]; ++codepoint) {
+            os.Clear();
+            UTF16<>::Encode(os, codepoint);
+            const UTF16<>::Ch* encodedStr = os.GetString();
+
+            // Encode with Hoehrmann's code
+            if (codepoint != 0) // cannot handle U+0000
+            {
+                // encode with UTF8<> first
+                utf8os.Clear();
+                UTF8<>::Encode(utf8os, codepoint);
+
+                // transcode from UTF8 to UTF16 with Hoehrmann's code
+                unsigned decodedCodepoint = 0;
+                unsigned state = 0;
+                UTF16<>::Ch buffer[3], *p = &buffer[0];
+                for (const char* s = utf8os.GetString(); *s; ++s) {
+                    if (!decode(&state, &decodedCodepoint, static_cast<unsigned char>(*s)))
+                        break;
+                }
+
+                if (codepoint <= 0xFFFF)
+                    *p++ = static_cast<UTF16<>::Ch>(decodedCodepoint);
+                else {
+                    // Encode code points above U+FFFF as surrogate pair.
+                    *p++ = static_cast<UTF16<>::Ch>(0xD7C0 + (decodedCodepoint >> 10));
+                    *p++ = static_cast<UTF16<>::Ch>(0xDC00 + (decodedCodepoint & 0x3FF));
+                }
+                *p++ = '\0';
+
+                EXPECT_EQ(0, StrCmp(buffer, encodedStr));
+            }
+
+            // Decode
+            {
+                GenericStringStream<UTF16<> > is(encodedStr);
+                unsigned decodedCodepoint;
+                bool result = UTF16<>::Decode(is, &decodedCodepoint);
+                EXPECT_TRUE(result);
+                EXPECT_EQ(codepoint, decodedCodepoint);         
+                if (!result || codepoint != decodedCodepoint)
+                    std::cout << std::hex << codepoint << " " << decodedCodepoint << std::endl;
+            }
+
+            // Validate
+            {
+                GenericStringStream<UTF16<> > is(encodedStr);
+                os2.Clear();
+                bool result = UTF16<>::Validate(is, os2);
+                EXPECT_TRUE(result);
+                EXPECT_EQ(0, StrCmp(encodedStr, os2.GetString()));
+            }
+        }
+    }
+}
+
+TEST(EncodingsTest, UTF32) {
+    GenericStringBuffer<UTF32<> > os, os2;
+    for (const unsigned* range = kCodepointRanges; *range != 0xFFFFFFFF; range += 2) {
+        for (unsigned codepoint = range[0]; codepoint <= range[1]; ++codepoint) {
+            os.Clear();
+            UTF32<>::Encode(os, codepoint);
+            const UTF32<>::Ch* encodedStr = os.GetString();
+
+            // Decode
+            {
+                GenericStringStream<UTF32<> > is(encodedStr);
+                unsigned decodedCodepoint;
+                bool result = UTF32<>::Decode(is, &decodedCodepoint);
+                EXPECT_TRUE(result);
+                EXPECT_EQ(codepoint, decodedCodepoint);         
+                if (!result || codepoint != decodedCodepoint)
+                    std::cout << std::hex << codepoint << " " << decodedCodepoint << std::endl;
+            }
+
+            // Validate
+            {
+                GenericStringStream<UTF32<> > is(encodedStr);
+                os2.Clear();
+                bool result = UTF32<>::Validate(is, os2);
+                EXPECT_TRUE(result);
+                EXPECT_EQ(0, StrCmp(encodedStr, os2.GetString()));
+            }
+        }
+    }
+}
+
+TEST(EncodingsTest, ASCII) {
+    StringBuffer os, os2;
+    for (unsigned codepoint = 0; codepoint < 128; codepoint++) {
+        os.Clear();
+        ASCII<>::Encode(os, codepoint);
+        const ASCII<>::Ch* encodedStr = os.GetString();
+        {
+            StringStream is(encodedStr);
+            unsigned decodedCodepoint;
+            bool result = ASCII<>::Decode(is, &decodedCodepoint);
+            if (!result || codepoint != decodedCodepoint)
+                std::cout << std::hex << codepoint << " " << decodedCodepoint << std::endl;
+        }
+
+        // Validate
+        {
+            StringStream is(encodedStr);
+            os2.Clear();
+            bool result = ASCII<>::Validate(is, os2);
+            EXPECT_TRUE(result);
+            EXPECT_EQ(0, StrCmp(encodedStr, os2.GetString()));
+        }
+    }
+}
diff --git a/test/unittest/filestreamtest.cpp b/test/unittest/filestreamtest.cpp
new file mode 100644
index 0000000..0e243ab
--- /dev/null
+++ b/test/unittest/filestreamtest.cpp
@@ -0,0 +1,155 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+#include "rapidjson/filereadstream.h"
+#include "rapidjson/filewritestream.h"
+#include "rapidjson/encodedstream.h"
+
+using namespace rapidjson;
+
+class FileStreamTest : public ::testing::Test {
+public:
+    FileStreamTest() : filename_(), json_(), length_(), abcde_() {}
+    virtual ~FileStreamTest();
+
+    virtual void SetUp() {
+        const char *paths[] = {
+            "data/sample.json",
+            "bin/data/sample.json",
+            "../bin/data/sample.json",
+            "../../bin/data/sample.json",
+            "../../../bin/data/sample.json"
+        };
+        FILE* fp = 0;
+        for (size_t i = 0; i < sizeof(paths) / sizeof(paths[0]); i++) {
+            fp = fopen(paths[i], "rb");
+            if (fp) {
+                filename_ = paths[i];
+                break;
+            }
+        }
+        ASSERT_TRUE(fp != 0);
+
+        fseek(fp, 0, SEEK_END);
+        length_ = static_cast<size_t>(ftell(fp));
+        fseek(fp, 0, SEEK_SET);
+        json_ = static_cast<char*>(malloc(length_ + 1));
+        size_t readLength = fread(json_, 1, length_, fp);
+        json_[readLength] = '\0';
+        fclose(fp);
+
+        const char *abcde_paths[] = {
+            "data/abcde.txt",
+            "bin/data/abcde.txt",
+            "../bin/data/abcde.txt",
+            "../../bin/data/abcde.txt",
+            "../../../bin/data/abcde.txt"
+        };
+        fp = 0;
+        for (size_t i = 0; i < sizeof(abcde_paths) / sizeof(abcde_paths[0]); i++) {
+            fp = fopen(abcde_paths[i], "rb");
+            if (fp) {
+                abcde_ = abcde_paths[i];
+                break;
+            }
+        }
+        ASSERT_TRUE(fp != 0);
+        fclose(fp);
+    }
+
+    virtual void TearDown() {
+        free(json_);
+        json_ = 0;
+    }
+
+private:
+    FileStreamTest(const FileStreamTest&);
+    FileStreamTest& operator=(const FileStreamTest&);
+    
+protected:
+    const char* filename_;
+    char *json_;
+    size_t length_;
+    const char* abcde_;
+};
+
+FileStreamTest::~FileStreamTest() {}
+
+TEST_F(FileStreamTest, FileReadStream) {
+    FILE *fp = fopen(filename_, "rb");
+    ASSERT_TRUE(fp != 0);
+    char buffer[65536];
+    FileReadStream s(fp, buffer, sizeof(buffer));
+
+    for (size_t i = 0; i < length_; i++) {
+        EXPECT_EQ(json_[i], s.Peek());
+        EXPECT_EQ(json_[i], s.Peek());  // 2nd time should be the same
+        EXPECT_EQ(json_[i], s.Take());
+    }
+
+    EXPECT_EQ(length_, s.Tell());
+    EXPECT_EQ('\0', s.Peek());
+
+    fclose(fp);
+}
+
+TEST_F(FileStreamTest, FileReadStream_Peek4) {
+    FILE *fp = fopen(abcde_, "rb");
+    ASSERT_TRUE(fp != 0);
+    char buffer[4];
+    FileReadStream s(fp, buffer, sizeof(buffer));
+
+    const char* c = s.Peek4();
+    for (int i = 0; i < 4; i++)
+        EXPECT_EQ('a' + i, c[i]);
+    EXPECT_EQ(0u, s.Tell());
+
+    for (int i = 0; i < 5; i++) {
+        EXPECT_EQ(static_cast<size_t>(i), s.Tell());
+        EXPECT_EQ('a' + i, s.Peek());
+        EXPECT_EQ('a' + i, s.Peek());
+        EXPECT_EQ('a' + i, s.Take());
+    }
+    EXPECT_EQ(5u, s.Tell());
+    EXPECT_EQ(0, s.Peek());
+    EXPECT_EQ(0, s.Take());
+
+    fclose(fp);
+}
+
+TEST_F(FileStreamTest, FileWriteStream) {
+    char filename[L_tmpnam];
+    FILE* fp = TempFile(filename);
+
+    char buffer[65536];
+    FileWriteStream os(fp, buffer, sizeof(buffer));
+    for (size_t i = 0; i < length_; i++)
+        os.Put(json_[i]);
+    os.Flush();
+    fclose(fp);
+
+    // Read it back to verify
+    fp = fopen(filename, "rb");
+    FileReadStream is(fp, buffer, sizeof(buffer));
+
+    for (size_t i = 0; i < length_; i++)
+        EXPECT_EQ(json_[i], is.Take());
+
+    EXPECT_EQ(length_, is.Tell());
+    fclose(fp);
+
+    //std::cout << filename << std::endl;
+    remove(filename);
+}
diff --git a/test/unittest/fwdtest.cpp b/test/unittest/fwdtest.cpp
new file mode 100644
index 0000000..1936d97
--- /dev/null
+++ b/test/unittest/fwdtest.cpp
@@ -0,0 +1,230 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+
+// Using forward declared types here.
+
+#include "rapidjson/fwd.h"
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(effc++)
+#endif
+
+using namespace rapidjson;
+
+struct Foo {
+    Foo();
+    ~Foo();
+
+    // encodings.h
+    UTF8<char>* utf8;
+    UTF16<wchar_t>* utf16;
+    UTF16BE<wchar_t>* utf16be;
+    UTF16LE<wchar_t>* utf16le;
+    UTF32<unsigned>* utf32;
+    UTF32BE<unsigned>* utf32be;
+    UTF32LE<unsigned>* utf32le;
+    ASCII<char>* ascii;
+    AutoUTF<unsigned>* autoutf;
+    Transcoder<UTF8<char>, UTF8<char> >* transcoder;
+
+    // allocators.h
+    CrtAllocator* crtallocator;
+    MemoryPoolAllocator<CrtAllocator>* memorypoolallocator;
+
+    // stream.h
+    StringStream* stringstream;
+    InsituStringStream* insitustringstream;
+
+    // stringbuffer.h
+    StringBuffer* stringbuffer;
+
+    // // filereadstream.h
+    // FileReadStream* filereadstream;
+
+    // // filewritestream.h
+    // FileWriteStream* filewritestream;
+
+    // memorybuffer.h
+    MemoryBuffer* memorybuffer;
+
+    // memorystream.h
+    MemoryStream* memorystream;
+
+    // reader.h
+    BaseReaderHandler<UTF8<char>, void>* basereaderhandler;
+    Reader* reader;
+
+    // writer.h
+    Writer<StringBuffer, UTF8<char>, UTF8<char>, CrtAllocator, 0>* writer;
+
+    // prettywriter.h
+    PrettyWriter<StringBuffer, UTF8<char>, UTF8<char>, CrtAllocator, 0>* prettywriter;
+
+    // document.h
+    Value* value;
+    Document* document;
+
+    // pointer.h
+    Pointer* pointer;
+
+    // schema.h
+    SchemaDocument* schemadocument;
+    SchemaValidator* schemavalidator;
+
+    // char buffer[16];
+};
+
+// Using type definitions here.
+
+#include "rapidjson/stringbuffer.h"
+#include "rapidjson/filereadstream.h"
+#include "rapidjson/filewritestream.h"
+#include "rapidjson/memorybuffer.h"
+#include "rapidjson/memorystream.h"
+#include "rapidjson/document.h" // -> reader.h
+#include "rapidjson/writer.h"
+#include "rapidjson/prettywriter.h"
+#include "rapidjson/schema.h"   // -> pointer.h
+
+typedef Transcoder<UTF8<>, UTF8<> > TranscoderUtf8ToUtf8;
+typedef BaseReaderHandler<UTF8<>, void> BaseReaderHandlerUtf8Void;
+
+Foo::Foo() : 
+    // encodings.h
+    utf8(RAPIDJSON_NEW(UTF8<>)),
+    utf16(RAPIDJSON_NEW(UTF16<>)),
+    utf16be(RAPIDJSON_NEW(UTF16BE<>)),
+    utf16le(RAPIDJSON_NEW(UTF16LE<>)),
+    utf32(RAPIDJSON_NEW(UTF32<>)),
+    utf32be(RAPIDJSON_NEW(UTF32BE<>)),
+    utf32le(RAPIDJSON_NEW(UTF32LE<>)),
+    ascii(RAPIDJSON_NEW(ASCII<>)),
+    autoutf(RAPIDJSON_NEW(AutoUTF<unsigned>)),
+    transcoder(RAPIDJSON_NEW(TranscoderUtf8ToUtf8)),
+
+    // allocators.h
+    crtallocator(RAPIDJSON_NEW(CrtAllocator)),
+    memorypoolallocator(RAPIDJSON_NEW(MemoryPoolAllocator<>)),
+
+    // stream.h
+    stringstream(RAPIDJSON_NEW(StringStream)(NULL)),
+    insitustringstream(RAPIDJSON_NEW(InsituStringStream)(NULL)),
+
+    // stringbuffer.h
+    stringbuffer(RAPIDJSON_NEW(StringBuffer)),
+
+    // // filereadstream.h
+    // filereadstream(RAPIDJSON_NEW(FileReadStream)(stdout, buffer, sizeof(buffer))),
+
+    // // filewritestream.h
+    // filewritestream(RAPIDJSON_NEW(FileWriteStream)(stdout, buffer, sizeof(buffer))),
+
+    // memorybuffer.h
+    memorybuffer(RAPIDJSON_NEW(MemoryBuffer)),
+
+    // memorystream.h
+    memorystream(RAPIDJSON_NEW(MemoryStream)(NULL, 0)),
+
+    // reader.h
+    basereaderhandler(RAPIDJSON_NEW(BaseReaderHandlerUtf8Void)),
+    reader(RAPIDJSON_NEW(Reader)),
+
+    // writer.h
+    writer(RAPIDJSON_NEW(Writer<StringBuffer>)),
+
+    // prettywriter.h
+    prettywriter(RAPIDJSON_NEW(PrettyWriter<StringBuffer>)),
+
+    // document.h
+    value(RAPIDJSON_NEW(Value)),
+    document(RAPIDJSON_NEW(Document)),
+
+    // pointer.h
+    pointer(RAPIDJSON_NEW(Pointer)),
+
+    // schema.h
+    schemadocument(RAPIDJSON_NEW(SchemaDocument)(*document)),
+    schemavalidator(RAPIDJSON_NEW(SchemaValidator)(*schemadocument))
+{
+
+}
+
+Foo::~Foo() {
+    // encodings.h
+    RAPIDJSON_DELETE(utf8);
+    RAPIDJSON_DELETE(utf16);
+    RAPIDJSON_DELETE(utf16be);
+    RAPIDJSON_DELETE(utf16le);
+    RAPIDJSON_DELETE(utf32);
+    RAPIDJSON_DELETE(utf32be);
+    RAPIDJSON_DELETE(utf32le);
+    RAPIDJSON_DELETE(ascii);
+    RAPIDJSON_DELETE(autoutf);
+    RAPIDJSON_DELETE(transcoder);
+
+    // allocators.h
+    RAPIDJSON_DELETE(crtallocator);
+    RAPIDJSON_DELETE(memorypoolallocator);
+
+    // stream.h
+    RAPIDJSON_DELETE(stringstream);
+    RAPIDJSON_DELETE(insitustringstream);
+
+    // stringbuffer.h
+    RAPIDJSON_DELETE(stringbuffer);
+
+    // // filereadstream.h
+    // RAPIDJSON_DELETE(filereadstream);
+
+    // // filewritestream.h
+    // RAPIDJSON_DELETE(filewritestream);
+
+    // memorybuffer.h
+    RAPIDJSON_DELETE(memorybuffer);
+
+    // memorystream.h
+    RAPIDJSON_DELETE(memorystream);
+
+    // reader.h
+    RAPIDJSON_DELETE(basereaderhandler);
+    RAPIDJSON_DELETE(reader);
+
+    // writer.h
+    RAPIDJSON_DELETE(writer);
+
+    // prettywriter.h
+    RAPIDJSON_DELETE(prettywriter);
+
+    // document.h
+    RAPIDJSON_DELETE(value);
+    RAPIDJSON_DELETE(document);
+
+    // pointer.h
+    RAPIDJSON_DELETE(pointer);
+
+    // schema.h
+    RAPIDJSON_DELETE(schemadocument);
+    RAPIDJSON_DELETE(schemavalidator);
+}
+
+TEST(Fwd, Fwd) {
+    Foo f;
+}
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_POP
+#endif
diff --git a/test/unittest/istreamwrappertest.cpp b/test/unittest/istreamwrappertest.cpp
new file mode 100644
index 0000000..0c3e5c4
--- /dev/null
+++ b/test/unittest/istreamwrappertest.cpp
@@ -0,0 +1,181 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+
+#include "rapidjson/istreamwrapper.h"
+#include "rapidjson/encodedstream.h"
+#include "rapidjson/document.h"
+#include <sstream>
+#include <fstream>
+
+#if defined(_MSC_VER) && !defined(__clang__)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(4702) // unreachable code
+#endif
+
+using namespace rapidjson;
+using namespace std;
+
+template <typename StringStreamType>
+static void TestStringStream() {
+    typedef typename StringStreamType::char_type Ch;
+
+    {
+        StringStreamType iss;
+        BasicIStreamWrapper<StringStreamType> is(iss);
+        EXPECT_EQ(0u, is.Tell());
+        if (sizeof(Ch) == 1) {
+            EXPECT_EQ(0, is.Peek4());
+            EXPECT_EQ(0u, is.Tell());
+        }
+        EXPECT_EQ(0, is.Peek());
+        EXPECT_EQ(0, is.Take());
+        EXPECT_EQ(0u, is.Tell());
+    }
+
+    {
+        Ch s[] = { 'A', 'B', 'C', '\0' };
+        StringStreamType iss(s);
+        BasicIStreamWrapper<StringStreamType> is(iss);
+        EXPECT_EQ(0u, is.Tell());
+        if (sizeof(Ch) == 1) {
+            EXPECT_EQ(0, is.Peek4()); // less than 4 bytes
+        }
+        for (int i = 0; i < 3; i++) {
+            EXPECT_EQ(static_cast<size_t>(i), is.Tell());
+            EXPECT_EQ('A' + i, is.Peek());
+            EXPECT_EQ('A' + i, is.Peek());
+            EXPECT_EQ('A' + i, is.Take());
+        }
+        EXPECT_EQ(3u, is.Tell());
+        EXPECT_EQ(0, is.Peek());
+        EXPECT_EQ(0, is.Take());
+    }
+
+    {
+        Ch s[] = { 'A', 'B', 'C', 'D', 'E', '\0' };
+        StringStreamType iss(s);
+        BasicIStreamWrapper<StringStreamType> is(iss);
+        if (sizeof(Ch) == 1) {
+            const Ch* c = is.Peek4();
+            for (int i = 0; i < 4; i++)
+                EXPECT_EQ('A' + i, c[i]);
+            EXPECT_EQ(0u, is.Tell());
+        }
+        for (int i = 0; i < 5; i++) {
+            EXPECT_EQ(static_cast<size_t>(i), is.Tell());
+            EXPECT_EQ('A' + i, is.Peek());
+            EXPECT_EQ('A' + i, is.Peek());
+            EXPECT_EQ('A' + i, is.Take());
+        }
+        EXPECT_EQ(5u, is.Tell());
+        EXPECT_EQ(0, is.Peek());
+        EXPECT_EQ(0, is.Take());
+    }
+}
+
+TEST(IStreamWrapper, istringstream) {
+    TestStringStream<istringstream>();
+}
+
+TEST(IStreamWrapper, stringstream) {
+    TestStringStream<stringstream>();
+}
+
+TEST(IStreamWrapper, wistringstream) {
+    TestStringStream<wistringstream>();
+}
+
+TEST(IStreamWrapper, wstringstream) {
+    TestStringStream<wstringstream>();
+}
+
+template <typename FileStreamType>
+static bool Open(FileStreamType& fs, const char* filename) {
+    const char *paths[] = {
+        "encodings",
+        "bin/encodings",
+        "../bin/encodings",
+        "../../bin/encodings",
+        "../../../bin/encodings"
+    };
+    char buffer[1024];
+    for (size_t i = 0; i < sizeof(paths) / sizeof(paths[0]); i++) {
+        sprintf(buffer, "%s/%s", paths[i], filename);
+        fs.open(buffer, ios_base::in | ios_base::binary);
+        if (fs.is_open())
+            return true;
+    }
+    return false;
+}
+
+TEST(IStreamWrapper, ifstream) {
+    ifstream ifs;
+    ASSERT_TRUE(Open(ifs, "utf8bom.json"));
+    IStreamWrapper isw(ifs);
+    EncodedInputStream<UTF8<>, IStreamWrapper> eis(isw);
+    Document d;
+    EXPECT_TRUE(!d.ParseStream(eis).HasParseError());
+    EXPECT_TRUE(d.IsObject());
+    EXPECT_EQ(5u, d.MemberCount());
+}
+
+TEST(IStreamWrapper, fstream) {
+    fstream fs;
+    ASSERT_TRUE(Open(fs, "utf8bom.json"));
+    IStreamWrapper isw(fs);
+    EncodedInputStream<UTF8<>, IStreamWrapper> eis(isw);
+    Document d;
+    EXPECT_TRUE(!d.ParseStream(eis).HasParseError());
+    EXPECT_TRUE(d.IsObject());
+    EXPECT_EQ(5u, d.MemberCount());
+}
+
+// wifstream/wfstream only work on C++11 with codecvt_utf16,
+// but many C++11 standard libraries still do not provide it.
+#if 0
+#include <codecvt>
+
+TEST(IStreamWrapper, wifstream) {
+    wifstream ifs;
+    ASSERT_TRUE(Open(ifs, "utf16bebom.json"));
+    ifs.imbue(std::locale(ifs.getloc(),
+       new std::codecvt_utf16<wchar_t, 0x10ffff, std::consume_header>));
+    WIStreamWrapper isw(ifs);
+    GenericDocument<UTF16<> > d;
+    d.ParseStream<kParseDefaultFlags, UTF16<>, WIStreamWrapper>(isw);
+    EXPECT_TRUE(!d.HasParseError());
+    EXPECT_TRUE(d.IsObject());
+    EXPECT_EQ(5, d.MemberCount());
+}
+
+TEST(IStreamWrapper, wfstream) {
+    wfstream fs;
+    ASSERT_TRUE(Open(fs, "utf16bebom.json"));
+    fs.imbue(std::locale(fs.getloc(),
+       new std::codecvt_utf16<wchar_t, 0x10ffff, std::consume_header>));
+    WIStreamWrapper isw(fs);
+    GenericDocument<UTF16<> > d;
+    d.ParseStream<kParseDefaultFlags, UTF16<>, WIStreamWrapper>(isw);
+    EXPECT_TRUE(!d.HasParseError());
+    EXPECT_TRUE(d.IsObject());
+    EXPECT_EQ(5, d.MemberCount());
+}
+
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+RAPIDJSON_DIAG_POP
+#endif
diff --git a/test/unittest/itoatest.cpp b/test/unittest/itoatest.cpp
new file mode 100644
index 0000000..f7524b8
--- /dev/null
+++ b/test/unittest/itoatest.cpp
@@ -0,0 +1,160 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+#include "rapidjson/internal/itoa.h"
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(type-limits)
+#endif
+
+using namespace rapidjson::internal;
+
+template <typename T>
+struct Traits {
+};
+
+template <>
+struct Traits<uint32_t> {
+    enum { kBufferSize = 11 };
+    enum { kMaxDigit = 10 };
+    static uint32_t Negate(uint32_t x) { return x; }
+};
+
+template <>
+struct Traits<int32_t> {
+    enum { kBufferSize = 12 };
+    enum { kMaxDigit = 10 };
+    static int32_t Negate(int32_t x) { return -x; }
+};
+
+template <>
+struct Traits<uint64_t> {
+    enum { kBufferSize = 21 };
+    enum { kMaxDigit = 20 };
+    static uint64_t Negate(uint64_t x) { return x; }
+};
+
+template <>
+struct Traits<int64_t> {
+    enum { kBufferSize = 22 };
+    enum { kMaxDigit = 20 };
+    static int64_t Negate(int64_t x) { return -x; }
+};
+
+template <typename T>
+static void VerifyValue(T value, void(*f)(T, char*), char* (*g)(T, char*)) {
+    char buffer1[Traits<T>::kBufferSize];
+    char buffer2[Traits<T>::kBufferSize];
+
+    f(value, buffer1);
+    *g(value, buffer2) = '\0';
+
+
+    EXPECT_STREQ(buffer1, buffer2);
+}
+
+template <typename T>
+static void Verify(void(*f)(T, char*), char* (*g)(T, char*)) {
+    // Boundary cases
+    VerifyValue<T>(0, f, g);
+    VerifyValue<T>((std::numeric_limits<T>::min)(), f, g);
+    VerifyValue<T>((std::numeric_limits<T>::max)(), f, g);
+
+    // 2^n - 1, 2^n, 10^n - 1, 10^n until overflow
+    for (int power = 2; power <= 10; power += 8) {
+        T i = 1, last;
+        do {
+            VerifyValue<T>(i - 1, f, g);
+            VerifyValue<T>(i, f, g);
+            if ((std::numeric_limits<T>::min)() < 0) {
+                VerifyValue<T>(Traits<T>::Negate(i), f, g);
+                VerifyValue<T>(Traits<T>::Negate(i + 1), f, g);
+            }
+            last = i;
+            if (i > static_cast<T>((std::numeric_limits<T>::max)() / static_cast<T>(power)))
+                break;
+            i *= static_cast<T>(power);
+        } while (last < i);
+    }
+}
+
+static void u32toa_naive(uint32_t value, char* buffer) {
+    char temp[10];
+    char *p = temp;
+    do {
+        *p++ = static_cast<char>(char(value % 10) + '0');
+        value /= 10;
+    } while (value > 0);
+
+    do {
+        *buffer++ = *--p;
+    } while (p != temp);
+
+    *buffer = '\0';
+}
+
+static void i32toa_naive(int32_t value, char* buffer) {
+    uint32_t u = static_cast<uint32_t>(value);
+    if (value < 0) {
+        *buffer++ = '-';
+        u = ~u + 1;
+    }
+    u32toa_naive(u, buffer);
+}
+
+static void u64toa_naive(uint64_t value, char* buffer) {
+    char temp[20];
+    char *p = temp;
+    do {
+        *p++ = static_cast<char>(char(value % 10) + '0');
+        value /= 10;
+    } while (value > 0);
+
+    do {
+        *buffer++ = *--p;
+    } while (p != temp);
+
+    *buffer = '\0';
+}
+
+static void i64toa_naive(int64_t value, char* buffer) {
+    uint64_t u = static_cast<uint64_t>(value);
+    if (value < 0) {
+        *buffer++ = '-';
+        u = ~u + 1;
+    }
+    u64toa_naive(u, buffer);
+}
+
+TEST(itoa, u32toa) {
+    Verify(u32toa_naive, u32toa);
+}
+
+TEST(itoa, i32toa) {
+    Verify(i32toa_naive, i32toa);
+}
+
+TEST(itoa, u64toa) {
+    Verify(u64toa_naive, u64toa);
+}
+
+TEST(itoa, i64toa) {
+    Verify(i64toa_naive, i64toa);
+}
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_POP
+#endif
diff --git a/test/unittest/jsoncheckertest.cpp b/test/unittest/jsoncheckertest.cpp
new file mode 100644
index 0000000..47c2b56
--- /dev/null
+++ b/test/unittest/jsoncheckertest.cpp
@@ -0,0 +1,143 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+
+#include "rapidjson/document.h"
+
+using namespace rapidjson;
+
+static char* ReadFile(const char* filename, size_t& length) {
+    const char *paths[] = {
+        "jsonchecker",
+        "bin/jsonchecker",
+        "../bin/jsonchecker",
+        "../../bin/jsonchecker",
+        "../../../bin/jsonchecker"
+    };
+    char buffer[1024];
+    FILE *fp = 0;
+    for (size_t i = 0; i < sizeof(paths) / sizeof(paths[0]); i++) {
+        sprintf(buffer, "%s/%s", paths[i], filename);
+        fp = fopen(buffer, "rb");
+        if (fp)
+            break;
+    }
+
+    if (!fp)
+        return 0;
+
+    fseek(fp, 0, SEEK_END);
+    length = static_cast<size_t>(ftell(fp));
+    fseek(fp, 0, SEEK_SET);
+    char* json = static_cast<char*>(malloc(length + 1));
+    size_t readLength = fread(json, 1, length, fp);
+    json[readLength] = '\0';
+    fclose(fp);
+    return json;
+}
+
+struct NoOpHandler {
+    bool Null() { return true; }
+    bool Bool(bool) { return true; }
+    bool Int(int) { return true; }
+    bool Uint(unsigned) { return true; }
+    bool Int64(int64_t) { return true; }
+    bool Uint64(uint64_t) { return true; }
+    bool Double(double) { return true; }
+    bool RawNumber(const char*, SizeType, bool) { return true; }
+    bool String(const char*, SizeType, bool) { return true; }
+    bool StartObject() { return true; }
+    bool Key(const char*, SizeType, bool) { return true; }
+    bool EndObject(SizeType) { return true; }
+    bool StartArray() { return true; }
+    bool EndArray(SizeType) { return true; }
+};
+
+
+TEST(JsonChecker, Reader) {
+    char filename[256];
+
+    // jsonchecker/failXX.json
+    for (int i = 1; i <= 33; i++) {
+        if (i == 1) // fail1.json is valid in rapidjson, which has no limitation on type of root element (RFC 7159).
+            continue;
+        if (i == 18)    // fail18.json is valid in rapidjson, which has no limitation on depth of nesting.
+            continue;
+
+        sprintf(filename, "fail%d.json", i);
+        size_t length;
+        char* json = ReadFile(filename, length);
+        if (!json) {
+            printf("jsonchecker file %s not found", filename);
+            ADD_FAILURE();
+            continue;
+        }
+
+        // Test stack-based parsing.
+        GenericDocument<UTF8<>, CrtAllocator> document; // Use Crt allocator to check exception-safety (no memory leak)
+        document.Parse(json);
+        EXPECT_TRUE(document.HasParseError()) << filename;
+
+        // Test iterative parsing.
+        document.Parse<kParseIterativeFlag>(json);
+        EXPECT_TRUE(document.HasParseError()) << filename;
+
+        // Test iterative pull-parsing.
+        Reader reader;
+        StringStream ss(json);
+        NoOpHandler h;
+        reader.IterativeParseInit();
+        while (!reader.IterativeParseComplete()) {
+            if (!reader.IterativeParseNext<kParseDefaultFlags>(ss, h))
+                break;
+        }
+        EXPECT_TRUE(reader.HasParseError()) << filename;
+        
+        free(json);
+    }
+
+    // passX.json
+    for (int i = 1; i <= 3; i++) {
+        sprintf(filename, "pass%d.json", i);
+        size_t length;
+        char* json = ReadFile(filename, length);
+        if (!json) {
+            printf("jsonchecker file %s not found", filename);
+            continue;
+        }
+
+        // Test stack-based parsing.
+        GenericDocument<UTF8<>, CrtAllocator> document; // Use Crt allocator to check exception-safety (no memory leak)
+        document.Parse(json);
+        EXPECT_FALSE(document.HasParseError()) << filename;
+
+        // Test iterative parsing.
+        document.Parse<kParseIterativeFlag>(json);
+        EXPECT_FALSE(document.HasParseError()) << filename;
+        
+        // Test iterative pull-parsing.
+        Reader reader;
+        StringStream ss(json);
+        NoOpHandler h;
+        reader.IterativeParseInit();
+        while (!reader.IterativeParseComplete()) {
+            if (!reader.IterativeParseNext<kParseDefaultFlags>(ss, h))
+                break;
+        }
+        EXPECT_FALSE(reader.HasParseError()) << filename;
+
+        free(json);
+    }
+}
diff --git a/test/unittest/namespacetest.cpp b/test/unittest/namespacetest.cpp
new file mode 100644
index 0000000..1814724
--- /dev/null
+++ b/test/unittest/namespacetest.cpp
@@ -0,0 +1,70 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+
+// Test another instantiation of RapidJSON in a different namespace.
+
+#define RAPIDJSON_NAMESPACE my::rapid::json
+#define RAPIDJSON_NAMESPACE_BEGIN namespace my { namespace rapid { namespace json {
+#define RAPIDJSON_NAMESPACE_END } } }
+
+// include lots of RapidJSON files
+
+#include "rapidjson/document.h"
+#include "rapidjson/writer.h"
+#include "rapidjson/filereadstream.h"
+#include "rapidjson/filewritestream.h"
+#include "rapidjson/encodedstream.h"
+#include "rapidjson/stringbuffer.h"
+
+static const char json[] = "{\"hello\":\"world\",\"t\":true,\"f\":false,\"n\":null,\"i\":123,\"pi\":3.1416,\"a\":[1,2,3,4]}";
+
+TEST(NamespaceTest,Using) {
+    using namespace RAPIDJSON_NAMESPACE;
+    typedef GenericDocument<UTF8<>, CrtAllocator> DocumentType;
+    DocumentType doc;
+
+    doc.Parse(json);
+    EXPECT_TRUE(!doc.HasParseError());
+}
+
+TEST(NamespaceTest,Direct) {
+    typedef RAPIDJSON_NAMESPACE::Document Document;
+    typedef RAPIDJSON_NAMESPACE::Reader Reader;
+    typedef RAPIDJSON_NAMESPACE::StringStream StringStream;
+    typedef RAPIDJSON_NAMESPACE::StringBuffer StringBuffer;
+    typedef RAPIDJSON_NAMESPACE::Writer<StringBuffer> WriterType;
+
+    StringStream s(json);
+    StringBuffer buffer;
+    WriterType writer(buffer);
+    buffer.ShrinkToFit();
+    Reader reader;
+    reader.Parse(s, writer);
+
+    EXPECT_STREQ(json, buffer.GetString());
+    EXPECT_EQ(sizeof(json)-1, buffer.GetSize());
+    EXPECT_TRUE(writer.IsComplete());
+
+    Document doc;
+    doc.Parse(buffer.GetString());
+    EXPECT_TRUE(!doc.HasParseError());
+
+    buffer.Clear();
+    writer.Reset(buffer);
+    doc.Accept(writer);
+    EXPECT_STREQ(json, buffer.GetString());
+    EXPECT_TRUE(writer.IsComplete());
+}
diff --git a/test/unittest/ostreamwrappertest.cpp b/test/unittest/ostreamwrappertest.cpp
new file mode 100644
index 0000000..50f8da6
--- /dev/null
+++ b/test/unittest/ostreamwrappertest.cpp
@@ -0,0 +1,92 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+
+#include "rapidjson/ostreamwrapper.h"
+#include "rapidjson/encodedstream.h"
+#include "rapidjson/document.h"
+#include <sstream>
+#include <fstream>
+
+using namespace rapidjson;
+using namespace std;
+
+template <typename StringStreamType>
+static void TestStringStream() {
+    typedef typename StringStreamType::char_type Ch;
+
+    Ch s[] = { 'A', 'B', 'C', '\0' };
+    StringStreamType oss(s);
+    BasicOStreamWrapper<StringStreamType> os(oss);
+    for (size_t i = 0; i < 3; i++)
+        os.Put(s[i]);
+    os.Flush();
+    for (size_t i = 0; i < 3; i++)
+        EXPECT_EQ(s[i], oss.str()[i]);
+}
+
+TEST(OStreamWrapper, ostringstream) {
+    TestStringStream<ostringstream>();
+}
+
+TEST(OStreamWrapper, stringstream) {
+    TestStringStream<stringstream>();
+}
+
+TEST(OStreamWrapper, wostringstream) {
+    TestStringStream<wostringstream>();
+}
+
+TEST(OStreamWrapper, wstringstream) {
+    TestStringStream<wstringstream>();
+}
+
+TEST(OStreamWrapper, cout) {
+    OStreamWrapper os(cout);
+    const char* s = "Hello World!\n";
+    while (*s)
+        os.Put(*s++);
+    os.Flush();
+}
+
+template <typename FileStreamType>
+static void TestFileStream() {
+    char filename[L_tmpnam];
+    FILE* fp = TempFile(filename);
+    fclose(fp);
+
+    const char* s = "Hello World!\n";
+    {
+        FileStreamType ofs(filename, ios::out | ios::binary);
+        BasicOStreamWrapper<FileStreamType> osw(ofs);
+        for (const char* p = s; *p; p++)
+            osw.Put(*p);
+        osw.Flush();
+    }
+
+    fp = fopen(filename, "r");
+	ASSERT_TRUE( fp != NULL );
+    for (const char* p = s; *p; p++)
+        EXPECT_EQ(*p, static_cast<char>(fgetc(fp)));
+    fclose(fp);
+}
+
+TEST(OStreamWrapper, ofstream) {
+    TestFileStream<ofstream>();
+}
+
+TEST(OStreamWrapper, fstream) {
+    TestFileStream<fstream>();
+}
diff --git a/test/unittest/pointertest.cpp b/test/unittest/pointertest.cpp
new file mode 100644
index 0000000..858fd2a
--- /dev/null
+++ b/test/unittest/pointertest.cpp
@@ -0,0 +1,1635 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+#include "rapidjson/pointer.h"
+#include "rapidjson/stringbuffer.h"
+#include "rapidjson/ostreamwrapper.h"
+#include <sstream>
+#include <map>
+#include <algorithm>
+
+using namespace rapidjson;
+
+static const char kJson[] = "{\n"
+"    \"foo\":[\"bar\", \"baz\"],\n"
+"    \"\" : 0,\n"
+"    \"a/b\" : 1,\n"
+"    \"c%d\" : 2,\n"
+"    \"e^f\" : 3,\n"
+"    \"g|h\" : 4,\n"
+"    \"i\\\\j\" : 5,\n"
+"    \"k\\\"l\" : 6,\n"
+"    \" \" : 7,\n"
+"    \"m~n\" : 8\n"
+"}";
+
+TEST(Pointer, DefaultConstructor) {
+    Pointer p;
+    EXPECT_TRUE(p.IsValid());
+    EXPECT_EQ(0u, p.GetTokenCount());
+}
+
+TEST(Pointer, Parse) {
+    {
+        Pointer p("");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(0u, p.GetTokenCount());
+    }
+
+    {
+        Pointer p("/");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(1u, p.GetTokenCount());
+        EXPECT_EQ(0u, p.GetTokens()[0].length);
+        EXPECT_STREQ("", p.GetTokens()[0].name);
+        EXPECT_EQ(kPointerInvalidIndex, p.GetTokens()[0].index);
+    }
+
+    {
+        Pointer p("/foo");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(1u, p.GetTokenCount());
+        EXPECT_EQ(3u, p.GetTokens()[0].length);
+        EXPECT_STREQ("foo", p.GetTokens()[0].name);
+        EXPECT_EQ(kPointerInvalidIndex, p.GetTokens()[0].index);
+    }
+
+    #if RAPIDJSON_HAS_STDSTRING
+    {
+        Pointer p(std::string("/foo"));
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(1u, p.GetTokenCount());
+        EXPECT_EQ(3u, p.GetTokens()[0].length);
+        EXPECT_STREQ("foo", p.GetTokens()[0].name);
+        EXPECT_EQ(kPointerInvalidIndex, p.GetTokens()[0].index);
+    }
+    #endif
+
+    {
+        Pointer p("/foo/0");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(2u, p.GetTokenCount());
+        EXPECT_EQ(3u, p.GetTokens()[0].length);
+        EXPECT_STREQ("foo", p.GetTokens()[0].name);
+        EXPECT_EQ(kPointerInvalidIndex, p.GetTokens()[0].index);
+        EXPECT_EQ(1u, p.GetTokens()[1].length);
+        EXPECT_STREQ("0", p.GetTokens()[1].name);
+        EXPECT_EQ(0u, p.GetTokens()[1].index);
+    }
+
+    {
+        // Unescape ~1
+        Pointer p("/a~1b");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(1u, p.GetTokenCount());
+        EXPECT_EQ(3u, p.GetTokens()[0].length);
+        EXPECT_STREQ("a/b", p.GetTokens()[0].name);
+    }
+
+    {
+        // Unescape ~0
+        Pointer p("/m~0n");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(1u, p.GetTokenCount());
+        EXPECT_EQ(3u, p.GetTokens()[0].length);
+        EXPECT_STREQ("m~n", p.GetTokens()[0].name);
+    }
+
+    {
+        // empty name
+        Pointer p("/");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(1u, p.GetTokenCount());
+        EXPECT_EQ(0u, p.GetTokens()[0].length);
+        EXPECT_STREQ("", p.GetTokens()[0].name);
+    }
+
+    {
+        // empty and non-empty name
+        Pointer p("//a");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(2u, p.GetTokenCount());
+        EXPECT_EQ(0u, p.GetTokens()[0].length);
+        EXPECT_STREQ("", p.GetTokens()[0].name);
+        EXPECT_EQ(1u, p.GetTokens()[1].length);
+        EXPECT_STREQ("a", p.GetTokens()[1].name);
+    }
+
+    {
+        // Null characters
+        Pointer p("/\0\0", 3);
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(1u, p.GetTokenCount());
+        EXPECT_EQ(2u, p.GetTokens()[0].length);
+        EXPECT_EQ('\0', p.GetTokens()[0].name[0]);
+        EXPECT_EQ('\0', p.GetTokens()[0].name[1]);
+        EXPECT_EQ('\0', p.GetTokens()[0].name[2]);
+    }
+
+    {
+        // Valid index
+        Pointer p("/123");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(1u, p.GetTokenCount());
+        EXPECT_STREQ("123", p.GetTokens()[0].name);
+        EXPECT_EQ(123u, p.GetTokens()[0].index);
+    }
+
+    {
+        // Invalid index (with leading zero)
+        Pointer p("/01");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(1u, p.GetTokenCount());
+        EXPECT_STREQ("01", p.GetTokens()[0].name);
+        EXPECT_EQ(kPointerInvalidIndex, p.GetTokens()[0].index);
+    }
+
+    if (sizeof(SizeType) == 4) {
+        // Invalid index (overflow)
+        Pointer p("/4294967296");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(1u, p.GetTokenCount());
+        EXPECT_STREQ("4294967296", p.GetTokens()[0].name);
+        EXPECT_EQ(kPointerInvalidIndex, p.GetTokens()[0].index);
+    }
+
+    {
+        // kPointerParseErrorTokenMustBeginWithSolidus
+        Pointer p(" ");
+        EXPECT_FALSE(p.IsValid());
+        EXPECT_EQ(kPointerParseErrorTokenMustBeginWithSolidus, p.GetParseErrorCode());
+        EXPECT_EQ(0u, p.GetParseErrorOffset());
+    }
+
+    {
+        // kPointerParseErrorInvalidEscape
+        Pointer p("/~");
+        EXPECT_FALSE(p.IsValid());
+        EXPECT_EQ(kPointerParseErrorInvalidEscape, p.GetParseErrorCode());
+        EXPECT_EQ(2u, p.GetParseErrorOffset());
+    }
+
+    {
+        // kPointerParseErrorInvalidEscape
+        Pointer p("/~2");
+        EXPECT_FALSE(p.IsValid());
+        EXPECT_EQ(kPointerParseErrorInvalidEscape, p.GetParseErrorCode());
+        EXPECT_EQ(2u, p.GetParseErrorOffset());
+    }
+}
+
+TEST(Pointer, Parse_URIFragment) {
+    {
+        Pointer p("#");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(0u, p.GetTokenCount());
+    }
+
+    {
+        Pointer p("#/foo");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(1u, p.GetTokenCount());
+        EXPECT_EQ(3u, p.GetTokens()[0].length);
+        EXPECT_STREQ("foo", p.GetTokens()[0].name);
+    }
+
+    {
+        Pointer p("#/foo/0");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(2u, p.GetTokenCount());
+        EXPECT_EQ(3u, p.GetTokens()[0].length);
+        EXPECT_STREQ("foo", p.GetTokens()[0].name);
+        EXPECT_EQ(1u, p.GetTokens()[1].length);
+        EXPECT_STREQ("0", p.GetTokens()[1].name);
+        EXPECT_EQ(0u, p.GetTokens()[1].index);
+    }
+
+    {
+        // Unescape ~1
+        Pointer p("#/a~1b");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(1u, p.GetTokenCount());
+        EXPECT_EQ(3u, p.GetTokens()[0].length);
+        EXPECT_STREQ("a/b", p.GetTokens()[0].name);
+    }
+
+    {
+        // Unescape ~0
+        Pointer p("#/m~0n");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(1u, p.GetTokenCount());
+        EXPECT_EQ(3u, p.GetTokens()[0].length);
+        EXPECT_STREQ("m~n", p.GetTokens()[0].name);
+    }
+
+    {
+        // empty name
+        Pointer p("#/");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(1u, p.GetTokenCount());
+        EXPECT_EQ(0u, p.GetTokens()[0].length);
+        EXPECT_STREQ("", p.GetTokens()[0].name);
+    }
+
+    {
+        // empty and non-empty name
+        Pointer p("#//a");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(2u, p.GetTokenCount());
+        EXPECT_EQ(0u, p.GetTokens()[0].length);
+        EXPECT_STREQ("", p.GetTokens()[0].name);
+        EXPECT_EQ(1u, p.GetTokens()[1].length);
+        EXPECT_STREQ("a", p.GetTokens()[1].name);
+    }
+
+    {
+        // Null characters
+        Pointer p("#/%00%00");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(1u, p.GetTokenCount());
+        EXPECT_EQ(2u, p.GetTokens()[0].length);
+        EXPECT_EQ('\0', p.GetTokens()[0].name[0]);
+        EXPECT_EQ('\0', p.GetTokens()[0].name[1]);
+        EXPECT_EQ('\0', p.GetTokens()[0].name[2]);
+    }
+
+    {
+        // Percentage Escapes
+        EXPECT_STREQ("c%d", Pointer("#/c%25d").GetTokens()[0].name);
+        EXPECT_STREQ("e^f", Pointer("#/e%5Ef").GetTokens()[0].name);
+        EXPECT_STREQ("g|h", Pointer("#/g%7Ch").GetTokens()[0].name);
+        EXPECT_STREQ("i\\j", Pointer("#/i%5Cj").GetTokens()[0].name);
+        EXPECT_STREQ("k\"l", Pointer("#/k%22l").GetTokens()[0].name);
+        EXPECT_STREQ(" ", Pointer("#/%20").GetTokens()[0].name);
+    }
+
+    {
+        // Valid index
+        Pointer p("#/123");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(1u, p.GetTokenCount());
+        EXPECT_STREQ("123", p.GetTokens()[0].name);
+        EXPECT_EQ(123u, p.GetTokens()[0].index);
+    }
+
+    {
+        // Invalid index (with leading zero)
+        Pointer p("#/01");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(1u, p.GetTokenCount());
+        EXPECT_STREQ("01", p.GetTokens()[0].name);
+        EXPECT_EQ(kPointerInvalidIndex, p.GetTokens()[0].index);
+    }
+
+    if (sizeof(SizeType) == 4) {
+        // Invalid index (overflow)
+        Pointer p("#/4294967296");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(1u, p.GetTokenCount());
+        EXPECT_STREQ("4294967296", p.GetTokens()[0].name);
+        EXPECT_EQ(kPointerInvalidIndex, p.GetTokens()[0].index);
+    }
+
+    {
+        // Decode UTF-8 percent encoding to UTF-8
+        Pointer p("#/%C2%A2");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(1u, p.GetTokenCount());
+        EXPECT_STREQ("\xC2\xA2", p.GetTokens()[0].name);
+    }
+
+    {
+        // Decode UTF-8 percent encoding to UTF-16
+        GenericPointer<GenericValue<UTF16<> > > p(L"#/%C2%A2");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(1u, p.GetTokenCount());
+        EXPECT_EQ(static_cast<UTF16<>::Ch>(0x00A2), p.GetTokens()[0].name[0]);
+        EXPECT_EQ(1u, p.GetTokens()[0].length);
+    }
+
+    {
+        // Decode UTF-8 percent encoding to UTF-16
+        GenericPointer<GenericValue<UTF16<> > > p(L"#/%E2%82%AC");
+        EXPECT_TRUE(p.IsValid());
+        EXPECT_EQ(1u, p.GetTokenCount());
+        EXPECT_EQ(static_cast<UTF16<>::Ch>(0x20AC), p.GetTokens()[0].name[0]);
+        EXPECT_EQ(1u, p.GetTokens()[0].length);
+    }
+
+    {
+        // kPointerParseErrorTokenMustBeginWithSolidus
+        Pointer p("# ");
+        EXPECT_FALSE(p.IsValid());
+        EXPECT_EQ(kPointerParseErrorTokenMustBeginWithSolidus, p.GetParseErrorCode());
+        EXPECT_EQ(1u, p.GetParseErrorOffset());
+    }
+
+    {
+        // kPointerParseErrorInvalidEscape
+        Pointer p("#/~");
+        EXPECT_FALSE(p.IsValid());
+        EXPECT_EQ(kPointerParseErrorInvalidEscape, p.GetParseErrorCode());
+        EXPECT_EQ(3u, p.GetParseErrorOffset());
+    }
+
+    {
+        // kPointerParseErrorInvalidEscape
+        Pointer p("#/~2");
+        EXPECT_FALSE(p.IsValid());
+        EXPECT_EQ(kPointerParseErrorInvalidEscape, p.GetParseErrorCode());
+        EXPECT_EQ(3u, p.GetParseErrorOffset());
+    }
+
+    {
+        // kPointerParseErrorInvalidPercentEncoding
+        Pointer p("#/%");
+        EXPECT_FALSE(p.IsValid());
+        EXPECT_EQ(kPointerParseErrorInvalidPercentEncoding, p.GetParseErrorCode());
+        EXPECT_EQ(2u, p.GetParseErrorOffset());
+    }
+
+    {
+        // kPointerParseErrorInvalidPercentEncoding (invalid hex)
+        Pointer p("#/%g0");
+        EXPECT_FALSE(p.IsValid());
+        EXPECT_EQ(kPointerParseErrorInvalidPercentEncoding, p.GetParseErrorCode());
+        EXPECT_EQ(2u, p.GetParseErrorOffset());
+    }
+
+    {
+        // kPointerParseErrorInvalidPercentEncoding (invalid hex)
+        Pointer p("#/%0g");
+        EXPECT_FALSE(p.IsValid());
+        EXPECT_EQ(kPointerParseErrorInvalidPercentEncoding, p.GetParseErrorCode());
+        EXPECT_EQ(2u, p.GetParseErrorOffset());
+    }
+
+    {
+        // kPointerParseErrorInvalidPercentEncoding (incomplete UTF-8 sequence)
+        Pointer p("#/%C2");
+        EXPECT_FALSE(p.IsValid());
+        EXPECT_EQ(kPointerParseErrorInvalidPercentEncoding, p.GetParseErrorCode());
+        EXPECT_EQ(2u, p.GetParseErrorOffset());
+    }
+
+    {
+        // kPointerParseErrorCharacterMustPercentEncode
+        Pointer p("#/ ");
+        EXPECT_FALSE(p.IsValid());
+        EXPECT_EQ(kPointerParseErrorCharacterMustPercentEncode, p.GetParseErrorCode());
+        EXPECT_EQ(2u, p.GetParseErrorOffset());
+    }
+
+    {
+        // kPointerParseErrorCharacterMustPercentEncode
+        Pointer p("#/\n");
+        EXPECT_FALSE(p.IsValid());
+        EXPECT_EQ(kPointerParseErrorCharacterMustPercentEncode, p.GetParseErrorCode());
+        EXPECT_EQ(2u, p.GetParseErrorOffset());
+    }
+}
+
+TEST(Pointer, Stringify) {
+    // Test by roundtrip
+    const char* sources[] = {
+        "",
+        "/foo",
+        "/foo/0",
+        "/",
+        "/a~1b",
+        "/c%d",
+        "/e^f",
+        "/g|h",
+        "/i\\j",
+        "/k\"l",
+        "/ ",
+        "/m~0n",
+        "/\xC2\xA2",
+        "/\xE2\x82\xAC",
+        "/\xF0\x9D\x84\x9E"
+    };
+
+    for (size_t i = 0; i < sizeof(sources) / sizeof(sources[0]); i++) {
+        Pointer p(sources[i]);
+        StringBuffer s;
+        EXPECT_TRUE(p.Stringify(s));
+        EXPECT_STREQ(sources[i], s.GetString());
+
+        // Stringify to URI fragment
+        StringBuffer s2;
+        EXPECT_TRUE(p.StringifyUriFragment(s2));
+        Pointer p2(s2.GetString(), s2.GetSize());
+        EXPECT_TRUE(p2.IsValid());
+        EXPECT_TRUE(p == p2);
+    }
+
+    {
+        // Stringify to URI fragment with an invalid UTF-8 sequence
+        Pointer p("/\xC2");
+        StringBuffer s;
+        EXPECT_FALSE(p.StringifyUriFragment(s));
+    }
+}
+
+// Construct a Pointer with static tokens, no dynamic allocation involved.
+#define NAME(s) { s, static_cast<SizeType>(sizeof(s) / sizeof(s[0]) - 1), kPointerInvalidIndex }
+#define INDEX(i) { #i, static_cast<SizeType>(sizeof(#i) - 1), i }
+
+static const Pointer::Token kTokens[] = { NAME("foo"), INDEX(0) }; // equivalent to "/foo/0"
+
+#undef NAME
+#undef INDEX
+
+TEST(Pointer, ConstructorWithToken) {
+    Pointer p(kTokens, sizeof(kTokens) / sizeof(kTokens[0]));
+    EXPECT_TRUE(p.IsValid());
+    EXPECT_EQ(2u, p.GetTokenCount());
+    EXPECT_EQ(3u, p.GetTokens()[0].length);
+    EXPECT_STREQ("foo", p.GetTokens()[0].name);
+    EXPECT_EQ(1u, p.GetTokens()[1].length);
+    EXPECT_STREQ("0", p.GetTokens()[1].name);
+    EXPECT_EQ(0u, p.GetTokens()[1].index);
+}
+
+TEST(Pointer, CopyConstructor) {
+    {
+        CrtAllocator allocator;
+        Pointer p("/foo/0", &allocator);
+        Pointer q(p);
+        EXPECT_TRUE(q.IsValid());
+        EXPECT_EQ(2u, q.GetTokenCount());
+        EXPECT_EQ(3u, q.GetTokens()[0].length);
+        EXPECT_STREQ("foo", q.GetTokens()[0].name);
+        EXPECT_EQ(1u, q.GetTokens()[1].length);
+        EXPECT_STREQ("0", q.GetTokens()[1].name);
+        EXPECT_EQ(0u, q.GetTokens()[1].index);
+        EXPECT_EQ(&p.GetAllocator(), &q.GetAllocator());
+    }
+
+    // Static tokens
+    {
+        Pointer p(kTokens, sizeof(kTokens) / sizeof(kTokens[0]));
+        Pointer q(p);
+        EXPECT_TRUE(q.IsValid());
+        EXPECT_EQ(2u, q.GetTokenCount());
+        EXPECT_EQ(3u, q.GetTokens()[0].length);
+        EXPECT_STREQ("foo", q.GetTokens()[0].name);
+        EXPECT_EQ(1u, q.GetTokens()[1].length);
+        EXPECT_STREQ("0", q.GetTokens()[1].name);
+        EXPECT_EQ(0u, q.GetTokens()[1].index);
+    }
+}
+
+TEST(Pointer, Assignment) {
+    {
+        CrtAllocator allocator;
+        Pointer p("/foo/0", &allocator);
+        Pointer q;
+        q = p;
+        EXPECT_TRUE(q.IsValid());
+        EXPECT_EQ(2u, q.GetTokenCount());
+        EXPECT_EQ(3u, q.GetTokens()[0].length);
+        EXPECT_STREQ("foo", q.GetTokens()[0].name);
+        EXPECT_EQ(1u, q.GetTokens()[1].length);
+        EXPECT_STREQ("0", q.GetTokens()[1].name);
+        EXPECT_EQ(0u, q.GetTokens()[1].index);
+        EXPECT_NE(&p.GetAllocator(), &q.GetAllocator());
+        q = static_cast<const Pointer &>(q);
+        EXPECT_TRUE(q.IsValid());
+        EXPECT_EQ(2u, q.GetTokenCount());
+        EXPECT_EQ(3u, q.GetTokens()[0].length);
+        EXPECT_STREQ("foo", q.GetTokens()[0].name);
+        EXPECT_EQ(1u, q.GetTokens()[1].length);
+        EXPECT_STREQ("0", q.GetTokens()[1].name);
+        EXPECT_EQ(0u, q.GetTokens()[1].index);
+        EXPECT_NE(&p.GetAllocator(), &q.GetAllocator());
+    }
+
+    // Static tokens
+    {
+        Pointer p(kTokens, sizeof(kTokens) / sizeof(kTokens[0]));
+        Pointer q;
+        q = p;
+        EXPECT_TRUE(q.IsValid());
+        EXPECT_EQ(2u, q.GetTokenCount());
+        EXPECT_EQ(3u, q.GetTokens()[0].length);
+        EXPECT_STREQ("foo", q.GetTokens()[0].name);
+        EXPECT_EQ(1u, q.GetTokens()[1].length);
+        EXPECT_STREQ("0", q.GetTokens()[1].name);
+        EXPECT_EQ(0u, q.GetTokens()[1].index);
+    }
+}
+
+TEST(Pointer, Swap) {
+    Pointer p("/foo/0");
+    Pointer q(&p.GetAllocator());
+
+    q.Swap(p);
+    EXPECT_EQ(&q.GetAllocator(), &p.GetAllocator());
+    EXPECT_TRUE(p.IsValid());
+    EXPECT_TRUE(q.IsValid());
+    EXPECT_EQ(0u, p.GetTokenCount());
+    EXPECT_EQ(2u, q.GetTokenCount());
+    EXPECT_EQ(3u, q.GetTokens()[0].length);
+    EXPECT_STREQ("foo", q.GetTokens()[0].name);
+    EXPECT_EQ(1u, q.GetTokens()[1].length);
+    EXPECT_STREQ("0", q.GetTokens()[1].name);
+    EXPECT_EQ(0u, q.GetTokens()[1].index);
+
+    // std::swap compatibility
+    std::swap(p, q);
+    EXPECT_EQ(&p.GetAllocator(), &q.GetAllocator());
+    EXPECT_TRUE(q.IsValid());
+    EXPECT_TRUE(p.IsValid());
+    EXPECT_EQ(0u, q.GetTokenCount());
+    EXPECT_EQ(2u, p.GetTokenCount());
+    EXPECT_EQ(3u, p.GetTokens()[0].length);
+    EXPECT_STREQ("foo", p.GetTokens()[0].name);
+    EXPECT_EQ(1u, p.GetTokens()[1].length);
+    EXPECT_STREQ("0", p.GetTokens()[1].name);
+    EXPECT_EQ(0u, p.GetTokens()[1].index);
+}
+
+TEST(Pointer, Append) {
+    {
+        Pointer p;
+        Pointer q = p.Append("foo");
+        EXPECT_TRUE(Pointer("/foo") == q);
+        q = q.Append(1234);
+        EXPECT_TRUE(Pointer("/foo/1234") == q);
+        q = q.Append("");
+        EXPECT_TRUE(Pointer("/foo/1234/") == q);
+    }
+
+    {
+        Pointer p;
+        Pointer q = p.Append(Value("foo").Move());
+        EXPECT_TRUE(Pointer("/foo") == q);
+        q = q.Append(Value(1234).Move());
+        EXPECT_TRUE(Pointer("/foo/1234") == q);
+        q = q.Append(Value(kStringType).Move());
+        EXPECT_TRUE(Pointer("/foo/1234/") == q);
+    }
+
+#if RAPIDJSON_HAS_STDSTRING
+    {
+        Pointer p;
+        Pointer q = p.Append(std::string("foo"));
+        EXPECT_TRUE(Pointer("/foo") == q);
+    }
+#endif
+}
+
+TEST(Pointer, Equality) {
+    EXPECT_TRUE(Pointer("/foo/0") == Pointer("/foo/0"));
+    EXPECT_FALSE(Pointer("/foo/0") == Pointer("/foo/1"));
+    EXPECT_FALSE(Pointer("/foo/0") == Pointer("/foo/0/1"));
+    EXPECT_FALSE(Pointer("/foo/0") == Pointer("a"));
+    EXPECT_FALSE(Pointer("a") == Pointer("a")); // Invalid always not equal
+}
+
+TEST(Pointer, Inequality) {
+    EXPECT_FALSE(Pointer("/foo/0") != Pointer("/foo/0"));
+    EXPECT_TRUE(Pointer("/foo/0") != Pointer("/foo/1"));
+    EXPECT_TRUE(Pointer("/foo/0") != Pointer("/foo/0/1"));
+    EXPECT_TRUE(Pointer("/foo/0") != Pointer("a"));
+    EXPECT_TRUE(Pointer("a") != Pointer("a")); // Invalid always not equal
+}
+
+TEST(Pointer, Create) {
+    Document d;
+    {
+        Value* v = &Pointer("").Create(d, d.GetAllocator());
+        EXPECT_EQ(&d, v);
+    }
+    {
+        Value* v = &Pointer("/foo").Create(d, d.GetAllocator());
+        EXPECT_EQ(&d["foo"], v);
+    }
+    {
+        Value* v = &Pointer("/foo/0").Create(d, d.GetAllocator());
+        EXPECT_EQ(&d["foo"][0], v);
+    }
+    {
+        Value* v = &Pointer("/foo/-").Create(d, d.GetAllocator());
+        EXPECT_EQ(&d["foo"][1], v);
+    }
+
+    {
+        Value* v = &Pointer("/foo/-/-").Create(d, d.GetAllocator());
+        // "foo/-" is a newly created null value x.
+        // "foo/-/-" finds that x is not an array, it converts x to empty object
+        // and treats - as "-" member name
+        EXPECT_EQ(&d["foo"][2]["-"], v);
+    }
+
+    {
+        // Document with no allocator
+        Value* v = &Pointer("/foo/-").Create(d);
+        EXPECT_EQ(&d["foo"][3], v);
+    }
+
+    {
+        // Value (not document) must give allocator
+        Value* v = &Pointer("/-").Create(d["foo"], d.GetAllocator());
+        EXPECT_EQ(&d["foo"][4], v);
+    }
+}
+
+TEST(Pointer, Get) {
+    Document d;
+    d.Parse(kJson);
+
+    EXPECT_EQ(&d, Pointer("").Get(d));
+    EXPECT_EQ(&d["foo"], Pointer("/foo").Get(d));
+    EXPECT_EQ(&d["foo"][0], Pointer("/foo/0").Get(d));
+    EXPECT_EQ(&d[""], Pointer("/").Get(d));
+    EXPECT_EQ(&d["a/b"], Pointer("/a~1b").Get(d));
+    EXPECT_EQ(&d["c%d"], Pointer("/c%d").Get(d));
+    EXPECT_EQ(&d["e^f"], Pointer("/e^f").Get(d));
+    EXPECT_EQ(&d["g|h"], Pointer("/g|h").Get(d));
+    EXPECT_EQ(&d["i\\j"], Pointer("/i\\j").Get(d));
+    EXPECT_EQ(&d["k\"l"], Pointer("/k\"l").Get(d));
+    EXPECT_EQ(&d[" "], Pointer("/ ").Get(d));
+    EXPECT_EQ(&d["m~n"], Pointer("/m~0n").Get(d));
+    EXPECT_TRUE(Pointer("/abc").Get(d) == 0);
+    size_t unresolvedTokenIndex;
+    EXPECT_TRUE(Pointer("/foo/2").Get(d, &unresolvedTokenIndex) == 0); // Out of boundary
+    EXPECT_EQ(1u, unresolvedTokenIndex);
+    EXPECT_TRUE(Pointer("/foo/a").Get(d, &unresolvedTokenIndex) == 0); // "/foo" is an array, cannot query by "a"
+    EXPECT_EQ(1u, unresolvedTokenIndex);
+    EXPECT_TRUE(Pointer("/foo/0/0").Get(d, &unresolvedTokenIndex) == 0); // "/foo/0" is an string, cannot further query
+    EXPECT_EQ(2u, unresolvedTokenIndex);
+    EXPECT_TRUE(Pointer("/foo/0/a").Get(d, &unresolvedTokenIndex) == 0); // "/foo/0" is an string, cannot further query
+    EXPECT_EQ(2u, unresolvedTokenIndex);
+}
+
+TEST(Pointer, GetWithDefault) {
+    Document d;
+    d.Parse(kJson);
+
+    // Value version
+    Document::AllocatorType& a = d.GetAllocator();
+    const Value v("qux");
+    EXPECT_TRUE(Value("bar") == Pointer("/foo/0").GetWithDefault(d, v, a));
+    EXPECT_TRUE(Value("baz") == Pointer("/foo/1").GetWithDefault(d, v, a));
+    EXPECT_TRUE(Value("qux") == Pointer("/foo/2").GetWithDefault(d, v, a));
+    EXPECT_TRUE(Value("last") == Pointer("/foo/-").GetWithDefault(d, Value("last").Move(), a));
+    EXPECT_STREQ("last", d["foo"][3].GetString());
+
+    EXPECT_TRUE(Pointer("/foo/null").GetWithDefault(d, Value().Move(), a).IsNull());
+    EXPECT_TRUE(Pointer("/foo/null").GetWithDefault(d, "x", a).IsNull());
+
+    // Generic version
+    EXPECT_EQ(-1, Pointer("/foo/int").GetWithDefault(d, -1, a).GetInt());
+    EXPECT_EQ(-1, Pointer("/foo/int").GetWithDefault(d, -2, a).GetInt());
+    EXPECT_EQ(0x87654321, Pointer("/foo/uint").GetWithDefault(d, 0x87654321, a).GetUint());
+    EXPECT_EQ(0x87654321, Pointer("/foo/uint").GetWithDefault(d, 0x12345678, a).GetUint());
+
+    const int64_t i64 = static_cast<int64_t>(RAPIDJSON_UINT64_C2(0x80000000, 0));
+    EXPECT_EQ(i64, Pointer("/foo/int64").GetWithDefault(d, i64, a).GetInt64());
+    EXPECT_EQ(i64, Pointer("/foo/int64").GetWithDefault(d, i64 + 1, a).GetInt64());
+
+    const uint64_t u64 = RAPIDJSON_UINT64_C2(0xFFFFFFFFF, 0xFFFFFFFFF);
+    EXPECT_EQ(u64, Pointer("/foo/uint64").GetWithDefault(d, u64, a).GetUint64());
+    EXPECT_EQ(u64, Pointer("/foo/uint64").GetWithDefault(d, u64 - 1, a).GetUint64());
+
+    EXPECT_TRUE(Pointer("/foo/true").GetWithDefault(d, true, a).IsTrue());
+    EXPECT_TRUE(Pointer("/foo/true").GetWithDefault(d, false, a).IsTrue());
+
+    EXPECT_TRUE(Pointer("/foo/false").GetWithDefault(d, false, a).IsFalse());
+    EXPECT_TRUE(Pointer("/foo/false").GetWithDefault(d, true, a).IsFalse());
+
+    // StringRef version
+    EXPECT_STREQ("Hello", Pointer("/foo/hello").GetWithDefault(d, "Hello", a).GetString());
+
+    // Copy string version
+    {
+        char buffer[256];
+        strcpy(buffer, "World");
+        EXPECT_STREQ("World", Pointer("/foo/world").GetWithDefault(d, buffer, a).GetString());
+        memset(buffer, 0, sizeof(buffer));
+    }
+    EXPECT_STREQ("World", GetValueByPointer(d, "/foo/world")->GetString());
+
+#if RAPIDJSON_HAS_STDSTRING
+    EXPECT_STREQ("C++", Pointer("/foo/C++").GetWithDefault(d, std::string("C++"), a).GetString());
+#endif
+}
+
+TEST(Pointer, GetWithDefault_NoAllocator) {
+    Document d;
+    d.Parse(kJson);
+
+    // Value version
+    const Value v("qux");
+    EXPECT_TRUE(Value("bar") == Pointer("/foo/0").GetWithDefault(d, v));
+    EXPECT_TRUE(Value("baz") == Pointer("/foo/1").GetWithDefault(d, v));
+    EXPECT_TRUE(Value("qux") == Pointer("/foo/2").GetWithDefault(d, v));
+    EXPECT_TRUE(Value("last") == Pointer("/foo/-").GetWithDefault(d, Value("last").Move()));
+    EXPECT_STREQ("last", d["foo"][3].GetString());
+
+    EXPECT_TRUE(Pointer("/foo/null").GetWithDefault(d, Value().Move()).IsNull());
+    EXPECT_TRUE(Pointer("/foo/null").GetWithDefault(d, "x").IsNull());
+
+    // Generic version
+    EXPECT_EQ(-1, Pointer("/foo/int").GetWithDefault(d, -1).GetInt());
+    EXPECT_EQ(-1, Pointer("/foo/int").GetWithDefault(d, -2).GetInt());
+    EXPECT_EQ(0x87654321, Pointer("/foo/uint").GetWithDefault(d, 0x87654321).GetUint());
+    EXPECT_EQ(0x87654321, Pointer("/foo/uint").GetWithDefault(d, 0x12345678).GetUint());
+
+    const int64_t i64 = static_cast<int64_t>(RAPIDJSON_UINT64_C2(0x80000000, 0));
+    EXPECT_EQ(i64, Pointer("/foo/int64").GetWithDefault(d, i64).GetInt64());
+    EXPECT_EQ(i64, Pointer("/foo/int64").GetWithDefault(d, i64 + 1).GetInt64());
+
+    const uint64_t u64 = RAPIDJSON_UINT64_C2(0xFFFFFFFFF, 0xFFFFFFFFF);
+    EXPECT_EQ(u64, Pointer("/foo/uint64").GetWithDefault(d, u64).GetUint64());
+    EXPECT_EQ(u64, Pointer("/foo/uint64").GetWithDefault(d, u64 - 1).GetUint64());
+
+    EXPECT_TRUE(Pointer("/foo/true").GetWithDefault(d, true).IsTrue());
+    EXPECT_TRUE(Pointer("/foo/true").GetWithDefault(d, false).IsTrue());
+
+    EXPECT_TRUE(Pointer("/foo/false").GetWithDefault(d, false).IsFalse());
+    EXPECT_TRUE(Pointer("/foo/false").GetWithDefault(d, true).IsFalse());
+
+    // StringRef version
+    EXPECT_STREQ("Hello", Pointer("/foo/hello").GetWithDefault(d, "Hello").GetString());
+
+    // Copy string version
+    {
+        char buffer[256];
+        strcpy(buffer, "World");
+        EXPECT_STREQ("World", Pointer("/foo/world").GetWithDefault(d, buffer).GetString());
+        memset(buffer, 0, sizeof(buffer));
+    }
+    EXPECT_STREQ("World", GetValueByPointer(d, "/foo/world")->GetString());
+
+#if RAPIDJSON_HAS_STDSTRING
+    EXPECT_STREQ("C++", Pointer("/foo/C++").GetWithDefault(d, std::string("C++")).GetString());
+#endif
+}
+
+TEST(Pointer, Set) {
+    Document d;
+    d.Parse(kJson);
+    Document::AllocatorType& a = d.GetAllocator();
+    
+    // Value version
+    Pointer("/foo/0").Set(d, Value(123).Move(), a);
+    EXPECT_EQ(123, d["foo"][0].GetInt());
+
+    Pointer("/foo/-").Set(d, Value(456).Move(), a);
+    EXPECT_EQ(456, d["foo"][2].GetInt());
+
+    Pointer("/foo/null").Set(d, Value().Move(), a);
+    EXPECT_TRUE(GetValueByPointer(d, "/foo/null")->IsNull());
+
+    // Const Value version
+    const Value foo(d["foo"], a);
+    Pointer("/clone").Set(d, foo, a);
+    EXPECT_EQ(foo, *GetValueByPointer(d, "/clone"));
+
+    // Generic version
+    Pointer("/foo/int").Set(d, -1, a);
+    EXPECT_EQ(-1, GetValueByPointer(d, "/foo/int")->GetInt());
+
+    Pointer("/foo/uint").Set(d, 0x87654321, a);
+    EXPECT_EQ(0x87654321, GetValueByPointer(d, "/foo/uint")->GetUint());
+
+    const int64_t i64 = static_cast<int64_t>(RAPIDJSON_UINT64_C2(0x80000000, 0));
+    Pointer("/foo/int64").Set(d, i64, a);
+    EXPECT_EQ(i64, GetValueByPointer(d, "/foo/int64")->GetInt64());
+
+    const uint64_t u64 = RAPIDJSON_UINT64_C2(0xFFFFFFFFF, 0xFFFFFFFFF);
+    Pointer("/foo/uint64").Set(d, u64, a);
+    EXPECT_EQ(u64, GetValueByPointer(d, "/foo/uint64")->GetUint64());
+
+    Pointer("/foo/true").Set(d, true, a);
+    EXPECT_TRUE(GetValueByPointer(d, "/foo/true")->IsTrue());
+
+    Pointer("/foo/false").Set(d, false, a);
+    EXPECT_TRUE(GetValueByPointer(d, "/foo/false")->IsFalse());
+
+    // StringRef version
+    Pointer("/foo/hello").Set(d, "Hello", a);
+    EXPECT_STREQ("Hello", GetValueByPointer(d, "/foo/hello")->GetString());
+
+    // Copy string version
+    {
+        char buffer[256];
+        strcpy(buffer, "World");
+        Pointer("/foo/world").Set(d, buffer, a);
+        memset(buffer, 0, sizeof(buffer));
+    }
+    EXPECT_STREQ("World", GetValueByPointer(d, "/foo/world")->GetString());
+
+#if RAPIDJSON_HAS_STDSTRING
+    Pointer("/foo/c++").Set(d, std::string("C++"), a);
+    EXPECT_STREQ("C++", GetValueByPointer(d, "/foo/c++")->GetString());
+#endif
+}
+
+TEST(Pointer, Set_NoAllocator) {
+    Document d;
+    d.Parse(kJson);
+    
+    // Value version
+    Pointer("/foo/0").Set(d, Value(123).Move());
+    EXPECT_EQ(123, d["foo"][0].GetInt());
+
+    Pointer("/foo/-").Set(d, Value(456).Move());
+    EXPECT_EQ(456, d["foo"][2].GetInt());
+
+    Pointer("/foo/null").Set(d, Value().Move());
+    EXPECT_TRUE(GetValueByPointer(d, "/foo/null")->IsNull());
+
+    // Const Value version
+    const Value foo(d["foo"], d.GetAllocator());
+    Pointer("/clone").Set(d, foo);
+    EXPECT_EQ(foo, *GetValueByPointer(d, "/clone"));
+
+    // Generic version
+    Pointer("/foo/int").Set(d, -1);
+    EXPECT_EQ(-1, GetValueByPointer(d, "/foo/int")->GetInt());
+
+    Pointer("/foo/uint").Set(d, 0x87654321);
+    EXPECT_EQ(0x87654321, GetValueByPointer(d, "/foo/uint")->GetUint());
+
+    const int64_t i64 = static_cast<int64_t>(RAPIDJSON_UINT64_C2(0x80000000, 0));
+    Pointer("/foo/int64").Set(d, i64);
+    EXPECT_EQ(i64, GetValueByPointer(d, "/foo/int64")->GetInt64());
+
+    const uint64_t u64 = RAPIDJSON_UINT64_C2(0xFFFFFFFFF, 0xFFFFFFFFF);
+    Pointer("/foo/uint64").Set(d, u64);
+    EXPECT_EQ(u64, GetValueByPointer(d, "/foo/uint64")->GetUint64());
+
+    Pointer("/foo/true").Set(d, true);
+    EXPECT_TRUE(GetValueByPointer(d, "/foo/true")->IsTrue());
+
+    Pointer("/foo/false").Set(d, false);
+    EXPECT_TRUE(GetValueByPointer(d, "/foo/false")->IsFalse());
+
+    // StringRef version
+    Pointer("/foo/hello").Set(d, "Hello");
+    EXPECT_STREQ("Hello", GetValueByPointer(d, "/foo/hello")->GetString());
+
+    // Copy string version
+    {
+        char buffer[256];
+        strcpy(buffer, "World");
+        Pointer("/foo/world").Set(d, buffer);
+        memset(buffer, 0, sizeof(buffer));
+    }
+    EXPECT_STREQ("World", GetValueByPointer(d, "/foo/world")->GetString());
+
+#if RAPIDJSON_HAS_STDSTRING
+    Pointer("/foo/c++").Set(d, std::string("C++"));
+    EXPECT_STREQ("C++", GetValueByPointer(d, "/foo/c++")->GetString());
+#endif
+}
+
+TEST(Pointer, Swap_Value) {
+    Document d;
+    d.Parse(kJson);
+    Document::AllocatorType& a = d.GetAllocator();
+    Pointer("/foo/0").Swap(d, *Pointer("/foo/1").Get(d), a);
+    EXPECT_STREQ("baz", d["foo"][0].GetString());
+    EXPECT_STREQ("bar", d["foo"][1].GetString());
+}
+
+TEST(Pointer, Swap_Value_NoAllocator) {
+    Document d;
+    d.Parse(kJson);
+    Pointer("/foo/0").Swap(d, *Pointer("/foo/1").Get(d));
+    EXPECT_STREQ("baz", d["foo"][0].GetString());
+    EXPECT_STREQ("bar", d["foo"][1].GetString());
+}
+
+// Pointer::Erase: removing the root or nonexistent tokens fails;
+// erasing array elements shifts the remainder; erasing a member
+// removes it from the parent object.
+TEST(Pointer, Erase) {
+    Document d;
+    d.Parse(kJson);
+
+    // Root pointer and any path through a missing token cannot be erased.
+    EXPECT_FALSE(Pointer("").Erase(d));
+    EXPECT_FALSE(Pointer("/nonexist").Erase(d));
+    EXPECT_FALSE(Pointer("/nonexist/nonexist").Erase(d));
+    EXPECT_FALSE(Pointer("/foo/nonexist").Erase(d));
+    EXPECT_FALSE(Pointer("/foo/nonexist/nonexist").Erase(d));
+    EXPECT_FALSE(Pointer("/foo/0/nonexist").Erase(d));
+    EXPECT_FALSE(Pointer("/foo/0/nonexist/nonexist").Erase(d));
+    EXPECT_FALSE(Pointer("/foo/2/nonexist").Erase(d));
+    // Erasing /foo/0 shifts "baz" down to index 0.
+    EXPECT_TRUE(Pointer("/foo/0").Erase(d));
+    EXPECT_EQ(1u, d["foo"].Size());
+    EXPECT_STREQ("baz", d["foo"][0].GetString());
+    EXPECT_TRUE(Pointer("/foo/0").Erase(d));
+    EXPECT_TRUE(d["foo"].Empty());
+    EXPECT_TRUE(Pointer("/foo").Erase(d));
+    EXPECT_TRUE(Pointer("/foo").Get(d) == 0);
+
+    // Build a nested chain /a/0/b/0, then tear it down leaf-first.
+    Pointer("/a/0/b/0").Create(d);
+
+    EXPECT_TRUE(Pointer("/a/0/b/0").Get(d) != 0);
+    EXPECT_TRUE(Pointer("/a/0/b/0").Erase(d));
+    EXPECT_TRUE(Pointer("/a/0/b/0").Get(d) == 0);
+
+    EXPECT_TRUE(Pointer("/a/0/b").Get(d) != 0);
+    EXPECT_TRUE(Pointer("/a/0/b").Erase(d));
+    EXPECT_TRUE(Pointer("/a/0/b").Get(d) == 0);
+
+    EXPECT_TRUE(Pointer("/a/0").Get(d) != 0);
+    EXPECT_TRUE(Pointer("/a/0").Erase(d));
+    EXPECT_TRUE(Pointer("/a/0").Get(d) == 0);
+
+    EXPECT_TRUE(Pointer("/a").Get(d) != 0);
+    EXPECT_TRUE(Pointer("/a").Erase(d));
+    EXPECT_TRUE(Pointer("/a").Get(d) == 0);
+}
+
+// Free-function CreateValueByPointer with explicit allocator, using both
+// the Pointer-object and string-literal overloads; the returned reference
+// must alias the value stored in the document.
+TEST(Pointer, CreateValueByPointer) {
+    Document d;
+    Document::AllocatorType& a = d.GetAllocator();
+
+    {
+        Value& v = CreateValueByPointer(d, Pointer("/foo/0"), a);
+        EXPECT_EQ(&d["foo"][0], &v);
+    }
+    {
+        Value& v = CreateValueByPointer(d, "/foo/1", a);
+        EXPECT_EQ(&d["foo"][1], &v);
+    }
+}
+
+// Same as above via the allocator-less overloads (document allocator
+// used implicitly).
+TEST(Pointer, CreateValueByPointer_NoAllocator) {
+    Document d;
+
+    {
+        Value& v = CreateValueByPointer(d, Pointer("/foo/0"));
+        EXPECT_EQ(&d["foo"][0], &v);
+    }
+    {
+        Value& v = CreateValueByPointer(d, "/foo/1");
+        EXPECT_EQ(&d["foo"][1], &v);
+    }
+}
+
+// GetValueByPointer: successful lookups return the stored value's
+// address; failed lookups return null and report the index of the
+// first token that could not be resolved.
+TEST(Pointer, GetValueByPointer) {
+    Document d;
+    d.Parse(kJson);
+
+    EXPECT_EQ(&d["foo"][0], GetValueByPointer(d, Pointer("/foo/0")));
+    EXPECT_EQ(&d["foo"][0], GetValueByPointer(d, "/foo/0"));
+
+    // unresolvedTokenIndex is 0-based: index 1 means the second token
+    // ("2", "a", ...) failed to resolve against /foo.
+    size_t unresolvedTokenIndex;
+    EXPECT_TRUE(GetValueByPointer(d, "/foo/2", &unresolvedTokenIndex) == 0); // Out of boundary
+    EXPECT_EQ(1u, unresolvedTokenIndex);
+    EXPECT_TRUE(GetValueByPointer(d, "/foo/a", &unresolvedTokenIndex) == 0); // "/foo" is an array, cannot query by "a"
+    EXPECT_EQ(1u, unresolvedTokenIndex);
+    EXPECT_TRUE(GetValueByPointer(d, "/foo/0/0", &unresolvedTokenIndex) == 0); // "/foo/0" is an string, cannot further query
+    EXPECT_EQ(2u, unresolvedTokenIndex);
+    EXPECT_TRUE(GetValueByPointer(d, "/foo/0/a", &unresolvedTokenIndex) == 0); // "/foo/0" is an string, cannot further query
+    EXPECT_EQ(2u, unresolvedTokenIndex);
+
+    // const version: same checks through a const Value& view of the document.
+    const Value& v = d;
+    EXPECT_EQ(&d["foo"][0], GetValueByPointer(v, Pointer("/foo/0")));
+    EXPECT_EQ(&d["foo"][0], GetValueByPointer(v, "/foo/0"));
+
+    EXPECT_TRUE(GetValueByPointer(v, "/foo/2", &unresolvedTokenIndex) == 0); // Out of boundary
+    EXPECT_EQ(1u, unresolvedTokenIndex);
+    EXPECT_TRUE(GetValueByPointer(v, "/foo/a", &unresolvedTokenIndex) == 0); // "/foo" is an array, cannot query by "a"
+    EXPECT_EQ(1u, unresolvedTokenIndex);
+    EXPECT_TRUE(GetValueByPointer(v, "/foo/0/0", &unresolvedTokenIndex) == 0); // "/foo/0" is an string, cannot further query
+    EXPECT_EQ(2u, unresolvedTokenIndex);
+    EXPECT_TRUE(GetValueByPointer(v, "/foo/0/a", &unresolvedTokenIndex) == 0); // "/foo/0" is an string, cannot further query
+    EXPECT_EQ(2u, unresolvedTokenIndex);
+
+}
+
+// GetValueByPointerWithDefault, Pointer-object + explicit allocator:
+// existing paths return the stored value (the default is ignored);
+// missing paths are created with the default and subsequent lookups
+// keep the first-stored value.
+TEST(Pointer, GetValueByPointerWithDefault_Pointer) {
+    Document d;
+    d.Parse(kJson);
+
+    Document::AllocatorType& a = d.GetAllocator();
+    const Value v("qux");
+    EXPECT_TRUE(Value("bar") == GetValueByPointerWithDefault(d, Pointer("/foo/0"), v, a));
+    EXPECT_TRUE(Value("bar") == GetValueByPointerWithDefault(d, Pointer("/foo/0"), v, a));
+    EXPECT_TRUE(Value("baz") == GetValueByPointerWithDefault(d, Pointer("/foo/1"), v, a));
+    EXPECT_TRUE(Value("qux") == GetValueByPointerWithDefault(d, Pointer("/foo/2"), v, a));
+    // "-" appends to the array (JSON Pointer "past-the-end" token).
+    EXPECT_TRUE(Value("last") == GetValueByPointerWithDefault(d, Pointer("/foo/-"), Value("last").Move(), a));
+    EXPECT_STREQ("last", d["foo"][3].GetString());
+
+    // Once created as null, the stored null wins over later defaults.
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, Pointer("/foo/null"), Value().Move(), a).IsNull());
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, Pointer("/foo/null"), "x", a).IsNull());
+
+    // Generic version
+    EXPECT_EQ(-1, GetValueByPointerWithDefault(d, Pointer("/foo/int"), -1, a).GetInt());
+    EXPECT_EQ(-1, GetValueByPointerWithDefault(d, Pointer("/foo/int"), -2, a).GetInt());
+    EXPECT_EQ(0x87654321, GetValueByPointerWithDefault(d, Pointer("/foo/uint"), 0x87654321, a).GetUint());
+    EXPECT_EQ(0x87654321, GetValueByPointerWithDefault(d, Pointer("/foo/uint"), 0x12345678, a).GetUint());
+
+    const int64_t i64 = static_cast<int64_t>(RAPIDJSON_UINT64_C2(0x80000000, 0));
+    EXPECT_EQ(i64, GetValueByPointerWithDefault(d, Pointer("/foo/int64"), i64, a).GetInt64());
+    EXPECT_EQ(i64, GetValueByPointerWithDefault(d, Pointer("/foo/int64"), i64 + 1, a).GetInt64());
+
+    // NOTE(review): 0xFFFFFFFFF has nine F's (36 bits) — with the C2
+    // macro this still evaluates to UINT64_MAX; confirm intent upstream.
+    const uint64_t u64 = RAPIDJSON_UINT64_C2(0xFFFFFFFFF, 0xFFFFFFFFF);
+    EXPECT_EQ(u64, GetValueByPointerWithDefault(d, Pointer("/foo/uint64"), u64, a).GetUint64());
+    EXPECT_EQ(u64, GetValueByPointerWithDefault(d, Pointer("/foo/uint64"), u64 - 1, a).GetUint64());
+
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, Pointer("/foo/true"), true, a).IsTrue());
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, Pointer("/foo/true"), false, a).IsTrue());
+
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, Pointer("/foo/false"), false, a).IsFalse());
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, Pointer("/foo/false"), true, a).IsFalse());
+
+    // StringRef version
+    EXPECT_STREQ("Hello", GetValueByPointerWithDefault(d, Pointer("/foo/hello"), "Hello", a).GetString());
+
+    // Copy string version
+    {
+        char buffer[256];
+        strcpy(buffer, "World");
+        EXPECT_STREQ("World", GetValueByPointerWithDefault(d, Pointer("/foo/world"), buffer, a).GetString());
+        // Wipe the source buffer to prove the string was deep-copied.
+        memset(buffer, 0, sizeof(buffer));
+    }
+    EXPECT_STREQ("World", GetValueByPointer(d, Pointer("/foo/world"))->GetString());
+
+#if RAPIDJSON_HAS_STDSTRING
+    EXPECT_STREQ("C++", GetValueByPointerWithDefault(d, Pointer("/foo/C++"), std::string("C++"), a).GetString());
+#endif
+}
+
+// GetValueByPointerWithDefault, string-literal pointer + explicit
+// allocator. Mirrors the _Pointer test above with the convenience
+// overload that parses the pointer from a source string.
+TEST(Pointer, GetValueByPointerWithDefault_String) {
+    Document d;
+    d.Parse(kJson);
+
+    Document::AllocatorType& a = d.GetAllocator();
+    const Value v("qux");
+    EXPECT_TRUE(Value("bar") == GetValueByPointerWithDefault(d, "/foo/0", v, a));
+    EXPECT_TRUE(Value("bar") == GetValueByPointerWithDefault(d, "/foo/0", v, a));
+    EXPECT_TRUE(Value("baz") == GetValueByPointerWithDefault(d, "/foo/1", v, a));
+    EXPECT_TRUE(Value("qux") == GetValueByPointerWithDefault(d, "/foo/2", v, a));
+    // "-" appends to the array.
+    EXPECT_TRUE(Value("last") == GetValueByPointerWithDefault(d, "/foo/-", Value("last").Move(), a));
+    EXPECT_STREQ("last", d["foo"][3].GetString());
+
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, "/foo/null", Value().Move(), a).IsNull());
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, "/foo/null", "x", a).IsNull());
+
+    // Generic version
+    EXPECT_EQ(-1, GetValueByPointerWithDefault(d, "/foo/int", -1, a).GetInt())
+    EXPECT_EQ(-1, GetValueByPointerWithDefault(d, "/foo/int", -2, a).GetInt());
+    EXPECT_EQ(0x87654321, GetValueByPointerWithDefault(d, "/foo/uint", 0x87654321, a).GetUint());
+    EXPECT_EQ(0x87654321, GetValueByPointerWithDefault(d, "/foo/uint", 0x12345678, a).GetUint());
+
+    const int64_t i64 = static_cast<int64_t>(RAPIDJSON_UINT64_C2(0x80000000, 0));
+    EXPECT_EQ(i64, GetValueByPointerWithDefault(d, "/foo/int64", i64, a).GetInt64());
+    EXPECT_EQ(i64, GetValueByPointerWithDefault(d, "/foo/int64", i64 + 1, a).GetInt64());
+
+    const uint64_t u64 = RAPIDJSON_UINT64_C2(0xFFFFFFFFF, 0xFFFFFFFFF);
+    EXPECT_EQ(u64, GetValueByPointerWithDefault(d, "/foo/uint64", u64, a).GetUint64());
+    EXPECT_EQ(u64, GetValueByPointerWithDefault(d, "/foo/uint64", u64 - 1, a).GetUint64());
+
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, "/foo/true", true, a).IsTrue());
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, "/foo/true", false, a).IsTrue());
+
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, "/foo/false", false, a).IsFalse());
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, "/foo/false", true, a).IsFalse());
+
+    // StringRef version
+    EXPECT_STREQ("Hello", GetValueByPointerWithDefault(d, "/foo/hello", "Hello", a).GetString());
+
+    // Copy string version
+    {
+        char buffer[256];
+        strcpy(buffer, "World");
+        EXPECT_STREQ("World", GetValueByPointerWithDefault(d, "/foo/world", buffer, a).GetString());
+        // Wipe the buffer to prove the stored string was copied.
+        memset(buffer, 0, sizeof(buffer));
+    }
+    EXPECT_STREQ("World", GetValueByPointer(d, "/foo/world")->GetString());
+
+#if RAPIDJSON_HAS_STDSTRING
+    EXPECT_STREQ("C++", GetValueByPointerWithDefault(d, "/foo/C++", std::string("C++"), a).GetString());
+#endif
+}
+
+// GetValueByPointerWithDefault, Pointer-object overload without an
+// explicit allocator (the document's allocator is used implicitly).
+TEST(Pointer, GetValueByPointerWithDefault_Pointer_NoAllocator) {
+    Document d;
+    d.Parse(kJson);
+
+    const Value v("qux");
+    EXPECT_TRUE(Value("bar") == GetValueByPointerWithDefault(d, Pointer("/foo/0"), v));
+    EXPECT_TRUE(Value("bar") == GetValueByPointerWithDefault(d, Pointer("/foo/0"), v));
+    EXPECT_TRUE(Value("baz") == GetValueByPointerWithDefault(d, Pointer("/foo/1"), v));
+    EXPECT_TRUE(Value("qux") == GetValueByPointerWithDefault(d, Pointer("/foo/2"), v));
+    // "-" appends to the array.
+    EXPECT_TRUE(Value("last") == GetValueByPointerWithDefault(d, Pointer("/foo/-"), Value("last").Move()));
+    EXPECT_STREQ("last", d["foo"][3].GetString());
+
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, Pointer("/foo/null"), Value().Move()).IsNull());
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, Pointer("/foo/null"), "x").IsNull());
+
+    // Generic version
+    EXPECT_EQ(-1, GetValueByPointerWithDefault(d, Pointer("/foo/int"), -1).GetInt());
+    EXPECT_EQ(-1, GetValueByPointerWithDefault(d, Pointer("/foo/int"), -2).GetInt());
+    EXPECT_EQ(0x87654321, GetValueByPointerWithDefault(d, Pointer("/foo/uint"), 0x87654321).GetUint());
+    EXPECT_EQ(0x87654321, GetValueByPointerWithDefault(d, Pointer("/foo/uint"), 0x12345678).GetUint());
+
+    const int64_t i64 = static_cast<int64_t>(RAPIDJSON_UINT64_C2(0x80000000, 0));
+    EXPECT_EQ(i64, GetValueByPointerWithDefault(d, Pointer("/foo/int64"), i64).GetInt64());
+    EXPECT_EQ(i64, GetValueByPointerWithDefault(d, Pointer("/foo/int64"), i64 + 1).GetInt64());
+
+    const uint64_t u64 = RAPIDJSON_UINT64_C2(0xFFFFFFFFF, 0xFFFFFFFFF);
+    EXPECT_EQ(u64, GetValueByPointerWithDefault(d, Pointer("/foo/uint64"), u64).GetUint64());
+    EXPECT_EQ(u64, GetValueByPointerWithDefault(d, Pointer("/foo/uint64"), u64 - 1).GetUint64());
+
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, Pointer("/foo/true"), true).IsTrue());
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, Pointer("/foo/true"), false).IsTrue());
+
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, Pointer("/foo/false"), false).IsFalse());
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, Pointer("/foo/false"), true).IsFalse());
+
+    // StringRef version
+    EXPECT_STREQ("Hello", GetValueByPointerWithDefault(d, Pointer("/foo/hello"), "Hello").GetString());
+
+    // Copy string version
+    {
+        char buffer[256];
+        strcpy(buffer, "World");
+        EXPECT_STREQ("World", GetValueByPointerWithDefault(d, Pointer("/foo/world"), buffer).GetString());
+        // Wipe the buffer to prove the stored string was copied.
+        memset(buffer, 0, sizeof(buffer));
+    }
+    EXPECT_STREQ("World", GetValueByPointer(d, Pointer("/foo/world"))->GetString());
+
+#if RAPIDJSON_HAS_STDSTRING
+    EXPECT_STREQ("C++", GetValueByPointerWithDefault(d, Pointer("/foo/C++"), std::string("C++")).GetString());
+#endif
+}
+
+// GetValueByPointerWithDefault, string-literal pointer overload without
+// an explicit allocator. Mirrors the _String test above; every call in
+// this test should exercise the string-pointer overload (the std::string
+// case previously used Pointer("/foo/C++"), inconsistent with the test's
+// purpose — fixed to the string form, matching the _String variant).
+TEST(Pointer, GetValueByPointerWithDefault_String_NoAllocator) {
+    Document d;
+    d.Parse(kJson);
+
+    const Value v("qux");
+    EXPECT_TRUE(Value("bar") == GetValueByPointerWithDefault(d, "/foo/0", v));
+    EXPECT_TRUE(Value("bar") == GetValueByPointerWithDefault(d, "/foo/0", v));
+    EXPECT_TRUE(Value("baz") == GetValueByPointerWithDefault(d, "/foo/1", v));
+    EXPECT_TRUE(Value("qux") == GetValueByPointerWithDefault(d, "/foo/2", v));
+    // "-" appends to the array.
+    EXPECT_TRUE(Value("last") == GetValueByPointerWithDefault(d, "/foo/-", Value("last").Move()));
+    EXPECT_STREQ("last", d["foo"][3].GetString());
+
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, "/foo/null", Value().Move()).IsNull());
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, "/foo/null", "x").IsNull());
+
+    // Generic version
+    EXPECT_EQ(-1, GetValueByPointerWithDefault(d, "/foo/int", -1).GetInt());
+    EXPECT_EQ(-1, GetValueByPointerWithDefault(d, "/foo/int", -2).GetInt());
+    EXPECT_EQ(0x87654321, GetValueByPointerWithDefault(d, "/foo/uint", 0x87654321).GetUint());
+    EXPECT_EQ(0x87654321, GetValueByPointerWithDefault(d, "/foo/uint", 0x12345678).GetUint());
+
+    const int64_t i64 = static_cast<int64_t>(RAPIDJSON_UINT64_C2(0x80000000, 0));
+    EXPECT_EQ(i64, GetValueByPointerWithDefault(d, "/foo/int64", i64).GetInt64());
+    EXPECT_EQ(i64, GetValueByPointerWithDefault(d, "/foo/int64", i64 + 1).GetInt64());
+
+    const uint64_t u64 = RAPIDJSON_UINT64_C2(0xFFFFFFFFF, 0xFFFFFFFFF);
+    EXPECT_EQ(u64, GetValueByPointerWithDefault(d, "/foo/uint64", u64).GetUint64());
+    EXPECT_EQ(u64, GetValueByPointerWithDefault(d, "/foo/uint64", u64 - 1).GetUint64());
+
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, "/foo/true", true).IsTrue());
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, "/foo/true", false).IsTrue());
+
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, "/foo/false", false).IsFalse());
+    EXPECT_TRUE(GetValueByPointerWithDefault(d, "/foo/false", true).IsFalse());
+
+    // StringRef version
+    EXPECT_STREQ("Hello", GetValueByPointerWithDefault(d, "/foo/hello", "Hello").GetString());
+
+    // Copy string version
+    {
+        char buffer[256];
+        strcpy(buffer, "World");
+        EXPECT_STREQ("World", GetValueByPointerWithDefault(d, "/foo/world", buffer).GetString());
+        // Wipe the buffer to prove the stored string was copied.
+        memset(buffer, 0, sizeof(buffer));
+    }
+    EXPECT_STREQ("World", GetValueByPointer(d, "/foo/world")->GetString());
+
+#if RAPIDJSON_HAS_STDSTRING
+    EXPECT_STREQ("C++", GetValueByPointerWithDefault(d, "/foo/C++", std::string("C++")).GetString());
+#endif
+}
+
+// SetValueByPointer, Pointer-object + explicit allocator: covers the
+// move-Value, copy-const-Value, generic scalar, StringRef, copy-string
+// and (optionally) std::string overloads.
+TEST(Pointer, SetValueByPointer_Pointer) {
+    Document d;
+    d.Parse(kJson);
+    Document::AllocatorType& a = d.GetAllocator();
+
+    // Value version
+    SetValueByPointer(d, Pointer("/foo/0"), Value(123).Move(), a);
+    EXPECT_EQ(123, d["foo"][0].GetInt());
+
+    SetValueByPointer(d, Pointer("/foo/null"), Value().Move(), a);
+    EXPECT_TRUE(GetValueByPointer(d, "/foo/null")->IsNull());
+
+    // Const Value version
+    const Value foo(d["foo"], d.GetAllocator());
+    SetValueByPointer(d, Pointer("/clone"), foo, a);
+    EXPECT_EQ(foo, *GetValueByPointer(d, "/clone"));
+
+    // Generic version
+    SetValueByPointer(d, Pointer("/foo/int"), -1, a);
+    EXPECT_EQ(-1, GetValueByPointer(d, "/foo/int")->GetInt());
+
+    SetValueByPointer(d, Pointer("/foo/uint"), 0x87654321, a);
+    EXPECT_EQ(0x87654321, GetValueByPointer(d, "/foo/uint")->GetUint());
+
+    const int64_t i64 = static_cast<int64_t>(RAPIDJSON_UINT64_C2(0x80000000, 0));
+    SetValueByPointer(d, Pointer("/foo/int64"), i64, a);
+    EXPECT_EQ(i64, GetValueByPointer(d, "/foo/int64")->GetInt64());
+
+    const uint64_t u64 = RAPIDJSON_UINT64_C2(0xFFFFFFFFF, 0xFFFFFFFFF);
+    SetValueByPointer(d, Pointer("/foo/uint64"), u64, a);
+    EXPECT_EQ(u64, GetValueByPointer(d, "/foo/uint64")->GetUint64());
+
+    SetValueByPointer(d, Pointer("/foo/true"), true, a);
+    EXPECT_TRUE(GetValueByPointer(d, "/foo/true")->IsTrue());
+
+    SetValueByPointer(d, Pointer("/foo/false"), false, a);
+    EXPECT_TRUE(GetValueByPointer(d, "/foo/false")->IsFalse());
+
+    // StringRef version
+    SetValueByPointer(d, Pointer("/foo/hello"), "Hello", a);
+    EXPECT_STREQ("Hello", GetValueByPointer(d, "/foo/hello")->GetString());
+
+    // Copy string version
+    {
+        char buffer[256];
+        strcpy(buffer, "World");
+        SetValueByPointer(d, Pointer("/foo/world"), buffer, a);
+        // Wipe the buffer to prove the stored string was copied.
+        memset(buffer, 0, sizeof(buffer));
+    }
+    EXPECT_STREQ("World", GetValueByPointer(d, "/foo/world")->GetString());
+
+#if RAPIDJSON_HAS_STDSTRING
+    SetValueByPointer(d, Pointer("/foo/c++"), std::string("C++"), a);
+    EXPECT_STREQ("C++", GetValueByPointer(d, "/foo/c++")->GetString());
+#endif
+}
+
+// SetValueByPointer, string-literal pointer + explicit allocator.
+// Mirrors the _Pointer test above using the source-string overloads.
+TEST(Pointer, SetValueByPointer_String) {
+    Document d;
+    d.Parse(kJson);
+    Document::AllocatorType& a = d.GetAllocator();
+
+    // Value version
+    SetValueByPointer(d, "/foo/0", Value(123).Move(), a);
+    EXPECT_EQ(123, d["foo"][0].GetInt());
+
+    SetValueByPointer(d, "/foo/null", Value().Move(), a);
+    EXPECT_TRUE(GetValueByPointer(d, "/foo/null")->IsNull());
+
+    // Const Value version
+    const Value foo(d["foo"], d.GetAllocator());
+    SetValueByPointer(d, "/clone", foo, a);
+    EXPECT_EQ(foo, *GetValueByPointer(d, "/clone"));
+
+    // Generic version
+    SetValueByPointer(d, "/foo/int", -1, a);
+    EXPECT_EQ(-1, GetValueByPointer(d, "/foo/int")->GetInt());
+
+    SetValueByPointer(d, "/foo/uint", 0x87654321, a);
+    EXPECT_EQ(0x87654321, GetValueByPointer(d, "/foo/uint")->GetUint());
+
+    const int64_t i64 = static_cast<int64_t>(RAPIDJSON_UINT64_C2(0x80000000, 0));
+    SetValueByPointer(d, "/foo/int64", i64, a);
+    EXPECT_EQ(i64, GetValueByPointer(d, "/foo/int64")->GetInt64());
+
+    const uint64_t u64 = RAPIDJSON_UINT64_C2(0xFFFFFFFFF, 0xFFFFFFFFF);
+    SetValueByPointer(d, "/foo/uint64", u64, a);
+    EXPECT_EQ(u64, GetValueByPointer(d, "/foo/uint64")->GetUint64());
+
+    SetValueByPointer(d, "/foo/true", true, a);
+    EXPECT_TRUE(GetValueByPointer(d, "/foo/true")->IsTrue());
+
+    SetValueByPointer(d, "/foo/false", false, a);
+    EXPECT_TRUE(GetValueByPointer(d, "/foo/false")->IsFalse());
+
+    // StringRef version
+    SetValueByPointer(d, "/foo/hello", "Hello", a);
+    EXPECT_STREQ("Hello", GetValueByPointer(d, "/foo/hello")->GetString());
+
+    // Copy string version
+    {
+        char buffer[256];
+        strcpy(buffer, "World");
+        SetValueByPointer(d, "/foo/world", buffer, a);
+        // Wipe the buffer to prove the stored string was copied.
+        memset(buffer, 0, sizeof(buffer));
+    }
+    EXPECT_STREQ("World", GetValueByPointer(d, "/foo/world")->GetString());
+
+#if RAPIDJSON_HAS_STDSTRING
+    SetValueByPointer(d, "/foo/c++", std::string("C++"), a);
+    EXPECT_STREQ("C++", GetValueByPointer(d, "/foo/c++")->GetString());
+#endif
+}
+
+// SetValueByPointer, Pointer-object overload without an explicit
+// allocator (document allocator used implicitly).
+TEST(Pointer, SetValueByPointer_Pointer_NoAllocator) {
+    Document d;
+    d.Parse(kJson);
+
+    // Value version
+    SetValueByPointer(d, Pointer("/foo/0"), Value(123).Move());
+    EXPECT_EQ(123, d["foo"][0].GetInt());
+
+    SetValueByPointer(d, Pointer("/foo/null"), Value().Move());
+    EXPECT_TRUE(GetValueByPointer(d, "/foo/null")->IsNull());
+
+    // Const Value version
+    const Value foo(d["foo"], d.GetAllocator());
+    SetValueByPointer(d, Pointer("/clone"), foo);
+    EXPECT_EQ(foo, *GetValueByPointer(d, "/clone"));
+
+    // Generic version
+    SetValueByPointer(d, Pointer("/foo/int"), -1);
+    EXPECT_EQ(-1, GetValueByPointer(d, "/foo/int")->GetInt());
+
+    SetValueByPointer(d, Pointer("/foo/uint"), 0x87654321);
+    EXPECT_EQ(0x87654321, GetValueByPointer(d, "/foo/uint")->GetUint());
+
+    const int64_t i64 = static_cast<int64_t>(RAPIDJSON_UINT64_C2(0x80000000, 0));
+    SetValueByPointer(d, Pointer("/foo/int64"), i64);
+    EXPECT_EQ(i64, GetValueByPointer(d, "/foo/int64")->GetInt64());
+
+    const uint64_t u64 = RAPIDJSON_UINT64_C2(0xFFFFFFFFF, 0xFFFFFFFFF);
+    SetValueByPointer(d, Pointer("/foo/uint64"), u64);
+    EXPECT_EQ(u64, GetValueByPointer(d, "/foo/uint64")->GetUint64());
+
+    SetValueByPointer(d, Pointer("/foo/true"), true);
+    EXPECT_TRUE(GetValueByPointer(d, "/foo/true")->IsTrue());
+
+    SetValueByPointer(d, Pointer("/foo/false"), false);
+    EXPECT_TRUE(GetValueByPointer(d, "/foo/false")->IsFalse());
+
+    // StringRef version
+    SetValueByPointer(d, Pointer("/foo/hello"), "Hello");
+    EXPECT_STREQ("Hello", GetValueByPointer(d, "/foo/hello")->GetString());
+
+    // Copy string version
+    {
+        char buffer[256];
+        strcpy(buffer, "World");
+        SetValueByPointer(d, Pointer("/foo/world"), buffer);
+        // Wipe the buffer to prove the stored string was copied.
+        memset(buffer, 0, sizeof(buffer));
+    }
+    EXPECT_STREQ("World", GetValueByPointer(d, "/foo/world")->GetString());
+
+#if RAPIDJSON_HAS_STDSTRING
+    SetValueByPointer(d, Pointer("/foo/c++"), std::string("C++"));
+    EXPECT_STREQ("C++", GetValueByPointer(d, "/foo/c++")->GetString());
+#endif
+}
+
+// SetValueByPointer, string-literal pointer overload without an
+// explicit allocator.
+TEST(Pointer, SetValueByPointer_String_NoAllocator) {
+    Document d;
+    d.Parse(kJson);
+
+    // Value version
+    SetValueByPointer(d, "/foo/0", Value(123).Move());
+    EXPECT_EQ(123, d["foo"][0].GetInt());
+
+    SetValueByPointer(d, "/foo/null", Value().Move());
+    EXPECT_TRUE(GetValueByPointer(d, "/foo/null")->IsNull());
+
+    // Const Value version
+    const Value foo(d["foo"], d.GetAllocator());
+    SetValueByPointer(d, "/clone", foo);
+    EXPECT_EQ(foo, *GetValueByPointer(d, "/clone"));
+
+    // Generic version
+    SetValueByPointer(d, "/foo/int", -1);
+    EXPECT_EQ(-1, GetValueByPointer(d, "/foo/int")->GetInt());
+
+    SetValueByPointer(d, "/foo/uint", 0x87654321);
+    EXPECT_EQ(0x87654321, GetValueByPointer(d, "/foo/uint")->GetUint());
+
+    const int64_t i64 = static_cast<int64_t>(RAPIDJSON_UINT64_C2(0x80000000, 0));
+    SetValueByPointer(d, "/foo/int64", i64);
+    EXPECT_EQ(i64, GetValueByPointer(d, "/foo/int64")->GetInt64());
+
+    const uint64_t u64 = RAPIDJSON_UINT64_C2(0xFFFFFFFFF, 0xFFFFFFFFF);
+    SetValueByPointer(d, "/foo/uint64", u64);
+    EXPECT_EQ(u64, GetValueByPointer(d, "/foo/uint64")->GetUint64());
+
+    SetValueByPointer(d, "/foo/true", true);
+    EXPECT_TRUE(GetValueByPointer(d, "/foo/true")->IsTrue());
+
+    SetValueByPointer(d, "/foo/false", false);
+    EXPECT_TRUE(GetValueByPointer(d, "/foo/false")->IsFalse());
+
+    // StringRef version
+    SetValueByPointer(d, "/foo/hello", "Hello");
+    EXPECT_STREQ("Hello", GetValueByPointer(d, "/foo/hello")->GetString());
+
+    // Copy string version
+    {
+        char buffer[256];
+        strcpy(buffer, "World");
+        SetValueByPointer(d, "/foo/world", buffer);
+        // Wipe the buffer to prove the stored string was copied.
+        memset(buffer, 0, sizeof(buffer));
+    }
+    EXPECT_STREQ("World", GetValueByPointer(d, "/foo/world")->GetString());
+
+#if RAPIDJSON_HAS_STDSTRING
+    SetValueByPointer(d, "/foo/c++", std::string("C++"));
+    EXPECT_STREQ("C++", GetValueByPointer(d, "/foo/c++")->GetString());
+#endif
+}
+
+// Free-function SwapValueByPointer with explicit allocator: both the
+// Pointer-object and string overloads; the second swap undoes the first.
+TEST(Pointer, SwapValueByPointer) {
+    Document d;
+    d.Parse(kJson);
+    Document::AllocatorType& a = d.GetAllocator();
+    SwapValueByPointer(d, Pointer("/foo/0"), *GetValueByPointer(d, "/foo/1"), a);
+    EXPECT_STREQ("baz", d["foo"][0].GetString());
+    EXPECT_STREQ("bar", d["foo"][1].GetString());
+
+    SwapValueByPointer(d, "/foo/0", *GetValueByPointer(d, "/foo/1"), a);
+    EXPECT_STREQ("bar", d["foo"][0].GetString());
+    EXPECT_STREQ("baz", d["foo"][1].GetString());
+}
+
+// Same as SwapValueByPointer via the allocator-less overloads.
+TEST(Pointer, SwapValueByPointer_NoAllocator) {
+    Document d;
+    d.Parse(kJson);
+    SwapValueByPointer(d, Pointer("/foo/0"), *GetValueByPointer(d, "/foo/1"));
+    EXPECT_STREQ("baz", d["foo"][0].GetString());
+    EXPECT_STREQ("bar", d["foo"][1].GetString());
+
+    SwapValueByPointer(d, "/foo/0", *GetValueByPointer(d, "/foo/1"));
+    EXPECT_STREQ("bar", d["foo"][0].GetString());
+    EXPECT_STREQ("baz", d["foo"][1].GetString());
+}
+
+// Free-function EraseValueByPointer, Pointer-object overload: erasing
+// the root or a missing member fails; array erase shifts elements.
+TEST(Pointer, EraseValueByPointer_Pointer) {
+    Document d;
+    d.Parse(kJson);
+
+    EXPECT_FALSE(EraseValueByPointer(d, Pointer("")));
+    EXPECT_FALSE(Pointer("/foo/nonexist").Erase(d));
+    EXPECT_TRUE(EraseValueByPointer(d, Pointer("/foo/0")));
+    EXPECT_EQ(1u, d["foo"].Size());
+    EXPECT_STREQ("baz", d["foo"][0].GetString());
+    EXPECT_TRUE(EraseValueByPointer(d, Pointer("/foo/0")));
+    EXPECT_TRUE(d["foo"].Empty());
+    EXPECT_TRUE(EraseValueByPointer(d, Pointer("/foo")));
+    EXPECT_TRUE(Pointer("/foo").Get(d) == 0);
+}
+
+// Same as EraseValueByPointer_Pointer via the string-literal overload.
+TEST(Pointer, EraseValueByPointer_String) {
+    Document d;
+    d.Parse(kJson);
+
+    EXPECT_FALSE(EraseValueByPointer(d, ""));
+    EXPECT_FALSE(Pointer("/foo/nonexist").Erase(d));
+    EXPECT_TRUE(EraseValueByPointer(d, "/foo/0"));
+    EXPECT_EQ(1u, d["foo"].Size());
+    EXPECT_STREQ("baz", d["foo"][0].GetString());
+    EXPECT_TRUE(EraseValueByPointer(d, "/foo/0"));
+    EXPECT_TRUE(d["foo"].Empty());
+    EXPECT_TRUE(EraseValueByPointer(d, "/foo"));
+    EXPECT_TRUE(Pointer("/foo").Get(d) == 0);
+}
+
+// Ambiguous tokens: a numeric token indexes an array but names a member
+// in an object, and Set may convert an array to an object when a
+// non-numeric token is applied to it.
+TEST(Pointer, Ambiguity) {
+    {
+        Document d;
+        d.Parse("{\"0\" : [123]}");
+        EXPECT_EQ(123, Pointer("/0/0").Get(d)->GetInt());
+        Pointer("/0/a").Set(d, 456);    // Change array [123] to object {456}
+        EXPECT_EQ(456, Pointer("/0/a").Get(d)->GetInt());
+    }
+
+    {
+        Document d;
+        EXPECT_FALSE(d.Parse("[{\"0\": 123}]").HasParseError());
+        EXPECT_EQ(123, Pointer("/0/0").Get(d)->GetInt());
+        Pointer("/0/1").Set(d, 456); // 1 is treated as "1" to index object
+        EXPECT_EQ(123, Pointer("/0/0").Get(d)->GetInt());
+        EXPECT_EQ(456, Pointer("/0/1").Get(d)->GetInt());
+    }
+}
+
+// operator< on GenericPointer: insert pointers (some deliberately
+// invalid, using bad "~" escapes) into a multimap and verify the
+// resulting order matches ordered_pointers, with invalid pointers
+// sorting after all valid ones.
+TEST(Pointer, LessThan) {
+    static const struct {
+        const char *str;
+        bool valid;
+    } pointers[] = {
+        { "/a/b",       true },
+        { "/a",         true },
+        { "/d/1",       true },
+        { "/d/2/z",     true },
+        { "/d/2/3",     true },
+        { "/d/2",       true },
+        { "/a/c",       true },
+        { "/e/f~g",     false },
+        { "/d/2/zz",    true },
+        { "/d/1",       true },
+        { "/d/2/z",     true },
+        { "/e/f~~g",    false },
+        { "/e/f~0g",    true },
+        { "/e/f~1g",    true },
+        { "/e/f.g",     true },
+        { "",           true }
+    };
+    // Expected iteration order; NULL slots mark where the two invalid
+    // pointers end up (at the end of the map).
+    static const char *ordered_pointers[] = {
+        "",
+        "/a",
+        "/a/b",
+        "/a/c",
+        "/d/1",
+        "/d/1",
+        "/d/2",
+        "/e/f.g",
+        "/e/f~1g",
+        "/e/f~0g",
+        "/d/2/3",
+        "/d/2/z",
+        "/d/2/z",
+        "/d/2/zz",
+        NULL,       // was invalid "/e/f~g"
+        NULL        // was invalid "/e/f~~g"
+    };
+    typedef MemoryPoolAllocator<> AllocatorType;
+    typedef GenericPointer<Value, AllocatorType> PointerType;
+    typedef std::multimap<PointerType, size_t> PointerMap;
+    PointerMap map;
+    PointerMap::iterator it;
+    AllocatorType allocator;
+    size_t i;
+
+    EXPECT_EQ(sizeof(pointers) / sizeof(pointers[0]),
+              sizeof(ordered_pointers) / sizeof(ordered_pointers[0]));
+
+    for (i = 0; i < sizeof(pointers) / sizeof(pointers[0]); ++i) {
+        it = map.insert(PointerMap::value_type(PointerType(pointers[i].str, &allocator), i));
+        // An invalid pointer must have been inserted at the very end of
+        // the map (++it intentionally advances past the new entry).
+        if (!it->first.IsValid()) {
+            EXPECT_EQ(++it, map.end());
+        }
+    }
+
+    // Walk the map in order and compare against the expected sequence.
+    for (i = 0, it = map.begin(); it != map.end(); ++it, ++i) {
+        EXPECT_TRUE(it->second < sizeof(pointers) / sizeof(pointers[0]));
+        EXPECT_EQ(it->first.IsValid(), pointers[it->second].valid);
+        EXPECT_TRUE(i < sizeof(ordered_pointers) / sizeof(ordered_pointers[0]));
+        EXPECT_EQ(it->first.IsValid(), !!ordered_pointers[i]);
+        if (it->first.IsValid()) {
+            // Round-trip through Stringify to confirm token equality.
+            std::stringstream ss;
+            OStreamWrapper os(ss);
+            EXPECT_TRUE(it->first.Stringify(os));
+            EXPECT_EQ(ss.str(), pointers[it->second].str);
+            EXPECT_EQ(ss.str(), ordered_pointers[i]);
+        }
+    }
+}
+
+// https://github.com/Tencent/rapidjson/issues/483
+namespace myjson {
+
+class MyAllocator
+{
+public:
+    static const bool kNeedFree = true;
+    void * Malloc(size_t _size) { return malloc(_size); }
+    void * Realloc(void *_org_p, size_t _org_size, size_t _new_size) { (void)_org_size; return realloc(_org_p, _new_size); }
+    static void Free(void *_p) { return free(_p); }
+};
+
+typedef rapidjson::GenericDocument<
+            rapidjson::UTF8<>,
+            rapidjson::MemoryPoolAllocator< MyAllocator >,
+            MyAllocator
+        > Document;
+
+typedef rapidjson::GenericPointer<
+            ::myjson::Document::ValueType,
+            MyAllocator
+        > Pointer;
+
+typedef ::myjson::Document::ValueType Value;
+
+}
+
+// Regression test for issue 483: GenericPointer::Set must compile and
+// run with a fully custom allocator (no reliance on CrtAllocator).
+TEST(Pointer, Issue483) {
+    std::string mystr, path;
+    myjson::Document document;
+    myjson::Value value(rapidjson::kStringType);
+    value.SetString(mystr.c_str(), static_cast<SizeType>(mystr.length()), document.GetAllocator());
+    myjson::Pointer(path.c_str()).Set(document, value, document.GetAllocator());
+}
diff --git a/test/unittest/prettywritertest.cpp b/test/unittest/prettywritertest.cpp
new file mode 100644
index 0000000..4bf02bd
--- /dev/null
+++ b/test/unittest/prettywritertest.cpp
@@ -0,0 +1,373 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+#include "rapidjson/reader.h"
+#include "rapidjson/prettywriter.h"
+#include "rapidjson/stringbuffer.h"
+#include "rapidjson/filewritestream.h"
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(c++98-compat)
+#endif
+
+using namespace rapidjson;
+
+static const char kJson[] = "{\"hello\":\"world\",\"t\":true,\"f\":false,\"n\":null,\"i\":123,\"pi\":3.1416,\"a\":[1,2,3,-1],\"u64\":1234567890123456789,\"i64\":-1234567890123456789}";
+static const char kPrettyJson[] =
+"{\n"
+"    \"hello\": \"world\",\n"
+"    \"t\": true,\n"
+"    \"f\": false,\n"
+"    \"n\": null,\n"
+"    \"i\": 123,\n"
+"    \"pi\": 3.1416,\n"
+"    \"a\": [\n"
+"        1,\n"
+"        2,\n"
+"        3,\n"
+"        -1\n"
+"    ],\n"
+"    \"u64\": 1234567890123456789,\n"
+"    \"i64\": -1234567890123456789\n"
+"}";
+
+static const char kPrettyJson_FormatOptions_SLA[] =
+"{\n"
+"    \"hello\": \"world\",\n"
+"    \"t\": true,\n"
+"    \"f\": false,\n"
+"    \"n\": null,\n"
+"    \"i\": 123,\n"
+"    \"pi\": 3.1416,\n"
+"    \"a\": [1, 2, 3, -1],\n"
+"    \"u64\": 1234567890123456789,\n"
+"    \"i64\": -1234567890123456789\n"
+"}";
+
+TEST(PrettyWriter, Basic) {
+    StringBuffer buffer;
+    PrettyWriter<StringBuffer> writer(buffer);
+    Reader reader;
+    StringStream s(kJson);
+    reader.Parse(s, writer);
+    EXPECT_STREQ(kPrettyJson, buffer.GetString());
+}
+
+TEST(PrettyWriter, FormatOptions) {
+    StringBuffer buffer;
+    PrettyWriter<StringBuffer> writer(buffer);
+    writer.SetFormatOptions(kFormatSingleLineArray);
+    Reader reader;
+    StringStream s(kJson);
+    reader.Parse(s, writer);
+    EXPECT_STREQ(kPrettyJson_FormatOptions_SLA, buffer.GetString());
+}
+
+TEST(PrettyWriter, SetIndent) {
+    StringBuffer buffer;
+    PrettyWriter<StringBuffer> writer(buffer);
+    writer.SetIndent('\t', 1);
+    Reader reader;
+    StringStream s(kJson);
+    reader.Parse(s, writer);
+    EXPECT_STREQ(
+        "{\n"
+        "\t\"hello\": \"world\",\n"
+        "\t\"t\": true,\n"
+        "\t\"f\": false,\n"
+        "\t\"n\": null,\n"
+        "\t\"i\": 123,\n"
+        "\t\"pi\": 3.1416,\n"
+        "\t\"a\": [\n"
+        "\t\t1,\n"
+        "\t\t2,\n"
+        "\t\t3,\n"
+        "\t\t-1\n"
+        "\t],\n"
+        "\t\"u64\": 1234567890123456789,\n"
+        "\t\"i64\": -1234567890123456789\n"
+        "}",
+        buffer.GetString());
+}
+
+TEST(PrettyWriter, String) {
+    StringBuffer buffer;
+    PrettyWriter<StringBuffer> writer(buffer);
+    EXPECT_TRUE(writer.StartArray());
+    EXPECT_TRUE(writer.String("Hello\n"));
+    EXPECT_TRUE(writer.EndArray());
+    EXPECT_STREQ("[\n    \"Hello\\n\"\n]", buffer.GetString());
+}
+
+#if RAPIDJSON_HAS_STDSTRING
+TEST(PrettyWriter, String_STDSTRING) {
+    StringBuffer buffer;
+    PrettyWriter<StringBuffer> writer(buffer);
+    EXPECT_TRUE(writer.StartArray());
+    EXPECT_TRUE(writer.String(std::string("Hello\n")));
+    EXPECT_TRUE(writer.EndArray());
+    EXPECT_STREQ("[\n    \"Hello\\n\"\n]", buffer.GetString());
+}
+#endif
+
+#include <sstream>
+
+class OStreamWrapper {
+public:
+    typedef char Ch;
+
+    OStreamWrapper(std::ostream& os) : os_(os) {}
+
+    Ch Peek() const { assert(false); return '\0'; }
+    Ch Take() { assert(false); return '\0'; }
+    size_t Tell() const { return 0; }
+
+    Ch* PutBegin() { assert(false); return 0; }
+    void Put(Ch c) { os_.put(c); }
+    void Flush() { os_.flush(); }
+    size_t PutEnd(Ch*) { assert(false); return 0; }
+
+private:
+    OStreamWrapper(const OStreamWrapper&);
+    OStreamWrapper& operator=(const OStreamWrapper&);
+
+    std::ostream& os_;
+};
+
+// For covering PutN() generic version
+TEST(PrettyWriter, OStreamWrapper) {
+    StringStream s(kJson);
+    
+    std::stringstream ss;
+    OStreamWrapper os(ss);
+    
+    PrettyWriter<OStreamWrapper> writer(os);
+
+    Reader reader;
+    reader.Parse(s, writer);
+    
+    std::string actual = ss.str();
+    EXPECT_STREQ(kPrettyJson, actual.c_str());
+}
+
+// For covering FileWriteStream::PutN()
+TEST(PrettyWriter, FileWriteStream) {
+    char filename[L_tmpnam];
+    FILE* fp = TempFile(filename);
+    ASSERT_TRUE(fp!=NULL);
+    char buffer[16];
+    FileWriteStream os(fp, buffer, sizeof(buffer));
+    PrettyWriter<FileWriteStream> writer(os);
+    Reader reader;
+    StringStream s(kJson);
+    reader.Parse(s, writer);
+    fclose(fp);
+
+    fp = fopen(filename, "rb");
+    fseek(fp, 0, SEEK_END);
+    size_t size = static_cast<size_t>(ftell(fp));
+    fseek(fp, 0, SEEK_SET);
+    char* json = static_cast<char*>(malloc(size + 1));
+    size_t readLength = fread(json, 1, size, fp);
+    json[readLength] = '\0';
+    fclose(fp);
+    remove(filename);
+    EXPECT_STREQ(kPrettyJson, json);
+    free(json);
+}
+
+TEST(PrettyWriter, RawValue) {
+    StringBuffer buffer;
+    PrettyWriter<StringBuffer> writer(buffer);
+    writer.StartObject();
+    writer.Key("a");
+    writer.Int(1);
+    writer.Key("raw");
+    const char json[] = "[\"Hello\\nWorld\", 123.456]";
+    writer.RawValue(json, strlen(json), kArrayType);
+    writer.EndObject();
+    EXPECT_TRUE(writer.IsComplete());
+    EXPECT_STREQ(
+        "{\n"
+        "    \"a\": 1,\n"
+        "    \"raw\": [\"Hello\\nWorld\", 123.456]\n" // no indentation within raw value
+        "}",
+        buffer.GetString());
+}
+
+TEST(PrettyWriter, InvalidEventSequence) {
+    // {]
+    {
+        StringBuffer buffer;
+        PrettyWriter<StringBuffer> writer(buffer);
+        writer.StartObject();
+        EXPECT_THROW(writer.EndArray(), AssertException);
+        EXPECT_FALSE(writer.IsComplete());
+    }
+    
+    // [}
+    {
+        StringBuffer buffer;
+        PrettyWriter<StringBuffer> writer(buffer);
+        writer.StartArray();
+        EXPECT_THROW(writer.EndObject(), AssertException);
+        EXPECT_FALSE(writer.IsComplete());
+    }
+    
+    // { 1:
+    {
+        StringBuffer buffer;
+        PrettyWriter<StringBuffer> writer(buffer);
+        writer.StartObject();
+        EXPECT_THROW(writer.Int(1), AssertException);
+        EXPECT_FALSE(writer.IsComplete());
+    }
+    
+    // { 'a' }
+    {
+        StringBuffer buffer;
+        PrettyWriter<StringBuffer> writer(buffer);
+        writer.StartObject();
+        writer.Key("a");
+        EXPECT_THROW(writer.EndObject(), AssertException);
+        EXPECT_FALSE(writer.IsComplete());
+    }
+    
+    // { 'a':'b','c' }
+    {
+        StringBuffer buffer;
+        PrettyWriter<StringBuffer> writer(buffer);
+        writer.StartObject();
+        writer.Key("a");
+        writer.String("b");
+        writer.Key("c");
+        EXPECT_THROW(writer.EndObject(), AssertException);
+        EXPECT_FALSE(writer.IsComplete());
+    }
+}
+
+TEST(PrettyWriter, NaN) {
+    double nan = std::numeric_limits<double>::quiet_NaN();
+
+    EXPECT_TRUE(internal::Double(nan).IsNan());
+    StringBuffer buffer;
+    {
+        PrettyWriter<StringBuffer> writer(buffer);
+        EXPECT_FALSE(writer.Double(nan));
+    }
+    {
+        PrettyWriter<StringBuffer, UTF8<>, UTF8<>, CrtAllocator, kWriteNanAndInfFlag> writer(buffer);
+        EXPECT_TRUE(writer.Double(nan));
+        EXPECT_STREQ("NaN", buffer.GetString());
+    }
+    GenericStringBuffer<UTF16<> > buffer2;
+    PrettyWriter<GenericStringBuffer<UTF16<> > > writer2(buffer2);
+    EXPECT_FALSE(writer2.Double(nan));
+}
+
+TEST(PrettyWriter, Inf) {
+    double inf = std::numeric_limits<double>::infinity();
+
+    EXPECT_TRUE(internal::Double(inf).IsInf());
+    StringBuffer buffer;
+    {
+        PrettyWriter<StringBuffer> writer(buffer);
+        EXPECT_FALSE(writer.Double(inf));
+    }
+    {
+        PrettyWriter<StringBuffer> writer(buffer);
+        EXPECT_FALSE(writer.Double(-inf));
+    }
+    {
+        PrettyWriter<StringBuffer, UTF8<>, UTF8<>, CrtAllocator, kWriteNanAndInfFlag> writer(buffer);
+        EXPECT_TRUE(writer.Double(inf));
+    }
+    {
+        PrettyWriter<StringBuffer, UTF8<>, UTF8<>, CrtAllocator, kWriteNanAndInfFlag> writer(buffer);
+        EXPECT_TRUE(writer.Double(-inf));
+    }
+    EXPECT_STREQ("Infinity-Infinity", buffer.GetString());
+}
+
+TEST(PrettyWriter, Issue_889) {
+    char buf[100] = "Hello";
+    
+    StringBuffer buffer;
+    PrettyWriter<StringBuffer> writer(buffer);
+    writer.StartArray();
+    writer.String(buf);
+    writer.EndArray();
+    
+    EXPECT_STREQ("[\n    \"Hello\"\n]", buffer.GetString());
+    EXPECT_TRUE(writer.IsComplete());
+}
+
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+
+static PrettyWriter<StringBuffer> WriterGen(StringBuffer &target) {
+    PrettyWriter<StringBuffer> writer(target);
+    writer.StartObject();
+    writer.Key("a");
+    writer.Int(1);
+    return writer;
+}
+
+TEST(PrettyWriter, MoveCtor) {
+    StringBuffer buffer;
+    PrettyWriter<StringBuffer> writer(WriterGen(buffer));
+    writer.EndObject();
+    EXPECT_TRUE(writer.IsComplete());
+    EXPECT_STREQ(
+        "{\n"
+        "    \"a\": 1\n"
+        "}",
+        buffer.GetString());
+}
+#endif
+
+TEST(PrettyWriter, Issue_1336) {
+#define T(meth, val, expected)                          \
+    {                                                   \
+        StringBuffer buffer;                            \
+        PrettyWriter<StringBuffer> writer(buffer);      \
+        writer.meth(val);                               \
+                                                        \
+        EXPECT_STREQ(expected, buffer.GetString());     \
+        EXPECT_TRUE(writer.IsComplete());               \
+    }
+
+    T(Bool, false, "false");
+    T(Bool, true, "true");
+    T(Int, 0, "0");
+    T(Uint, 0, "0");
+    T(Int64, 0, "0");
+    T(Uint64, 0, "0");
+    T(Double, 0, "0.0");
+    T(String, "Hello", "\"Hello\"");
+#undef T
+
+    StringBuffer buffer;
+    PrettyWriter<StringBuffer> writer(buffer);
+    writer.Null();
+
+    EXPECT_STREQ("null", buffer.GetString());
+    EXPECT_TRUE(writer.IsComplete());
+}
+
+#ifdef __clang__
+RAPIDJSON_DIAG_POP
+#endif
diff --git a/test/unittest/readertest.cpp b/test/unittest/readertest.cpp
new file mode 100644
index 0000000..e3d5148
--- /dev/null
+++ b/test/unittest/readertest.cpp
@@ -0,0 +1,2201 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+
+#include "rapidjson/reader.h"
+#include "rapidjson/internal/dtoa.h"
+#include "rapidjson/internal/itoa.h"
+#include "rapidjson/memorystream.h"
+
+#include <limits>
+
+using namespace rapidjson;
+
+RAPIDJSON_DIAG_PUSH
+#ifdef __GNUC__
+RAPIDJSON_DIAG_OFF(effc++)
+RAPIDJSON_DIAG_OFF(float-equal)
+RAPIDJSON_DIAG_OFF(missing-noreturn)
+#if __GNUC__ >= 7
+RAPIDJSON_DIAG_OFF(dangling-else)
+#endif
+#endif // __GNUC__
+
+#ifdef __clang__
+RAPIDJSON_DIAG_OFF(variadic-macros)
+RAPIDJSON_DIAG_OFF(c++98-compat-pedantic)
+#endif
+
+template<bool expect>
+struct ParseBoolHandler : BaseReaderHandler<UTF8<>, ParseBoolHandler<expect> > {
+    ParseBoolHandler() : step_(0) {}
+    bool Default() { ADD_FAILURE(); return false; }
+    // gcc 4.8.x generates warning in EXPECT_EQ(bool, bool) on this gtest version.
+    // Workaround with EXPECT_TRUE().
+    bool Bool(bool b) { /*EXPECT_EQ(expect, b); */EXPECT_TRUE(expect == b);  ++step_; return true; }
+
+    unsigned step_;
+};
+
+TEST(Reader, ParseTrue) {
+    StringStream s("true");
+    ParseBoolHandler<true> h;
+    Reader reader;
+    reader.Parse(s, h);
+    EXPECT_EQ(1u, h.step_);
+}
+
+TEST(Reader, ParseFalse) {
+    StringStream s("false");
+    ParseBoolHandler<false> h;
+    Reader reader;
+    reader.Parse(s, h);
+    EXPECT_EQ(1u, h.step_);
+}
+
+struct ParseIntHandler : BaseReaderHandler<UTF8<>, ParseIntHandler> {
+    ParseIntHandler() : step_(0), actual_() {}
+    bool Default() { ADD_FAILURE(); return false; }
+    bool Int(int i) { actual_ = i; step_++; return true; }
+
+    unsigned step_;
+    int actual_;
+};
+
+struct ParseUintHandler : BaseReaderHandler<UTF8<>, ParseUintHandler> {
+    ParseUintHandler() : step_(0), actual_() {}
+    bool Default() { ADD_FAILURE(); return false; }
+    bool Uint(unsigned i) { actual_ = i; step_++; return true; }
+
+    unsigned step_;
+    unsigned actual_;
+};
+
+struct ParseInt64Handler : BaseReaderHandler<UTF8<>, ParseInt64Handler> {
+    ParseInt64Handler() : step_(0), actual_() {}
+    bool Default() { ADD_FAILURE(); return false; }
+    bool Int64(int64_t i) { actual_ = i; step_++; return true; }
+
+    unsigned step_;
+    int64_t actual_;
+};
+
+struct ParseUint64Handler : BaseReaderHandler<UTF8<>, ParseUint64Handler> {
+    ParseUint64Handler() : step_(0), actual_() {}
+    bool Default() { ADD_FAILURE(); return false; }
+    bool Uint64(uint64_t i) { actual_ = i; step_++; return true; }
+
+    unsigned step_;
+    uint64_t actual_;
+};
+
+struct ParseDoubleHandler : BaseReaderHandler<UTF8<>, ParseDoubleHandler> {
+    ParseDoubleHandler() : step_(0), actual_() {}
+    bool Default() { ADD_FAILURE(); return false; }
+    bool Double(double d) { actual_ = d; step_++; return true; }
+
+    unsigned step_;
+    double actual_;
+};
+
+TEST(Reader, ParseNumber_Integer) {
+#define TEST_INTEGER(Handler, str, x) \
+    { \
+        StringStream s(str); \
+        Handler h; \
+        Reader reader; \
+        reader.Parse(s, h); \
+        EXPECT_EQ(1u, h.step_); \
+        EXPECT_EQ(x, h.actual_); \
+    }
+
+    TEST_INTEGER(ParseUintHandler, "0", 0u);
+    TEST_INTEGER(ParseUintHandler, "123", 123u);
+        TEST_INTEGER(ParseUintHandler, "2147483648", 2147483648u);       // 2^31 (cannot be stored in int)
+    TEST_INTEGER(ParseUintHandler, "4294967295", 4294967295u);
+
+    TEST_INTEGER(ParseIntHandler, "-123", -123);
+    TEST_INTEGER(ParseIntHandler, "-2147483648", static_cast<int32_t>(0x80000000));     // -2^31 (min of int)
+
+    TEST_INTEGER(ParseUint64Handler, "4294967296", RAPIDJSON_UINT64_C2(1, 0));   // 2^32 (max of unsigned + 1, force to use uint64_t)
+    TEST_INTEGER(ParseUint64Handler, "18446744073709551615", RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0xFFFFFFFF));   // 2^64 - 1 (max of uint64_t)
+
+    TEST_INTEGER(ParseInt64Handler, "-2147483649", static_cast<int64_t>(RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x7FFFFFFF)));   // -2^31 -1 (min of int - 1, force to use int64_t)
+    TEST_INTEGER(ParseInt64Handler, "-9223372036854775808", static_cast<int64_t>(RAPIDJSON_UINT64_C2(0x80000000, 0x00000000)));       // -2^63 (min of int64_t)
+
+    // Random test for uint32_t/int32_t
+    {
+        union {
+            uint32_t u;
+            int32_t i;
+        }u;
+        Random r;
+
+        for (unsigned i = 0; i < 100000; i++) {
+            u.u = r();
+
+            char buffer[32];
+            *internal::u32toa(u.u, buffer) = '\0';
+            TEST_INTEGER(ParseUintHandler, buffer, u.u);
+
+            if (u.i < 0) {
+                *internal::i32toa(u.i, buffer) = '\0';
+                TEST_INTEGER(ParseIntHandler, buffer, u.i);
+            }
+        }
+    }
+
+    // Random test for uint64_t/int64_t
+    {
+        union {
+            uint64_t u;
+            int64_t i;
+        }u;
+        Random r;
+
+        for (unsigned i = 0; i < 100000; i++) {
+            u.u = uint64_t(r()) << 32;
+            u.u |= r();
+
+            char buffer[32];
+            if (u.u > uint64_t(4294967295u)) {
+                *internal::u64toa(u.u, buffer) = '\0';
+                TEST_INTEGER(ParseUint64Handler, buffer, u.u);
+            }
+
+            if (u.i < -int64_t(2147483648u)) {
+                *internal::i64toa(u.i, buffer) = '\0';
+                TEST_INTEGER(ParseInt64Handler, buffer, u.i);
+            }
+        }
+    }
+#undef TEST_INTEGER
+}
+
+template<bool fullPrecision>
+static void TestParseDouble() {
+#define TEST_DOUBLE(fullPrecision, str, x) \
+    { \
+        StringStream s(str); \
+        ParseDoubleHandler h; \
+        Reader reader; \
+        ASSERT_EQ(kParseErrorNone, reader.Parse<fullPrecision ? kParseFullPrecisionFlag : 0>(s, h).Code()); \
+        EXPECT_EQ(1u, h.step_); \
+        internal::Double e(x), a(h.actual_); \
+        if (fullPrecision) { \
+            EXPECT_EQ(e.Uint64Value(), a.Uint64Value()); \
+            if (e.Uint64Value() != a.Uint64Value()) \
+                printf("  String: %s\n  Actual: %.17g\nExpected: %.17g\n", str, h.actual_, x); \
+        } \
+        else { \
+            EXPECT_EQ(e.Sign(), a.Sign()); /* for 0.0 != -0.0 */ \
+            EXPECT_DOUBLE_EQ(x, h.actual_); \
+        } \
+    }
+
+    TEST_DOUBLE(fullPrecision, "0.0", 0.0);
+    TEST_DOUBLE(fullPrecision, "-0.0", -0.0); // For checking issue #289
+    TEST_DOUBLE(fullPrecision, "0e100", 0.0); // For checking issue #1249
+    TEST_DOUBLE(fullPrecision, "1.0", 1.0);
+    TEST_DOUBLE(fullPrecision, "-1.0", -1.0);
+    TEST_DOUBLE(fullPrecision, "1.5", 1.5);
+    TEST_DOUBLE(fullPrecision, "-1.5", -1.5);
+    TEST_DOUBLE(fullPrecision, "3.1416", 3.1416);
+    TEST_DOUBLE(fullPrecision, "1E10", 1E10);
+    TEST_DOUBLE(fullPrecision, "1e10", 1e10);
+    TEST_DOUBLE(fullPrecision, "1E+10", 1E+10);
+    TEST_DOUBLE(fullPrecision, "1E-10", 1E-10);
+    TEST_DOUBLE(fullPrecision, "-1E10", -1E10);
+    TEST_DOUBLE(fullPrecision, "-1e10", -1e10);
+    TEST_DOUBLE(fullPrecision, "-1E+10", -1E+10);
+    TEST_DOUBLE(fullPrecision, "-1E-10", -1E-10);
+    TEST_DOUBLE(fullPrecision, "1.234E+10", 1.234E+10);
+    TEST_DOUBLE(fullPrecision, "1.234E-10", 1.234E-10);
+    TEST_DOUBLE(fullPrecision, "1.79769e+308", 1.79769e+308);
+    TEST_DOUBLE(fullPrecision, "2.22507e-308", 2.22507e-308);
+    TEST_DOUBLE(fullPrecision, "-1.79769e+308", -1.79769e+308);
+    TEST_DOUBLE(fullPrecision, "-2.22507e-308", -2.22507e-308);
+    TEST_DOUBLE(fullPrecision, "4.9406564584124654e-324", 4.9406564584124654e-324); // minimum denormal
+    TEST_DOUBLE(fullPrecision, "2.2250738585072009e-308", 2.2250738585072009e-308); // Max subnormal double
+    TEST_DOUBLE(fullPrecision, "2.2250738585072014e-308", 2.2250738585072014e-308); // Min normal positive double
+    TEST_DOUBLE(fullPrecision, "1.7976931348623157e+308", 1.7976931348623157e+308); // Max double
+    TEST_DOUBLE(fullPrecision, "1e-10000", 0.0);                                    // must underflow
+    TEST_DOUBLE(fullPrecision, "18446744073709551616", 18446744073709551616.0);     // 2^64 (max of uint64_t + 1, force to use double)
+    TEST_DOUBLE(fullPrecision, "-9223372036854775809", -9223372036854775809.0);     // -2^63 - 1 (min of int64_t - 1, force to use double)
+    TEST_DOUBLE(fullPrecision, "0.9868011474609375", 0.9868011474609375);           // https://github.com/Tencent/rapidjson/issues/120
+    TEST_DOUBLE(fullPrecision, "123e34", 123e34);                                   // Fast Path Cases In Disguise
+    TEST_DOUBLE(fullPrecision, "45913141877270640000.0", 45913141877270640000.0);
+    TEST_DOUBLE(fullPrecision, "2.2250738585072011e-308", 2.2250738585072011e-308); // http://www.exploringbinary.com/php-hangs-on-numeric-value-2-2250738585072011e-308/
+    TEST_DOUBLE(fullPrecision, "1e-00011111111111", 0.0);                           // Issue #313
+    TEST_DOUBLE(fullPrecision, "-1e-00011111111111", -0.0);
+    TEST_DOUBLE(fullPrecision, "1e-214748363", 0.0);                                  // Maximum supported negative exponent
+    TEST_DOUBLE(fullPrecision, "1e-214748364", 0.0);
+    TEST_DOUBLE(fullPrecision, "1e-21474836311", 0.0);
+    TEST_DOUBLE(fullPrecision, "1.00000000001e-2147483638", 0.0);
+    TEST_DOUBLE(fullPrecision, "0.017976931348623157e+310", 1.7976931348623157e+308); // Max double in another form
+    TEST_DOUBLE(fullPrecision, "128.74836467836484838364836483643636483648e-336", 0.0); // Issue #1251
+
+    // Since
+    // abs((2^-1022 - 2^-1074) - 2.2250738585072012e-308) = 3.109754131239141401123495768877590405345064751974375599... x 10^-324
+    // abs((2^-1022) - 2.2250738585072012e-308) = 1.830902327173324040642192159804623318305533274168872044... x 10 ^ -324
+    // So 2.2250738585072012e-308 should round to 2^-1022 = 2.2250738585072014e-308
+    TEST_DOUBLE(fullPrecision, "2.2250738585072012e-308", 2.2250738585072014e-308); // http://www.exploringbinary.com/java-hangs-when-converting-2-2250738585072012e-308/
+
+    // More closer to normal/subnormal boundary
+    // boundary = 2^-1022 - 2^-1075 = 2.225073858507201136057409796709131975934819546351645648... x 10^-308
+    TEST_DOUBLE(fullPrecision, "2.22507385850720113605740979670913197593481954635164564e-308", 2.2250738585072009e-308);
+    TEST_DOUBLE(fullPrecision, "2.22507385850720113605740979670913197593481954635164565e-308", 2.2250738585072014e-308);
+
+    // 1.0 is in (1.0 - 2^-54, 1.0 + 2^-53)
+    // 1.0 - 2^-54 = 0.999999999999999944488848768742172978818416595458984375
+    TEST_DOUBLE(fullPrecision, "0.999999999999999944488848768742172978818416595458984375", 1.0); // round to even
+    TEST_DOUBLE(fullPrecision, "0.999999999999999944488848768742172978818416595458984374", 0.99999999999999989); // previous double
+    TEST_DOUBLE(fullPrecision, "0.999999999999999944488848768742172978818416595458984376", 1.0); // next double
+    // 1.0 + 2^-53 = 1.00000000000000011102230246251565404236316680908203125
+    TEST_DOUBLE(fullPrecision, "1.00000000000000011102230246251565404236316680908203125", 1.0); // round to even
+    TEST_DOUBLE(fullPrecision, "1.00000000000000011102230246251565404236316680908203124", 1.0); // previous double
+    TEST_DOUBLE(fullPrecision, "1.00000000000000011102230246251565404236316680908203126", 1.00000000000000022); // next double
+
+    // Numbers from https://github.com/floitsch/double-conversion/blob/master/test/cctest/test-strtod.cc
+
+    TEST_DOUBLE(fullPrecision, "72057594037927928.0", 72057594037927928.0);
+    TEST_DOUBLE(fullPrecision, "72057594037927936.0", 72057594037927936.0);
+    TEST_DOUBLE(fullPrecision, "72057594037927932.0", 72057594037927936.0);
+    TEST_DOUBLE(fullPrecision, "7205759403792793199999e-5", 72057594037927928.0);
+    TEST_DOUBLE(fullPrecision, "7205759403792793200001e-5", 72057594037927936.0);
+
+    TEST_DOUBLE(fullPrecision, "9223372036854774784.0", 9223372036854774784.0);
+    TEST_DOUBLE(fullPrecision, "9223372036854775808.0", 9223372036854775808.0);
+    TEST_DOUBLE(fullPrecision, "9223372036854775296.0", 9223372036854775808.0);
+    TEST_DOUBLE(fullPrecision, "922337203685477529599999e-5", 9223372036854774784.0);
+    TEST_DOUBLE(fullPrecision, "922337203685477529600001e-5", 9223372036854775808.0);
+
+    TEST_DOUBLE(fullPrecision, "10141204801825834086073718800384", 10141204801825834086073718800384.0);
+    TEST_DOUBLE(fullPrecision, "10141204801825835211973625643008", 10141204801825835211973625643008.0);
+    TEST_DOUBLE(fullPrecision, "10141204801825834649023672221696", 10141204801825835211973625643008.0);
+    TEST_DOUBLE(fullPrecision, "1014120480182583464902367222169599999e-5", 10141204801825834086073718800384.0);
+    TEST_DOUBLE(fullPrecision, "1014120480182583464902367222169600001e-5", 10141204801825835211973625643008.0);
+
+    TEST_DOUBLE(fullPrecision, "5708990770823838890407843763683279797179383808", 5708990770823838890407843763683279797179383808.0);
+    TEST_DOUBLE(fullPrecision, "5708990770823839524233143877797980545530986496", 5708990770823839524233143877797980545530986496.0);
+    TEST_DOUBLE(fullPrecision, "5708990770823839207320493820740630171355185152", 5708990770823839524233143877797980545530986496.0);
+    TEST_DOUBLE(fullPrecision, "5708990770823839207320493820740630171355185151999e-3", 5708990770823838890407843763683279797179383808.0);
+    TEST_DOUBLE(fullPrecision, "5708990770823839207320493820740630171355185152001e-3", 5708990770823839524233143877797980545530986496.0);
+
+    {
+        char n1e308[310];   // '1' followed by 308 '0'
+        n1e308[0] = '1';
+        for (int i = 1; i < 309; i++)
+            n1e308[i] = '0';
+        n1e308[309] = '\0';
+        TEST_DOUBLE(fullPrecision, n1e308, 1E308);
+    }
+
+    // Cover trimming
+    TEST_DOUBLE(fullPrecision,
+"2.22507385850720113605740979670913197593481954635164564802342610972482222202107694551652952390813508"
+"7914149158913039621106870086438694594645527657207407820621743379988141063267329253552286881372149012"
+"9811224514518898490572223072852551331557550159143974763979834118019993239625482890171070818506906306"
+"6665599493827577257201576306269066333264756530000924588831643303777979186961204949739037782970490505"
+"1080609940730262937128958950003583799967207254304360284078895771796150945516748243471030702609144621"
+"5722898802581825451803257070188608721131280795122334262883686223215037756666225039825343359745688844"
+"2390026549819838548794829220689472168983109969836584681402285424333066033985088644580400103493397042"
+"7567186443383770486037861622771738545623065874679014086723327636718751234567890123456789012345678901"
+"e-308",
+    2.2250738585072014e-308);
+
+    {
+        static const unsigned count = 100; // Tested with 1000000 locally
+        Random r;
+        Reader reader; // Reusing reader to prevent heap allocation
+
+        // Exhaustively test different exponents with random significant
+        for (uint64_t exp = 0; exp < 2047; exp++) {
+            ;
+            for (unsigned i = 0; i < count; i++) {
+                // Need to call r() in two statements for cross-platform coherent sequence.
+                uint64_t u = (exp << 52) | uint64_t(r() & 0x000FFFFF) << 32;
+                u |= uint64_t(r());
+                internal::Double d = internal::Double(u);
+
+                char buffer[32];
+                *internal::dtoa(d.Value(), buffer) = '\0';
+
+                StringStream s(buffer);
+                ParseDoubleHandler h;
+                ASSERT_EQ(kParseErrorNone, reader.Parse<fullPrecision ? kParseFullPrecisionFlag : 0>(s, h).Code());
+                EXPECT_EQ(1u, h.step_);
+                internal::Double a(h.actual_);
+                if (fullPrecision) {
+                    EXPECT_EQ(d.Uint64Value(), a.Uint64Value());
+                    if (d.Uint64Value() != a.Uint64Value())
+                        printf("  String: %s\n  Actual: %.17g\nExpected: %.17g\n", buffer, h.actual_, d.Value());
+                }
+                else {
+                    EXPECT_EQ(d.Sign(), a.Sign()); // for 0.0 != -0.0
+                    EXPECT_DOUBLE_EQ(d.Value(), h.actual_);
+                }
+            }
+        }
+    }
+
+    // Issue #340
+    TEST_DOUBLE(fullPrecision, "7.450580596923828e-9", 7.450580596923828e-9);
+    {
+        internal::Double d(1.0);
+        for (int i = 0; i < 324; i++) {
+            char buffer[32];
+            *internal::dtoa(d.Value(), buffer) = '\0';
+
+            StringStream s(buffer);
+            ParseDoubleHandler h;
+            Reader reader;
+            ASSERT_EQ(kParseErrorNone, reader.Parse<fullPrecision ? kParseFullPrecisionFlag : 0>(s, h).Code());
+            EXPECT_EQ(1u, h.step_);
+            internal::Double a(h.actual_);
+            if (fullPrecision) {
+                EXPECT_EQ(d.Uint64Value(), a.Uint64Value());
+                if (d.Uint64Value() != a.Uint64Value())
+                    printf("  String: %s\n  Actual: %.17g\nExpected: %.17g\n", buffer, h.actual_, d.Value());
+            }
+            else {
+                EXPECT_EQ(d.Sign(), a.Sign()); // for 0.0 != -0.0
+                EXPECT_DOUBLE_EQ(d.Value(), h.actual_);
+            }
+
+
+            d = d.Value() * 0.5;
+        }
+    }
+
+    // Issue 1249
+    TEST_DOUBLE(fullPrecision, "0e100", 0.0);
+
+    // Issue 1251
+    TEST_DOUBLE(fullPrecision, "128.74836467836484838364836483643636483648e-336", 0.0);
+
+    // Issue 1256
+    TEST_DOUBLE(fullPrecision,
+        "6223372036854775296.1701512723685473547372536854755293372036854685477"
+        "529752233737201701512337200972013723685473123372036872036854236854737"
+        "247372368372367752975258547752975254729752547372368737201701512354737"
+        "83723677529752585477247372368372368547354737253685475529752",
+        6223372036854775808.0);
+
+#if 0
+    // Test (length + exponent) overflow
+    TEST_DOUBLE(fullPrecision, "0e+2147483647", 0.0);
+    TEST_DOUBLE(fullPrecision, "0e-2147483648", 0.0);
+    TEST_DOUBLE(fullPrecision, "1e-2147483648", 0.0);
+    TEST_DOUBLE(fullPrecision, "0e+9223372036854775807", 0.0);
+    TEST_DOUBLE(fullPrecision, "0e-9223372036854775808", 0.0);
+#endif
+
+    if (fullPrecision)
+    {
+        TEST_DOUBLE(fullPrecision, "1e-325", 0.0);
+        TEST_DOUBLE(fullPrecision, "1e-324", 0.0);
+        TEST_DOUBLE(fullPrecision, "2e-324", 0.0);
+        TEST_DOUBLE(fullPrecision, "2.4703282292062327e-324", 0.0);
+        TEST_DOUBLE(fullPrecision, "2.4703282292062328e-324", 5e-324);
+        TEST_DOUBLE(fullPrecision, "2.48e-324",5e-324);
+        TEST_DOUBLE(fullPrecision, "2.5e-324", 5e-324);
+
+        // Slightly above max-normal
+        TEST_DOUBLE(fullPrecision, "1.7976931348623158e+308", 1.7976931348623158e+308);
+
+        TEST_DOUBLE(fullPrecision,
+            "17976931348623157081452742373170435679807056752584499659891747680315726"
+            "07800285387605895586327668781715404589535143824642343213268894641827684"
+            "67546703537516986049910576551282076245490090389328944075868508455133942"
+            "30458323690322294816580855933212334827479782620414472316873817718091929"
+            "9881250404026184124858368",
+            (std::numeric_limits<double>::max)());
+
+        TEST_DOUBLE(fullPrecision,
+            "243546080556034731077856379609316893158278902575447060151047"
+            "212703405344938119816206067372775299130836050315842578309818"
+            "316450894337978612745889730079163798234256495613858256849283"
+            "467066859489192118352020514036083287319232435355752493038825"
+            "828481044358810649108367633313557305310641892225870327827273"
+            "41408256.000000",
+            2.4354608055603473e+307);
+        // 9007199254740991 * 2^971 (max normal)
+        TEST_DOUBLE(fullPrecision,
+            "1.797693134862315708145274237317043567980705675258449965989174768031572607800285"
+            "38760589558632766878171540458953514382464234321326889464182768467546703537516986"
+            "04991057655128207624549009038932894407586850845513394230458323690322294816580855"
+            "9332123348274797826204144723168738177180919299881250404026184124858368e+308",
+            1.797693134862315708e+308 //        0x1.fffffffffffffp1023
+            );
+#if 0
+        // TODO:
+        // Should work at least in full-precision mode...
+        TEST_DOUBLE(fullPrecision,
+            "0.00000000000000000000000000000000000000000000000000000000000"
+            "0000000000000000000000000000000000000000000000000000000000000"
+            "0000000000000000000000000000000000000000000000000000000000000"
+            "0000000000000000000000000000000000000000000000000000000000000"
+            "0000000000000000000000000000000000000000000000000000000000000"
+            "0000000000000000000024703282292062327208828439643411068618252"
+            "9901307162382212792841250337753635104375932649918180817996189"
+            "8982823477228588654633283551779698981993873980053909390631503"
+            "5659515570226392290858392449105184435931802849936536152500319"
+            "3704576782492193656236698636584807570015857692699037063119282"
+            "7955855133292783433840935197801553124659726357957462276646527"
+            "2827220056374006485499977096599470454020828166226237857393450"
+            "7363390079677619305775067401763246736009689513405355374585166"
+            "6113422376667860416215968046191446729184030053005753084904876"
+            "5391711386591646239524912623653881879636239373280423891018672"
+            "3484976682350898633885879256283027559956575244555072551893136"
+            "9083625477918694866799496832404970582102851318545139621383772"
+            "2826145437693412532098591327667236328125",
+            0.0);
+#endif
+        // 9007199254740991 * 2^-1074 = (2^53 - 1) * 2^-1074
+        TEST_DOUBLE(fullPrecision,
+            "4.450147717014402272114819593418263951869639092703291296046852219449644444042153"
+            "89103305904781627017582829831782607924221374017287738918929105531441481564124348"
+            "67599762821265346585071045737627442980259622449029037796981144446145705102663115"
+            "10031828794952795966823603998647925096578034214163701381261333311989876551545144"
+            "03152612538132666529513060001849177663286607555958373922409899478075565940981010"
+            "21612198814605258742579179000071675999344145086087205681577915435923018910334964"
+            "86942061405218289243144579760516365090360651414037721744226256159024466852576737"
+            "24464300755133324500796506867194913776884780053099639677097589658441378944337966"
+            "21993967316936280457084866613206797017728916080020698679408551343728867675409720"
+            "757232455434770912461317493580281734466552734375e-308",
+            4.450147717014402272e-308 //        0x1.fffffffffffffp-1022
+            );
+        // 9007199254740990 * 2^-1074
+        TEST_DOUBLE(fullPrecision,
+            "4.450147717014401778049173752171719775300846224481918930987049605124880018456471"
+            "39035755177760751831052846195619008686241717547743167145836439860405887584484471"
+            "19639655002484083577939142623582164522087943959208000909794783876158397872163051"
+            "22622675229968408654350206725478309956546318828765627255022767720818849892988457"
+            "26333908582101604036318532842699932130356061901518261174396928478121372742040102"
+            "17446565569357687263889031732270082446958029584739170416643195242132750803227473"
+            "16608838720742955671061336566907126801014814608027120593609275183716632624844904"
+            "31985250929886016737037234388448352929102742708402644340627409931664203093081360"
+            "70794835812045179006047003875039546061891526346421705014598610179523165038319441"
+            "51446491086954182492263498716056346893310546875e-308",
+            4.450147717014401778e-308 //        0x1.ffffffffffffep-1022
+            );
+        // half way between the two numbers above.
+        // round to nearest even.
+        TEST_DOUBLE(fullPrecision,
+            "4.450147717014402025081996672794991863585242658592605113516950912287262231249312"
+            "64069530541271189424317838013700808305231545782515453032382772695923684574304409"
+            "93619708911874715081505094180604803751173783204118519353387964161152051487413083"
+            "16327252012460602310586905362063117526562176521464664318142050516404363222266800"
+            "64743260560117135282915796422274554896821334728738317548403413978098469341510556"
+            "19529382191981473003234105366170879223151087335413188049110555339027884856781219"
+            "01775450062980622457102958163711745945687733011032421168917765671370549738710820"
+            "78224775842509670618916870627821633352993761380751142008862499795052791018709663"
+            "46394401564490729731565935244123171539810221213221201847003580761626016356864581"
+            "1358486831521563686919762403704226016998291015625e-308",
+            4.450147717014401778e-308 //        0x1.ffffffffffffep-1022
+            );
+        TEST_DOUBLE(fullPrecision,
+            "4.450147717014402025081996672794991863585242658592605113516950912287262231249312"
+            "64069530541271189424317838013700808305231545782515453032382772695923684574304409"
+            "93619708911874715081505094180604803751173783204118519353387964161152051487413083"
+            "16327252012460602310586905362063117526562176521464664318142050516404363222266800"
+            "64743260560117135282915796422274554896821334728738317548403413978098469341510556"
+            "19529382191981473003234105366170879223151087335413188049110555339027884856781219"
+            "01775450062980622457102958163711745945687733011032421168917765671370549738710820"
+            "78224775842509670618916870627821633352993761380751142008862499795052791018709663"
+            "46394401564490729731565935244123171539810221213221201847003580761626016356864581"
+            "13584868315215636869197624037042260169982910156250000000000000000000000000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000000e-308",
+            4.450147717014401778e-308 //        0x1.ffffffffffffep-1022
+            );
+#if 0
+        // ... round up
+        // TODO:
+        // Should work at least in full-precision mode...
+        TEST_DOUBLE(fullPrecision,
+            "4.450147717014402025081996672794991863585242658592605113516950912287262231249312"
+            "64069530541271189424317838013700808305231545782515453032382772695923684574304409"
+            "93619708911874715081505094180604803751173783204118519353387964161152051487413083"
+            "16327252012460602310586905362063117526562176521464664318142050516404363222266800"
+            "64743260560117135282915796422274554896821334728738317548403413978098469341510556"
+            "19529382191981473003234105366170879223151087335413188049110555339027884856781219"
+            "01775450062980622457102958163711745945687733011032421168917765671370549738710820"
+            "78224775842509670618916870627821633352993761380751142008862499795052791018709663"
+            "46394401564490729731565935244123171539810221213221201847003580761626016356864581"
+            "13584868315215636869197624037042260169982910156250000000000000000000000000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000001e-308",
+            4.450147717014402272e-308 //        0x1.fffffffffffffp-1022
+            );
+#endif
+        // ... round down
+        TEST_DOUBLE(fullPrecision,
+            "4.450147717014402025081996672794991863585242658592605113516950912287262231249312"
+            "64069530541271189424317838013700808305231545782515453032382772695923684574304409"
+            "93619708911874715081505094180604803751173783204118519353387964161152051487413083"
+            "16327252012460602310586905362063117526562176521464664318142050516404363222266800"
+            "64743260560117135282915796422274554896821334728738317548403413978098469341510556"
+            "19529382191981473003234105366170879223151087335413188049110555339027884856781219"
+            "01775450062980622457102958163711745945687733011032421168917765671370549738710820"
+            "78224775842509670618916870627821633352993761380751142008862499795052791018709663"
+            "46394401564490729731565935244123171539810221213221201847003580761626016356864581"
+            "13584868315215636869197624037042260169982910156249999999999999999999999999999999"
+            "99999999999999999999999999999999999999999999999999999999999999999999999999999999"
+            "99999999999999999999999999999999999999999999999999999999999999999999999999999999e-308",
+            4.450147717014401778e-308 //        0x1.ffffffffffffep-1022
+            );
+        // Slightly below half way between max-normal and infinity.
+        // Should round down.
+        TEST_DOUBLE(fullPrecision,
+            "1.797693134862315807937289714053034150799341327100378269361737789804449682927647"
+            "50946649017977587207096330286416692887910946555547851940402630657488671505820681"
+            "90890200070838367627385484581771153176447573027006985557136695962284291481986083"
+            "49364752927190741684443655107043427115596995080930428801779041744977919999999999"
+            "99999999999999999999999999999999999999999999999999999999999999999999999999999999"
+            "99999999999999999999999999999999999999999999999999999999999999999999999999999999"
+            "99999999999999999999999999999999999999999999999999999999999999999999999999999999"
+            "99999999999999999999999999999999999999999999999999999999999999999999999999999999"
+            "99999999999999999999999999999999999999999999999999999999999999999999999999999999"
+            "99999999999999999999999999999999999999999999999999999999999999999999999999999999"
+            "99999999999999999999999999999999999999999999999999999999999999999999999999999999"
+            "99999999999999999999999999999999999999999999999999999999999999999999999999999999"
+            "99999999999999999999999999999999999999999999999999999999999999999999999999999999"
+            "99999999999999999999999999999999999999999999999999999999999999999999999999999999"
+            "99999999999999999999999999999999999999999999999999999999999999999999999999999999"
+            "99999999999999999999999999999999999999999999999999999999999999999999999999999999e+308",
+            1.797693134862315708e+308 //        0x1.fffffffffffffp1023
+            );
+    }
+
+#undef TEST_DOUBLE
+}
+
+// Run the double-parsing cases with the default (approximate) conversion path.
+TEST(Reader, ParseNumber_NormalPrecisionDouble) { TestParseDouble<false>(); }
+
+// Run the double-parsing cases with kParseFullPrecisionFlag enabled.
+TEST(Reader, ParseNumber_FullPrecisionDouble) { TestParseDouble<true>(); }
+
+// Round-trips one million random normal doubles through internal::dtoa() and
+// the reader in normal-precision (default) mode, measuring the error in ULPs.
+// The result is only printed, not asserted against a bound.
+TEST(Reader, ParseNumber_NormalPrecisionError) {
+    static unsigned count = 1000000;
+    Random r;
+
+    double ulpSum = 0.0;
+    double ulpMax = 0.0;
+    for (unsigned i = 0; i < count; i++) {
+        internal::Double e, a;
+        do {
+            // Need to call r() in two statements for cross-platform coherent sequence.
+            uint64_t u = uint64_t(r()) << 32;
+            u |= uint64_t(r());
+            e = u;  // interpret the random 64-bit pattern as a double
+        } while (e.IsNan() || e.IsInf() || !e.IsNormal());  // keep only finite normal values
+
+        char buffer[32];
+        *internal::dtoa(e.Value(), buffer) = '\0';  // terminate at the end pointer dtoa returns
+
+        StringStream s(buffer);
+        ParseDoubleHandler h;
+        Reader reader;
+        ASSERT_EQ(kParseErrorNone, reader.Parse(s, h).Code());
+        EXPECT_EQ(1u, h.step_);  // exactly one Double() event expected
+
+        a = h.actual_;
+        // Distance between expected and parsed value via their biased integer
+        // representations (presumably monotonic over doubles — i.e. ULPs).
+        uint64_t bias1 = e.ToBias();
+        uint64_t bias2 = a.ToBias();
+        double ulp = static_cast<double>(bias1 >= bias2 ? bias1 - bias2 : bias2 - bias1);
+        ulpMax = (std::max)(ulpMax, ulp);
+        ulpSum += ulp;
+    }
+    printf("ULP Average = %g, Max = %g \n", ulpSum / count, ulpMax);
+}
+
+// Exercises numeric inputs that must FAIL to parse: overflow beyond double
+// range and malformed fraction/exponent parts. Each case checks the error
+// code, the reported error offset, and the final stream position. Instantiated
+// for both normal-precision and full-precision parsing.
+template<bool fullPrecision>
+static void TestParseNumberError() {
+// Parse `str` in-situ and assert (errorCode, errorOffset, streamPos).
+#define TEST_NUMBER_ERROR(errorCode, str, errorOffset, streamPos) \
+    { \
+        char buffer[2048]; \
+        ASSERT_LT(std::strlen(str), 2048u); \
+        sprintf(buffer, "%s", str); \
+        InsituStringStream s(buffer); \
+        BaseReaderHandler<> h; \
+        Reader reader; \
+        EXPECT_FALSE(reader.Parse<fullPrecision ? kParseFullPrecisionFlag : 0>(s, h)); \
+        EXPECT_EQ(errorCode, reader.GetParseErrorCode());\
+        EXPECT_EQ(errorOffset, reader.GetErrorOffset());\
+        EXPECT_EQ(streamPos, s.Tell());\
+    }
+
+    // Number too big to be stored in double.
+    {
+        char n1e309[311];   // '1' followed by 309 '0'
+        n1e309[0] = '1';
+        for (int i = 1; i < 310; i++)
+            n1e309[i] = '0';
+        n1e309[310] = '\0';
+        TEST_NUMBER_ERROR(kParseErrorNumberTooBig, n1e309, 0u, 310u);
+    }
+    TEST_NUMBER_ERROR(kParseErrorNumberTooBig, "1e309", 0u, 5u);
+
+    // Miss fraction part in number.
+    TEST_NUMBER_ERROR(kParseErrorNumberMissFraction, "1.", 2u, 2u);
+    TEST_NUMBER_ERROR(kParseErrorNumberMissFraction, "1.a", 2u, 2u);
+
+    // Miss exponent in number.
+    TEST_NUMBER_ERROR(kParseErrorNumberMissExponent, "1e", 2u, 2u);
+    TEST_NUMBER_ERROR(kParseErrorNumberMissExponent, "1e_", 2u, 2u);
+
+    // Issue 849
+    TEST_NUMBER_ERROR(kParseErrorNumberTooBig, "1.8e308", 0u, 7u);
+    TEST_NUMBER_ERROR(kParseErrorNumberTooBig, "5e308", 0u, 5u);
+    TEST_NUMBER_ERROR(kParseErrorNumberTooBig, "1e309", 0u, 5u);
+    TEST_NUMBER_ERROR(kParseErrorNumberTooBig, "1.0e310", 0u, 7u);
+    TEST_NUMBER_ERROR(kParseErrorNumberTooBig, "1.00e310", 0u, 8u);
+    TEST_NUMBER_ERROR(kParseErrorNumberTooBig, "-1.8e308", 0u, 8u);
+    TEST_NUMBER_ERROR(kParseErrorNumberTooBig, "-1e309", 0u, 6u);
+
+    // Issue 1253
+    TEST_NUMBER_ERROR(kParseErrorNumberTooBig, "2e308", 0u, 5u);
+
+    // Issue 1259
+    TEST_NUMBER_ERROR(kParseErrorNumberTooBig,
+        "88474320368547737236837236775298547354737253685475547552933720368546854775297525"
+        "29337203685468547770151233720097201372368547312337203687203685423685123372036872"
+        "03685473724737236837236775297525854775297525472975254737236873720170151235473783"
+        "7236737247372368772473723683723456789012E66", 0u, 283u);
+
+#if 0
+    // Test (length + exponent) overflow
+    TEST_NUMBER_ERROR(kParseErrorNumberTooBig, "1e+2147483647", 0u, 13u);
+    TEST_NUMBER_ERROR(kParseErrorNumberTooBig, "1e+9223372036854775807", 0u, 22u);
+    TEST_NUMBER_ERROR(kParseErrorNumberTooBig, "1e+10000", 0u, 8u);
+    TEST_NUMBER_ERROR(kParseErrorNumberTooBig, "1e+50000", 0u, 8u);
+#endif
+
+    // 9007199254740992 * 2^971 ("infinity")
+    TEST_NUMBER_ERROR(kParseErrorNumberTooBig,
+        "1.797693134862315907729305190789024733617976978942306572734300811577326758055009"
+        "63132708477322407536021120113879871393357658789768814416622492847430639474124377"
+        "76789342486548527630221960124609411945308295208500576883815068234246288147391311"
+        "0540827237163350510684586298239947245938479716304835356329624224137216e+308", 0u, 315u);
+
+    // TODO:
+    // These tests (currently) fail in normal-precision mode
+    if (fullPrecision)
+    {
+        // Half way between max-normal and infinity
+        // Should round to infinity in nearest-even mode.
+        TEST_NUMBER_ERROR(kParseErrorNumberTooBig,
+            "1.797693134862315807937289714053034150799341327100378269361737789804449682927647"
+            "50946649017977587207096330286416692887910946555547851940402630657488671505820681"
+            "90890200070838367627385484581771153176447573027006985557136695962284291481986083"
+            "49364752927190741684443655107043427115596995080930428801779041744977920000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000000e+308", 0u, 1125u);
+        // ...round up
+        TEST_NUMBER_ERROR(kParseErrorNumberTooBig,
+            "1.797693134862315807937289714053034150799341327100378269361737789804449682927647"
+            "50946649017977587207096330286416692887910946555547851940402630657488671505820681"
+            "90890200070838367627385484581771153176447573027006985557136695962284291481986083"
+            "49364752927190741684443655107043427115596995080930428801779041744977920000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000000"
+            "00000000000000000000000000000000000000000000000000000000000000000000000000000001e+308", 0u, 1205u);
+    }
+
+    TEST_NUMBER_ERROR(kParseErrorNumberTooBig,
+        "10000000000000000000000000000000000000000000000000000000000000000000000000000000"
+        "00000000000000000000000000000000000000000000000000000000000000000000000000000000"
+        "00000000000000000000000000000000000000000000000000000000000000000000000000000000"
+        "0000000000000000000000000000000000000000000000000000000000000000000001", 0u, 310u);
+
+#undef TEST_NUMBER_ERROR
+}
+
+TEST(Reader, ParseNumberError_NormalPrecisionDouble) {
+    TestParseNumberError<false>();
+}
+
+TEST(Reader, ParseNumberError_FullPrecisionDouble) {
+    TestParseNumberError<true>();
+}
+
+// SAX handler that captures a single String() event for later inspection.
+// When the reader reports copy==true the characters are duplicated here (and
+// freed in the destructor); otherwise str_ aliases the reader's buffer.
+template <typename Encoding>
+struct ParseStringHandler : BaseReaderHandler<Encoding, ParseStringHandler<Encoding> > {
+    ParseStringHandler() : str_(0), length_(0), copy_() {}
+    // A test using this handler must have produced exactly one string.
+    ~ParseStringHandler() { EXPECT_TRUE(str_ != 0); if (copy_) free(const_cast<typename Encoding::Ch*>(str_)); }
+
+    // Declared but never defined: makes the handler non-copyable (C++03 idiom).
+    ParseStringHandler(const ParseStringHandler&);
+    ParseStringHandler& operator=(const ParseStringHandler&);
+
+    bool Default() { ADD_FAILURE(); return false; }  // any event other than String() fails the test
+    bool String(const typename Encoding::Ch* str, size_t length, bool copy) {
+        EXPECT_EQ(0, str_);  // String() must fire at most once per parse
+        if (copy) {
+            // Copies length+1 code units — assumes the reader null-terminates at str[length].
+            str_ = static_cast<typename Encoding::Ch*>(malloc((length + 1) * sizeof(typename Encoding::Ch)));
+            memcpy(const_cast<typename Encoding::Ch*>(str_), str, (length + 1) * sizeof(typename Encoding::Ch));
+        }
+        else
+            str_ = str;
+        length_ = length;
+        copy_ = copy;
+        return true;
+    }
+
+    const typename Encoding::Ch* str_;  // captured string (owned iff copy_ is true)
+    size_t length_;                     // length in code units, excluding terminator
+    bool copy_;                         // whether str_ was malloc'ed by this handler
+};
+
+// Round-trips each string literal through BOTH the in-situ (destructive,
+// encoding-validated) path and the non-destructive path, comparing the decoded
+// characters and length against the expected value for several encodings.
+TEST(Reader, ParseString) {
+#define TEST_STRING(Encoding, e, x) \
+    { \
+        Encoding::Ch* buffer = StrDup(x); \
+        GenericInsituStringStream<Encoding> is(buffer); \
+        ParseStringHandler<Encoding> h; \
+        GenericReader<Encoding, Encoding> reader; \
+        reader.Parse<kParseInsituFlag | kParseValidateEncodingFlag>(is, h); \
+        EXPECT_EQ(0, StrCmp<Encoding::Ch>(e, h.str_)); \
+        EXPECT_EQ(StrLen(e), h.length_); \
+        free(buffer); \
+        GenericStringStream<Encoding> s(x); \
+        ParseStringHandler<Encoding> h2; \
+        GenericReader<Encoding, Encoding> reader2; \
+        reader2.Parse(s, h2); \
+        EXPECT_EQ(0, StrCmp<Encoding::Ch>(e, h2.str_)); \
+        EXPECT_EQ(StrLen(e), h2.length_); \
+    }
+
+    // String constant L"\xXX" can only specify character code in bytes, which is not endianness-neutral.
+    // And old compiler does not support u"" and U"" string literal. So here specify string literal by array of Ch.
+    // In addition, GCC 4.8 generates -Wnarrowing warnings when character code >= 128 are assigned to signed integer types.
+    // Therefore, utype is added for declaring unsigned array, and then cast it to Encoding::Ch.
+#define ARRAY(...) { __VA_ARGS__ }
+#define TEST_STRINGARRAY(Encoding, utype, array, x) \
+    { \
+        static const utype ue[] = array; \
+        static const Encoding::Ch* e = reinterpret_cast<const Encoding::Ch *>(&ue[0]); \
+        TEST_STRING(Encoding, e, x); \
+    }
+
+// Variant where the JSON input itself is also given as a code-unit array.
+#define TEST_STRINGARRAY2(Encoding, utype, earray, xarray) \
+    { \
+        static const utype ue[] = earray; \
+        static const utype xe[] = xarray; \
+        static const Encoding::Ch* e = reinterpret_cast<const Encoding::Ch *>(&ue[0]); \
+        static const Encoding::Ch* x = reinterpret_cast<const Encoding::Ch *>(&xe[0]); \
+        TEST_STRING(Encoding, e, x); \
+    }
+
+    TEST_STRING(UTF8<>, "", "\"\"");
+    TEST_STRING(UTF8<>, "Hello", "\"Hello\"");
+    TEST_STRING(UTF8<>, "Hello\nWorld", "\"Hello\\nWorld\"");
+    TEST_STRING(UTF8<>, "\"\\/\b\f\n\r\t", "\"\\\"\\\\/\\b\\f\\n\\r\\t\"");
+    TEST_STRING(UTF8<>, "\x24", "\"\\u0024\"");         // Dollar sign U+0024
+    TEST_STRING(UTF8<>, "\xC2\xA2", "\"\\u00A2\"");     // Cents sign U+00A2
+    TEST_STRING(UTF8<>, "\xE2\x82\xAC", "\"\\u20AC\""); // Euro sign U+20AC
+    TEST_STRING(UTF8<>, "\xF0\x9D\x84\x9E", "\"\\uD834\\uDD1E\"");  // G clef sign U+1D11E
+
+    // UTF16
+    TEST_STRING(UTF16<>, L"", L"\"\"");
+    TEST_STRING(UTF16<>, L"Hello", L"\"Hello\"");
+    TEST_STRING(UTF16<>, L"Hello\nWorld", L"\"Hello\\nWorld\"");
+    TEST_STRING(UTF16<>, L"\"\\/\b\f\n\r\t", L"\"\\\"\\\\/\\b\\f\\n\\r\\t\"");
+    TEST_STRINGARRAY(UTF16<>, wchar_t, ARRAY(0x0024, 0x0000), L"\"\\u0024\"");
+    TEST_STRINGARRAY(UTF16<>, wchar_t, ARRAY(0x00A2, 0x0000), L"\"\\u00A2\"");  // Cents sign U+00A2
+    TEST_STRINGARRAY(UTF16<>, wchar_t, ARRAY(0x20AC, 0x0000), L"\"\\u20AC\"");  // Euro sign U+20AC
+    TEST_STRINGARRAY(UTF16<>, wchar_t, ARRAY(0xD834, 0xDD1E, 0x0000), L"\"\\uD834\\uDD1E\"");   // G clef sign U+1D11E
+
+    // UTF32
+    TEST_STRINGARRAY2(UTF32<>, unsigned, ARRAY('\0'), ARRAY('\"', '\"', '\0'));
+    TEST_STRINGARRAY2(UTF32<>, unsigned, ARRAY('H', 'e', 'l', 'l', 'o', '\0'), ARRAY('\"', 'H', 'e', 'l', 'l', 'o', '\"', '\0'));
+    TEST_STRINGARRAY2(UTF32<>, unsigned, ARRAY('H', 'e', 'l', 'l', 'o', '\n', 'W', 'o', 'r', 'l', 'd', '\0'), ARRAY('\"', 'H', 'e', 'l', 'l', 'o', '\\', 'n', 'W', 'o', 'r', 'l', 'd', '\"', '\0'));
+    TEST_STRINGARRAY2(UTF32<>, unsigned, ARRAY('\"', '\\', '/', '\b', '\f', '\n', '\r', '\t', '\0'), ARRAY('\"', '\\', '\"', '\\', '\\', '/', '\\', 'b', '\\', 'f', '\\', 'n', '\\', 'r', '\\', 't', '\"', '\0'));
+    TEST_STRINGARRAY2(UTF32<>, unsigned, ARRAY(0x00024, 0x0000), ARRAY('\"', '\\', 'u', '0', '0', '2', '4', '\"', '\0'));
+    TEST_STRINGARRAY2(UTF32<>, unsigned, ARRAY(0x000A2, 0x0000), ARRAY('\"', '\\', 'u', '0', '0', 'A', '2', '\"', '\0'));   // Cents sign U+00A2
+    TEST_STRINGARRAY2(UTF32<>, unsigned, ARRAY(0x020AC, 0x0000), ARRAY('\"', '\\', 'u', '2', '0', 'A', 'C', '\"', '\0'));   // Euro sign U+20AC
+    TEST_STRINGARRAY2(UTF32<>, unsigned, ARRAY(0x1D11E, 0x0000), ARRAY('\"', '\\', 'u', 'D', '8', '3', '4', '\\', 'u', 'D', 'D', '1', 'E', '\"', '\0'));    // G clef sign U+1D11E
+
+#undef TEST_STRINGARRAY
+#undef ARRAY
+#undef TEST_STRING
+
+    // Support of null character in string
+    {
+        StringStream s("\"Hello\\u0000World\"");
+        const char e[] = "Hello\0World";
+        ParseStringHandler<UTF8<> > h;
+        Reader reader;
+        reader.Parse(s, h);
+        EXPECT_EQ(0, memcmp(e, h.str_, h.length_ + 1));
+        EXPECT_EQ(11u, h.length_);
+    }
+}
+
+// Decodes UTF-8 JSON while emitting UTF-16 to the handler (transcoding path).
+TEST(Reader, ParseString_Transcoding) {
+    const char* json = "\"Hello\"";
+    const wchar_t* expected = L"Hello";
+    GenericStringStream<UTF8<> > stream(json);
+    GenericReader<UTF8<>, UTF16<> > parser;
+    ParseStringHandler<UTF16<> > handler;
+    parser.Parse(stream, handler);
+    EXPECT_EQ(0, StrCmp<UTF16<>::Ch>(expected, handler.str_));
+    EXPECT_EQ(StrLen(expected), handler.length_);
+}
+
+// Same UTF-8 -> UTF-16 transcoding round-trip, but with source-encoding
+// validation (kParseValidateEncodingFlag) switched on.
+TEST(Reader, ParseString_TranscodingWithValidation) {
+    const char* json = "\"Hello\"";
+    const wchar_t* expected = L"Hello";
+    GenericStringStream<UTF8<> > stream(json);
+    GenericReader<UTF8<>, UTF16<> > parser;
+    ParseStringHandler<UTF16<> > handler;
+    parser.Parse<kParseValidateEncodingFlag>(stream, handler);
+    EXPECT_EQ(0, StrCmp<UTF16<>::Ch>(expected, handler.str_));
+    EXPECT_EQ(StrLen(expected), handler.length_);
+}
+
+// Parses an escaped string without insitu mode; the handler must receive the
+// decoded copy "Hello\nWorld" (11 characters).
+TEST(Reader, ParseString_NonDestructive) {
+    StringStream stream("\"Hello\\nWorld\"");
+    ParseStringHandler<UTF8<> > handler;
+    Reader parser;
+    parser.Parse(stream, handler);
+    EXPECT_EQ(0, StrCmp("Hello\nWorld", handler.str_));
+    EXPECT_EQ(11u, handler.length_);
+}
+
+// Parses `str` with encoding validation enabled and returns the resulting
+// parse error code (kParseErrorNone on success).
+template <typename Encoding>
+ParseErrorCode TestString(const typename Encoding::Ch* str) {
+    GenericStringStream<Encoding> stream(str);
+    BaseReaderHandler<Encoding> handler;
+    GenericReader<Encoding, Encoding> parser;
+    parser.template Parse<kParseValidateEncodingFlag>(stream, handler);
+    return parser.GetParseErrorCode();
+}
+
+TEST(Reader, ParseString_Error) {
+#define TEST_STRING_ERROR(errorCode, str, errorOffset, streamPos)\
+{\
+    GenericStringStream<UTF8<> > s(str);\
+    BaseReaderHandler<UTF8<> > h;\
+    GenericReader<UTF8<> , UTF8<> > reader;\
+    reader.Parse<kParseValidateEncodingFlag>(s, h);\
+    EXPECT_EQ(errorCode, reader.GetParseErrorCode());\
+    EXPECT_EQ(errorOffset, reader.GetErrorOffset());\
+    EXPECT_EQ(streamPos, s.Tell());\
+}
+
+#define ARRAY(...) { __VA_ARGS__ }
+#define TEST_STRINGENCODING_ERROR(Encoding, TargetEncoding, utype, array) \
+    { \
+        static const utype ue[] = array; \
+        static const Encoding::Ch* e = reinterpret_cast<const Encoding::Ch *>(&ue[0]); \
+        EXPECT_EQ(kParseErrorStringInvalidEncoding, TestString<Encoding>(e));\
+        /* decode error */\
+        GenericStringStream<Encoding> s(e);\
+        BaseReaderHandler<TargetEncoding> h;\
+        GenericReader<Encoding, TargetEncoding> reader;\
+        reader.Parse(s, h);\
+        EXPECT_EQ(kParseErrorStringInvalidEncoding, reader.GetParseErrorCode());\
+    }
+
+    // Invalid escape character in string.
+    TEST_STRING_ERROR(kParseErrorStringEscapeInvalid, "[\"\\a\"]", 2u, 3u);
+
+    // Incorrect hex digit after \\u escape in string.
+    TEST_STRING_ERROR(kParseErrorStringUnicodeEscapeInvalidHex, "[\"\\uABCG\"]", 2u, 7u);
+
+    // Quotation in \\u escape in string (Issue #288)
+    TEST_STRING_ERROR(kParseErrorStringUnicodeEscapeInvalidHex, "[\"\\uaaa\"]", 2u, 7u);
+    TEST_STRING_ERROR(kParseErrorStringUnicodeEscapeInvalidHex, "[\"\\uD800\\uFFF\"]", 2u, 13u);
+
+    // The surrogate pair in string is invalid.
+    TEST_STRING_ERROR(kParseErrorStringUnicodeSurrogateInvalid, "[\"\\uD800X\"]", 2u, 8u);
+    TEST_STRING_ERROR(kParseErrorStringUnicodeSurrogateInvalid, "[\"\\uD800\\uFFFF\"]", 2u, 14u);
+
+    // Missing a closing quotation mark in string.
+    TEST_STRING_ERROR(kParseErrorStringMissQuotationMark, "[\"Test]", 7u, 7u);
+
+    // http://www.cl.cam.ac.uk/~mgk25/ucs/examples/UTF-8-test.txt
+
+    // 3  Malformed sequences
+
+    // 3.1 Unexpected continuation bytes
+    {
+         char e[] = { '[', '\"', 0, '\"', ']', '\0' };
+         for (unsigned char c = 0x80u; c <= 0xBFu; c++) {
+            e[2] = static_cast<char>(c);
+            ParseErrorCode error = TestString<UTF8<> >(e);
+            EXPECT_EQ(kParseErrorStringInvalidEncoding, error);
+            if (error != kParseErrorStringInvalidEncoding)
+                std::cout << static_cast<unsigned>(c) << std::endl;
+         }
+    }
+
+    // 3.2 Lonely start characters, 3.5 Impossible bytes
+    {
+        char e[] = { '[', '\"', 0, ' ', '\"', ']', '\0' };
+        for (unsigned c = 0xC0u; c <= 0xFFu; c++) {
+            e[2] = static_cast<char>(c);
+            unsigned streamPos;
+            if (c <= 0xC1u)
+                streamPos = 3; // 0xC0 - 0xC1
+            else if (c <= 0xDFu)
+                streamPos = 4; // 0xC2 - 0xDF
+            else if (c <= 0xEFu)
+                streamPos = 5; // 0xE0 - 0xEF
+            else if (c <= 0xF4u)
+                streamPos = 6; // 0xF0 - 0xF4
+            else
+                streamPos = 3; // 0xF5 - 0xFF
+            TEST_STRING_ERROR(kParseErrorStringInvalidEncoding, e, 2u, streamPos);
+        }
+    }
+
+    // 4  Overlong sequences
+
+    // 4.1  Examples of an overlong ASCII character
+    TEST_STRINGENCODING_ERROR(UTF8<>, UTF16<>, unsigned char, ARRAY('[', '\"', 0xC0u, 0xAFu, '\"', ']', '\0'));
+    TEST_STRINGENCODING_ERROR(UTF8<>, UTF16<>, unsigned char, ARRAY('[', '\"', 0xE0u, 0x80u, 0xAFu, '\"', ']', '\0'));
+    TEST_STRINGENCODING_ERROR(UTF8<>, UTF16<>, unsigned char, ARRAY('[', '\"', 0xF0u, 0x80u, 0x80u, 0xAFu, '\"', ']', '\0'));
+
+    // 4.2  Maximum overlong sequences
+    TEST_STRINGENCODING_ERROR(UTF8<>, UTF16<>, unsigned char, ARRAY('[', '\"', 0xC1u, 0xBFu, '\"', ']', '\0'));
+    TEST_STRINGENCODING_ERROR(UTF8<>, UTF16<>, unsigned char, ARRAY('[', '\"', 0xE0u, 0x9Fu, 0xBFu, '\"', ']', '\0'));
+    TEST_STRINGENCODING_ERROR(UTF8<>, UTF16<>, unsigned char, ARRAY('[', '\"', 0xF0u, 0x8Fu, 0xBFu, 0xBFu, '\"', ']', '\0'));
+
+    // 4.3  Overlong representation of the NUL character
+    TEST_STRINGENCODING_ERROR(UTF8<>, UTF16<>, unsigned char, ARRAY('[', '\"', 0xC0u, 0x80u, '\"', ']', '\0'));
+    TEST_STRINGENCODING_ERROR(UTF8<>, UTF16<>, unsigned char, ARRAY('[', '\"', 0xE0u, 0x80u, 0x80u, '\"', ']', '\0'));
+    TEST_STRINGENCODING_ERROR(UTF8<>, UTF16<>, unsigned char, ARRAY('[', '\"', 0xF0u, 0x80u, 0x80u, 0x80u, '\"', ']', '\0'));
+
+    // 5  Illegal code positions
+
+    // 5.1 Single UTF-16 surrogates
+    TEST_STRINGENCODING_ERROR(UTF8<>, UTF16<>, unsigned char, ARRAY('[', '\"', 0xEDu, 0xA0u, 0x80u, '\"', ']', '\0'));
+    TEST_STRINGENCODING_ERROR(UTF8<>, UTF16<>, unsigned char, ARRAY('[', '\"', 0xEDu, 0xADu, 0xBFu, '\"', ']', '\0'));
+    TEST_STRINGENCODING_ERROR(UTF8<>, UTF16<>, unsigned char, ARRAY('[', '\"', 0xEDu, 0xAEu, 0x80u, '\"', ']', '\0'));
+    TEST_STRINGENCODING_ERROR(UTF8<>, UTF16<>, unsigned char, ARRAY('[', '\"', 0xEDu, 0xAFu, 0xBFu, '\"', ']', '\0'));
+    TEST_STRINGENCODING_ERROR(UTF8<>, UTF16<>, unsigned char, ARRAY('[', '\"', 0xEDu, 0xB0u, 0x80u, '\"', ']', '\0'));
+    TEST_STRINGENCODING_ERROR(UTF8<>, UTF16<>, unsigned char, ARRAY('[', '\"', 0xEDu, 0xBEu, 0x80u, '\"', ']', '\0'));
+    TEST_STRINGENCODING_ERROR(UTF8<>, UTF16<>, unsigned char, ARRAY('[', '\"', 0xEDu, 0xBFu, 0xBFu, '\"', ']', '\0'));
+
+    // Malform UTF-16 sequences
+    TEST_STRINGENCODING_ERROR(UTF16<>, UTF8<>, wchar_t, ARRAY('[', '\"', 0xDC00, 0xDC00, '\"', ']', '\0'));
+    TEST_STRINGENCODING_ERROR(UTF16<>, UTF8<>, wchar_t, ARRAY('[', '\"', 0xD800, 0xD800, '\"', ']', '\0'));
+
+    // Malform UTF-32 sequence
+    TEST_STRINGENCODING_ERROR(UTF32<>, UTF8<>, unsigned, ARRAY('[', '\"', 0x110000, '\"', ']', '\0'));
+
+    // Malform ASCII sequence
+    TEST_STRINGENCODING_ERROR(ASCII<>, UTF8<>, char, ARRAY('[', '\"', char(0x80u), '\"', ']', '\0'));
+    TEST_STRINGENCODING_ERROR(ASCII<>, UTF8<>, char, ARRAY('[', '\"', char(0x01u), '\"', ']', '\0'));
+    TEST_STRINGENCODING_ERROR(ASCII<>, UTF8<>, char, ARRAY('[', '\"', char(0x1Cu), '\"', ']', '\0'));
+
+#undef ARRAY
+#undef TEST_STRINGARRAY_ERROR
+}
+
+// Handler verifying array parse events in order. step_ counts handler
+// invocations: StartArray is step 0, each Uint(i) must satisfy i == step_,
+// and EndArray bumps the counter one final time. Any other event fails.
+template <unsigned count>
+struct ParseArrayHandler : BaseReaderHandler<UTF8<>, ParseArrayHandler<count> > {
+    ParseArrayHandler() : step_(0) {}
+
+    bool Default() { ADD_FAILURE(); return false; }
+    bool Uint(unsigned i) { EXPECT_EQ(step_, i); step_++; return true; }
+    bool StartArray() { EXPECT_EQ(0u, step_); step_++; return true; }
+    bool EndArray(SizeType) { step_++; return true; }
+
+    unsigned step_;
+};
+
+// "[ ]" yields exactly StartArray + EndArray => step_ == 2.
+TEST(Reader, ParseEmptyArray) {
+    char *json = StrDup("[ ] ");
+    InsituStringStream s(json);
+    ParseArrayHandler<0> h;
+    Reader reader;
+    reader.Parse(s, h);
+    EXPECT_EQ(2u, h.step_);
+    free(json);
+}
+
+// "[1, 2, 3, 4]" yields StartArray + 4 x Uint + EndArray => step_ == 6.
+TEST(Reader, ParseArray) {
+    char *json = StrDup("[1, 2, 3, 4]");
+    InsituStringStream s(json);
+    ParseArrayHandler<4> h;
+    Reader reader;
+    reader.Parse(s, h);
+    EXPECT_EQ(6u, h.step_);
+    free(json);
+}
+
+// Each case checks the error code, the reported error offset, and that the
+// stream position (Tell) stopped exactly at the error offset.
+TEST(Reader, ParseArray_Error) {
+#define TEST_ARRAY_ERROR(errorCode, str, errorOffset) \
+    { \
+        unsigned streamPos = errorOffset; \
+        char buffer[1001]; \
+        strncpy(buffer, str, 1000); \
+        InsituStringStream s(buffer); \
+        BaseReaderHandler<> h; \
+        GenericReader<UTF8<>, UTF8<>, CrtAllocator> reader; \
+        EXPECT_FALSE(reader.Parse(s, h)); \
+        EXPECT_EQ(errorCode, reader.GetParseErrorCode());\
+        EXPECT_EQ(errorOffset, reader.GetErrorOffset());\
+        EXPECT_EQ(streamPos, s.Tell());\
+    }
+
+    // Missing a comma or ']' after an array element.
+    TEST_ARRAY_ERROR(kParseErrorArrayMissCommaOrSquareBracket, "[1", 2u);
+    TEST_ARRAY_ERROR(kParseErrorArrayMissCommaOrSquareBracket, "[1}", 2u);
+    TEST_ARRAY_ERROR(kParseErrorArrayMissCommaOrSquareBracket, "[1 2]", 3u);
+
+    // Array cannot have a trailing comma (without kParseTrailingCommasFlag);
+    // a value must follow a comma
+    TEST_ARRAY_ERROR(kParseErrorValueInvalid, "[1,]", 3u);
+
+#undef TEST_ARRAY_ERROR
+}
+
+// Handler scripting the exact event sequence for the canonical test document
+// { "hello":"world", "t":true, "f":false, "n":null, "i":123, "pi":3.1416,
+//   "a":[1,2,3] }. step_ indexes the expected event; 20 events in total.
+struct ParseObjectHandler : BaseReaderHandler<UTF8<>, ParseObjectHandler> {
+    ParseObjectHandler() : step_(0) {}
+
+    bool Default() { ADD_FAILURE(); return false; }
+    bool Null() { EXPECT_EQ(8u, step_); step_++; return true; }
+    bool Bool(bool b) {
+        switch(step_) {
+            case 4: EXPECT_TRUE(b); step_++; return true;
+            case 6: EXPECT_FALSE(b); step_++; return true;
+            default: ADD_FAILURE(); return false;
+        }
+    }
+    bool Int(int i) {
+        switch(step_) {
+            case 10: EXPECT_EQ(123, i); step_++; return true;
+            case 15: EXPECT_EQ(1, i); step_++; return true;
+            case 16: EXPECT_EQ(2, i); step_++; return true;
+            case 17: EXPECT_EQ(3, i); step_++; return true;
+            default: ADD_FAILURE(); return false;
+        }
+    }
+    // Small non-negative numbers arrive via Uint; funnel them into Int.
+    bool Uint(unsigned i) { return Int(static_cast<int>(i)); }
+    bool Double(double d) { EXPECT_EQ(12u, step_); EXPECT_DOUBLE_EQ(3.1416, d); step_++; return true; }
+    bool String(const char* str, size_t, bool) {
+        switch(step_) {
+            case 1: EXPECT_STREQ("hello", str); step_++; return true;
+            case 2: EXPECT_STREQ("world", str); step_++; return true;
+            case 3: EXPECT_STREQ("t", str); step_++; return true;
+            case 5: EXPECT_STREQ("f", str); step_++; return true;
+            case 7: EXPECT_STREQ("n", str); step_++; return true;
+            case 9: EXPECT_STREQ("i", str); step_++; return true;
+            case 11: EXPECT_STREQ("pi", str); step_++; return true;
+            case 13: EXPECT_STREQ("a", str); step_++; return true;
+            default: ADD_FAILURE(); return false;
+        }
+    }
+    bool StartObject() { EXPECT_EQ(0u, step_); step_++; return true; }
+    bool EndObject(SizeType memberCount) { EXPECT_EQ(19u, step_); EXPECT_EQ(7u, memberCount); step_++; return true; }
+    bool StartArray() { EXPECT_EQ(14u, step_); step_++; return true; }
+    bool EndArray(SizeType elementCount) { EXPECT_EQ(18u, step_); EXPECT_EQ(3u, elementCount); step_++; return true; }
+
+    unsigned step_;
+};
+
+// Parse the canonical object both in-situ (mutable buffer) and from a
+// read-only StringStream; both must produce the full 20-event sequence.
+TEST(Reader, ParseObject) {
+    const char* json = "{ \"hello\" : \"world\", \"t\" : true , \"f\" : false, \"n\": null, \"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3] } ";
+
+    // Insitu
+    {
+        char* json2 = StrDup(json);
+        InsituStringStream s(json2);
+        ParseObjectHandler h;
+        Reader reader;
+        reader.Parse<kParseInsituFlag>(s, h);
+        EXPECT_EQ(20u, h.step_);
+        free(json2);
+    }
+
+    // Normal
+    {
+        StringStream s(json);
+        ParseObjectHandler h;
+        Reader reader;
+        reader.Parse(s, h);
+        EXPECT_EQ(20u, h.step_);
+    }
+}
+
+// Expects exactly StartObject then EndObject for "{ }".
+struct ParseEmptyObjectHandler : BaseReaderHandler<UTF8<>, ParseEmptyObjectHandler> {
+    ParseEmptyObjectHandler() : step_(0) {}
+
+    bool Default() { ADD_FAILURE(); return false; }
+    bool StartObject() { EXPECT_EQ(0u, step_); step_++; return true; }
+    bool EndObject(SizeType) { EXPECT_EQ(1u, step_); step_++; return true; }
+
+    unsigned step_;
+};
+
+TEST(Reader, Parse_EmptyObject) {
+    StringStream s("{ } ");
+    ParseEmptyObjectHandler h;
+    Reader reader;
+    reader.Parse(s, h);
+    EXPECT_EQ(2u, h.step_);
+}
+
+// Expects "{}" then "[]" as two consecutive root values (steps 0-3).
+struct ParseMultipleRootHandler : BaseReaderHandler<UTF8<>, ParseMultipleRootHandler> {
+    ParseMultipleRootHandler() : step_(0) {}
+
+    bool Default() { ADD_FAILURE(); return false; }
+    bool StartObject() { EXPECT_EQ(0u, step_); step_++; return true; }
+    bool EndObject(SizeType) { EXPECT_EQ(1u, step_); step_++; return true; }
+    bool StartArray() { EXPECT_EQ(2u, step_); step_++; return true; }
+    bool EndArray(SizeType) { EXPECT_EQ(3u, step_); step_++; return true; }
+
+    unsigned step_;
+};
+
+// With kParseStopWhenDoneFlag each Parse() consumes one root value and
+// leaves the stream positioned right after it, so the trailing " a" remains.
+template <unsigned parseFlags>
+void TestMultipleRoot() {
+    StringStream s("{}[] a");
+    ParseMultipleRootHandler h;
+    Reader reader;
+    EXPECT_TRUE(reader.Parse<parseFlags>(s, h));
+    EXPECT_EQ(2u, h.step_);
+    EXPECT_TRUE(reader.Parse<parseFlags>(s, h));
+    EXPECT_EQ(4u, h.step_);
+    EXPECT_EQ(' ', s.Take());
+    EXPECT_EQ('a', s.Take());
+}
+
+TEST(Reader, Parse_MultipleRoot) {
+    TestMultipleRoot<kParseStopWhenDoneFlag>();
+}
+
+TEST(Reader, ParseIterative_MultipleRoot) {
+    TestMultipleRoot<kParseIterativeFlag | kParseStopWhenDoneFlag>();
+}
+
+// Same as TestMultipleRoot but parsing in-situ, which requires a mutable
+// buffer. Each Parse() with kParseStopWhenDoneFlag consumes one root value;
+// the trailing " a" must remain unread afterwards.
+// Uses the file's StrDup helper (as the other in-situ tests do) rather than
+// POSIX strdup, which is not part of ISO C++ and is deprecated-warned on MSVC.
+template <unsigned parseFlags>
+void TestInsituMultipleRoot() {
+    char* buffer = StrDup("{}[] a");
+    InsituStringStream s(buffer);
+    ParseMultipleRootHandler h;
+    Reader reader;
+    EXPECT_TRUE(reader.Parse<kParseInsituFlag | parseFlags>(s, h));
+    EXPECT_EQ(2u, h.step_);
+    EXPECT_TRUE(reader.Parse<kParseInsituFlag | parseFlags>(s, h));
+    EXPECT_EQ(4u, h.step_);
+    EXPECT_EQ(' ', s.Take());
+    EXPECT_EQ('a', s.Take());
+    free(buffer);
+}
+
+TEST(Reader, ParseInsitu_MultipleRoot) {
+    TestInsituMultipleRoot<kParseStopWhenDoneFlag>();
+}
+
+TEST(Reader, ParseInsituIterative_MultipleRoot) {
+    TestInsituMultipleRoot<kParseIterativeFlag | kParseStopWhenDoneFlag>();
+}
+
+// Parse `str` with the default Reader and check the error code, the reported
+// error offset, and that the stream stopped exactly at that offset.
+#define TEST_ERROR(errorCode, str, errorOffset) \
+    { \
+        unsigned streamPos = errorOffset; \
+        char buffer[1001]; \
+        strncpy(buffer, str, 1000); \
+        InsituStringStream s(buffer); \
+        BaseReaderHandler<> h; \
+        Reader reader; \
+        EXPECT_FALSE(reader.Parse(s, h)); \
+        EXPECT_EQ(errorCode, reader.GetParseErrorCode());\
+        EXPECT_EQ(errorOffset, reader.GetErrorOffset());\
+        EXPECT_EQ(streamPos, s.Tell());\
+    }
+
+TEST(Reader, ParseDocument_Error) {
+    // The document is empty.
+    TEST_ERROR(kParseErrorDocumentEmpty, "", 0u);
+    TEST_ERROR(kParseErrorDocumentEmpty, " ", 1u);
+    TEST_ERROR(kParseErrorDocumentEmpty, " \n", 2u);
+
+    // The document root must not follow by other values.
+    TEST_ERROR(kParseErrorDocumentRootNotSingular, "[] 0", 3u);
+    TEST_ERROR(kParseErrorDocumentRootNotSingular, "{} 0", 3u);
+    TEST_ERROR(kParseErrorDocumentRootNotSingular, "null []", 5u);
+    TEST_ERROR(kParseErrorDocumentRootNotSingular, "0 {}", 2u);
+}
+
+TEST(Reader, ParseValue_Error) {
+    // Invalid value.
+    TEST_ERROR(kParseErrorValueInvalid, "nulL", 3u);
+    TEST_ERROR(kParseErrorValueInvalid, "truE", 3u);
+    TEST_ERROR(kParseErrorValueInvalid, "falsE", 4u);
+    TEST_ERROR(kParseErrorValueInvalid, "a]", 0u);
+    TEST_ERROR(kParseErrorValueInvalid, ".1", 0u);
+}
+
+TEST(Reader, ParseObject_Error) {
+    // Missing a name for object member.
+    TEST_ERROR(kParseErrorObjectMissName, "{1}", 1u);
+    TEST_ERROR(kParseErrorObjectMissName, "{:1}", 1u);
+    TEST_ERROR(kParseErrorObjectMissName, "{null:1}", 1u);
+    TEST_ERROR(kParseErrorObjectMissName, "{true:1}", 1u);
+    TEST_ERROR(kParseErrorObjectMissName, "{false:1}", 1u);
+    TEST_ERROR(kParseErrorObjectMissName, "{1:1}", 1u);
+    TEST_ERROR(kParseErrorObjectMissName, "{[]:1}", 1u);
+    TEST_ERROR(kParseErrorObjectMissName, "{{}:1}", 1u);
+    TEST_ERROR(kParseErrorObjectMissName, "{xyz:1}", 1u);
+
+    // Missing a colon after a name of object member.
+    TEST_ERROR(kParseErrorObjectMissColon, "{\"a\" 1}", 5u);
+    TEST_ERROR(kParseErrorObjectMissColon, "{\"a\",1}", 4u);
+
+    // Must be a comma or '}' after an object member
+    TEST_ERROR(kParseErrorObjectMissCommaOrCurlyBracket, "{\"a\":1]", 6u);
+
+    // Object cannot have a trailing comma (without kParseTrailingCommasFlag);
+    // an object member name must follow a comma
+    TEST_ERROR(kParseErrorObjectMissName, "{\"a\":1,}", 7u);
+
+    // This tests that MemoryStream is checking the length in Peek().
+    {
+        MemoryStream ms("{\"a\"", 1);
+        BaseReaderHandler<> h;
+        Reader reader;
+        EXPECT_FALSE(reader.Parse<kParseStopWhenDoneFlag>(ms, h));
+        EXPECT_EQ(kParseErrorObjectMissName, reader.GetParseErrorCode());
+    }
+}
+
+#undef TEST_ERROR
+
+// SkipWhitespace must stop on every non-whitespace character in turn,
+// regardless of which mix of ' ', '\t', '\n', '\r' precedes it.
+TEST(Reader, SkipWhitespace) {
+    StringStream ss(" A \t\tB\n \n\nC\r\r \rD \t\n\r E");
+    const char* expected = "ABCDE";
+    for (size_t i = 0; i < 5; i++) {
+        SkipWhitespace(ss);
+        EXPECT_EQ(expected[i], ss.Take());
+    }
+}
+
+// Test implementing a stream without copy stream optimization.
+// Clone from GenericStringStream except that copy constructor is disabled.
+template <typename Encoding>
+class CustomStringStream {
+public:
+    typedef typename Encoding::Ch Ch;
+
+    CustomStringStream(const Ch *src) : src_(src), head_(src) {}
+
+    Ch Peek() const { return *src_; }
+    Ch Take() { return *src_++; }
+    size_t Tell() const { return static_cast<size_t>(src_ - head_); }
+
+    // Write-side interface is unused for a read-only stream; assert if called.
+    Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
+    void Put(Ch) { RAPIDJSON_ASSERT(false); }
+    void Flush() { RAPIDJSON_ASSERT(false); }
+    size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
+
+private:
+    // Prohibit copy constructor & assignment operator.
+    CustomStringStream(const CustomStringStream&);
+    CustomStringStream& operator=(const CustomStringStream&);
+
+    const Ch* src_;     //!< Current read position.
+    const Ch* head_;    //!< Original head of the string.
+};
+
+// If the following code is compiled, it should generate compilation error as predicted.
+// Because CustomStringStream<> is not copyable via making copy constructor private.
+#if 0
+namespace rapidjson {
+
+template <typename Encoding>
+struct StreamTraits<CustomStringStream<Encoding> > {
+    enum { copyOptimization = 1 };
+};
+
+} // namespace rapidjson
+#endif
+
+// The canonical object must still produce all 20 events through the
+// non-copyable stream (Reader takes it by reference).
+TEST(Reader, CustomStringStream) {
+    const char* json = "{ \"hello\" : \"world\", \"t\" : true , \"f\" : false, \"n\": null, \"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3] } ";
+    CustomStringStream<UTF8<char> > s(json);
+    ParseObjectHandler h;
+    Reader reader;
+    reader.Parse(s, h);
+    EXPECT_EQ(20u, h.step_);
+}
+
+#include <sstream>
+
+// Minimal adapter exposing a std::istream through rapidjson's stream
+// concept. EOF is mapped to '\0'; the write-side interface asserts.
+class IStreamWrapper {
+public:
+    typedef char Ch;
+
+    IStreamWrapper(std::istream& is) : is_(is) {}
+
+    Ch Peek() const {
+        int c = is_.peek();
+        return c == std::char_traits<char>::eof() ? '\0' : static_cast<Ch>(c);
+    }
+
+    Ch Take() {
+        int c = is_.get();
+        return c == std::char_traits<char>::eof() ? '\0' : static_cast<Ch>(c);
+    }
+
+    size_t Tell() const { return static_cast<size_t>(is_.tellg()); }
+
+    Ch* PutBegin() { assert(false); return 0; }
+    void Put(Ch) { assert(false); }
+    void Flush() { assert(false); }
+    size_t PutEnd(Ch*) { assert(false); return 0; }
+
+private:
+    // Non-copyable: holds a reference to the wrapped stream.
+    IStreamWrapper(const IStreamWrapper&);
+    IStreamWrapper& operator=(const IStreamWrapper&);
+
+    std::istream& is_;
+};
+
+TEST(Reader, Parse_IStreamWrapper_StringStream) {
+    const char* json = "[1,2,3,4]";
+
+    std::stringstream ss(json);
+    IStreamWrapper is(ss);
+
+    Reader reader;
+    ParseArrayHandler<4> h;
+    reader.Parse(is, h);
+    EXPECT_FALSE(reader.HasParseError());
+}
+
+// Test iterative parsing.
+
+// Like TEST_ERROR above, but parses with kParseIterativeFlag so the
+// iterative state machine's error reporting is exercised.
+#define TESTERRORHANDLING(text, errorCode, offset)\
+{\
+    unsigned streamPos = offset; \
+    StringStream json(text); \
+    BaseReaderHandler<> handler; \
+    Reader reader; \
+    reader.Parse<kParseIterativeFlag>(json, handler); \
+    EXPECT_TRUE(reader.HasParseError()); \
+    EXPECT_EQ(errorCode, reader.GetParseErrorCode()); \
+    EXPECT_EQ(offset, reader.GetErrorOffset()); \
+    EXPECT_EQ(streamPos, json.Tell()); \
+}
+
+TEST(Reader, IterativeParsing_ErrorHandling) {
+    TESTERRORHANDLING("{\"a\": a}", kParseErrorValueInvalid, 6u);
+
+    TESTERRORHANDLING("", kParseErrorDocumentEmpty, 0u);
+    TESTERRORHANDLING("{}{}", kParseErrorDocumentRootNotSingular, 2u);
+
+    TESTERRORHANDLING("{1}", kParseErrorObjectMissName, 1u);
+    TESTERRORHANDLING("{\"a\", 1}", kParseErrorObjectMissColon, 4u);
+    TESTERRORHANDLING("{\"a\"}", kParseErrorObjectMissColon, 4u);
+    TESTERRORHANDLING("{\"a\": 1", kParseErrorObjectMissCommaOrCurlyBracket, 7u);
+    TESTERRORHANDLING("[1 2 3]", kParseErrorArrayMissCommaOrSquareBracket, 3u);
+    TESTERRORHANDLING("{\"a: 1", kParseErrorStringMissQuotationMark, 6u);
+    TESTERRORHANDLING("{\"a\":}", kParseErrorValueInvalid, 5u);
+    TESTERRORHANDLING("{\"a\":]", kParseErrorValueInvalid, 5u);
+    TESTERRORHANDLING("[1,2,}", kParseErrorValueInvalid, 5u);
+    TESTERRORHANDLING("[}]", kParseErrorValueInvalid, 1u);
+    TESTERRORHANDLING("[,]", kParseErrorValueInvalid, 1u);
+    TESTERRORHANDLING("[1,,]", kParseErrorValueInvalid, 3u);
+
+    // Trailing commas are not allowed without kParseTrailingCommasFlag
+    TESTERRORHANDLING("{\"a\": 1,}", kParseErrorObjectMissName, 8u);
+    TESTERRORHANDLING("[1,2,3,]", kParseErrorValueInvalid, 7u);
+
+    // Any JSON value can be a valid root element in RFC7159.
+    TESTERRORHANDLING("\"ab", kParseErrorStringMissQuotationMark, 3u);
+    TESTERRORHANDLING("truE", kParseErrorValueInvalid, 3u);
+    TESTERRORHANDLING("False", kParseErrorValueInvalid, 0u);
+    TESTERRORHANDLING("true, false", kParseErrorDocumentRootNotSingular, 4u);
+    TESTERRORHANDLING("false, false", kParseErrorDocumentRootNotSingular, 5u);
+    TESTERRORHANDLING("nulL", kParseErrorValueInvalid, 3u);
+    TESTERRORHANDLING("null , null", kParseErrorDocumentRootNotSingular, 5u);
+    TESTERRORHANDLING("1a", kParseErrorDocumentRootNotSingular, 1u);
+}
+
+// Handler that records each parse event into Logs[]. The event type lives in
+// the top nibble of each uint32_t entry; for EndObject/EndArray the low
+// 28 bits carry the member/element count (hence the asserts below).
+template<typename Encoding = UTF8<> >
+struct IterativeParsingReaderHandler {
+    typedef typename Encoding::Ch Ch;
+
+    const static uint32_t LOG_NULL        = 0x10000000;
+    const static uint32_t LOG_BOOL        = 0x20000000;
+    const static uint32_t LOG_INT         = 0x30000000;
+    const static uint32_t LOG_UINT        = 0x40000000;
+    const static uint32_t LOG_INT64       = 0x50000000;
+    const static uint32_t LOG_UINT64      = 0x60000000;
+    const static uint32_t LOG_DOUBLE      = 0x70000000;
+    const static uint32_t LOG_STRING      = 0x80000000;
+    const static uint32_t LOG_STARTOBJECT = 0x90000000;
+    const static uint32_t LOG_KEY         = 0xA0000000;
+    const static uint32_t LOG_ENDOBJECT   = 0xB0000000;
+    const static uint32_t LOG_STARTARRAY  = 0xC0000000;
+    const static uint32_t LOG_ENDARRAY    = 0xD0000000;
+
+    const static size_t LogCapacity = 256;
+    uint32_t Logs[LogCapacity];
+    size_t LogCount;
+
+    IterativeParsingReaderHandler() : LogCount(0) {
+    }
+
+    bool Null() { RAPIDJSON_ASSERT(LogCount < LogCapacity); Logs[LogCount++] = LOG_NULL; return true; }
+
+    bool Bool(bool) { RAPIDJSON_ASSERT(LogCount < LogCapacity); Logs[LogCount++] = LOG_BOOL; return true; }
+
+    bool Int(int) { RAPIDJSON_ASSERT(LogCount < LogCapacity); Logs[LogCount++] = LOG_INT; return true; }
+
+    // Note: Uint deliberately logs LOG_INT (not LOG_UINT) — the expected
+    // event arrays in the tests below rely on this.
+    bool Uint(unsigned) { RAPIDJSON_ASSERT(LogCount < LogCapacity); Logs[LogCount++] = LOG_INT; return true; }
+
+    bool Int64(int64_t) { RAPIDJSON_ASSERT(LogCount < LogCapacity); Logs[LogCount++] = LOG_INT64; return true; }
+
+    bool Uint64(uint64_t) { RAPIDJSON_ASSERT(LogCount < LogCapacity); Logs[LogCount++] = LOG_UINT64; return true; }
+
+    bool Double(double) { RAPIDJSON_ASSERT(LogCount < LogCapacity); Logs[LogCount++] = LOG_DOUBLE; return true; }
+
+    bool RawNumber(const Ch*, SizeType, bool) { RAPIDJSON_ASSERT(LogCount < LogCapacity); Logs[LogCount++] = LOG_STRING; return true; }
+
+    bool String(const Ch*, SizeType, bool) { RAPIDJSON_ASSERT(LogCount < LogCapacity); Logs[LogCount++] = LOG_STRING; return true; }
+
+    bool StartObject() { RAPIDJSON_ASSERT(LogCount < LogCapacity); Logs[LogCount++] = LOG_STARTOBJECT; return true; }
+
+    bool Key (const Ch*, SizeType, bool) { RAPIDJSON_ASSERT(LogCount < LogCapacity); Logs[LogCount++] = LOG_KEY; return true; }
+
+    bool EndObject(SizeType c) {
+        RAPIDJSON_ASSERT(LogCount < LogCapacity);
+        RAPIDJSON_ASSERT((static_cast<uint32_t>(c) & 0xF0000000) == 0);
+        Logs[LogCount++] = LOG_ENDOBJECT | static_cast<uint32_t>(c);
+        return true;
+    }
+
+    bool StartArray() { RAPIDJSON_ASSERT(LogCount < LogCapacity); Logs[LogCount++] = LOG_STARTARRAY; return true; }
+
+    bool EndArray(SizeType c) {
+        RAPIDJSON_ASSERT(LogCount < LogCapacity);
+        RAPIDJSON_ASSERT((static_cast<uint32_t>(c) & 0xF0000000) == 0);
+        Logs[LogCount++] = LOG_ENDARRAY | static_cast<uint32_t>(c);
+        return true;
+    }
+};
+
+// Iterative parse of a mixed document; the logged event sequence must match
+// e[] exactly (end events carry the element/member count in the low bits).
+TEST(Reader, IterativeParsing_General) {
+    {
+        StringStream is("[1, {\"k\": [1, 2]}, null, false, true, \"string\", 1.2]");
+        Reader reader;
+        IterativeParsingReaderHandler<> handler;
+
+        ParseResult r = reader.Parse<kParseIterativeFlag>(is, handler);
+
+        EXPECT_FALSE(r.IsError());
+        EXPECT_FALSE(reader.HasParseError());
+
+        uint32_t e[] = {
+            handler.LOG_STARTARRAY,
+            handler.LOG_INT,
+            handler.LOG_STARTOBJECT,
+            handler.LOG_KEY,
+            handler.LOG_STARTARRAY,
+            handler.LOG_INT,
+            handler.LOG_INT,
+            handler.LOG_ENDARRAY | 2,
+            handler.LOG_ENDOBJECT | 1,
+            handler.LOG_NULL,
+            handler.LOG_BOOL,
+            handler.LOG_BOOL,
+            handler.LOG_STRING,
+            handler.LOG_DOUBLE,
+            handler.LOG_ENDARRAY | 7
+        };
+
+        EXPECT_EQ(sizeof(e) / sizeof(int), handler.LogCount);
+
+        for (size_t i = 0; i < handler.LogCount; ++i) {
+            EXPECT_EQ(e[i], handler.Logs[i]) << "i = " << i;
+        }
+    }
+}
+
+// Focuses on the counts reported by EndObject/EndArray for nested
+// empty and non-empty containers.
+TEST(Reader, IterativeParsing_Count) {
+    {
+        StringStream is("[{}, {\"k\": 1}, [1], []]");
+        Reader reader;
+        IterativeParsingReaderHandler<> handler;
+
+        ParseResult r = reader.Parse<kParseIterativeFlag>(is, handler);
+
+        EXPECT_FALSE(r.IsError());
+        EXPECT_FALSE(reader.HasParseError());
+
+        uint32_t e[] = {
+            handler.LOG_STARTARRAY,
+            handler.LOG_STARTOBJECT,
+            handler.LOG_ENDOBJECT | 0,
+            handler.LOG_STARTOBJECT,
+            handler.LOG_KEY,
+            handler.LOG_INT,
+            handler.LOG_ENDOBJECT | 1,
+            handler.LOG_STARTARRAY,
+            handler.LOG_INT,
+            handler.LOG_ENDARRAY | 1,
+            handler.LOG_STARTARRAY,
+            handler.LOG_ENDARRAY | 0,
+            handler.LOG_ENDARRAY | 4
+        };
+
+        EXPECT_EQ(sizeof(e) / sizeof(int), handler.LogCount);
+
+        for (size_t i = 0; i < handler.LogCount; ++i) {
+            EXPECT_EQ(e[i], handler.Logs[i]) << "i = " << i;
+        }
+    }
+}
+
+// Pull-style API: each IterativeParseNext() must deliver exactly one event,
+// in order, and calling it again after completion is a harmless no-op.
+TEST(Reader, IterativePullParsing_General) {
+    {
+        IterativeParsingReaderHandler<> handler;
+        uint32_t e[] = {
+            handler.LOG_STARTARRAY,
+            handler.LOG_INT,
+            handler.LOG_STARTOBJECT,
+            handler.LOG_KEY,
+            handler.LOG_STARTARRAY,
+            handler.LOG_INT,
+            handler.LOG_INT,
+            handler.LOG_ENDARRAY | 2,
+            handler.LOG_ENDOBJECT | 1,
+            handler.LOG_NULL,
+            handler.LOG_BOOL,
+            handler.LOG_BOOL,
+            handler.LOG_STRING,
+            handler.LOG_DOUBLE,
+            handler.LOG_ENDARRAY | 7
+        };
+
+        StringStream is("[1, {\"k\": [1, 2]}, null, false, true, \"string\", 1.2]");
+        Reader reader;
+
+        reader.IterativeParseInit();
+        while (!reader.IterativeParseComplete()) {
+            size_t oldLogCount = handler.LogCount;
+            EXPECT_TRUE(oldLogCount < sizeof(e) / sizeof(int)) << "overrun";
+
+            EXPECT_TRUE(reader.IterativeParseNext<kParseDefaultFlags>(is, handler)) << "parse fail";
+            EXPECT_EQ(handler.LogCount, oldLogCount + 1) << "handler should be invoked exactly once each time";
+            EXPECT_EQ(e[oldLogCount], handler.Logs[oldLogCount]) << "wrong event returned";
+        }
+
+        EXPECT_FALSE(reader.HasParseError());
+        EXPECT_EQ(sizeof(e) / sizeof(int), handler.LogCount) << "handler invoked wrong number of times";
+
+        // The handler should not be invoked when the JSON has been fully read, but it should not fail
+        size_t oldLogCount = handler.LogCount;
+        EXPECT_TRUE(reader.IterativeParseNext<kParseDefaultFlags>(is, handler)) << "parse-next past complete is allowed";
+        EXPECT_EQ(handler.LogCount, oldLogCount) << "parse-next past complete should not invoke handler";
+        EXPECT_FALSE(reader.HasParseError()) << "parse-next past complete should not generate parse error";
+    }
+}
+
+// Test iterative parsing on kParseErrorTermination.
+// Each handler returns false from exactly one event to force the reader
+// to stop with kParseErrorTermination at a known offset.
+struct HandlerTerminateAtStartObject : public IterativeParsingReaderHandler<> {
+    bool StartObject() { return false; }
+};
+
+struct HandlerTerminateAtStartArray : public IterativeParsingReaderHandler<> {
+    bool StartArray() { return false; }
+};
+
+struct HandlerTerminateAtEndObject : public IterativeParsingReaderHandler<> {
+    bool EndObject(SizeType) { return false; }
+};
+
+struct HandlerTerminateAtEndArray : public IterativeParsingReaderHandler<> {
+    bool EndArray(SizeType) { return false; }
+};
+
+TEST(Reader, IterativeParsing_ShortCircuit) {
+    {
+        HandlerTerminateAtStartObject handler;
+        Reader reader;
+        StringStream is("[1, {}]");
+
+        ParseResult r = reader.Parse<kParseIterativeFlag>(is, handler);
+
+        EXPECT_TRUE(reader.HasParseError());
+        EXPECT_EQ(kParseErrorTermination, r.Code());
+        EXPECT_EQ(4u, r.Offset());
+    }
+
+    {
+        HandlerTerminateAtStartArray handler;
+        Reader reader;
+        StringStream is("{\"a\": []}");
+
+        ParseResult r = reader.Parse<kParseIterativeFlag>(is, handler);
+
+        EXPECT_TRUE(reader.HasParseError());
+        EXPECT_EQ(kParseErrorTermination, r.Code());
+        EXPECT_EQ(6u, r.Offset());
+    }
+
+    {
+        HandlerTerminateAtEndObject handler;
+        Reader reader;
+        StringStream is("[1, {}]");
+
+        ParseResult r = reader.Parse<kParseIterativeFlag>(is, handler);
+
+        EXPECT_TRUE(reader.HasParseError());
+        EXPECT_EQ(kParseErrorTermination, r.Code());
+        EXPECT_EQ(5u, r.Offset());
+    }
+
+    {
+        HandlerTerminateAtEndArray handler;
+        Reader reader;
+        StringStream is("{\"a\": []}");
+
+        ParseResult r = reader.Parse<kParseIterativeFlag>(is, handler);
+
+        EXPECT_TRUE(reader.HasParseError());
+        EXPECT_EQ(kParseErrorTermination, r.Code());
+        EXPECT_EQ(7u, r.Offset());
+    }
+}
+
+// For covering BaseReaderHandler default functions
+TEST(Reader, BaseReaderHandler_Default) {
+    BaseReaderHandler<> h;
+    Reader reader;
+    StringStream is("[null, true, -1, 1, -1234567890123456789, 1234567890123456789, 3.14, \"s\", { \"a\" : 1 }]");
+    EXPECT_TRUE(reader.Parse(is, h));
+}
+
+// Returns false from exactly the handler function selected by e (0..13),
+// so a document exercising that event terminates the parse.
+template <int e>
+struct TerminateHandler {
+    bool Null() { return e != 0; }
+    bool Bool(bool) { return e != 1; }
+    bool Int(int) { return e != 2; }
+    bool Uint(unsigned) { return e != 3; }
+    bool Int64(int64_t) { return e != 4; }
+    bool Uint64(uint64_t) { return e != 5;  }
+    bool Double(double) { return e != 6; }
+    bool RawNumber(const char*, SizeType, bool) { return e != 7; }
+    bool String(const char*, SizeType, bool) { return e != 8; }
+    bool StartObject() { return e != 9; }
+    bool Key(const char*, SizeType, bool)  { return e != 10; }
+    bool EndObject(SizeType) { return e != 11; }
+    bool StartArray() { return e != 12; }
+    bool EndArray(SizeType) { return e != 13; }
+};
+
+// Parse `json` with TerminateHandler<e> and expect kParseErrorTermination.
+#define TEST_TERMINATION(e, json)\
+{\
+    Reader reader;\
+    TerminateHandler<e> h;\
+    StringStream is(json);\
+    EXPECT_FALSE(reader.Parse(is, h));\
+    EXPECT_EQ(kParseErrorTermination, reader.GetParseErrorCode());\
+}
+
+TEST(Reader, ParseTerminationByHandler) {
+    TEST_TERMINATION(0, "[null");
+    TEST_TERMINATION(1, "[true");
+    TEST_TERMINATION(1, "[false");
+    TEST_TERMINATION(2, "[-1");
+    TEST_TERMINATION(3, "[1");
+    TEST_TERMINATION(4, "[-1234567890123456789");
+    TEST_TERMINATION(5, "[1234567890123456789");
+    TEST_TERMINATION(6, "[0.5]");
+    // RawNumber() is never called
+    TEST_TERMINATION(8, "[\"a\"");
+    TEST_TERMINATION(9, "[{");
+    TEST_TERMINATION(10, "[{\"a\"");
+    TEST_TERMINATION(11, "[{}");
+    TEST_TERMINATION(11, "[{\"a\":1}"); // non-empty object
+    TEST_TERMINATION(12, "{\"a\":[");
+    TEST_TERMINATION(13, "{\"a\":[]");
+    TEST_TERMINATION(13, "{\"a\":[1]"); // non-empty array
+}
+
+// With kParseCommentsFlag, // and /* */ comments are skipped anywhere
+// whitespace is allowed; the full 20-event object sequence must still arrive.
+TEST(Reader, ParseComments) {
+    const char* json =
+    "// Here is a one-line comment.\n"
+    "{// And here's another one\n"
+    "   /*And here's an in-line one.*/\"hello\" : \"world\","
+    "   \"t\" :/* And one with '*' symbol*/true ,"
+    "/* A multiline comment\n"
+    "   goes here*/"
+    "   \"f\" : false, \"n\": null, \"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3]"
+    "}/*And the last one to be sure */";
+
+    StringStream s(json);
+    ParseObjectHandler h;
+    Reader reader;
+    EXPECT_TRUE(reader.Parse<kParseCommentsFlag>(s, h));
+    EXPECT_EQ(20u, h.step_);
+}
+
+TEST(Reader, ParseEmptyInlineComment) {
+    const char* json = "{/**/\"hello\" : \"world\", \"t\" : true, \"f\" : false, \"n\": null, \"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3] }";
+
+    StringStream s(json);
+    ParseObjectHandler h;
+    Reader reader;
+    EXPECT_TRUE(reader.Parse<kParseCommentsFlag>(s, h));
+    EXPECT_EQ(20u, h.step_);
+}
+
+TEST(Reader, ParseEmptyOnelineComment) {
+    const char* json = "{//\n\"hello\" : \"world\", \"t\" : true, \"f\" : false, \"n\": null, \"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3] }";
+
+    StringStream s(json);
+    ParseObjectHandler h;
+    Reader reader;
+    EXPECT_TRUE(reader.Parse<kParseCommentsFlag>(s, h));
+    EXPECT_EQ(20u, h.step_);
+}
+
+TEST(Reader, ParseMultipleCommentsInARow) {
+    const char* json =
+    "{/* first comment *//* second */\n"
+    "/* third */ /*fourth*/// last one\n"
+    "\"hello\" : \"world\", \"t\" : true, \"f\" : false, \"n\": null, \"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3] }";
+
+    StringStream s(json);
+    ParseObjectHandler h;
+    Reader reader;
+    EXPECT_TRUE(reader.Parse<kParseCommentsFlag>(s, h));
+    EXPECT_EQ(20u, h.step_);
+}
+
+// Without kParseCommentsFlag, comments are a syntax error.
+TEST(Reader, InlineCommentsAreDisabledByDefault) {
+    {
+        const char* json = "{/* Inline comment. */\"hello\" : \"world\", \"t\" : true, \"f\" : false, \"n\": null, \"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3] }";
+
+        StringStream s(json);
+        ParseObjectHandler h;
+        Reader reader;
+        EXPECT_FALSE(reader.Parse<kParseDefaultFlags>(s, h));
+    }
+
+    {
+        const char* json =
+        "{\"hello\" : /* Multiline comment starts here\n"
+        " continues here\n"
+        " and ends here */\"world\", \"t\" :true , \"f\" : false, \"n\": null, \"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3] }";
+
+        StringStream s(json);
+        ParseObjectHandler h;
+        Reader reader;
+        EXPECT_FALSE(reader.Parse<kParseDefaultFlags>(s, h));
+    }
+}
+
+TEST(Reader, OnelineCommentsAreDisabledByDefault) {
+    const char* json = "{// One-line comment\n\"hello\" : \"world\", \"t\" : true , \"f\" : false, \"n\": null, \"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3] }";
+
+    StringStream s(json);
+    ParseObjectHandler h;
+    Reader reader;
+    EXPECT_FALSE(reader.Parse<kParseDefaultFlags>(s, h));
+}
+
+// The embedded '\0' truncates the stream inside the comment; the parser
+// must report an error rather than read past end of input.
+TEST(Reader, EofAfterOneLineComment) {
+    const char* json = "{\"hello\" : \"world\" // EOF is here -->\0 \n}";
+
+    StringStream s(json);
+    ParseObjectHandler h;
+    Reader reader;
+    EXPECT_FALSE(reader.Parse<kParseCommentsFlag>(s, h));
+    EXPECT_EQ(kParseErrorObjectMissCommaOrCurlyBracket, reader.GetParseErrorCode());
+}
+
+TEST(Reader, IncompleteMultilineComment) {
+    const char* json = "{\"hello\" : \"world\" /* EOF is here -->\0 */}";
+
+    StringStream s(json);
+    ParseObjectHandler h;
+    Reader reader;
+    EXPECT_FALSE(reader.Parse<kParseCommentsFlag>(s, h));
+    EXPECT_EQ(kParseErrorUnspecificSyntaxError, reader.GetParseErrorCode());
+}
+
+TEST(Reader, IncompleteMultilineComment2) {
+    const char* json = "{\"hello\" : \"world\" /* *\0 */}";
+
+    StringStream s(json);
+    ParseObjectHandler h;
+    Reader reader;
+    EXPECT_FALSE(reader.Parse<kParseCommentsFlag>(s, h));
+    EXPECT_EQ(kParseErrorUnspecificSyntaxError, reader.GetParseErrorCode());
+}
+
+TEST(Reader, UnrecognizedComment) {
+    const char* json = "{\"hello\" : \"world\" /! }";
+
+    StringStream s(json);
+    ParseObjectHandler h;
+    Reader reader;
+    EXPECT_FALSE(reader.Parse<kParseCommentsFlag>(s, h));
+    EXPECT_EQ(kParseErrorUnspecificSyntaxError, reader.GetParseErrorCode());
+}
+
+// Accepts all events; RawNumber() (invoked under kParseNumbersAsStringsFlag)
+// checks the raw number text against the expected string via strncmp,
+// since 'str' is not null-terminated in the non-insitu case.
+struct NumbersAsStringsHandler {
+    bool Null() { return true; }
+    bool Bool(bool) { return true; }
+    bool Int(int) { return true; }
+    bool Uint(unsigned) { return true; }
+    bool Int64(int64_t) { return true; }
+    bool Uint64(uint64_t) { return true;  }
+    bool Double(double) { return true; }
+    // 'str' is not null-terminated
+    bool RawNumber(const char* str, SizeType length, bool) {
+        EXPECT_TRUE(str != 0);
+        EXPECT_TRUE(expected_len_ == length);
+        EXPECT_TRUE(strncmp(str, expected_, length) == 0);
+        return true;
+    }
+    bool String(const char*, SizeType, bool) { return true; }
+    bool StartObject() { return true; }
+    bool Key(const char*, SizeType, bool) { return true; }
+    bool EndObject(SizeType) { return true; }
+    bool StartArray() { return true; }
+    bool EndArray(SizeType) { return true; }
+
+    // expected: null-terminated literal the next RawNumber() must match.
+    NumbersAsStringsHandler(const char* expected)
+        : expected_(expected)
+        , expected_len_(strlen(expected)) {}
+
+    const char* expected_;
+    size_t expected_len_;
+};
+
+TEST(Reader, NumbersAsStrings) {
+    {
+        const char* json = "{ \"pi\": 3.1416 } ";
+        StringStream s(json);
+        NumbersAsStringsHandler h("3.1416");
+        Reader reader;
+        EXPECT_TRUE(reader.Parse<kParseNumbersAsStringsFlag>(s, h));
+    }
+    {
+        char* json = StrDup("{ \"pi\": 3.1416 } ");
+        InsituStringStream s(json);
+        NumbersAsStringsHandler h("3.1416");
+        Reader reader;
+        EXPECT_TRUE(reader.Parse<kParseInsituFlag|kParseNumbersAsStringsFlag>(s, h));
+        free(json);
+    }
+    {
+        const char* json = "{ \"gigabyte\": 1.0e9 } ";
+        StringStream s(json);
+        NumbersAsStringsHandler h("1.0e9");
+        Reader reader;
+        EXPECT_TRUE(reader.Parse<kParseNumbersAsStringsFlag>(s, h));
+    }
+    {
+        char* json = StrDup("{ \"gigabyte\": 1.0e9 } ");
+        InsituStringStream s(json);
+        NumbersAsStringsHandler h("1.0e9");
+        Reader reader;
+        EXPECT_TRUE(reader.Parse<kParseInsituFlag|kParseNumbersAsStringsFlag>(s, h));
+        free(json);
+    }
+    {
+        const char* json = "{ \"pi\": 314.159e-2 } ";
+        StringStream s(json);
+        NumbersAsStringsHandler h("314.159e-2");
+        Reader reader;
+        EXPECT_TRUE(reader.Parse<kParseNumbersAsStringsFlag>(s, h));
+    }
+    {
+        char* json = StrDup("{ \"gigabyte\": 314.159e-2 } ");
+        InsituStringStream s(json);
+        NumbersAsStringsHandler h("314.159e-2");
+        Reader reader;
+        EXPECT_TRUE(reader.Parse<kParseInsituFlag|kParseNumbersAsStringsFlag>(s, h));
+        free(json);
+    }
+    {
+        const char* json = "{ \"negative\": -1.54321 } ";
+        StringStream s(json);
+        NumbersAsStringsHandler h("-1.54321");
+        Reader reader;
+        EXPECT_TRUE(reader.Parse<kParseNumbersAsStringsFlag>(s, h));
+    }
+    {
+        char* json = StrDup("{ \"negative\": -1.54321 } ");
+        InsituStringStream s(json);
+        NumbersAsStringsHandler h("-1.54321");
+        Reader reader;
+        EXPECT_TRUE(reader.Parse<kParseInsituFlag|kParseNumbersAsStringsFlag>(s, h));
+        free(json);
+    }
+    {
+        const char* json = "{ \"pi\": 314.159e-2 } ";
+        std::stringstream ss(json);
+        IStreamWrapper s(ss);
+        NumbersAsStringsHandler h("314.159e-2");
+        Reader reader;
+        EXPECT_TRUE(reader.Parse<kParseNumbersAsStringsFlag>(s, h));
+    }
+    {
+        char n1e319[321];   // '1' followed by 319 '0'
+        n1e319[0] = '1';
+        for (int i = 1; i < 320; i++)
+            n1e319[i] = '0';
+        n1e319[320] = '\0';
+        StringStream s(n1e319);
+        NumbersAsStringsHandler h(n1e319);
+        Reader reader;
+        EXPECT_TRUE(reader.Parse<kParseNumbersAsStringsFlag>(s, h));
+    }
+}
+
+template <unsigned extraFlags>
+void TestTrailingCommas() {
+    {
+        StringStream s("[1,2,3,]");
+        ParseArrayHandler<3> h;
+        Reader reader;
+        EXPECT_TRUE(reader.Parse<extraFlags|kParseTrailingCommasFlag>(s, h));
+        EXPECT_EQ(5u, h.step_);
+    }
+    {
+        const char* json = "{ \"hello\" : \"world\", \"t\" : true , \"f\" : false,"
+                "\"n\": null, \"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3],}";
+        StringStream s(json);
+        ParseObjectHandler h;
+        Reader reader;
+        EXPECT_TRUE(reader.Parse<extraFlags|kParseTrailingCommasFlag>(s, h));
+        EXPECT_EQ(20u, h.step_);
+    }
+    {
+        // whitespace around trailing commas
+        const char* json = "{ \"hello\" : \"world\", \"t\" : true , \"f\" : false,"
+                "\"n\": null, \"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3\n,\n]\n,\n} ";
+        StringStream s(json);
+        ParseObjectHandler h;
+        Reader reader;
+        EXPECT_TRUE(reader.Parse<extraFlags|kParseTrailingCommasFlag>(s, h));
+        EXPECT_EQ(20u, h.step_);
+    }
+    {
+        // comments around trailing commas
+        const char* json = "{ \"hello\" : \"world\", \"t\" : true , \"f\" : false, \"n\": null,"
+                "\"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3/*test*/,/*test*/]/*test*/,/*test*/}";
+        StringStream s(json);
+        ParseObjectHandler h;
+        Reader reader;
+        EXPECT_TRUE(reader.Parse<extraFlags|kParseTrailingCommasFlag|kParseCommentsFlag>(s, h));
+        EXPECT_EQ(20u, h.step_);
+    }
+}
+
+TEST(Reader, TrailingCommas) {
+    TestTrailingCommas<kParseNoFlags>();
+}
+
+TEST(Reader, TrailingCommasIterative) {
+    TestTrailingCommas<kParseIterativeFlag>();
+}
+
+template <unsigned extraFlags>
+void TestMultipleTrailingCommaErrors() {
+    // only a single trailing comma is allowed.
+    {
+        StringStream s("[1,2,3,,]");
+        ParseArrayHandler<3> h;
+        Reader reader;
+        ParseResult r = reader.Parse<extraFlags|kParseTrailingCommasFlag>(s, h);
+        EXPECT_TRUE(reader.HasParseError());
+        EXPECT_EQ(kParseErrorValueInvalid, r.Code());
+        EXPECT_EQ(7u, r.Offset());
+    }
+    {
+        const char* json = "{ \"hello\" : \"world\", \"t\" : true , \"f\" : false,"
+                "\"n\": null, \"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3,],,}";
+        StringStream s(json);
+        ParseObjectHandler h;
+        Reader reader;
+        ParseResult r = reader.Parse<extraFlags|kParseTrailingCommasFlag>(s, h);
+        EXPECT_TRUE(reader.HasParseError());
+        EXPECT_EQ(kParseErrorObjectMissName, r.Code());
+        EXPECT_EQ(95u, r.Offset());
+    }
+}
+
+TEST(Reader, MultipleTrailingCommaErrors) {
+    TestMultipleTrailingCommaErrors<kParseNoFlags>();
+}
+
+TEST(Reader, MultipleTrailingCommaErrorsIterative) {
+    TestMultipleTrailingCommaErrors<kParseIterativeFlag>();
+}
+
+template <unsigned extraFlags>
+void TestEmptyExceptForCommaErrors() {
+    // not allowed even with trailing commas enabled; the
+    // trailing comma must follow a value.
+    {
+        StringStream s("[,]");
+        ParseArrayHandler<3> h;
+        Reader reader;
+        ParseResult r = reader.Parse<extraFlags|kParseTrailingCommasFlag>(s, h);
+        EXPECT_TRUE(reader.HasParseError());
+        EXPECT_EQ(kParseErrorValueInvalid, r.Code());
+        EXPECT_EQ(1u, r.Offset());
+    }
+    {
+        StringStream s("{,}");
+        ParseObjectHandler h;
+        Reader reader;
+        ParseResult r = reader.Parse<extraFlags|kParseTrailingCommasFlag>(s, h);
+        EXPECT_TRUE(reader.HasParseError());
+        EXPECT_EQ(kParseErrorObjectMissName, r.Code());
+        EXPECT_EQ(1u, r.Offset());
+    }
+}
+
+TEST(Reader, EmptyExceptForCommaErrors) {
+    TestEmptyExceptForCommaErrors<kParseNoFlags>();
+}
+
+TEST(Reader, EmptyExceptForCommaErrorsIterative) {
+    TestEmptyExceptForCommaErrors<kParseIterativeFlag>();
+}
+
+template <unsigned extraFlags>
+void TestTrailingCommaHandlerTermination() {
+    {
+        HandlerTerminateAtEndArray h;
+        Reader reader;
+        StringStream s("[1,2,3,]");
+        ParseResult r = reader.Parse<extraFlags|kParseTrailingCommasFlag>(s, h);
+        EXPECT_TRUE(reader.HasParseError());
+        EXPECT_EQ(kParseErrorTermination, r.Code());
+        EXPECT_EQ(7u, r.Offset());
+    }
+    {
+        HandlerTerminateAtEndObject h;
+        Reader reader;
+        StringStream s("{\"t\": true, \"f\": false,}");
+        ParseResult r = reader.Parse<extraFlags|kParseTrailingCommasFlag>(s, h);
+        EXPECT_TRUE(reader.HasParseError());
+        EXPECT_EQ(kParseErrorTermination, r.Code());
+        EXPECT_EQ(23u, r.Offset());
+    }
+}
+
+TEST(Reader, TrailingCommaHandlerTermination) {
+    TestTrailingCommaHandlerTermination<kParseNoFlags>();
+}
+
+TEST(Reader, TrailingCommaHandlerTerminationIterative) {
+    TestTrailingCommaHandlerTermination<kParseIterativeFlag>();
+}
+
+TEST(Reader, ParseNanAndInfinity) {
+#define TEST_NAN_INF(str, x) \
+    { \
+        { \
+            StringStream s(str); \
+            ParseDoubleHandler h; \
+            Reader reader; \
+            ASSERT_EQ(kParseErrorNone, reader.Parse<kParseNanAndInfFlag>(s, h).Code()); \
+            EXPECT_EQ(1u, h.step_); \
+            internal::Double e(x), a(h.actual_); \
+            EXPECT_EQ(e.IsNan(), a.IsNan()); \
+            EXPECT_EQ(e.IsInf(), a.IsInf()); \
+            if (!e.IsNan()) \
+                EXPECT_EQ(e.Sign(), a.Sign()); \
+        } \
+        { \
+            const char* json = "{ \"naninfdouble\": " str " } "; \
+            StringStream s(json); \
+            NumbersAsStringsHandler h(str); \
+            Reader reader; \
+            EXPECT_TRUE(reader.Parse<kParseNumbersAsStringsFlag|kParseNanAndInfFlag>(s, h)); \
+        } \
+        { \
+            char* json = StrDup("{ \"naninfdouble\": " str " } "); \
+            InsituStringStream s(json); \
+            NumbersAsStringsHandler h(str); \
+            Reader reader; \
+            EXPECT_TRUE(reader.Parse<kParseInsituFlag|kParseNumbersAsStringsFlag|kParseNanAndInfFlag>(s, h)); \
+            free(json); \
+        } \
+    }
+#define TEST_NAN_INF_ERROR(errorCode, str, errorOffset) \
+    { \
+        unsigned streamPos = errorOffset; \
+        char buffer[1001]; \
+        strncpy(buffer, str, 1000); \
+        InsituStringStream s(buffer); \
+        BaseReaderHandler<> h; \
+        Reader reader; \
+        EXPECT_FALSE(reader.Parse<kParseNanAndInfFlag>(s, h)); \
+        EXPECT_EQ(errorCode, reader.GetParseErrorCode());\
+        EXPECT_EQ(errorOffset, reader.GetErrorOffset());\
+        EXPECT_EQ(streamPos, s.Tell());\
+    }
+
+    double nan = std::numeric_limits<double>::quiet_NaN();
+    double inf = std::numeric_limits<double>::infinity();
+
+    TEST_NAN_INF("NaN", nan);
+    TEST_NAN_INF("-NaN", nan);
+    TEST_NAN_INF("Inf", inf);
+    TEST_NAN_INF("Infinity", inf);
+    TEST_NAN_INF("-Inf", -inf);
+    TEST_NAN_INF("-Infinity", -inf);
+    TEST_NAN_INF_ERROR(kParseErrorValueInvalid, "NInf", 1u);
+    TEST_NAN_INF_ERROR(kParseErrorValueInvalid, "NaInf", 2u);
+    TEST_NAN_INF_ERROR(kParseErrorValueInvalid, "INan", 1u);
+    TEST_NAN_INF_ERROR(kParseErrorValueInvalid, "InNan", 2u);
+    TEST_NAN_INF_ERROR(kParseErrorValueInvalid, "nan", 1u);
+    TEST_NAN_INF_ERROR(kParseErrorValueInvalid, "-nan", 1u);
+    TEST_NAN_INF_ERROR(kParseErrorValueInvalid, "NAN", 1u);
+    TEST_NAN_INF_ERROR(kParseErrorValueInvalid, "-Infinty", 6u);
+
+#undef TEST_NAN_INF_ERROR
+#undef TEST_NAN_INF
+}
+
+RAPIDJSON_DIAG_POP
diff --git a/test/unittest/regextest.cpp b/test/unittest/regextest.cpp
new file mode 100644
index 0000000..cf89973
--- /dev/null
+++ b/test/unittest/regextest.cpp
@@ -0,0 +1,639 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+#include "rapidjson/internal/regex.h"
+
+using namespace rapidjson::internal;
+
+TEST(Regex, Single) {
+    Regex re("a");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("a"));
+    EXPECT_FALSE(rs.Match(""));
+    EXPECT_FALSE(rs.Match("b"));
+}
+
+TEST(Regex, Concatenation) {
+    Regex re("abc");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("abc"));
+    EXPECT_FALSE(rs.Match(""));
+    EXPECT_FALSE(rs.Match("a"));
+    EXPECT_FALSE(rs.Match("b"));
+    EXPECT_FALSE(rs.Match("ab"));
+    EXPECT_FALSE(rs.Match("abcd"));
+}
+
+TEST(Regex, Alternation1) {
+    Regex re("abab|abbb");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("abab"));
+    EXPECT_TRUE(rs.Match("abbb"));
+    EXPECT_FALSE(rs.Match(""));
+    EXPECT_FALSE(rs.Match("ab"));
+    EXPECT_FALSE(rs.Match("ababa"));
+    EXPECT_FALSE(rs.Match("abb"));
+    EXPECT_FALSE(rs.Match("abbbb"));
+}
+
+TEST(Regex, Alternation2) {
+    Regex re("a|b|c");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("a"));
+    EXPECT_TRUE(rs.Match("b"));
+    EXPECT_TRUE(rs.Match("c"));
+    EXPECT_FALSE(rs.Match(""));
+    EXPECT_FALSE(rs.Match("aa"));
+    EXPECT_FALSE(rs.Match("ab"));
+}
+
+TEST(Regex, Parenthesis1) {
+    Regex re("(ab)c");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("abc"));
+    EXPECT_FALSE(rs.Match(""));
+    EXPECT_FALSE(rs.Match("a"));
+    EXPECT_FALSE(rs.Match("b"));
+    EXPECT_FALSE(rs.Match("ab"));
+    EXPECT_FALSE(rs.Match("abcd"));
+}
+
+TEST(Regex, Parenthesis2) {
+    Regex re("a(bc)");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("abc"));
+    EXPECT_FALSE(rs.Match(""));
+    EXPECT_FALSE(rs.Match("a"));
+    EXPECT_FALSE(rs.Match("b"));
+    EXPECT_FALSE(rs.Match("ab"));
+    EXPECT_FALSE(rs.Match("abcd"));
+}
+
+TEST(Regex, Parenthesis3) {
+    Regex re("(a|b)(c|d)");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("ac"));
+    EXPECT_TRUE(rs.Match("ad"));
+    EXPECT_TRUE(rs.Match("bc"));
+    EXPECT_TRUE(rs.Match("bd"));
+    EXPECT_FALSE(rs.Match(""));
+    EXPECT_FALSE(rs.Match("ab"));
+    EXPECT_FALSE(rs.Match("cd"));
+}
+
+TEST(Regex, ZeroOrOne1) {
+    Regex re("a?");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match(""));
+    EXPECT_TRUE(rs.Match("a"));
+    EXPECT_FALSE(rs.Match("aa"));
+}
+
+TEST(Regex, ZeroOrOne2) {
+    Regex re("a?b");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("b"));
+    EXPECT_TRUE(rs.Match("ab"));
+    EXPECT_FALSE(rs.Match("a"));
+    EXPECT_FALSE(rs.Match("aa"));
+    EXPECT_FALSE(rs.Match("bb"));
+    EXPECT_FALSE(rs.Match("ba"));
+}
+
+TEST(Regex, ZeroOrOne3) {
+    Regex re("ab?");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("a"));
+    EXPECT_TRUE(rs.Match("ab"));
+    EXPECT_FALSE(rs.Match("b"));
+    EXPECT_FALSE(rs.Match("aa"));
+    EXPECT_FALSE(rs.Match("bb"));
+    EXPECT_FALSE(rs.Match("ba"));
+}
+
+TEST(Regex, ZeroOrOne4) {
+    Regex re("a?b?");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match(""));
+    EXPECT_TRUE(rs.Match("a"));
+    EXPECT_TRUE(rs.Match("b"));
+    EXPECT_TRUE(rs.Match("ab"));
+    EXPECT_FALSE(rs.Match("aa"));
+    EXPECT_FALSE(rs.Match("bb"));
+    EXPECT_FALSE(rs.Match("ba"));
+    EXPECT_FALSE(rs.Match("abc"));
+}
+
+TEST(Regex, ZeroOrOne5) {
+    Regex re("a(ab)?b");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("ab"));
+    EXPECT_TRUE(rs.Match("aabb"));
+    EXPECT_FALSE(rs.Match("aab"));
+    EXPECT_FALSE(rs.Match("abb"));
+}
+
+TEST(Regex, ZeroOrMore1) {
+    Regex re("a*");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match(""));
+    EXPECT_TRUE(rs.Match("a"));
+    EXPECT_TRUE(rs.Match("aa"));
+    EXPECT_FALSE(rs.Match("b"));
+    EXPECT_FALSE(rs.Match("ab"));
+}
+
+TEST(Regex, ZeroOrMore2) {
+    Regex re("a*b");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("b"));
+    EXPECT_TRUE(rs.Match("ab"));
+    EXPECT_TRUE(rs.Match("aab"));
+    EXPECT_FALSE(rs.Match(""));
+    EXPECT_FALSE(rs.Match("bb"));
+}
+
+TEST(Regex, ZeroOrMore3) {
+    Regex re("a*b*");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match(""));
+    EXPECT_TRUE(rs.Match("a"));
+    EXPECT_TRUE(rs.Match("aa"));
+    EXPECT_TRUE(rs.Match("b"));
+    EXPECT_TRUE(rs.Match("bb"));
+    EXPECT_TRUE(rs.Match("ab"));
+    EXPECT_TRUE(rs.Match("aabb"));
+    EXPECT_FALSE(rs.Match("ba"));
+}
+
+TEST(Regex, ZeroOrMore4) {
+    Regex re("a(ab)*b");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("ab"));
+    EXPECT_TRUE(rs.Match("aabb"));
+    EXPECT_TRUE(rs.Match("aababb"));
+    EXPECT_FALSE(rs.Match(""));
+    EXPECT_FALSE(rs.Match("aa"));
+}
+
+TEST(Regex, OneOrMore1) {
+    Regex re("a+");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("a"));
+    EXPECT_TRUE(rs.Match("aa"));
+    EXPECT_FALSE(rs.Match(""));
+    EXPECT_FALSE(rs.Match("b"));
+    EXPECT_FALSE(rs.Match("ab"));
+}
+
+TEST(Regex, OneOrMore2) {
+    Regex re("a+b");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("ab"));
+    EXPECT_TRUE(rs.Match("aab"));
+    EXPECT_FALSE(rs.Match(""));
+    EXPECT_FALSE(rs.Match("b"));
+}
+
+TEST(Regex, OneOrMore3) {
+    Regex re("a+b+");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("ab"));
+    EXPECT_TRUE(rs.Match("aab"));
+    EXPECT_TRUE(rs.Match("abb"));
+    EXPECT_TRUE(rs.Match("aabb"));
+    EXPECT_FALSE(rs.Match(""));
+    EXPECT_FALSE(rs.Match("b"));
+    EXPECT_FALSE(rs.Match("ba"));
+}
+
+TEST(Regex, OneOrMore4) {
+    Regex re("a(ab)+b");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("aabb"));
+    EXPECT_TRUE(rs.Match("aababb"));
+    EXPECT_FALSE(rs.Match(""));
+    EXPECT_FALSE(rs.Match("ab"));
+}
+
+TEST(Regex, QuantifierExact1) {
+    Regex re("ab{3}c");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("abbbc"));
+    EXPECT_FALSE(rs.Match("ac"));
+    EXPECT_FALSE(rs.Match("abc"));
+    EXPECT_FALSE(rs.Match("abbc"));
+    EXPECT_FALSE(rs.Match("abbbbc"));
+}
+
+TEST(Regex, QuantifierExact2) {
+    Regex re("a(bc){3}d");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("abcbcbcd"));
+    EXPECT_FALSE(rs.Match("ad"));
+    EXPECT_FALSE(rs.Match("abcd"));
+    EXPECT_FALSE(rs.Match("abcbcd"));
+    EXPECT_FALSE(rs.Match("abcbcbcbcd"));
+}
+
+TEST(Regex, QuantifierExact3) {
+    Regex re("a(b|c){3}d");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("abbbd"));
+    EXPECT_TRUE(rs.Match("acccd"));
+    EXPECT_TRUE(rs.Match("abcbd"));
+    EXPECT_FALSE(rs.Match("ad"));
+    EXPECT_FALSE(rs.Match("abbd"));
+    EXPECT_FALSE(rs.Match("accccd"));
+    EXPECT_FALSE(rs.Match("abbbbd"));
+}
+
+TEST(Regex, QuantifierMin1) {
+    Regex re("ab{3,}c");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("abbbc"));
+    EXPECT_TRUE(rs.Match("abbbbc"));
+    EXPECT_TRUE(rs.Match("abbbbbc"));
+    EXPECT_FALSE(rs.Match("ac"));
+    EXPECT_FALSE(rs.Match("abc"));
+    EXPECT_FALSE(rs.Match("abbc"));
+}
+
+TEST(Regex, QuantifierMin2) {
+    Regex re("a(bc){3,}d");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("abcbcbcd"));
+    EXPECT_TRUE(rs.Match("abcbcbcbcd"));
+    EXPECT_FALSE(rs.Match("ad"));
+    EXPECT_FALSE(rs.Match("abcd"));
+    EXPECT_FALSE(rs.Match("abcbcd"));
+}
+
+TEST(Regex, QuantifierMin3) {
+    Regex re("a(b|c){3,}d");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("abbbd"));
+    EXPECT_TRUE(rs.Match("acccd"));
+    EXPECT_TRUE(rs.Match("abcbd"));
+    EXPECT_TRUE(rs.Match("accccd"));
+    EXPECT_TRUE(rs.Match("abbbbd"));
+    EXPECT_FALSE(rs.Match("ad"));
+    EXPECT_FALSE(rs.Match("abbd"));
+}
+
+TEST(Regex, QuantifierMinMax1) {
+    Regex re("ab{3,5}c");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("abbbc"));
+    EXPECT_TRUE(rs.Match("abbbbc"));
+    EXPECT_TRUE(rs.Match("abbbbbc"));
+    EXPECT_FALSE(rs.Match("ac"));
+    EXPECT_FALSE(rs.Match("abc"));
+    EXPECT_FALSE(rs.Match("abbc"));
+    EXPECT_FALSE(rs.Match("abbbbbbc"));
+}
+
+TEST(Regex, QuantifierMinMax2) {
+    Regex re("a(bc){3,5}d");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("abcbcbcd"));
+    EXPECT_TRUE(rs.Match("abcbcbcbcd"));
+    EXPECT_TRUE(rs.Match("abcbcbcbcbcd"));
+    EXPECT_FALSE(rs.Match("ad"));
+    EXPECT_FALSE(rs.Match("abcd"));
+    EXPECT_FALSE(rs.Match("abcbcd"));
+    EXPECT_FALSE(rs.Match("abcbcbcbcbcbcd"));
+}
+
+TEST(Regex, QuantifierMinMax3) {
+    Regex re("a(b|c){3,5}d");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("abbbd"));
+    EXPECT_TRUE(rs.Match("acccd"));
+    EXPECT_TRUE(rs.Match("abcbd"));
+    EXPECT_TRUE(rs.Match("accccd"));
+    EXPECT_TRUE(rs.Match("abbbbd"));
+    EXPECT_TRUE(rs.Match("acccccd"));
+    EXPECT_TRUE(rs.Match("abbbbbd"));
+    EXPECT_FALSE(rs.Match("ad"));
+    EXPECT_FALSE(rs.Match("abbd"));
+    EXPECT_FALSE(rs.Match("accccccd"));
+    EXPECT_FALSE(rs.Match("abbbbbbd"));
+}
+
+// Issue538
+TEST(Regex, QuantifierMinMax4) {
+    Regex re("a(b|c){0,3}d");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("ad"));
+    EXPECT_TRUE(rs.Match("abd"));
+    EXPECT_TRUE(rs.Match("acd"));
+    EXPECT_TRUE(rs.Match("abbd"));
+    EXPECT_TRUE(rs.Match("accd"));
+    EXPECT_TRUE(rs.Match("abcd"));
+    EXPECT_TRUE(rs.Match("abbbd"));
+    EXPECT_TRUE(rs.Match("acccd"));
+    EXPECT_FALSE(rs.Match("abbbbd"));
+    EXPECT_FALSE(rs.Match("add"));
+    EXPECT_FALSE(rs.Match("accccd"));
+    EXPECT_FALSE(rs.Match("abcbcd"));
+}
+
+// Issue538
+TEST(Regex, QuantifierMinMax5) {
+    Regex re("a(b|c){0,}d");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("ad"));
+    EXPECT_TRUE(rs.Match("abd"));
+    EXPECT_TRUE(rs.Match("acd"));
+    EXPECT_TRUE(rs.Match("abbd"));
+    EXPECT_TRUE(rs.Match("accd"));
+    EXPECT_TRUE(rs.Match("abcd"));
+    EXPECT_TRUE(rs.Match("abbbd"));
+    EXPECT_TRUE(rs.Match("acccd"));
+    EXPECT_TRUE(rs.Match("abbbbd"));
+    EXPECT_TRUE(rs.Match("accccd"));
+    EXPECT_TRUE(rs.Match("abcbcd"));
+    EXPECT_FALSE(rs.Match("add"));
+    EXPECT_FALSE(rs.Match("aad"));
+}
+
+#define EURO "\xE2\x82\xAC" // "\xE2\x82\xAC" is the UTF-8 sequence of Euro sign U+20AC
+
+TEST(Regex, Unicode) {
+    Regex re("a" EURO "+b"); 
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("a" EURO "b"));
+    EXPECT_TRUE(rs.Match("a" EURO EURO "b"));
+    EXPECT_FALSE(rs.Match("a?b"));
+    EXPECT_FALSE(rs.Match("a" EURO "\xAC" "b")); // a matcher unaware of UTF-8 would match this
+}
+
+TEST(Regex, AnyCharacter) {
+    Regex re(".");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("a"));
+    EXPECT_TRUE(rs.Match("b"));
+    EXPECT_TRUE(rs.Match(EURO));
+    EXPECT_FALSE(rs.Match(""));
+    EXPECT_FALSE(rs.Match("aa"));
+}
+
+TEST(Regex, CharacterRange1) {
+    Regex re("[abc]");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("a"));
+    EXPECT_TRUE(rs.Match("b"));
+    EXPECT_TRUE(rs.Match("c"));
+    EXPECT_FALSE(rs.Match(""));
+    EXPECT_FALSE(rs.Match("`"));
+    EXPECT_FALSE(rs.Match("d"));
+    EXPECT_FALSE(rs.Match("aa"));
+}
+
+TEST(Regex, CharacterRange2) {
+    Regex re("[^abc]");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("`"));
+    EXPECT_TRUE(rs.Match("d"));
+    EXPECT_FALSE(rs.Match("a"));
+    EXPECT_FALSE(rs.Match("b"));
+    EXPECT_FALSE(rs.Match("c"));
+    EXPECT_FALSE(rs.Match(""));
+    EXPECT_FALSE(rs.Match("aa"));
+}
+
+TEST(Regex, CharacterRange3) {
+    Regex re("[a-c]");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("a"));
+    EXPECT_TRUE(rs.Match("b"));
+    EXPECT_TRUE(rs.Match("c"));
+    EXPECT_FALSE(rs.Match(""));
+    EXPECT_FALSE(rs.Match("`"));
+    EXPECT_FALSE(rs.Match("d"));
+    EXPECT_FALSE(rs.Match("aa"));
+}
+
+TEST(Regex, CharacterRange4) {
+    Regex re("[^a-c]");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("`"));
+    EXPECT_TRUE(rs.Match("d"));
+    EXPECT_FALSE(rs.Match("a"));
+    EXPECT_FALSE(rs.Match("b"));
+    EXPECT_FALSE(rs.Match("c"));
+    EXPECT_FALSE(rs.Match(""));
+    EXPECT_FALSE(rs.Match("aa"));
+}
+
+TEST(Regex, CharacterRange5) {
+    Regex re("[-]");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("-"));
+    EXPECT_FALSE(rs.Match(""));
+    EXPECT_FALSE(rs.Match("a"));
+}
+
+TEST(Regex, CharacterRange6) {
+    Regex re("[a-]");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("a"));
+    EXPECT_TRUE(rs.Match("-"));
+    EXPECT_FALSE(rs.Match(""));
+    EXPECT_FALSE(rs.Match("`"));
+    EXPECT_FALSE(rs.Match("b"));
+}
+
+TEST(Regex, CharacterRange7) {
+    Regex re("[-a]");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("a"));
+    EXPECT_TRUE(rs.Match("-"));
+    EXPECT_FALSE(rs.Match(""));
+    EXPECT_FALSE(rs.Match("`"));
+    EXPECT_FALSE(rs.Match("b"));
+}
+
+TEST(Regex, CharacterRange8) {
+    Regex re("[a-zA-Z0-9]*");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("Milo"));
+    EXPECT_TRUE(rs.Match("MT19937"));
+    EXPECT_TRUE(rs.Match("43"));
+    EXPECT_FALSE(rs.Match("a_b"));
+    EXPECT_FALSE(rs.Match("!"));
+}
+
+TEST(Regex, Search) {
+    Regex re("abc");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Search("abc"));
+    EXPECT_TRUE(rs.Search("_abc"));
+    EXPECT_TRUE(rs.Search("abc_"));
+    EXPECT_TRUE(rs.Search("_abc_"));
+    EXPECT_TRUE(rs.Search("__abc__"));
+    EXPECT_TRUE(rs.Search("abcabc"));
+    EXPECT_FALSE(rs.Search("a"));
+    EXPECT_FALSE(rs.Search("ab"));
+    EXPECT_FALSE(rs.Search("bc"));
+    EXPECT_FALSE(rs.Search("cba"));
+}
+
+TEST(Regex, Search_BeginAnchor) {
+    Regex re("^abc");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Search("abc"));
+    EXPECT_TRUE(rs.Search("abc_"));
+    EXPECT_TRUE(rs.Search("abcabc"));
+    EXPECT_FALSE(rs.Search("_abc"));
+    EXPECT_FALSE(rs.Search("_abc_"));
+    EXPECT_FALSE(rs.Search("a"));
+    EXPECT_FALSE(rs.Search("ab"));
+    EXPECT_FALSE(rs.Search("bc"));
+    EXPECT_FALSE(rs.Search("cba"));
+}
+
+TEST(Regex, Search_EndAnchor) {
+    Regex re("abc$");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Search("abc"));
+    EXPECT_TRUE(rs.Search("_abc"));
+    EXPECT_TRUE(rs.Search("abcabc"));
+    EXPECT_FALSE(rs.Search("abc_"));
+    EXPECT_FALSE(rs.Search("_abc_"));
+    EXPECT_FALSE(rs.Search("a"));
+    EXPECT_FALSE(rs.Search("ab"));
+    EXPECT_FALSE(rs.Search("bc"));
+    EXPECT_FALSE(rs.Search("cba"));
+}
+
+TEST(Regex, Search_BothAnchor) {
+    Regex re("^abc$");
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Search("abc"));
+    EXPECT_FALSE(rs.Search(""));
+    EXPECT_FALSE(rs.Search("a"));
+    EXPECT_FALSE(rs.Search("b"));
+    EXPECT_FALSE(rs.Search("ab"));
+    EXPECT_FALSE(rs.Search("abcd"));
+}
+
+TEST(Regex, Escape) {
+    const char* s = "\\^\\$\\|\\(\\)\\?\\*\\+\\.\\[\\]\\{\\}\\\\\\f\\n\\r\\t\\v[\\b][\\[][\\]]";
+    Regex re(s);
+    ASSERT_TRUE(re.IsValid());
+    RegexSearch rs(re);
+    EXPECT_TRUE(rs.Match("^$|()?*+.[]{}\\\x0C\n\r\t\x0B\b[]"));
+    EXPECT_FALSE(rs.Match(s)); // Not escaping
+}
+
+TEST(Regex, Invalid) {
+#define TEST_INVALID(s) \
+    {\
+        Regex re(s);\
+        EXPECT_FALSE(re.IsValid());\
+    }
+
+    TEST_INVALID("");
+    TEST_INVALID("a|");
+    TEST_INVALID("()");
+    TEST_INVALID("(");
+    TEST_INVALID(")");
+    TEST_INVALID("(a))");
+    TEST_INVALID("(a|)");
+    TEST_INVALID("(a||b)");
+    TEST_INVALID("(|b)");
+    TEST_INVALID("?");
+    TEST_INVALID("*");
+    TEST_INVALID("+");
+    TEST_INVALID("{");
+    TEST_INVALID("{}");
+    TEST_INVALID("a{a}");
+    TEST_INVALID("a{0}");
+    TEST_INVALID("a{-1}");
+    TEST_INVALID("a{}");
+    // TEST_INVALID("a{0,}");   // Support now
+    TEST_INVALID("a{,0}");
+    TEST_INVALID("a{1,0}");
+    TEST_INVALID("a{-1,0}");
+    TEST_INVALID("a{-1,1}");
+    TEST_INVALID("a{4294967296}"); // overflow of unsigned
+    TEST_INVALID("a{1a}");
+    TEST_INVALID("[");
+    TEST_INVALID("[]");
+    TEST_INVALID("[^]");
+    TEST_INVALID("[\\a]");
+    TEST_INVALID("\\a");
+
+#undef TEST_INVALID
+}
+
+TEST(Regex, Issue538) {
+    Regex re("^[0-9]+(\\\\.[0-9]+){0,2}");
+    EXPECT_TRUE(re.IsValid());
+}
+
+TEST(Regex, Issue583) {
+    Regex re("[0-9]{99999}");
+    ASSERT_TRUE(re.IsValid());
+}
+
+#undef EURO
diff --git a/test/unittest/schematest.cpp b/test/unittest/schematest.cpp
new file mode 100644
index 0000000..3261069
--- /dev/null
+++ b/test/unittest/schematest.cpp
@@ -0,0 +1,2072 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+#include "rapidjson/schema.h"
+#include "rapidjson/stringbuffer.h"
+#include "rapidjson/writer.h"
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(variadic-macros)
+#elif defined(_MSC_VER)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(4822) // local class member function does not have a body
+#endif
+
+using namespace rapidjson;
+
+#define TEST_HASHER(json1, json2, expected) \
+{\
+    Document d1, d2;\
+    d1.Parse(json1);\
+    ASSERT_FALSE(d1.HasParseError());\
+    d2.Parse(json2);\
+    ASSERT_FALSE(d2.HasParseError());\
+    internal::Hasher<Value, CrtAllocator> h1, h2;\
+    d1.Accept(h1);\
+    d2.Accept(h2);\
+    ASSERT_TRUE(h1.IsValid());\
+    ASSERT_TRUE(h2.IsValid());\
+    /*printf("%s: 0x%016llx\n%s: 0x%016llx\n\n", json1, h1.GetHashCode(), json2, h2.GetHashCode());*/\
+    EXPECT_TRUE(expected == (h1.GetHashCode() == h2.GetHashCode()));\
+}
+
+// Hash-equality semantics used by the schema validator's uniqueItems/enum
+// checks: equal values hash equal (across int/unsigned/int64/uint64/double
+// representations of the same number), different values or different types
+// hash differently, and object member order does not affect the hash.
+TEST(SchemaValidator, Hasher) {
+    TEST_HASHER("null", "null", true);
+
+    TEST_HASHER("true", "true", true);
+    TEST_HASHER("false", "false", true);
+    TEST_HASHER("true", "false", false);
+    TEST_HASHER("false", "true", false);
+    TEST_HASHER("true", "null", false);
+    TEST_HASHER("false", "null", false);
+
+    TEST_HASHER("1", "1", true);
+    TEST_HASHER("2147483648", "2147483648", true); // 2^31 can only be fit in unsigned
+    TEST_HASHER("-2147483649", "-2147483649", true); // -2^31 - 1 can only be fit in int64_t
+    // NOTE(review): the next line duplicates the 2147483648 case two lines above — harmless copy/paste.
+    TEST_HASHER("2147483648", "2147483648", true); // 2^31 can only be fit in unsigned
+    TEST_HASHER("4294967296", "4294967296", true); // 2^32 can only be fit in int64_t
+    TEST_HASHER("9223372036854775808", "9223372036854775808", true); // 2^63 can only be fit in uint64_t
+    TEST_HASHER("1.5", "1.5", true);
+    TEST_HASHER("1", "1.0", true);
+    TEST_HASHER("1", "-1", false);
+    TEST_HASHER("0.0", "-0.0", false);
+    TEST_HASHER("1", "true", false);
+    TEST_HASHER("0", "false", false);
+    TEST_HASHER("0", "null", false);
+
+    TEST_HASHER("\"\"", "\"\"", true);
+    TEST_HASHER("\"\"", "\"\\u0000\"", false);
+    TEST_HASHER("\"Hello\"", "\"Hello\"", true);
+    TEST_HASHER("\"Hello\"", "\"World\"", false);
+    TEST_HASHER("\"Hello\"", "null", false);
+    TEST_HASHER("\"Hello\\u0000\"", "\"Hello\"", false); // embedded NUL is significant
+    TEST_HASHER("\"\"", "null", false);
+    TEST_HASHER("\"\"", "true", false);
+    TEST_HASHER("\"\"", "false", false);
+
+    TEST_HASHER("[]", "[ ]", true);
+    TEST_HASHER("[1, true, false]", "[1, true, false]", true);
+    TEST_HASHER("[1, true, false]", "[1, true]", false);
+    TEST_HASHER("[1, 2]", "[2, 1]", false); // array order is significant
+    TEST_HASHER("[[1], 2]", "[[1, 2]]", false);
+    TEST_HASHER("[1, 2]", "[1, [2]]", false);
+    TEST_HASHER("[]", "null", false);
+    TEST_HASHER("[]", "true", false);
+    TEST_HASHER("[]", "false", false);
+    TEST_HASHER("[]", "0", false);
+    TEST_HASHER("[]", "0.0", false);
+    TEST_HASHER("[]", "\"\"", false);
+
+    TEST_HASHER("{}", "{ }", true);
+    TEST_HASHER("{\"a\":1}", "{\"a\":1}", true);
+    TEST_HASHER("{\"a\":1}", "{\"b\":1}", false);
+    TEST_HASHER("{\"a\":1}", "{\"a\":2}", false);
+    TEST_HASHER("{\"a\":1, \"b\":2}", "{\"b\":2, \"a\":1}", true); // Member order insensitive
+    TEST_HASHER("{}", "null", false);
+    TEST_HASHER("{}", "false", false);
+    TEST_HASHER("{}", "true", false);
+    TEST_HASHER("{}", "0", false);
+    TEST_HASHER("{}", "0.0", false);
+    TEST_HASHER("{}", "\"\"", false);
+}
+
+// Test cases following http://spacetelescope.github.io/understanding-json-schema
+
+#define VALIDATE(schema, json, expected) \
+{\
+    SchemaValidator validator(schema);\
+    Document d;\
+    /*printf("\n%s\n", json);*/\
+    d.Parse(json);\
+    EXPECT_FALSE(d.HasParseError());\
+    EXPECT_TRUE(expected == d.Accept(validator));\
+    EXPECT_TRUE(expected == validator.IsValid());\
+    if ((expected) && !validator.IsValid()) {\
+        StringBuffer sb;\
+        validator.GetInvalidSchemaPointer().StringifyUriFragment(sb);\
+        printf("Invalid schema: %s\n", sb.GetString());\
+        printf("Invalid keyword: %s\n", validator.GetInvalidSchemaKeyword());\
+        sb.Clear();\
+        validator.GetInvalidDocumentPointer().StringifyUriFragment(sb);\
+        printf("Invalid document: %s\n", sb.GetString());\
+        sb.Clear();\
+        Writer<StringBuffer> w(sb);\
+        validator.GetError().Accept(w);\
+        printf("Validation error: %s\n", sb.GetString());\
+    }\
+}
+
+#define INVALIDATE(schema, json, invalidSchemaPointer, invalidSchemaKeyword, invalidDocumentPointer, error) \
+{\
+    INVALIDATE_(schema, json, invalidSchemaPointer, invalidSchemaKeyword, invalidDocumentPointer, error, SchemaValidator, Pointer) \
+}
+
+// Asserts that `json` FAILS validation against `schema`, then checks the four
+// pieces of error reporting: the invalid-schema JSON pointer, the failing
+// keyword string, the invalid-document JSON pointer, and the structured error
+// object (compared to the expected `error` JSON). Mismatches print both the
+// expected and actual values before registering a test failure.
+#define INVALIDATE_(schema, json, invalidSchemaPointer, invalidSchemaKeyword, invalidDocumentPointer, error, \
+    SchemaValidatorType, PointerType) \
+{\
+    SchemaValidatorType validator(schema);\
+    Document d;\
+    /*printf("\n%s\n", json);*/\
+    d.Parse(json);\
+    EXPECT_FALSE(d.HasParseError());\
+    EXPECT_FALSE(d.Accept(validator));\
+    EXPECT_FALSE(validator.IsValid());\
+    if (validator.GetInvalidSchemaPointer() != PointerType(invalidSchemaPointer)) {\
+        StringBuffer sb;\
+        validator.GetInvalidSchemaPointer().Stringify(sb);\
+        printf("GetInvalidSchemaPointer() Expected: %s Actual: %s\n", invalidSchemaPointer, sb.GetString());\
+        ADD_FAILURE();\
+    }\
+    ASSERT_TRUE(validator.GetInvalidSchemaKeyword() != 0);\
+    if (strcmp(validator.GetInvalidSchemaKeyword(), invalidSchemaKeyword) != 0) {\
+        printf("GetInvalidSchemaKeyword() Expected: %s Actual %s\n", invalidSchemaKeyword, validator.GetInvalidSchemaKeyword());\
+        ADD_FAILURE();\
+    }\
+    if (validator.GetInvalidDocumentPointer() != PointerType(invalidDocumentPointer)) {\
+        StringBuffer sb;\
+        validator.GetInvalidDocumentPointer().Stringify(sb);\
+        printf("GetInvalidDocumentPointer() Expected: %s Actual: %s\n", invalidDocumentPointer, sb.GetString());\
+        ADD_FAILURE();\
+    }\
+    Document e;\
+    e.Parse(error);\
+    if (validator.GetError() != e) {\
+        StringBuffer sb;\
+        Writer<StringBuffer> w(sb);\
+        validator.GetError().Accept(w);\
+        printf("GetError() Expected: %s Actual: %s\n", error, sb.GetString());\
+        ADD_FAILURE();\
+    }\
+}
+
+// An empty schema ({}) imposes no constraints: any JSON value validates.
+TEST(SchemaValidator, Typeless) {
+    Document sd;
+    sd.Parse("{}");
+    SchemaDocument s(sd);
+    
+    VALIDATE(s, "42", true);
+    VALIDATE(s, "\"I'm a string\"", true);
+    VALIDATE(s, "{ \"an\": [ \"arbitrarily\", \"nested\" ], \"data\": \"structure\" }", true);
+}
+
+// "type" given as an array accepts any of the listed types and rejects
+// (with a "type" error) values of any other type.
+TEST(SchemaValidator, MultiType) {
+    Document sd;
+    sd.Parse("{ \"type\": [\"number\", \"string\"] }");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "42", true);
+    VALIDATE(s, "\"Life, the universe, and everything\"", true);
+    INVALIDATE(s, "[\"Life\", \"the universe\", \"and everything\"]", "", "type", "",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": [\"string\", \"number\"], \"actual\": \"array\""
+        "}}");
+}
+
+// "enum" combined with a type constraint: a listed value passes, an
+// unlisted value of the right type fails with an "enum" error.
+TEST(SchemaValidator, Enum_Typed) {
+    Document sd;
+    sd.Parse("{ \"type\": \"string\", \"enum\" : [\"red\", \"amber\", \"green\"] }");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "\"red\"", true);
+    INVALIDATE(s, "\"blue\"", "", "enum", "",
+        "{ \"enum\": { \"instanceRef\": \"#\", \"schemaRef\": \"#\" }}");
+}
+
+// "enum" without a "type" constraint may mix value types (string, null,
+// number); only exact member values pass.
+// NOTE(review): test name misspells "Typeless"; renaming would change the
+// test identifier, so it is left as-is.
+TEST(SchemaValidator, Enum_Typless) {
+    Document sd;
+    sd.Parse("{  \"enum\": [\"red\", \"amber\", \"green\", null, 42] }");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "\"red\"", true);
+    VALIDATE(s, "null", true);
+    VALIDATE(s, "42", true);
+    INVALIDATE(s, "0", "", "enum", "",
+        "{ \"enum\": { \"instanceRef\": \"#\", \"schemaRef\": \"#\" }}");
+}
+
+// When a "type" constraint is present, it is checked before "enum": null is
+// in the enum list but still fails with a "type" error because the schema
+// requires a string.
+TEST(SchemaValidator, Enum_InvalidType) {
+    Document sd;
+    sd.Parse("{ \"type\": \"string\", \"enum\": [\"red\", \"amber\", \"green\", null] }");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "\"red\"", true);
+    INVALIDATE(s, "null", "", "type", "",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": [\"string\"], \"actual\": \"null\""
+        "}}");
+}
+
+// "allOf": the instance must satisfy every subschema; the error reported
+// comes from the first failing subschema (its schemaRef points into /allOf/N).
+TEST(SchemaValidator, AllOf) {
+    {
+        // Compatible subschemas: both require string, second also caps length.
+        Document sd;
+        sd.Parse("{\"allOf\": [{ \"type\": \"string\" }, { \"type\": \"string\", \"maxLength\": 5 }]}");
+        SchemaDocument s(sd);
+
+        VALIDATE(s, "\"ok\"", true);
+        INVALIDATE(s, "\"too long\"", "", "allOf", "",
+            "{ \"maxLength\": { "
+            "    \"instanceRef\": \"#\", \"schemaRef\": \"#/allOf/1\", "
+            "    \"expected\": 5, \"actual\": \"too long\""
+            "}}");
+    }
+    {
+        // Contradictory subschemas (string AND number): nothing can validate.
+        Document sd;
+        sd.Parse("{\"allOf\": [{ \"type\": \"string\" }, { \"type\": \"number\" } ] }");
+        SchemaDocument s(sd);
+
+        VALIDATE(s, "\"No way\"", false);
+        INVALIDATE(s, "-1", "", "allOf", "",
+            "{ \"type\": { \"instanceRef\": \"#\", \"schemaRef\": \"#/allOf/0\","
+            "    \"expected\": [\"string\"], \"actual\": \"integer\""
+            "}}");
+    }
+}
+
+// "anyOf": satisfying any one subschema passes; when all fail, the reported
+// error aggregates each subschema's individual error in an "errors" array.
+TEST(SchemaValidator, AnyOf) {
+    Document sd;
+    sd.Parse("{\"anyOf\": [{ \"type\": \"string\" }, { \"type\": \"number\" } ] }");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "\"Yes\"", true);
+    VALIDATE(s, "42", true);
+    INVALIDATE(s, "{ \"Not a\": \"string or number\" }", "", "anyOf", "",
+        "{ \"anyOf\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\", "
+        "    \"errors\": ["
+        "      { \"type\": {"
+        "          \"instanceRef\": \"#\", \"schemaRef\": \"#/anyOf/0\","
+        "          \"expected\": [\"string\"], \"actual\": \"object\""
+        "      }},"
+        "      { \"type\": {"
+        "          \"instanceRef\": \"#\", \"schemaRef\": \"#/anyOf/1\","
+        "          \"expected\": [\"number\"], \"actual\": \"object\""
+        "      }}"
+        "    ]"
+        "}}");
+}
+
+// "oneOf": exactly one subschema must match. Zero matches reports each
+// subschema's error; more than one match (15 is a multiple of both 5 and 3)
+// reports empty error objects for the matching subschemas.
+TEST(SchemaValidator, OneOf) {
+    Document sd;
+    sd.Parse("{\"oneOf\": [{ \"type\": \"number\", \"multipleOf\": 5 }, { \"type\": \"number\", \"multipleOf\": 3 } ] }");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "10", true);
+    VALIDATE(s, "9", true);
+    INVALIDATE(s, "2", "", "oneOf", "",
+        "{ \"oneOf\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"errors\": ["
+        "      { \"multipleOf\": {"
+        "          \"instanceRef\": \"#\", \"schemaRef\": \"#/oneOf/0\","
+        "          \"expected\": 5, \"actual\": 2"
+        "      }},"
+        "      { \"multipleOf\": {"
+        "          \"instanceRef\": \"#\", \"schemaRef\": \"#/oneOf/1\","
+        "          \"expected\": 3, \"actual\": 2"
+        "      }}"
+        "    ]"
+        "}}");
+    INVALIDATE(s, "15", "", "oneOf", "",
+        "{ \"oneOf\": { \"instanceRef\": \"#\", \"schemaRef\": \"#\", \"errors\": [{}, {}]}}");
+}
+
+// "not": a value validating against the negated subschema fails; anything
+// else (number, object) passes.
+TEST(SchemaValidator, Not) {
+    Document sd;
+    sd.Parse("{\"not\":{ \"type\": \"string\"}}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "42", true);
+    VALIDATE(s, "{ \"key\": \"value\" }", true);
+    INVALIDATE(s, "\"I am a string\"", "", "not", "",
+        "{ \"not\": { \"instanceRef\": \"#\", \"schemaRef\": \"#\" }}");
+}
+
+// "$ref": two properties may reference the same local definition
+// (#/definitions/address) and a document satisfying both passes.
+TEST(SchemaValidator, Ref) {
+    Document sd;
+    sd.Parse(
+        "{"
+        "  \"$schema\": \"http://json-schema.org/draft-04/schema#\","
+        ""
+        "  \"definitions\": {"
+        "    \"address\": {"
+        "      \"type\": \"object\","
+        "      \"properties\": {"
+        "        \"street_address\": { \"type\": \"string\" },"
+        "        \"city\":           { \"type\": \"string\" },"
+        "        \"state\":          { \"type\": \"string\" }"
+        "      },"
+        "      \"required\": [\"street_address\", \"city\", \"state\"]"
+        "    }"
+        "  },"
+        "  \"type\": \"object\","
+        "  \"properties\": {"
+        "    \"billing_address\": { \"$ref\": \"#/definitions/address\" },"
+        "    \"shipping_address\": { \"$ref\": \"#/definitions/address\" }"
+        "  }"
+        "}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "{\"shipping_address\": {\"street_address\": \"1600 Pennsylvania Avenue NW\", \"city\": \"Washington\", \"state\": \"DC\"}, \"billing_address\": {\"street_address\": \"1st Street SE\", \"city\": \"Washington\", \"state\": \"DC\"} }", true);
+}
+
+// "$ref" combined with "allOf": shipping_address must satisfy both the
+// referenced address definition AND an extra subschema requiring a "type"
+// member. A missing "type" is reported as a "required" failure whose
+// schemaRef points into the allOf branch.
+TEST(SchemaValidator, Ref_AllOf) {
+    Document sd;
+    sd.Parse(
+        "{"
+        "  \"$schema\": \"http://json-schema.org/draft-04/schema#\","
+        ""
+        "  \"definitions\": {"
+        "    \"address\": {"
+        "      \"type\": \"object\","
+        "      \"properties\": {"
+        "        \"street_address\": { \"type\": \"string\" },"
+        "        \"city\":           { \"type\": \"string\" },"
+        "        \"state\":          { \"type\": \"string\" }"
+        "      },"
+        "      \"required\": [\"street_address\", \"city\", \"state\"]"
+        "    }"
+        "  },"
+        "  \"type\": \"object\","
+        "  \"properties\": {"
+        "    \"billing_address\": { \"$ref\": \"#/definitions/address\" },"
+        "    \"shipping_address\": {"
+        "      \"allOf\": ["
+        "        { \"$ref\": \"#/definitions/address\" },"
+        "        { \"properties\":"
+        "          { \"type\": { \"enum\": [ \"residential\", \"business\" ] } },"
+        "          \"required\": [\"type\"]"
+        "        }"
+        "      ]"
+        "    }"
+        "  }"
+        "}");
+    SchemaDocument s(sd);
+
+    INVALIDATE(s, "{\"shipping_address\": {\"street_address\": \"1600 Pennsylvania Avenue NW\", \"city\": \"Washington\", \"state\": \"DC\"} }", "/properties/shipping_address", "allOf", "/shipping_address",
+        "{ \"required\": {"
+        "    \"instanceRef\": \"#/shipping_address\","
+        "    \"schemaRef\": \"#/properties/shipping_address/allOf/1\","
+        "    \"missing\": [\"type\"]"
+        "}}");
+    VALIDATE(s, "{\"shipping_address\": {\"street_address\": \"1600 Pennsylvania Avenue NW\", \"city\": \"Washington\", \"state\": \"DC\", \"type\": \"business\"} }", true);
+}
+
+// {"type":"string"}: strings pass; numbers of every internal representation
+// (int, unsigned, int64, double) are rejected with a "type" error reporting
+// the actual numeric kind.
+TEST(SchemaValidator, String) {
+    Document sd;
+    sd.Parse("{\"type\":\"string\"}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "\"I'm a string\"", true);
+    INVALIDATE(s, "42", "", "type", "",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": [\"string\"], \"actual\": \"integer\""
+        "}}");
+    INVALIDATE(s, "2147483648", "", "type", "",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": [\"string\"], \"actual\": \"integer\""
+        "}}"); // 2^31 can only be fit in unsigned
+    INVALIDATE(s, "-2147483649", "", "type", "",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": [\"string\"], \"actual\": \"integer\""
+        "}}"); // -2^31 - 1 can only be fit in int64_t
+    INVALIDATE(s, "4294967296", "", "type", "",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": [\"string\"], \"actual\": \"integer\""
+        "}}"); // 2^32 can only be fit in int64_t
+    INVALIDATE(s, "3.1415926", "", "type", "",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": [\"string\"], \"actual\": \"number\""
+        "}}");
+}
+
+// "minLength"/"maxLength": inclusive bounds of 2..3 code points; the error
+// reports the expected bound and the actual offending string.
+TEST(SchemaValidator, String_LengthRange) {
+    Document sd;
+    sd.Parse("{\"type\":\"string\",\"minLength\":2,\"maxLength\":3}");
+    SchemaDocument s(sd);
+
+    INVALIDATE(s, "\"A\"", "", "minLength", "",
+        "{ \"minLength\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 2, \"actual\": \"A\""
+        "}}");
+    VALIDATE(s, "\"AB\"", true);
+    VALIDATE(s, "\"ABC\"", true);
+    INVALIDATE(s, "\"ABCD\"", "", "maxLength", "",
+        "{ \"maxLength\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 3, \"actual\": \"ABCD\""
+        "}}");
+}
+
+// "pattern" tests — only compiled when a regex engine is available.
+#if RAPIDJSON_SCHEMA_HAS_REGEX
+// A US phone-number pattern: matching strings pass; non-matching strings
+// fail with a "pattern" error carrying the actual value.
+TEST(SchemaValidator, String_Pattern) {
+    Document sd;
+    sd.Parse("{\"type\":\"string\",\"pattern\":\"^(\\\\([0-9]{3}\\\\))?[0-9]{3}-[0-9]{4}$\"}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "\"555-1212\"", true);
+    VALIDATE(s, "\"(888)555-1212\"", true);
+    INVALIDATE(s, "\"(888)555-1212 ext. 532\"", "", "pattern", "",
+        "{ \"pattern\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"actual\": \"(888)555-1212 ext. 532\""
+        "}}");
+    INVALIDATE(s, "\"(800)FLOWERS\"", "", "pattern", "",
+        "{ \"pattern\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"actual\": \"(800)FLOWERS\""
+        "}}");
+}
+
+// An invalid regex ("a{0}" is rejected by the internal engine) is silently
+// ignored, so every string validates.
+TEST(SchemaValidator, String_Pattern_Invalid) {
+    Document sd;
+    sd.Parse("{\"type\":\"string\",\"pattern\":\"a{0}\"}"); // TODO: report regex is invalid somehow
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "\"\"", true);
+    VALIDATE(s, "\"a\"", true);
+    VALIDATE(s, "\"aa\"", true);
+}
+#endif
+
+// {"type":"integer"}: every internal integer representation (int, unsigned,
+// int64, uint64) passes; doubles and strings fail with a "type" error.
+TEST(SchemaValidator, Integer) {
+    Document sd;
+    sd.Parse("{\"type\":\"integer\"}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "42", true);
+    VALIDATE(s, "-1", true);
+    VALIDATE(s, "2147483648", true); // 2^31 can only be fit in unsigned
+    VALIDATE(s, "-2147483649", true); // -2^31 - 1 can only be fit in int64_t
+    // NOTE(review): the next line duplicates the 2147483648 case two lines above — harmless copy/paste.
+    VALIDATE(s, "2147483648", true); // 2^31 can only be fit in unsigned
+    VALIDATE(s, "4294967296", true); // 2^32 can only be fit in int64_t
+    INVALIDATE(s, "3.1415926", "", "type", "",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": [\"integer\"], \"actual\": \"number\""
+        "}}");
+    INVALIDATE(s, "\"42\"", "", "type", "",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": [\"integer\"], \"actual\": \"string\""
+        "}}");
+}
+
+// "minimum"/"maximum" with exclusiveMaximum: valid range is [0, 100), so
+// 99 passes and both 100 (boundary, excluded) and 101 fail.
+TEST(SchemaValidator, Integer_Range) {
+    Document sd;
+    sd.Parse("{\"type\":\"integer\",\"minimum\":0,\"maximum\":100,\"exclusiveMaximum\":true}");
+    SchemaDocument s(sd);
+
+    INVALIDATE(s, "-1", "", "minimum", "",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 0, \"actual\": -1"
+        "}}");
+    VALIDATE(s, "0", true);
+    VALIDATE(s, "10", true);
+    VALIDATE(s, "99", true);
+    INVALIDATE(s, "100", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 100, \"exclusiveMaximum\": true, \"actual\": 100"
+        "}}");
+    INVALIDATE(s, "101", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 100, \"exclusiveMaximum\": true, \"actual\": 101"
+        "}}");
+}
+
+// Range checks at the int64_t boundaries: bounds of [-2^63+1, 2^63-2] must
+// correctly reject int64 min, int64 max, and uint64 max while accepting
+// values just inside the range across all integer representations.
+TEST(SchemaValidator, Integer_Range64Boundary) {
+    Document sd;
+    sd.Parse("{\"type\":\"integer\",\"minimum\":-9223372036854775807,\"maximum\":9223372036854775806}");
+    SchemaDocument s(sd);
+
+    INVALIDATE(s, "-9223372036854775808", "", "minimum", "",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": -9223372036854775807, \"actual\": -9223372036854775808"
+        "}}");
+    VALIDATE(s, "-9223372036854775807", true);
+    VALIDATE(s, "-2147483648", true); // int min
+    VALIDATE(s, "0", true);
+    VALIDATE(s, "2147483647", true);  // int max
+    VALIDATE(s, "2147483648", true);  // unsigned first
+    VALIDATE(s, "4294967295", true);  // unsigned max
+    VALIDATE(s, "9223372036854775806", true);
+    INVALIDATE(s, "9223372036854775807", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 9223372036854775806, \"actual\": 9223372036854775807"
+        "}}");
+    INVALIDATE(s, "18446744073709551615", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 9223372036854775806, \"actual\": 18446744073709551615"
+        "}}");   // uint64_t max
+}
+
+// Range checks when the bounds themselves only fit in uint64_t
+// ([2^63, 2^64-2]): every value representable as int/unsigned/int64 is below
+// the minimum, and uint64 max exceeds the maximum.
+TEST(SchemaValidator, Integer_RangeU64Boundary) {
+    Document sd;
+    sd.Parse("{\"type\":\"integer\",\"minimum\":9223372036854775808,\"maximum\":18446744073709551614}");
+    SchemaDocument s(sd);
+
+    INVALIDATE(s, "-9223372036854775808", "", "minimum", "",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 9223372036854775808, \"actual\": -9223372036854775808"
+        "}}");
+    INVALIDATE(s, "9223372036854775807", "", "minimum", "",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 9223372036854775808, \"actual\": 9223372036854775807"
+        "}}");
+    INVALIDATE(s, "-2147483648", "", "minimum", "",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 9223372036854775808, \"actual\": -2147483648"
+        "}}"); // int min
+    INVALIDATE(s, "0", "", "minimum", "",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 9223372036854775808, \"actual\": 0"
+        "}}");
+    INVALIDATE(s, "2147483647", "", "minimum", "",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 9223372036854775808, \"actual\": 2147483647"
+        "}}");  // int max
+    INVALIDATE(s, "2147483648", "", "minimum", "",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 9223372036854775808, \"actual\": 2147483648"
+        "}}");  // unsigned first
+    INVALIDATE(s, "4294967295", "", "minimum", "",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 9223372036854775808, \"actual\": 4294967295"
+        "}}");  // unsigned max
+    VALIDATE(s, "9223372036854775808", true);
+    VALIDATE(s, "18446744073709551614", true);
+    INVALIDATE(s, "18446744073709551615", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 18446744073709551614, \"actual\": 18446744073709551615"
+        "}}");
+}
+
+// Exclusive bounds spanning the full 64-bit range (int64 min .. uint64 max):
+// the boundary values themselves are rejected, values one step inside pass.
+TEST(SchemaValidator, Integer_Range64BoundaryExclusive) {
+    Document sd;
+    sd.Parse("{\"type\":\"integer\",\"minimum\":-9223372036854775808,\"maximum\":18446744073709551615,\"exclusiveMinimum\":true,\"exclusiveMaximum\":true}");
+    SchemaDocument s(sd);
+
+    INVALIDATE(s, "-9223372036854775808", "", "minimum", "",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": -9223372036854775808, \"exclusiveMinimum\": true, "
+        "    \"actual\": -9223372036854775808"
+        "}}");
+    VALIDATE(s, "-9223372036854775807", true);
+    VALIDATE(s, "18446744073709551614", true);
+    INVALIDATE(s, "18446744073709551615", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 18446744073709551615, \"exclusiveMaximum\": true, "
+        "    \"actual\": 18446744073709551615"
+        "}}");
+}
+
+// "multipleOf" for integers: zero and exact multiples (including negatives)
+// pass; non-multiples fail with the expected divisor and actual value.
+TEST(SchemaValidator, Integer_MultipleOf) {
+    Document sd;
+    sd.Parse("{\"type\":\"integer\",\"multipleOf\":10}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "0", true);
+    VALIDATE(s, "10", true);
+    VALIDATE(s, "-10", true);
+    VALIDATE(s, "20", true);
+    INVALIDATE(s, "23", "", "multipleOf", "",
+        "{ \"multipleOf\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 10, \"actual\": 23"
+        "}}");
+    INVALIDATE(s, "-23", "", "multipleOf", "",
+        "{ \"multipleOf\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 10, \"actual\": -23"
+        "}}");
+}
+
+// "multipleOf" at the uint64 boundary: the divisor is uint64 max, so only 0
+// and uint64 max itself are multiples; uint64 max - 1 must fail without
+// overflow in the divisibility check.
+TEST(SchemaValidator, Integer_MultipleOf64Boundary) {
+    Document sd;
+    sd.Parse("{\"type\":\"integer\",\"multipleOf\":18446744073709551615}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "0", true);
+    VALIDATE(s, "18446744073709551615", true);
+    INVALIDATE(s, "18446744073709551614", "", "multipleOf", "",
+        "{ \"multipleOf\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 18446744073709551615, \"actual\": 18446744073709551614"
+        "}}");
+}
+
+// "number" range [0, 100): integer and fractional values inside pass; 100
+// fails whether written as an integer or a double, as does anything above.
+TEST(SchemaValidator, Number_Range) {
+    Document sd;
+    sd.Parse("{\"type\":\"number\",\"minimum\":0,\"maximum\":100,\"exclusiveMaximum\":true}");
+    SchemaDocument s(sd);
+
+    INVALIDATE(s, "-1", "", "minimum", "",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 0, \"actual\": -1"
+        "}}");
+    VALIDATE(s, "0", true);
+    VALIDATE(s, "0.1", true);
+    VALIDATE(s, "10", true);
+    VALIDATE(s, "99", true);
+    VALIDATE(s, "99.9", true);
+    INVALIDATE(s, "100", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 100, \"exclusiveMaximum\": true, \"actual\": 100"
+        "}}");
+    INVALIDATE(s, "100.0", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 100, \"exclusiveMaximum\": true, \"actual\": 100.0"
+        "}}");
+    INVALIDATE(s, "101.5", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 100, \"exclusiveMaximum\": true, \"actual\": 101.5"
+        "}}");
+}
+
+// "number" with integer bounds [-100, -1): fractional values straddling the
+// bounds are compared correctly, and every non-negative representation
+// (int max, unsigned, uint64) exceeds the exclusive maximum of -1.
+TEST(SchemaValidator, Number_RangeInt) {
+    Document sd;
+    sd.Parse("{\"type\":\"number\",\"minimum\":-100,\"maximum\":-1,\"exclusiveMaximum\":true}");
+    SchemaDocument s(sd);
+
+    INVALIDATE(s, "-101", "", "minimum", "",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": -100, \"actual\": -101"
+        "}}");
+    INVALIDATE(s, "-100.1", "", "minimum", "",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": -100, \"actual\": -100.1"
+        "}}");
+    VALIDATE(s, "-100", true);
+    VALIDATE(s, "-2", true);
+    INVALIDATE(s, "-1", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": -1, \"exclusiveMaximum\": true, \"actual\": -1"
+        "}}");
+    INVALIDATE(s, "-0.9", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": -1, \"exclusiveMaximum\": true, \"actual\": -0.9"
+        "}}");
+    INVALIDATE(s, "0", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": -1, \"exclusiveMaximum\": true, \"actual\": 0"
+        "}}");
+    INVALIDATE(s, "2147483647", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": -1, \"exclusiveMaximum\": true, \"actual\": 2147483647"
+        "}}");  // int max
+    INVALIDATE(s, "2147483648", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": -1, \"exclusiveMaximum\": true, \"actual\": 2147483648"
+        "}}");  // unsigned first
+    INVALIDATE(s, "4294967295", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": -1, \"exclusiveMaximum\": true, \"actual\": 4294967295"
+        "}}");  // unsigned max
+    INVALIDATE(s, "9223372036854775808", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": -1, \"exclusiveMaximum\": true, \"actual\": 9223372036854775808"
+        "}}");
+    INVALIDATE(s, "18446744073709551614", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": -1, \"exclusiveMaximum\": true, \"actual\": 18446744073709551614"
+        "}}");
+    INVALIDATE(s, "18446744073709551615", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": -1, \"exclusiveMaximum\": true, \"actual\": 18446744073709551615"
+        "}}");
+}
+
+// "number" with double bounds [0.1, 100.1): integer values of every internal
+// representation (int min/max, unsigned, int64, uint64) must compare
+// correctly against the double bounds.
+TEST(SchemaValidator, Number_RangeDouble) {
+    Document sd;
+    sd.Parse("{\"type\":\"number\",\"minimum\":0.1,\"maximum\":100.1,\"exclusiveMaximum\":true}");
+    SchemaDocument s(sd);
+
+    INVALIDATE(s, "-9223372036854775808", "", "minimum", "",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 0.1, \"actual\": -9223372036854775808"
+        "}}");
+    INVALIDATE(s, "-2147483648", "", "minimum", "",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 0.1, \"actual\": -2147483648"
+        "}}"); // int min
+    INVALIDATE(s, "-1", "", "minimum", "",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 0.1, \"actual\": -1"
+        "}}");
+    VALIDATE(s, "0.1", true);
+    VALIDATE(s, "10", true);
+    VALIDATE(s, "99", true);
+    VALIDATE(s, "100", true);
+    INVALIDATE(s, "101", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 100.1, \"exclusiveMaximum\": true, \"actual\": 101"
+        "}}");
+    INVALIDATE(s, "101.5", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 100.1, \"exclusiveMaximum\": true, \"actual\": 101.5"
+        "}}");
+    INVALIDATE(s, "18446744073709551614", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 100.1, \"exclusiveMaximum\": true, \"actual\": 18446744073709551614"
+        "}}");
+    INVALIDATE(s, "18446744073709551615", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 100.1, \"exclusiveMaximum\": true, \"actual\": 18446744073709551615"
+        "}}");
+    INVALIDATE(s, "2147483647", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 100.1, \"exclusiveMaximum\": true, \"actual\": 2147483647"
+        "}}");  // int max
+    INVALIDATE(s, "2147483648", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 100.1, \"exclusiveMaximum\": true, \"actual\": 2147483648"
+        "}}");  // unsigned first
+    INVALIDATE(s, "4294967295", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 100.1, \"exclusiveMaximum\": true, \"actual\": 4294967295"
+        "}}");  // unsigned max
+    INVALIDATE(s, "9223372036854775808", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 100.1, \"exclusiveMaximum\": true, \"actual\": 9223372036854775808"
+        "}}");
+    // NOTE(review): the next two cases duplicate the 18446744073709551614/15
+    // cases earlier in this test — harmless copy/paste.
+    INVALIDATE(s, "18446744073709551614", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 100.1, \"exclusiveMaximum\": true, \"actual\": 18446744073709551614"
+        "}}");
+    INVALIDATE(s, "18446744073709551615", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 100.1, \"exclusiveMaximum\": true, \"actual\": 18446744073709551615"
+        "}}");
+}
+
+TEST(SchemaValidator, Number_RangeDoubleU64Boundary) {
+    // Range checks where the double bounds straddle the int64/uint64 boundary:
+    // minimum is 2^63 (just above int64 max) and maximum is just below uint64 max,
+    // so only values representable as uint64 but not int64 should validate.
+    Document sd;
+    sd.Parse("{\"type\":\"number\",\"minimum\":9223372036854775808.0,\"maximum\":18446744073709550000.0}");
+    SchemaDocument s(sd);
+
+    INVALIDATE(s, "-9223372036854775808", "", "minimum", "",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 9223372036854775808.0, \"actual\": -9223372036854775808"
+        "}}");
+    INVALIDATE(s, "-2147483648", "", "minimum", "",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 9223372036854775808.0, \"actual\": -2147483648"
+        "}}"); // int min
+    INVALIDATE(s, "0", "", "minimum", "",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 9223372036854775808.0, \"actual\": 0"
+        "}}");
+    INVALIDATE(s, "2147483647", "", "minimum", "",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 9223372036854775808.0, \"actual\": 2147483647"
+        "}}");  // int max
+    INVALIDATE(s, "2147483648", "", "minimum", "",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 9223372036854775808.0, \"actual\": 2147483648"
+        "}}");  // unsigned first
+    INVALIDATE(s, "4294967295", "", "minimum", "",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 9223372036854775808.0, \"actual\": 4294967295"
+        "}}");  // unsigned max
+    // Values inside [2^63, ~uint64 max) are accepted.
+    VALIDATE(s, "9223372036854775808", true);
+    VALIDATE(s, "18446744073709540000", true);
+    INVALIDATE(s, "18446744073709551615", "", "maximum", "",
+        "{ \"maximum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 18446744073709550000.0, \"actual\": 18446744073709551615"
+        "}}");
+}
+
+TEST(SchemaValidator, Number_MultipleOf) {
+    // "multipleOf": 10.0 — checks divisibility across signed/unsigned integer
+    // boundary values (int min/max, first value stored as unsigned, unsigned max).
+    Document sd;
+    sd.Parse("{\"type\":\"number\",\"multipleOf\":10.0}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "0", true);
+    VALIDATE(s, "10", true);
+    VALIDATE(s, "-10", true);
+    VALIDATE(s, "20", true);
+    INVALIDATE(s, "23", "", "multipleOf", "",
+        "{ \"multipleOf\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 10.0, \"actual\": 23"
+        "}}");
+    INVALIDATE(s, "-2147483648", "", "multipleOf", "",
+        "{ \"multipleOf\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 10.0, \"actual\": -2147483648"
+        "}}");  // int min
+    VALIDATE(s, "-2147483640", true);
+    INVALIDATE(s, "2147483647", "", "multipleOf", "",
+        "{ \"multipleOf\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 10.0, \"actual\": 2147483647"
+        "}}");  // int max
+    INVALIDATE(s, "2147483648", "", "multipleOf", "",
+        "{ \"multipleOf\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 10.0, \"actual\": 2147483648"
+        "}}");  // unsigned first
+    VALIDATE(s, "2147483650", true);
+    INVALIDATE(s, "4294967295", "", "multipleOf", "",
+        "{ \"multipleOf\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 10.0, \"actual\": 4294967295"
+        "}}");  // unsigned max
+    VALIDATE(s, "4294967300", true);
+}
+
+TEST(SchemaValidator, Number_MultipleOfOne) {
+    // "multipleOf": 1 — integers and whole-valued doubles (42.0) pass;
+    // a fractional double fails.
+    Document sd;
+    sd.Parse("{\"type\":\"number\",\"multipleOf\":1}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "42", true);
+    VALIDATE(s, "42.0", true);
+    INVALIDATE(s, "3.1415926", "", "multipleOf", "",
+        "{ \"multipleOf\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 1, \"actual\": 3.1415926"
+        "}}");
+}
+
+TEST(SchemaValidator, Object) {
+    // Bare {"type":"object"}: any object validates; arrays and strings are
+    // rejected with a "type" error naming the actual JSON type.
+    Document sd;
+    sd.Parse("{\"type\":\"object\"}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "{\"key\":\"value\",\"another_key\":\"another_value\"}", true);
+    VALIDATE(s, "{\"Sun\":1.9891e30,\"Jupiter\":1.8986e27,\"Saturn\":5.6846e26,\"Neptune\":10.243e25,\"Uranus\":8.6810e25,\"Earth\":5.9736e24,\"Venus\":4.8685e24,\"Mars\":6.4185e23,\"Mercury\":3.3022e23,\"Moon\":7.349e22,\"Pluto\":1.25e22}", true);
+    INVALIDATE(s, "[\"An\", \"array\", \"not\", \"an\", \"object\"]", "", "type", "",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": [\"object\"], \"actual\": \"array\""
+        "}}");
+    INVALIDATE(s, "\"Not an object\"", "", "type", "",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": [\"object\"], \"actual\": \"string\""
+        "}}");
+}
+
+TEST(SchemaValidator, Object_Properties) {
+    // "properties" constrains listed members only; unlisted members
+    // ("direction") and missing members are allowed by default.
+    Document sd;
+    sd.Parse(
+        "{"
+        "    \"type\": \"object\","
+        "    \"properties\" : {"
+        "        \"number\": { \"type\": \"number\" },"
+        "        \"street_name\" : { \"type\": \"string\" },"
+        "        \"street_type\" : { \"type\": \"string\", \"enum\" : [\"Street\", \"Avenue\", \"Boulevard\"] }"
+        "    }"
+        "}");
+
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "{ \"number\": 1600, \"street_name\": \"Pennsylvania\", \"street_type\": \"Avenue\" }", true);
+    INVALIDATE(s, "{ \"number\": \"1600\", \"street_name\": \"Pennsylvania\", \"street_type\": \"Avenue\" }", "/properties/number", "type", "/number",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#/number\", \"schemaRef\": \"#/properties/number\","
+        "    \"expected\": [\"number\"], \"actual\": \"string\""
+        "}}");
+    // Only the first error is reported even though street_type is also invalid.
+    INVALIDATE(s, "{ \"number\": \"One\", \"street_name\": \"Microsoft\", \"street_type\": \"Way\" }",
+        "/properties/number", "type", "/number",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#/number\", \"schemaRef\": \"#/properties/number\","
+        "    \"expected\": [\"number\"], \"actual\": \"string\""
+        "}}"); // fail fast
+    VALIDATE(s, "{ \"number\": 1600, \"street_name\": \"Pennsylvania\" }", true);
+    VALIDATE(s, "{}", true);
+    VALIDATE(s, "{ \"number\": 1600, \"street_name\": \"Pennsylvania\", \"street_type\": \"Avenue\", \"direction\": \"NW\" }", true);
+}
+
+TEST(SchemaValidator, Object_AdditionalPropertiesBoolean) {
+    // "additionalProperties": false — any member not in "properties"
+    // is rejected and named in the "disallowed" field of the error.
+    Document sd;
+    sd.Parse(
+        "{"
+        "    \"type\": \"object\","
+        "        \"properties\" : {"
+        "        \"number\": { \"type\": \"number\" },"
+        "            \"street_name\" : { \"type\": \"string\" },"
+        "            \"street_type\" : { \"type\": \"string\","
+        "            \"enum\" : [\"Street\", \"Avenue\", \"Boulevard\"]"
+        "        }"
+        "    },"
+        "    \"additionalProperties\": false"
+        "}");
+
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "{ \"number\": 1600, \"street_name\": \"Pennsylvania\", \"street_type\": \"Avenue\" }", true);
+    INVALIDATE(s, "{ \"number\": 1600, \"street_name\": \"Pennsylvania\", \"street_type\": \"Avenue\", \"direction\": \"NW\" }", "", "additionalProperties", "/direction",
+        "{ \"additionalProperties\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"disallowed\": \"direction\""
+        "}}");
+}
+
+TEST(SchemaValidator, Object_AdditionalPropertiesObject) {
+    // "additionalProperties" as a schema — extra members are allowed but
+    // must themselves validate (here: must be strings).
+    Document sd;
+    sd.Parse(
+        "{"
+        "    \"type\": \"object\","
+        "    \"properties\" : {"
+        "        \"number\": { \"type\": \"number\" },"
+        "        \"street_name\" : { \"type\": \"string\" },"
+        "        \"street_type\" : { \"type\": \"string\","
+        "            \"enum\" : [\"Street\", \"Avenue\", \"Boulevard\"]"
+        "        }"
+        "    },"
+        "    \"additionalProperties\": { \"type\": \"string\" }"
+        "}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "{ \"number\": 1600, \"street_name\": \"Pennsylvania\", \"street_type\": \"Avenue\" }", true);
+    VALIDATE(s, "{ \"number\": 1600, \"street_name\": \"Pennsylvania\", \"street_type\": \"Avenue\", \"direction\": \"NW\" }", true);
+    INVALIDATE(s, "{ \"number\": 1600, \"street_name\": \"Pennsylvania\", \"street_type\": \"Avenue\", \"office_number\": 201 }", "/additionalProperties", "type", "/office_number",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#/office_number\", \"schemaRef\": \"#/additionalProperties\","
+        "    \"expected\": [\"string\"], \"actual\": \"integer\""
+        "}}");
+}
+
+TEST(SchemaValidator, Object_Required) {
+    // "required" — the error lists every missing required member
+    // ("missing" array), not just the first.
+    Document sd;
+    sd.Parse(
+        "{"
+        "    \"type\": \"object\","
+        "    \"properties\" : {"
+        "        \"name\":      { \"type\": \"string\" },"
+        "        \"email\" : { \"type\": \"string\" },"
+        "        \"address\" : { \"type\": \"string\" },"
+        "        \"telephone\" : { \"type\": \"string\" }"
+        "    },"
+        "    \"required\":[\"name\", \"email\"]"
+        "}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "{ \"name\": \"William Shakespeare\", \"email\" : \"bill@stratford-upon-avon.co.uk\" }", true);
+    VALIDATE(s, "{ \"name\": \"William Shakespeare\", \"email\" : \"bill@stratford-upon-avon.co.uk\", \"address\" : \"Henley Street, Stratford-upon-Avon, Warwickshire, England\", \"authorship\" : \"in question\"}", true);
+    INVALIDATE(s, "{ \"name\": \"William Shakespeare\", \"address\" : \"Henley Street, Stratford-upon-Avon, Warwickshire, England\" }", "", "required", "",
+        "{ \"required\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"missing\": [\"email\"]"
+        "}}");
+    INVALIDATE(s, "{}", "", "required", "",
+        "{ \"required\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"missing\": [\"name\", \"email\"]"
+        "}}");
+}
+
+TEST(SchemaValidator, Object_Required_PassWithDefault) {
+    // A required member whose property schema declares a "default" is treated
+    // as satisfied when absent: "name" (has default) never appears in
+    // "missing", while "email" (default "" still counts as having a default
+    // only for presence — here it is still reported) does.
+    // NOTE(review): assertions below show "email" reported missing despite its
+    // "" default; only "name" is exempted — confirm intended semantics upstream.
+    Document sd;
+    sd.Parse(
+        "{"
+        "    \"type\": \"object\","
+        "    \"properties\" : {"
+        "        \"name\":      { \"type\": \"string\", \"default\": \"William Shakespeare\" },"
+        "        \"email\" : { \"type\": \"string\", \"default\": \"\" },"
+        "        \"address\" : { \"type\": \"string\" },"
+        "        \"telephone\" : { \"type\": \"string\" }"
+        "    },"
+        "    \"required\":[\"name\", \"email\"]"
+        "}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "{ \"email\" : \"bill@stratford-upon-avon.co.uk\", \"address\" : \"Henley Street, Stratford-upon-Avon, Warwickshire, England\", \"authorship\" : \"in question\"}", true);
+    INVALIDATE(s, "{ \"name\": \"William Shakespeare\", \"address\" : \"Henley Street, Stratford-upon-Avon, Warwickshire, England\" }", "", "required", "",
+        "{ \"required\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"missing\": [\"email\"]"
+        "}}");
+    INVALIDATE(s, "{}", "", "required", "",
+        "{ \"required\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"missing\": [\"email\"]"
+        "}}");
+}
+
+TEST(SchemaValidator, Object_PropertiesRange) {
+    // minProperties/maxProperties — member count must be in [2, 3].
+    Document sd;
+    sd.Parse("{\"type\":\"object\", \"minProperties\":2, \"maxProperties\":3}");
+    SchemaDocument s(sd);
+
+    INVALIDATE(s, "{}", "", "minProperties", "",
+        "{ \"minProperties\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 2, \"actual\": 0"
+        "}}");
+    INVALIDATE(s, "{\"a\":0}", "", "minProperties", "",
+        "{ \"minProperties\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 2, \"actual\": 1"
+        "}}");
+    VALIDATE(s, "{\"a\":0,\"b\":1}", true);
+    VALIDATE(s, "{\"a\":0,\"b\":1,\"c\":2}", true);
+    INVALIDATE(s, "{\"a\":0,\"b\":1,\"c\":2,\"d\":3}", "", "maxProperties", "",
+        "{ \"maxProperties\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\", "
+        "    \"expected\": 3, \"actual\": 4"
+        "}}");
+}
+
+TEST(SchemaValidator, Object_PropertyDependencies) {
+    // Property dependencies (array form): if "credit_card" is present then
+    // "cvv_code" and "billing_address" must also be present. The dependency
+    // is one-way: the dependents may appear without "credit_card".
+    Document sd;
+    sd.Parse(
+        "{"
+        "  \"type\": \"object\","
+        "  \"properties\": {"
+        "    \"name\": { \"type\": \"string\" },"
+        "    \"credit_card\": { \"type\": \"number\" },"
+        "    \"cvv_code\": { \"type\": \"number\" },"
+        "    \"billing_address\": { \"type\": \"string\" }"
+        "  },"
+        "  \"required\": [\"name\"],"
+        "  \"dependencies\": {"
+        "    \"credit_card\": [\"cvv_code\", \"billing_address\"]"
+        "  }"
+        "}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "{ \"name\": \"John Doe\", \"credit_card\": 5555555555555555, \"cvv_code\": 777, "
+        "\"billing_address\": \"555 Debtor's Lane\" }", true);
+    INVALIDATE(s, "{ \"name\": \"John Doe\", \"credit_card\": 5555555555555555 }", "", "dependencies", "",
+        "{ \"dependencies\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"errors\": {\"credit_card\": [\"cvv_code\", \"billing_address\"]}"
+        "}}");
+    VALIDATE(s, "{ \"name\": \"John Doe\"}", true);
+    VALIDATE(s, "{ \"name\": \"John Doe\", \"cvv_code\": 777, \"billing_address\": \"555 Debtor's Lane\" }", true);
+}
+
+TEST(SchemaValidator, Object_SchemaDependencies) {
+    // Schema dependencies (object form): when "credit_card" is present the
+    // whole instance must additionally satisfy the dependent sub-schema,
+    // whose errors are nested under "errors"/"credit_card".
+    Document sd;
+    sd.Parse(
+        "{"
+        "    \"type\": \"object\","
+        "    \"properties\" : {"
+        "        \"name\": { \"type\": \"string\" },"
+        "        \"credit_card\" : { \"type\": \"number\" }"
+        "    },"
+        "    \"required\" : [\"name\"],"
+        "    \"dependencies\" : {"
+        "        \"credit_card\": {"
+        "            \"properties\": {"
+        "                \"billing_address\": { \"type\": \"string\" }"
+        "            },"
+        "            \"required\" : [\"billing_address\"]"
+        "        }"
+        "    }"
+        "}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "{\"name\": \"John Doe\", \"credit_card\" : 5555555555555555,\"billing_address\" : \"555 Debtor's Lane\"}", true);
+    INVALIDATE(s, "{\"name\": \"John Doe\", \"credit_card\" : 5555555555555555 }", "", "dependencies", "",
+        "{ \"dependencies\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"errors\": {"
+        "      \"credit_card\": {"
+        "        \"required\": {"
+        "          \"instanceRef\": \"#\", \"schemaRef\": \"#/dependencies/credit_card\","
+        "          \"missing\": [\"billing_address\"]"
+        "    } } }"
+        "}}");
+    VALIDATE(s, "{\"name\": \"John Doe\", \"billing_address\" : \"555 Debtor's Lane\"}", true);
+}
+
+#if RAPIDJSON_SCHEMA_HAS_REGEX
+TEST(SchemaValidator, Object_PatternProperties) {
+    // "patternProperties" — member names matching ^S_ must be strings,
+    // ^I_ must be integers; non-matching names are unconstrained.
+    // schemaRef in errors URI-encodes the pattern ("^" -> %5E).
+    Document sd;
+    sd.Parse(
+        "{"
+        "  \"type\": \"object\","
+        "  \"patternProperties\": {"
+        "    \"^S_\": { \"type\": \"string\" },"
+        "    \"^I_\": { \"type\": \"integer\" }"
+        "  }"
+        "}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "{ \"S_25\": \"This is a string\" }", true);
+    VALIDATE(s, "{ \"I_0\": 42 }", true);
+    INVALIDATE(s, "{ \"S_0\": 42 }", "", "patternProperties", "/S_0",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#/S_0\", \"schemaRef\": \"#/patternProperties/%5ES_\","
+        "    \"expected\": [\"string\"], \"actual\": \"integer\""
+        "}}");
+    INVALIDATE(s, "{ \"I_42\": \"This is a string\" }", "", "patternProperties", "/I_42",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#/I_42\", \"schemaRef\": \"#/patternProperties/%5EI_\","
+        "    \"expected\": [\"integer\"], \"actual\": \"string\""
+        "}}");
+    VALIDATE(s, "{ \"keyword\": \"value\" }", true);
+}
+
+// Fixed test-name typo: "Patttern" -> "Pattern".
+TEST(SchemaValidator, Object_PatternProperties_ErrorConflict) {
+    // A member name matching two patterns that both fail the same keyword
+    // ("multipleOf") yields an ARRAY of errors under that keyword, one per
+    // matching pattern schema.
+    Document sd;
+    sd.Parse(
+        "{"
+        "  \"type\": \"object\","
+        "  \"patternProperties\": {"
+        "    \"^I_\": { \"multipleOf\": 5 },"
+        "    \"30$\": { \"multipleOf\": 6 }"
+        "  }"
+        "}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "{ \"I_30\": 30 }", true);
+    INVALIDATE(s, "{ \"I_30\": 7 }", "", "patternProperties", "/I_30",
+        "{ \"multipleOf\": ["
+        "    {"
+        "      \"instanceRef\": \"#/I_30\", \"schemaRef\": \"#/patternProperties/%5EI_\","
+        "      \"expected\": 5, \"actual\": 7"
+        "    }, {"
+        "      \"instanceRef\": \"#/I_30\", \"schemaRef\": \"#/patternProperties/30%24\","
+        "      \"expected\": 6, \"actual\": 7"
+        "    }"
+        "]}");
+}
+
+TEST(SchemaValidator, Object_Properties_PatternProperties) {
+    // A member matching both "properties" and "patternProperties" must
+    // satisfy BOTH schemas; failures from each source are merged by keyword.
+    Document sd;
+    sd.Parse(
+        "{"
+        "  \"type\": \"object\","
+        "  \"properties\": {"
+        "    \"I_42\": { \"type\": \"integer\", \"minimum\": 73 }"
+        "  },"
+        "  \"patternProperties\": {"
+        "    \"^I_\": { \"type\": \"integer\", \"multipleOf\": 6 }"
+        "  }"
+        "}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "{ \"I_6\": 6 }", true);
+    VALIDATE(s, "{ \"I_42\": 78 }", true);
+    INVALIDATE(s, "{ \"I_42\": 42 }", "", "patternProperties", "/I_42",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#/I_42\", \"schemaRef\": \"#/properties/I_42\","
+        "    \"expected\": 73, \"actual\": 42"
+        "}}");
+    INVALIDATE(s, "{ \"I_42\": 7 }", "", "patternProperties", "/I_42",
+        "{ \"minimum\": {"
+        "    \"instanceRef\": \"#/I_42\", \"schemaRef\": \"#/properties/I_42\","
+        "    \"expected\": 73, \"actual\": 7"
+        "  },"
+        "  \"multipleOf\": {"
+        "    \"instanceRef\": \"#/I_42\", \"schemaRef\": \"#/patternProperties/%5EI_\","
+        "    \"expected\": 6, \"actual\": 7"
+        "  }"
+        "}");
+}
+
+TEST(SchemaValidator, Object_PatternProperties_AdditionalProperties) {
+    // "additionalProperties" applies only to members matched by neither
+    // "properties" nor "patternProperties".
+    Document sd;
+    sd.Parse(
+        "{"
+        "  \"type\": \"object\","
+        "  \"properties\": {"
+        "    \"builtin\": { \"type\": \"number\" }"
+        "  },"
+        "  \"patternProperties\": {"
+        "    \"^S_\": { \"type\": \"string\" },"
+        "    \"^I_\": { \"type\": \"integer\" }"
+        "  },"
+        "  \"additionalProperties\": { \"type\": \"string\" }"
+        "}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "{ \"builtin\": 42 }", true);
+    VALIDATE(s, "{ \"keyword\": \"value\" }", true);
+    INVALIDATE(s, "{ \"keyword\": 42 }", "/additionalProperties", "type", "/keyword",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#/keyword\", \"schemaRef\": \"#/additionalProperties\","
+        "    \"expected\": [\"string\"], \"actual\": \"integer\""
+        "}}");
+}
+#endif
+
+TEST(SchemaValidator, Array) {
+    // Bare {"type":"array"}: any array (mixed element types included)
+    // validates; an object is rejected with a "type" error.
+    Document sd;
+    sd.Parse("{\"type\":\"array\"}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "[1, 2, 3, 4, 5]", true);
+    VALIDATE(s, "[3, \"different\", { \"types\" : \"of values\" }]", true);
+    INVALIDATE(s, "{\"Not\": \"an array\"}", "", "type", "",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": [\"array\"], \"actual\": \"object\""
+        "}}");
+}
+
+TEST(SchemaValidator, Array_ItemsList) {
+    // "items" as a single schema applies to every element; the error points
+    // at the offending index (#/2). Empty arrays trivially pass.
+    Document sd;
+    sd.Parse(
+        "{"
+        "    \"type\": \"array\","
+        "    \"items\" : {"
+        "        \"type\": \"number\""
+        "    }"
+        "}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "[1, 2, 3, 4, 5]", true);
+    INVALIDATE(s, "[1, 2, \"3\", 4, 5]", "/items", "type", "/2",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#/2\", \"schemaRef\": \"#/items\","
+        "    \"expected\": [\"number\"], \"actual\": \"string\""
+        "}}");
+    VALIDATE(s, "[]", true);
+}
+
+TEST(SchemaValidator, Array_ItemsTuple) {
+    // "items" as a tuple (array of schemas): each position gets its own
+    // schema; shorter arrays are fine, extra trailing items are allowed
+    // (no "additionalItems" here).
+    Document sd;
+    sd.Parse(
+        "{"
+        "  \"type\": \"array\","
+        "  \"items\": ["
+        "    {"
+        "      \"type\": \"number\""
+        "    },"
+        "    {"
+        "      \"type\": \"string\""
+        "    },"
+        "    {"
+        "      \"type\": \"string\","
+        "      \"enum\": [\"Street\", \"Avenue\", \"Boulevard\"]"
+        "    },"
+        "    {"
+        "      \"type\": \"string\","
+        "      \"enum\": [\"NW\", \"NE\", \"SW\", \"SE\"]"
+        "    }"
+        "  ]"
+        "}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "[1600, \"Pennsylvania\", \"Avenue\", \"NW\"]", true);
+    INVALIDATE(s, "[24, \"Sussex\", \"Drive\"]", "/items/2", "enum", "/2",
+        "{ \"enum\": { \"instanceRef\": \"#/2\", \"schemaRef\": \"#/items/2\" }}");
+    INVALIDATE(s, "[\"Palais de l'Elysee\"]", "/items/0", "type", "/0",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#/0\", \"schemaRef\": \"#/items/0\","
+        "    \"expected\": [\"number\"], \"actual\": \"string\""
+        "}}");
+    // Only the first failing position is reported.
+    INVALIDATE(s, "[\"Twenty-four\", \"Sussex\", \"Drive\"]", "/items/0", "type", "/0",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#/0\", \"schemaRef\": \"#/items/0\","
+        "    \"expected\": [\"number\"], \"actual\": \"string\""
+        "}}"); // fail fast
+    VALIDATE(s, "[10, \"Downing\", \"Street\"]", true);
+    VALIDATE(s, "[1600, \"Pennsylvania\", \"Avenue\", \"NW\", \"Washington\"]", true);
+}
+
+// Fixed test-name typo: "Itmes" -> "Items".
+TEST(SchemaValidator, Array_AdditionalItems) {
+    // Tuple-form "items" plus "additionalItems": false — a fifth element
+    // beyond the four positional schemas is disallowed; the error reports
+    // the offending index under "disallowed".
+    Document sd;
+    sd.Parse(
+        "{"
+        "  \"type\": \"array\","
+        "  \"items\": ["
+        "    {"
+        "      \"type\": \"number\""
+        "    },"
+        "    {"
+        "      \"type\": \"string\""
+        "    },"
+        "    {"
+        "      \"type\": \"string\","
+        "      \"enum\": [\"Street\", \"Avenue\", \"Boulevard\"]"
+        "    },"
+        "    {"
+        "      \"type\": \"string\","
+        "      \"enum\": [\"NW\", \"NE\", \"SW\", \"SE\"]"
+        "    }"
+        "  ],"
+        "  \"additionalItems\": false"
+        "}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "[1600, \"Pennsylvania\", \"Avenue\", \"NW\"]", true);
+    VALIDATE(s, "[1600, \"Pennsylvania\", \"Avenue\"]", true);
+    INVALIDATE(s, "[1600, \"Pennsylvania\", \"Avenue\", \"NW\", \"Washington\"]", "", "items", "/4",
+        "{ \"additionalItems\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"disallowed\": 4"
+        "}}");
+}
+
+TEST(SchemaValidator, Array_ItemsRange) {
+    // minItems/maxItems — element count must be in [2, 3].
+    Document sd;
+    sd.Parse("{\"type\": \"array\",\"minItems\": 2,\"maxItems\" : 3}");
+    SchemaDocument s(sd);
+
+    INVALIDATE(s, "[]", "", "minItems", "",
+        "{ \"minItems\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 2, \"actual\": 0"
+        "}}");
+    INVALIDATE(s, "[1]", "", "minItems", "",
+        "{ \"minItems\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 2, \"actual\": 1"
+        "}}");
+    VALIDATE(s, "[1, 2]", true);
+    VALIDATE(s, "[1, 2, 3]", true);
+    INVALIDATE(s, "[1, 2, 3, 4]", "", "maxItems", "",
+        "{ \"maxItems\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 3, \"actual\": 4"
+        "}}");
+}
+
+TEST(SchemaValidator, Array_UniqueItems) {
+    // "uniqueItems": true — the error names the indices of the first
+    // duplicate pair ("duplicates": [2, 3]); later duplicates are not
+    // reported (fail fast).
+    Document sd;
+    sd.Parse("{\"type\": \"array\", \"uniqueItems\": true}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "[1, 2, 3, 4, 5]", true);
+    INVALIDATE(s, "[1, 2, 3, 3, 4]", "", "uniqueItems", "/3",
+        "{ \"uniqueItems\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"duplicates\": [2, 3]"
+        "}}");
+    INVALIDATE(s, "[1, 2, 3, 3, 3]", "", "uniqueItems", "/3",
+        "{ \"uniqueItems\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"duplicates\": [2, 3]"
+        "}}"); // fail fast
+    VALIDATE(s, "[]", true);
+}
+
+TEST(SchemaValidator, Boolean) {
+    // {"type":"boolean"} — no coercion: the string "true" and the
+    // integer 0 are both rejected.
+    Document sd;
+    sd.Parse("{\"type\":\"boolean\"}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "true", true);
+    VALIDATE(s, "false", true);
+    INVALIDATE(s, "\"true\"", "", "type", "",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": [\"boolean\"], \"actual\": \"string\""
+        "}}");
+    INVALIDATE(s, "0", "", "type", "",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": [\"boolean\"], \"actual\": \"integer\""
+        "}}");
+}
+
+TEST(SchemaValidator, Null) {
+    // {"type":"null"} — only the literal null validates; false, 0 and ""
+    // are distinct types and are rejected.
+    Document sd;
+    sd.Parse("{\"type\":\"null\"}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "null", true);
+    INVALIDATE(s, "false", "", "type", "",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": [\"null\"], \"actual\": \"boolean\""
+        "}}");
+    INVALIDATE(s, "0", "", "type", "",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": [\"null\"], \"actual\": \"integer\""
+        "}}");
+    INVALIDATE(s, "\"\"", "", "type", "",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": [\"null\"], \"actual\": \"string\""
+        "}}");
+}
+
+// Additional tests
+
+TEST(SchemaValidator, ObjectInArray) {
+    // Element-level type errors inside an array report both the element's
+    // instance pointer (#/0) and the "items" schema pointer.
+    Document sd;
+    sd.Parse("{\"type\":\"array\", \"items\": { \"type\":\"string\" }}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "[\"a\"]", true);
+    INVALIDATE(s, "[1]", "/items", "type", "/0",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#/0\", \"schemaRef\": \"#/items\","
+        "    \"expected\": [\"string\"], \"actual\": \"integer\""
+        "}}");
+    INVALIDATE(s, "[{}]", "/items", "type", "/0",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#/0\", \"schemaRef\": \"#/items\","
+        "    \"expected\": [\"string\"], \"actual\": \"object\""
+        "}}");
+}
+
+TEST(SchemaValidator, MultiTypeInObject) {
+    // A property with "type": ["integer","string"] accepts either; a boolean
+    // fails, and the error's "expected" list may reorder the declared types
+    // (here it reports ["string","integer"]).
+    Document sd;
+    sd.Parse(
+        "{"
+        "    \"type\":\"object\","
+        "    \"properties\": {"
+        "        \"tel\" : {"
+        "            \"type\":[\"integer\", \"string\"]"
+        "        }"
+        "    }"
+        "}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "{ \"tel\": 999 }", true);
+    VALIDATE(s, "{ \"tel\": \"123-456\" }", true);
+    INVALIDATE(s, "{ \"tel\": true }", "/properties/tel", "type", "/tel",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#/tel\", \"schemaRef\": \"#/properties/tel\","
+        "    \"expected\": [\"string\", \"integer\"], \"actual\": \"boolean\""
+        "}}");
+}
+
+TEST(SchemaValidator, MultiTypeWithObject) {
+    // Top-level "type": ["object","string"] — a plain string passes without
+    // the "properties" constraints; when the instance IS an object, its
+    // "tel" member must still be an integer.
+    Document sd;
+    sd.Parse(
+        "{"
+        "    \"type\": [\"object\",\"string\"],"
+        "    \"properties\": {"
+        "        \"tel\" : {"
+        "            \"type\": \"integer\""
+        "        }"
+        "    }"
+        "}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "\"Hello\"", true);
+    VALIDATE(s, "{ \"tel\": 999 }", true);
+    INVALIDATE(s, "{ \"tel\": \"fail\" }", "/properties/tel", "type", "/tel",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#/tel\", \"schemaRef\": \"#/properties/tel\","
+        "    \"expected\": [\"integer\"], \"actual\": \"string\""
+        "}}");
+}
+
+TEST(SchemaValidator, AllOf_Nested) {
+    // Nested "allOf": string length in [2,5] AND member of the intersection
+    // of two enums. Errors from every failing sub-schema are collected and
+    // merged by keyword; same-keyword failures become an array.
+    Document sd;
+    sd.Parse(
+    "{"
+    "    \"allOf\": ["
+    "        { \"type\": \"string\", \"minLength\": 2 },"
+    "        { \"type\": \"string\", \"maxLength\": 5 },"
+    "        { \"allOf\": [ { \"enum\" : [\"ok\", \"okay\", \"OK\", \"o\"] }, { \"enum\" : [\"ok\", \"OK\", \"o\"]} ] }"
+    "    ]"
+    "}");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "\"ok\"", true);
+    VALIDATE(s, "\"OK\"", true);
+    INVALIDATE(s, "\"okay\"", "", "allOf", "",
+        "{ \"enum\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#/allOf/2/allOf/1\""
+        "}}");
+    INVALIDATE(s, "\"o\"", "", "allOf", "",
+        "{ \"minLength\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#/allOf/0\","
+        "    \"expected\": 2, \"actual\": \"o\""
+        "}}");
+    // Was missing the trailing ';' — it only compiled because INVALIDATE
+    // expands to a compound statement.
+    INVALIDATE(s, "\"n\"", "", "allOf", "",
+        "{ \"minLength\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#/allOf/0\","
+        "    \"expected\": 2, \"actual\": \"n\""
+        "  },"
+        "  \"enum\": ["
+        "    {\"instanceRef\": \"#\", \"schemaRef\": \"#/allOf/2/allOf/0\"},"
+        "    {\"instanceRef\": \"#\", \"schemaRef\": \"#/allOf/2/allOf/1\"}"
+        "  ]"
+        "}");
+    INVALIDATE(s, "\"too long\"", "", "allOf", "",
+        "{ \"maxLength\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#/allOf/1\","
+        "    \"expected\": 5, \"actual\": \"too long\""
+        "  },"
+        "  \"enum\": ["
+        "    {\"instanceRef\": \"#\", \"schemaRef\": \"#/allOf/2/allOf/0\"},"
+        "    {\"instanceRef\": \"#\", \"schemaRef\": \"#/allOf/2/allOf/1\"}"
+        "  ]"
+        "}");
+    INVALIDATE(s, "123", "", "allOf", "",
+        "{ \"type\": ["
+        "    { \"instanceRef\": \"#\", \"schemaRef\": \"#/allOf/0\","
+        "      \"expected\": [\"string\"], \"actual\": \"integer\""
+        "    },"
+        "    { \"instanceRef\": \"#\", \"schemaRef\": \"#/allOf/1\","
+        "      \"expected\": [\"string\"], \"actual\": \"integer\""
+        "    }"
+        "  ],"
+        "  \"enum\": ["
+        "    {\"instanceRef\": \"#\", \"schemaRef\": \"#/allOf/2/allOf/0\"},"
+        "    {\"instanceRef\": \"#\", \"schemaRef\": \"#/allOf/2/allOf/1\"}"
+        "  ]"
+        "}");
+}
+
+TEST(SchemaValidator, EscapedPointer) {
+    // JSON Pointer escaping in error pointers: a property literally named
+    // "~/" appears as "~0~1" ('~' -> ~0, '/' -> ~1 per RFC 6901).
+    Document sd;
+    sd.Parse(
+        "{"
+        "  \"type\": \"object\","
+        "  \"properties\": {"
+        "    \"~/\": { \"type\": \"number\" }"
+        "  }"
+        "}");
+    SchemaDocument s(sd);
+    INVALIDATE(s, "{\"~/\":true}", "/properties/~0~1", "type", "/~0~1",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#/~0~1\", \"schemaRef\": \"#/properties/~0~1\","
+        "    \"expected\": [\"number\"], \"actual\": \"boolean\""
+        "}}");
+}
+
+// Read a whole file into a NUL-terminated buffer obtained from `allocator`,
+// trying the filename against a few bin/ directory prefixes so tests work
+// regardless of the working directory. Returns 0 if no candidate opens or
+// the size cannot be determined; otherwise the caller owns the buffer.
+template <typename Allocator>
+static char* ReadFile(const char* filename, Allocator& allocator) {
+    const char *paths[] = {
+        "",
+        "bin/",
+        "../bin/",
+        "../../bin/",
+        "../../../bin/"
+    };
+    char buffer[1024];
+    FILE *fp = 0;
+    for (size_t i = 0; i < sizeof(paths) / sizeof(paths[0]); i++) {
+        // snprintf (not sprintf): an overlong filename must not overflow buffer.
+        int n = snprintf(buffer, sizeof(buffer), "%s%s", paths[i], filename);
+        if (n < 0 || static_cast<size_t>(n) >= sizeof(buffer))
+            continue; // truncated or encoding error; skip this candidate
+        fp = fopen(buffer, "rb");
+        if (fp)
+            break;
+    }
+
+    if (!fp)
+        return 0;
+
+    fseek(fp, 0, SEEK_END);
+    long size = ftell(fp);
+    if (size < 0) {
+        // ftell failed (-1); casting it to size_t would request a huge allocation.
+        fclose(fp);
+        return 0;
+    }
+    size_t length = static_cast<size_t>(size);
+    fseek(fp, 0, SEEK_SET);
+    char* json = reinterpret_cast<char*>(allocator.Malloc(length + 1));
+    // fread may return less than `length` (e.g. text-mode translation quirks);
+    // terminate at the bytes actually read.
+    size_t readLength = fread(json, 1, length, fp);
+    json[readLength] = '\0';
+    fclose(fp);
+    return json;
+}
+
+// The draft-04 meta-schema must validate against itself.
+TEST(SchemaValidator, ValidateMetaSchema) {
+    CrtAllocator allocator;
+    char* json = ReadFile("draft-04/schema", allocator);
+    ASSERT_TRUE(json != 0);  // ReadFile may return 0; Parse(0) would crash
+    Document d;
+    d.Parse(json);
+    ASSERT_FALSE(d.HasParseError());
+    SchemaDocument sd(d);
+    SchemaValidator validator(sd);
+    if (!d.Accept(validator)) {
+        // Dump full diagnostics before failing so CI logs show the cause.
+        StringBuffer sb;
+        validator.GetInvalidSchemaPointer().StringifyUriFragment(sb);
+        printf("Invalid schema: %s\n", sb.GetString());
+        printf("Invalid keyword: %s\n", validator.GetInvalidSchemaKeyword());
+        sb.Clear();
+        validator.GetInvalidDocumentPointer().StringifyUriFragment(sb);
+        printf("Invalid document: %s\n", sb.GetString());
+        sb.Clear();
+        Writer<StringBuffer> w(sb);
+        validator.GetError().Accept(w);
+        printf("Validation error: %s\n", sb.GetString());
+        ADD_FAILURE();
+    }
+    CrtAllocator::Free(json);
+}
+
+// Same self-validation as above, but parsed into a UTF-16 document via
+// on-the-fly transcoding from the UTF-8 file content.
+TEST(SchemaValidator, ValidateMetaSchema_UTF16) {
+    typedef GenericDocument<UTF16<> > D;
+    typedef GenericSchemaDocument<D::ValueType> SD;
+    typedef GenericSchemaValidator<SD> SV;
+
+    CrtAllocator allocator;
+    char* json = ReadFile("draft-04/schema", allocator);
+    ASSERT_TRUE(json != 0);  // ReadFile may return 0; parsing 0 would crash
+
+    D d;
+    StringStream ss(json);
+    d.ParseStream<0, UTF8<> >(ss);
+    ASSERT_FALSE(d.HasParseError());
+    SD sd(d);
+    SV validator(sd);
+    if (!d.Accept(validator)) {
+        GenericStringBuffer<UTF16<> > sb;
+        validator.GetInvalidSchemaPointer().StringifyUriFragment(sb);
+        wprintf(L"Invalid schema: %ls\n", sb.GetString());
+        wprintf(L"Invalid keyword: %ls\n", validator.GetInvalidSchemaKeyword());
+        sb.Clear();
+        validator.GetInvalidDocumentPointer().StringifyUriFragment(sb);
+        wprintf(L"Invalid document: %ls\n", sb.GetString());
+        sb.Clear();
+        Writer<GenericStringBuffer<UTF16<> >, UTF16<> > w(sb);
+        validator.GetError().Accept(w);
+        // wprintf for consistency with the lines above: mixing printf and
+        // wprintf on the same stream has unspecified orientation behaviour.
+        wprintf(L"Validation error: %ls\n", sb.GetString());
+        ADD_FAILURE();
+    }
+    CrtAllocator::Free(json);
+}
+
+// Serves a fixed set of "remote" schema documents (loaded from disk at
+// construction) that the test suite's $ref cases request by URI.
+template <typename SchemaDocumentType = SchemaDocument>
+class RemoteSchemaDocumentProvider : public IGenericRemoteSchemaDocumentProvider<SchemaDocumentType> {
+public:
+    RemoteSchemaDocumentProvider() :
+        documentAllocator_(documentBuffer_, sizeof(documentBuffer_)),
+        schemaAllocator_(schemaBuffer_, sizeof(schemaBuffer_))
+    {
+        const char* filenames[kCount] = {
+            "jsonschema/remotes/integer.json",
+            "jsonschema/remotes/subSchemas.json",
+            "jsonschema/remotes/folder/folderInteger.json",
+            "draft-04/schema"
+        };
+        const char* uris[kCount] = {
+            "http://localhost:1234/integer.json",
+            "http://localhost:1234/subSchemas.json",
+            "http://localhost:1234/folder/folderInteger.json",
+            "http://json-schema.org/draft-04/schema"
+        };
+
+        for (size_t i = 0; i < kCount; i++) {
+            sd_[i] = 0;
+
+            char jsonBuffer[8192];
+            MemoryPoolAllocator<> jsonAllocator(jsonBuffer, sizeof(jsonBuffer));
+            char* json = ReadFile(filenames[i], jsonAllocator);
+            if (!json) {
+                printf("json remote file %s not found\n", filenames[i]);
+                ADD_FAILURE();
+            }
+            else {
+                char stackBuffer[4096];
+                MemoryPoolAllocator<> stackAllocator(stackBuffer, sizeof(stackBuffer));
+                DocumentType d(&documentAllocator_, 1024, &stackAllocator);
+                d.Parse(json);
+                sd_[i] = new SchemaDocumentType(d, uris[i], static_cast<SizeType>(strlen(uris[i])), 0, &schemaAllocator_);
+                MemoryPoolAllocator<>::Free(json);
+            }
+        }
+    }
+
+    // Entries that failed to load remain 0; deleting 0 is a no-op.
+    ~RemoteSchemaDocumentProvider() {
+        for (size_t i = 0; i < kCount; i++)
+            delete sd_[i];
+    }
+
+    // Linear URI lookup. Skips entries whose file failed to load (sd_[i] == 0
+    // would otherwise be dereferenced); returns 0 when nothing matches.
+    virtual const SchemaDocumentType* GetRemoteDocument(const char* uri, SizeType length) {
+        for (size_t i = 0; i < kCount; i++)
+            if (sd_[i] && typename SchemaDocumentType::URIType(uri, length) == sd_[i]->GetURI())
+                return sd_[i];
+        return 0;
+    }
+
+private:
+    typedef GenericDocument<typename SchemaDocumentType::EncodingType, MemoryPoolAllocator<>, MemoryPoolAllocator<> > DocumentType;
+
+    // Non-copyable: the provider owns heap-allocated schema documents.
+    RemoteSchemaDocumentProvider(const RemoteSchemaDocumentProvider&);
+    RemoteSchemaDocumentProvider& operator=(const RemoteSchemaDocumentProvider&);
+
+    static const size_t kCount = 4;
+    SchemaDocumentType* sd_[kCount];
+    typename DocumentType::AllocatorType documentAllocator_;
+    typename SchemaDocumentType::AllocatorType schemaAllocator_;
+    char documentBuffer_[16384];
+    char schemaBuffer_[128u * 1024];
+};
+
+// Runs the official JSON-Schema-Test-Suite draft-4 cases. Each file holds an
+// array of {schema, tests[]} groups; every data item is validated against its
+// group's schema and the outcome compared with the expected verdict.
+TEST(SchemaValidator, TestSuite) {
+    const char* filenames[] = {
+        "additionalItems.json",
+        "additionalProperties.json",
+        "allOf.json",
+        "anyOf.json",
+        "default.json",
+        "definitions.json",
+        "dependencies.json",
+        "enum.json",
+        "items.json",
+        "maximum.json",
+        "maxItems.json",
+        "maxLength.json",
+        "maxProperties.json",
+        "minimum.json",
+        "minItems.json",
+        "minLength.json",
+        "minProperties.json",
+        "multipleOf.json",
+        "not.json",
+        "oneOf.json",
+        "pattern.json",
+        "patternProperties.json",
+        "properties.json",
+        "ref.json",
+        "refRemote.json",
+        "required.json",
+        "type.json",
+        "uniqueItems.json"
+    };
+
+    // Set to a test description to run only that case while debugging.
+    const char* onlyRunDescription = 0;
+    //const char* onlyRunDescription = "a string is a string";
+
+    unsigned testCount = 0;
+    unsigned passCount = 0;
+
+    typedef GenericSchemaDocument<Value, MemoryPoolAllocator<> > SchemaDocumentType;
+    RemoteSchemaDocumentProvider<SchemaDocumentType> provider;
+
+    // Fixed arenas keep the suite heap-free; they are cleared wholesale
+    // between files / schema groups instead of freed piecemeal.
+    char jsonBuffer[65536];
+    char documentBuffer[65536];
+    char documentStackBuffer[65536];
+    char schemaBuffer[65536];
+    char validatorBuffer[65536];
+    MemoryPoolAllocator<> jsonAllocator(jsonBuffer, sizeof(jsonBuffer));
+    MemoryPoolAllocator<> documentAllocator(documentBuffer, sizeof(documentBuffer));
+    MemoryPoolAllocator<> documentStackAllocator(documentStackBuffer, sizeof(documentStackBuffer));
+    MemoryPoolAllocator<> schemaAllocator(schemaBuffer, sizeof(schemaBuffer));
+    MemoryPoolAllocator<> validatorAllocator(validatorBuffer, sizeof(validatorBuffer));
+
+    for (size_t i = 0; i < sizeof(filenames) / sizeof(filenames[0]); i++) {
+        char filename[FILENAME_MAX];
+        sprintf(filename, "jsonschema/tests/draft4/%s", filenames[i]);
+        char* json = ReadFile(filename, jsonAllocator);
+        if (!json) {
+            printf("json test suite file %s not found\n", filename);
+            ADD_FAILURE();
+        }
+        else {
+            GenericDocument<UTF8<>, MemoryPoolAllocator<>, MemoryPoolAllocator<> > d(&documentAllocator, 1024, &documentStackAllocator);
+            d.Parse(json);
+            if (d.HasParseError()) {
+                printf("json test suite file %s has parse error\n", filename);
+                ADD_FAILURE();
+            }
+            else {
+                for (Value::ConstValueIterator schemaItr = d.Begin(); schemaItr != d.End(); ++schemaItr) {
+                    {
+                        SchemaDocumentType schema((*schemaItr)["schema"], filenames[i], static_cast<SizeType>(strlen(filenames[i])), &provider, &schemaAllocator);
+                        GenericSchemaValidator<SchemaDocumentType, BaseReaderHandler<UTF8<> >, MemoryPoolAllocator<> > validator(schema, &validatorAllocator);
+                        const char* description1 = (*schemaItr)["description"].GetString();
+                        const Value& tests = (*schemaItr)["tests"];
+                        for (Value::ConstValueIterator testItr = tests.Begin(); testItr != tests.End(); ++testItr) {
+                            const char* description2 = (*testItr)["description"].GetString();
+                            if (!onlyRunDescription || strcmp(description2, onlyRunDescription) == 0) {
+                                const Value& data = (*testItr)["data"];
+                                bool expected = (*testItr)["valid"].GetBool();
+                                testCount++;
+                                validator.Reset();
+                                bool actual = data.Accept(validator);
+                                if (expected != actual)
+                                    printf("Fail: %30s \"%s\" \"%s\"\n", filename, description1, description2);
+                                else
+                                    passCount++;
+                            }
+                        }
+                        //printf("%zu %zu %zu\n", documentAllocator.Size(), schemaAllocator.Size(), validatorAllocator.Size());
+                    }
+                    // Validator must be destroyed before its arenas are reset.
+                    schemaAllocator.Clear();
+                    validatorAllocator.Clear();
+                }
+            }
+        }
+        documentAllocator.Clear();
+        MemoryPoolAllocator<>::Free(json);
+        jsonAllocator.Clear();
+    }
+    // %u matches the unsigned counters; guard against dividing by zero when
+    // no test file could be loaded at all.
+    if (testCount > 0)
+        printf("%u / %u passed (%2u%%)\n", passCount, testCount, passCount * 100 / testCount);
+    // if (passCount != testCount)
+    //     ADD_FAILURE();
+}
+
+// A valid value must be parsed, validated and populated into the document.
+TEST(SchemaValidatingReader, Simple) {
+    // Schema: a string restricted to one of three colours.
+    Document schemaJson;
+    schemaJson.Parse("{ \"type\": \"string\", \"enum\" : [\"red\", \"amber\", \"green\"] }");
+    SchemaDocument schema(schemaJson);
+
+    // Populate a document through the validating reader.
+    StringStream input("\"red\"");
+    SchemaValidatingReader<kParseDefaultFlags, StringStream, UTF8<> > reader(input, schema);
+    Document document;
+    document.Populate(reader);
+    EXPECT_TRUE(reader.GetParseResult());
+    EXPECT_TRUE(reader.IsValid());
+    EXPECT_TRUE(document.IsString());
+    EXPECT_STREQ("red", document.GetString());
+}
+
+// An invalid value must terminate parsing and expose the violated keyword,
+// the schema/document pointers, and a structured error document.
+TEST(SchemaValidatingReader, Invalid) {
+    // Schema: a string whose length must lie in [2, 3].
+    Document schemaJson;
+    schemaJson.Parse("{\"type\":\"string\",\"minLength\":2,\"maxLength\":3}");
+    SchemaDocument schema(schemaJson);
+
+    // "ABCD" is one character too long.
+    StringStream input("\"ABCD\"");
+    SchemaValidatingReader<kParseDefaultFlags, StringStream, UTF8<> > reader(input, schema);
+    Document document;
+    document.Populate(reader);
+    EXPECT_FALSE(reader.GetParseResult());
+    EXPECT_FALSE(reader.IsValid());
+    EXPECT_EQ(kParseErrorTermination, reader.GetParseResult().Code());
+    EXPECT_STREQ("maxLength", reader.GetInvalidSchemaKeyword());
+    EXPECT_TRUE(reader.GetInvalidSchemaPointer() == SchemaDocument::PointerType(""));
+    EXPECT_TRUE(reader.GetInvalidDocumentPointer() == SchemaDocument::PointerType(""));
+    EXPECT_TRUE(document.IsNull());
+
+    // The reported error must match this reference document exactly.
+    Document expectedError;
+    expectedError.Parse(
+        "{ \"maxLength\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 3, \"actual\": \"ABCD\""
+        "}}");
+    if (expectedError != reader.GetError()) {
+        ADD_FAILURE();
+    }
+}
+
+// Validates while writing: accepted values are serialized to the buffer,
+// rejected values stop serialization and report a structured error.
+TEST(SchemaValidatingWriter, Simple) {
+    Document sd;
+    sd.Parse("{\"type\":\"string\",\"minLength\":2,\"maxLength\":3}");
+    SchemaDocument s(sd);
+
+    Document d;
+    StringBuffer sb;
+    Writer<StringBuffer> writer(sb);
+    GenericSchemaValidator<SchemaDocument, Writer<StringBuffer> > validator(s, writer);
+
+    // "red" satisfies the length limits and is forwarded to the writer.
+    d.Parse("\"red\"");
+    EXPECT_TRUE(d.Accept(validator));
+    EXPECT_TRUE(validator.IsValid());
+    EXPECT_STREQ("\"red\"", sb.GetString());
+
+    // "ABCD" exceeds maxLength: the validator must flag it and expose the
+    // offending pointers plus an error document.
+    sb.Clear();
+    validator.Reset();
+    d.Parse("\"ABCD\"");
+    EXPECT_FALSE(d.Accept(validator));
+    EXPECT_FALSE(validator.IsValid());
+    EXPECT_TRUE(validator.GetInvalidSchemaPointer() == SchemaDocument::PointerType(""));
+    EXPECT_TRUE(validator.GetInvalidDocumentPointer() == SchemaDocument::PointerType(""));
+    Document e;
+    e.Parse(
+        "{ \"maxLength\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": 3, \"actual\": \"ABCD\""
+        "}}");
+    EXPECT_EQ(e, validator.GetError());
+}
+
+// Regression for issue #848: a validator whose output handler type is a
+// Document must construct without crashing, even from an empty schema.
+TEST(Schema, Issue848) {
+    rapidjson::Document doc;
+    rapidjson::SchemaDocument schema(doc);
+    rapidjson::GenericSchemaValidator<rapidjson::SchemaDocument, rapidjson::Document> validator(schema);
+}
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+
+// Returns a SchemaDocument by value; relies on the C++11 move constructor so
+// no deep copy of the compiled schema is made.
+static SchemaDocument ReturnSchemaDocument() {
+    Document schemaJson;
+    schemaJson.Parse("{ \"type\": [\"number\", \"string\"] }");
+    SchemaDocument schema(schemaJson);
+    return schema;
+}
+
+// Issue #552: a SchemaDocument returned by value from a function must remain
+// usable (exercises the move constructor above).
+TEST(Schema, Issue552) {
+    SchemaDocument s = ReturnSchemaDocument();
+    VALIDATE(s, "42", true);
+    VALIDATE(s, "\"Life, the universe, and everything\"", true);
+    // An array matches neither allowed type.
+    INVALIDATE(s, "[\"Life\", \"the universe\", \"and everything\"]", "", "type", "",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"expected\": [\"string\", \"number\"], \"actual\": \"array\""
+        "}}");
+}
+
+#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS
+
+// Issue #608: a duplicated key must not satisfy a different required member;
+// "b" is still missing even though "a" appears twice.
+TEST(SchemaValidator, Issue608) {
+    Document sd;
+    sd.Parse("{\"required\": [\"a\", \"b\"] }");
+    SchemaDocument s(sd);
+
+    VALIDATE(s, "{\"a\" : null, \"b\": null}", true);
+    INVALIDATE(s, "{\"a\" : null, \"a\" : null}", "", "required", "",
+        "{ \"required\": {"
+        "    \"instanceRef\": \"#\", \"schemaRef\": \"#\","
+        "    \"missing\": [\"b\"]"
+        "}}");
+}
+
+// Fail to resolve $ref in allOf causes crash in SchemaValidator::StartObject()
+TEST(SchemaValidator, Issue728_AllOfRef) {
+    Document sd;
+    // "#/abc" does not exist in the schema; the unresolved subschema must be
+    // ignored rather than dereferenced.
+    sd.Parse("{\"allOf\": [{\"$ref\": \"#/abc\"}]}");
+    SchemaDocument s(sd);
+    VALIDATE(s, "{\"key1\": \"abc\", \"key2\": \"def\"}", true);
+}
+
+// Issue #825: a property matched by patternProperties must not be rejected
+// by "additionalProperties": false.
+TEST(SchemaValidator, Issue825) {
+    Document schemaJson;
+    schemaJson.Parse("{\"type\": \"object\", \"additionalProperties\": false, \"patternProperties\": {\"^i\": { \"type\": \"string\" } } }");
+    SchemaDocument schema(schemaJson);
+    VALIDATE(schema, "{ \"item\": \"hello\" }", true);
+}
+
+// Issue #1017: SAX events routed through an allOf schema must still be
+// forwarded to the output handler exactly once each.
+TEST(SchemaValidator, Issue1017_allOfHandler) {
+    Document sd;
+    sd.Parse("{\"allOf\": [{\"type\": \"object\",\"properties\": {\"cyanArray2\": {\"type\": \"array\",\"items\": { \"type\": \"string\" }}}},{\"type\": \"object\",\"properties\": {\"blackArray\": {\"type\": \"array\",\"items\": { \"type\": \"string\" }}},\"required\": [ \"blackArray\" ]}]}");
+    SchemaDocument s(sd);
+    StringBuffer sb;
+    Writer<StringBuffer> writer(sb);
+    GenericSchemaValidator<SchemaDocument, Writer<StringBuffer> > validator(s, writer);
+    EXPECT_TRUE(validator.StartObject());
+    EXPECT_TRUE(validator.Key("cyanArray2", 10, false));
+    EXPECT_TRUE(validator.StartArray());    
+    EXPECT_TRUE(validator.EndArray(0));    
+    EXPECT_TRUE(validator.Key("blackArray", 10, false));
+    EXPECT_TRUE(validator.StartArray());    
+    EXPECT_TRUE(validator.EndArray(0));    
+    EXPECT_TRUE(validator.EndObject(0));
+    EXPECT_TRUE(validator.IsValid());
+    // Each key/array must appear exactly once in the serialized output.
+    EXPECT_STREQ("{\"cyanArray2\":[],\"blackArray\":[]}", sb.GetString());
+}
+
+// Resolves a $ref that points into a remote schema supplied by the provider;
+// the error's schemaRef must carry the remote URI.
+TEST(SchemaValidator, Ref_remote) {
+    typedef GenericSchemaDocument<Value, MemoryPoolAllocator<> > SchemaDocumentType;
+    RemoteSchemaDocumentProvider<SchemaDocumentType> provider;
+    Document sd;
+    sd.Parse("{\"$ref\": \"http://localhost:1234/subSchemas.json#/integer\"}");
+    SchemaDocumentType s(sd, 0, 0, &provider);
+    typedef GenericSchemaValidator<SchemaDocumentType, BaseReaderHandler<UTF8<> >, MemoryPoolAllocator<> > SchemaValidatorType;
+    typedef GenericPointer<Value, MemoryPoolAllocator<> > PointerType;
+    INVALIDATE_(s, "null", "/integer", "type", "",
+        "{ \"type\": {"
+        "    \"instanceRef\": \"#\","
+        "    \"schemaRef\": \"http://localhost:1234/subSchemas.json#/integer\","
+        "    \"expected\": [\"integer\"], \"actual\": \"null\""
+        "}}",
+        SchemaValidatorType, PointerType);
+}
+
+// Issue #1210: chained $refs across remote documents (x.json -> y.json ->
+// z.json) must resolve when the documents are registered incrementally.
+TEST(SchemaValidator, Ref_remote_issue1210) {
+    class SchemaDocumentProvider : public IRemoteSchemaDocumentProvider {
+        SchemaDocument** collection;
+
+        SchemaDocumentProvider(const SchemaDocumentProvider&);
+        SchemaDocumentProvider& operator=(const SchemaDocumentProvider&);
+
+        public:
+          SchemaDocumentProvider(SchemaDocument** collection) : collection(collection) { }
+          virtual const SchemaDocument* GetRemoteDocument(const char* uri, SizeType length) {
+            // Linear scan; the trailing null entry of the array acts as a
+            // sentinel, so an unknown URI yields 0.
+            int i = 0;
+            while (collection[i] && SchemaDocument::URIType(uri, length) != collection[i]->GetURI()) ++i;
+            return collection[i];
+          }
+    };
+    SchemaDocument* collection[] = { 0, 0, 0 };
+    SchemaDocumentProvider provider(collection);
+
+    Document x, y, z;
+    x.Parse("{\"properties\":{\"country\":{\"$ref\":\"y.json#/definitions/country_remote\"}},\"type\":\"object\"}");
+    y.Parse("{\"definitions\":{\"country_remote\":{\"$ref\":\"z.json#/definitions/country_list\"}}}");
+    z.Parse("{\"definitions\":{\"country_list\":{\"enum\":[\"US\"]}}}");
+
+    // Register bottom-up so each schema can resolve the one below it.
+    SchemaDocument sz(z, "z.json", 6, &provider);
+    collection[0] = &sz;
+    SchemaDocument sy(y, "y.json", 6, &provider);
+    collection[1] = &sy;
+    SchemaDocument sx(x, "x.json", 6, &provider);
+
+    VALIDATE(sx, "{\"country\":\"UK\"}", false);
+    VALIDATE(sx, "{\"country\":\"US\"}", true);
+}
+
+#if defined(_MSC_VER) || defined(__clang__)
+RAPIDJSON_DIAG_POP
+#endif
diff --git a/test/unittest/simdtest.cpp b/test/unittest/simdtest.cpp
new file mode 100644
index 0000000..c60c85b
--- /dev/null
+++ b/test/unittest/simdtest.cpp
@@ -0,0 +1,219 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+// Since Travis CI installs the old Valgrind 3.7.0, which fails with some SSE4.2
+// instructions, the unit tests prefixed with SIMD should be skipped by the Valgrind test.
+
+// __SSE2__ and __SSE4_2__ are recognized by gcc, clang, and the Intel compiler.
+// We use -march=native with gmake to enable -msse2 and -msse4.2, if supported.
+#if defined(__SSE4_2__)
+#  define RAPIDJSON_SSE42
+#elif defined(__SSE2__)
+#  define RAPIDJSON_SSE2
+#elif defined(__ARM_NEON)
+#  define RAPIDJSON_NEON
+#endif
+
+#define RAPIDJSON_NAMESPACE rapidjson_simd
+
+#include "unittest.h"
+
+#include "rapidjson/reader.h"
+#include "rapidjson/writer.h"
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(effc++)
+#endif
+
+using namespace rapidjson_simd;
+
+#ifdef RAPIDJSON_SSE2
+#define SIMD_SUFFIX(name) name##_SSE2
+#elif defined(RAPIDJSON_SSE42)
+#define SIMD_SUFFIX(name) name##_SSE42
+#elif defined(RAPIDJSON_NEON)
+#define SIMD_SUFFIX(name) name##_NEON
+#else
+#define SIMD_SUFFIX(name) name
+#endif
+
+// Builds a 1 KiB whitespace buffer with an 'X' planted every |stride| bytes
+// and checks that SkipWhitespace() stops exactly at each planted character.
+template <typename StreamType>
+void TestSkipWhitespace() {
+    for (size_t stride = 1; stride < 32; stride++) {
+        char buffer[1025];
+        for (size_t j = 0; j < 1024; j++)
+            buffer[j] = " \t\r\n"[j % 4];
+        for (size_t j = 0; j < 1024; j += stride)
+            buffer[j] = 'X';
+        buffer[1024] = '\0';
+
+        StreamType stream(buffer);
+        size_t expectedPos = 0;
+        while (true) {
+            SkipWhitespace(stream);
+            if (stream.Peek() == '\0')
+                break;
+            EXPECT_EQ(expectedPos, stream.Tell());
+            EXPECT_EQ('X', stream.Take());
+            expectedPos += stride;
+        }
+    }
+}
+
+// Runs the whitespace-skipping test over both in-memory stream types.
+TEST(SIMD, SIMD_SUFFIX(SkipWhitespace)) {
+    TestSkipWhitespace<StringStream>();
+    TestSkipWhitespace<InsituStringStream>();
+}
+
+// Same planted-'X' pattern, but through EncodedInputStream over a
+// MemoryStream (note: no NUL terminator in the source buffer).
+TEST(SIMD, SIMD_SUFFIX(SkipWhitespace_EncodedMemoryStream)) {
+    for (size_t step = 1; step < 32; step++) {
+        char buffer[1024];
+        for (size_t i = 0; i < 1024; i++)
+            buffer[i] = " \t\r\n"[i % 4];
+        for (size_t i = 0; i < 1024; i += step)
+            buffer[i] = 'X';
+
+        MemoryStream ms(buffer, 1024);
+        EncodedInputStream<UTF8<>, MemoryStream> s(ms);
+        size_t i = 0;
+        for (;;) {
+            SkipWhitespace(s);
+            if (s.Peek() == '\0')
+                break;
+            // Tell() check intentionally disabled here — presumably the
+            // encoded stream reports offsets differently; TODO(review) confirm.
+            //EXPECT_EQ(i, s.Tell());
+            EXPECT_EQ('X', s.Take());
+            i += step;
+        }
+    }
+}
+
+// Captures the decoded string emitted by the reader so the tests below can
+// inspect the raw copy result.
+struct ScanCopyUnescapedStringHandler : BaseReaderHandler<UTF8<>, ScanCopyUnescapedStringHandler> {
+    bool String(const char* str, size_t length, bool) {
+        // length + 1 also copies the NUL terminator following the string.
+        memcpy(buffer, str, length + 1);
+        return true;
+    }
+    char buffer[1024 + 5 + 32];
+};
+
+// Exercises the SIMD unescaped-string fast path at many buffer alignments
+// (offset) and string lengths (step), with the escape sequence placed either
+// at the end or at the beginning of the string.
+template <unsigned parseFlags, typename StreamType>
+void TestScanCopyUnescapedString() {
+    char buffer[1024u + 5 + 32];
+    char backup[1024u + 5 + 32];
+
+    // Test "ABCDABCD...\\"
+    for (size_t offset = 0; offset < 32; offset++) {
+        for (size_t step = 0; step < 1024; step++) {
+            char* json = buffer + offset;
+            char *p = json;
+            *p++ = '\"';
+            for (size_t i = 0; i < step; i++)
+                *p++ = "ABCD"[i % 4];
+            *p++ = '\\';
+            *p++ = '\\';
+            *p++ = '\"';
+            *p++ = '\0';
+            strcpy(backup, json); // insitu parsing will overwrite buffer, so need to backup first
+
+            StreamType s(json);
+            Reader reader;
+            ScanCopyUnescapedStringHandler h;
+            reader.Parse<parseFlags>(s, h);
+            EXPECT_TRUE(memcmp(h.buffer, backup + 1, step) == 0);
+            EXPECT_EQ('\\', h.buffer[step]);    // escaped
+            EXPECT_EQ('\0', h.buffer[step + 1]);
+        }
+    }
+
+    // Test "\\ABCDABCD..."
+    for (size_t offset = 0; offset < 32; offset++) {
+        for (size_t step = 0; step < 1024; step++) {
+            char* json = buffer + offset;
+            char *p = json;
+            *p++ = '\"';
+            *p++ = '\\';
+            *p++ = '\\';
+            for (size_t i = 0; i < step; i++)
+                *p++ = "ABCD"[i % 4];
+            *p++ = '\"';
+            *p++ = '\0';
+            strcpy(backup, json); // insitu parsing will overwrite buffer, so need to backup first
+
+            StreamType s(json);
+            Reader reader;
+            ScanCopyUnescapedStringHandler h;
+            reader.Parse<parseFlags>(s, h);
+            EXPECT_TRUE(memcmp(h.buffer + 1, backup + 3, step) == 0);
+            EXPECT_EQ('\\', h.buffer[0]);    // escaped
+            EXPECT_EQ('\0', h.buffer[step + 1]);
+        }
+    }
+}
+
+// Runs the unescaped-string copy test for both normal and insitu parsing.
+TEST(SIMD, SIMD_SUFFIX(ScanCopyUnescapedString)) {
+    TestScanCopyUnescapedString<kParseDefaultFlags, StringStream>();
+    TestScanCopyUnescapedString<kParseInsituFlag, InsituStringStream>();
+}
+
+// Writer-side SIMD path: strings of every length/alignment with a single
+// escapable character in the middle must round-trip with correct escaping.
+TEST(SIMD, SIMD_SUFFIX(ScanWriteUnescapedString)) {
+    char buffer[2048 + 1 + 32];
+    for (size_t offset = 0; offset < 32; offset++) {
+        for (size_t step = 0; step < 1024; step++) {
+            char* s = buffer + offset;
+            char* p = s;
+            for (size_t i = 0; i < step; i++)
+                *p++ = "ABCD"[i % 4];
+            // One of NUL, newline, backslash or quote, cycling with step.
+            char escape = "\0\n\\\""[step % 4];
+            *p++ = escape;
+            for (size_t i = 0; i < step; i++)
+                *p++ = "ABCD"[i % 4];
+
+            StringBuffer sb;
+            Writer<StringBuffer> writer(sb);
+            writer.String(s, SizeType(step * 2 + 1));
+            const char* q = sb.GetString();
+            EXPECT_EQ('\"', *q++);
+            for (size_t i = 0; i < step; i++)
+                EXPECT_EQ("ABCD"[i % 4], *q++);
+            // NUL has no short escape and is emitted as \u0000.
+            if (escape == '\0') {
+                EXPECT_EQ('\\', *q++);
+                EXPECT_EQ('u', *q++);
+                EXPECT_EQ('0', *q++);
+                EXPECT_EQ('0', *q++);
+                EXPECT_EQ('0', *q++);
+                EXPECT_EQ('0', *q++);
+            }
+            else if (escape == '\n') {
+                EXPECT_EQ('\\', *q++);
+                EXPECT_EQ('n', *q++);
+            }
+            else if (escape == '\\') {
+                EXPECT_EQ('\\', *q++);
+                EXPECT_EQ('\\', *q++);
+            }
+            else if (escape == '\"') {
+                EXPECT_EQ('\\', *q++);
+                EXPECT_EQ('\"', *q++);
+            }
+            for (size_t i = 0; i < step; i++)
+                EXPECT_EQ("ABCD"[i % 4], *q++);
+            EXPECT_EQ('\"', *q++);
+            EXPECT_EQ('\0', *q++);
+        }
+    }
+}
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_POP
+#endif
diff --git a/test/unittest/strfunctest.cpp b/test/unittest/strfunctest.cpp
new file mode 100644
index 0000000..cc1bb22
--- /dev/null
+++ b/test/unittest/strfunctest.cpp
@@ -0,0 +1,30 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+#include "rapidjson/internal/strfunc.h"
+
+using namespace rapidjson;
+using namespace rapidjson::internal;
+
+// Counts Unicode code points in UTF-8 input; the final case appends a stray
+// continuation byte (0x80) and must therefore be rejected.
+TEST(StrFunc, CountStringCodePoint) {
+    SizeType count;
+    EXPECT_TRUE(CountStringCodePoint<UTF8<> >("", 0, &count));
+    EXPECT_EQ(0u, count);
+    EXPECT_TRUE(CountStringCodePoint<UTF8<> >("Hello", 5, &count));
+    EXPECT_EQ(5u, count);
+    EXPECT_TRUE(CountStringCodePoint<UTF8<> >("\xC2\xA2\xE2\x82\xAC\xF0\x9D\x84\x9E", 9, &count)); // cents euro G-clef
+    EXPECT_EQ(3u, count);
+    EXPECT_FALSE(CountStringCodePoint<UTF8<> >("\xC2\xA2\xE2\x82\xAC\xF0\x9D\x84\x9E\x80", 10, &count));
+}
diff --git a/test/unittest/stringbuffertest.cpp b/test/unittest/stringbuffertest.cpp
new file mode 100644
index 0000000..2e36442
--- /dev/null
+++ b/test/unittest/stringbuffertest.cpp
@@ -0,0 +1,192 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+#include "rapidjson/stringbuffer.h"
+#include "rapidjson/writer.h"
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(c++98-compat)
+#endif
+
+using namespace rapidjson;
+
+// A default-constructed buffer is empty and yields an empty C string.
+TEST(StringBuffer, InitialSize) {
+    StringBuffer buffer;
+    EXPECT_EQ(0u, buffer.GetSize());
+    EXPECT_EQ(0u, buffer.GetLength());
+    EXPECT_STREQ("", buffer.GetString());
+}
+
+// A single Put() appends one character and keeps the string NUL-terminated.
+TEST(StringBuffer, Put) {
+    StringBuffer buffer;
+    buffer.Put('A');
+
+    EXPECT_STREQ("A", buffer.GetString());
+    EXPECT_EQ(1u, buffer.GetLength());
+    EXPECT_EQ(1u, buffer.GetSize());
+}
+
+// Issue #672: PutN() must work with a MemoryPoolAllocator-backed buffer.
+TEST(StringBuffer, PutN_Issue672) {
+    GenericStringBuffer<UTF8<>, MemoryPoolAllocator<> > buffer;
+    EXPECT_EQ(0u, buffer.GetSize());
+    EXPECT_EQ(0u, buffer.GetLength());
+    rapidjson::PutN(buffer, ' ', 1);
+    EXPECT_EQ(1u, buffer.GetSize());
+    EXPECT_EQ(1u, buffer.GetLength());
+}
+
+// Clear() empties the buffer but keeps it usable.
+TEST(StringBuffer, Clear) {
+    StringBuffer buffer;
+    buffer.Put('A');
+    buffer.Put('B');
+    buffer.Put('C');
+    buffer.Clear();
+
+    EXPECT_EQ(0u, buffer.GetSize());
+    EXPECT_EQ(0u, buffer.GetLength());
+    EXPECT_STREQ("", buffer.GetString());
+}
+
+// Push() reserves uninitialized space that counts towards size and length.
+TEST(StringBuffer, Push) {
+    StringBuffer buffer;
+    buffer.Push(5);
+
+    EXPECT_EQ(5u, buffer.GetSize());
+    EXPECT_EQ(5u, buffer.GetLength());
+
+    // Causes sudden expansion to make the stack's capacity equal to size
+    buffer.Push(65536u);
+    EXPECT_EQ(5u + 65536u, buffer.GetSize());
+}
+
+// Fill the buffer with "ABCDE", then discard the last three characters.
+TEST(StringBuffer, Pop) {
+    StringBuffer buffer;
+    const char* text = "ABCDE";
+    for (const char* p = text; *p; ++p)
+        buffer.Put(*p);
+    buffer.Pop(3);
+
+    EXPECT_EQ(2u, buffer.GetSize());
+    EXPECT_EQ(2u, buffer.GetLength());
+    EXPECT_STREQ("AB", buffer.GetString());
+}
+
+// Issue #744: GetLength() counts characters while GetSize() counts bytes.
+TEST(StringBuffer, GetLength_Issue744) {
+    GenericStringBuffer<UTF16<wchar_t> > buffer;
+    buffer.Put('A');
+    buffer.Put('B');
+    buffer.Put('C');
+    EXPECT_EQ(3u * sizeof(wchar_t), buffer.GetSize());
+    EXPECT_EQ(3u, buffer.GetLength());
+}
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+
+#if 0 // Many old compilers do not support these. Turned off temporarily.
+
+#include <type_traits>
+
+// Compile-time checks of StringBuffer's copy/move type traits (currently
+// disabled by the surrounding #if 0 for older compilers).
+TEST(StringBuffer, Traits) {
+    static_assert( std::is_constructible<StringBuffer>::value, "");
+    static_assert( std::is_default_constructible<StringBuffer>::value, "");
+#ifndef _MSC_VER
+    static_assert(!std::is_copy_constructible<StringBuffer>::value, "");
+#endif
+    static_assert( std::is_move_constructible<StringBuffer>::value, "");
+
+    static_assert(!std::is_nothrow_constructible<StringBuffer>::value, "");
+    static_assert(!std::is_nothrow_default_constructible<StringBuffer>::value, "");
+
+#if !defined(_MSC_VER) || _MSC_VER >= 1800
+    static_assert(!std::is_nothrow_copy_constructible<StringBuffer>::value, "");
+    static_assert(!std::is_nothrow_move_constructible<StringBuffer>::value, "");
+#endif
+
+    static_assert( std::is_assignable<StringBuffer,StringBuffer>::value, "");
+#ifndef _MSC_VER
+    static_assert(!std::is_copy_assignable<StringBuffer>::value, "");
+#endif
+    static_assert( std::is_move_assignable<StringBuffer>::value, "");
+
+#if !defined(_MSC_VER) || _MSC_VER >= 1800
+    static_assert(!std::is_nothrow_assignable<StringBuffer, StringBuffer>::value, "");
+#endif
+
+    static_assert(!std::is_nothrow_copy_assignable<StringBuffer>::value, "");
+    static_assert(!std::is_nothrow_move_assignable<StringBuffer>::value, "");
+
+    static_assert( std::is_destructible<StringBuffer>::value, "");
+#ifndef _MSC_VER
+    static_assert(std::is_nothrow_destructible<StringBuffer>::value, "");
+#endif
+}
+
+#endif
+
+// Move construction transfers the contents and leaves the source empty;
+// copy operations are deliberately unavailable.
+TEST(StringBuffer, MoveConstructor) {
+    StringBuffer x;
+    x.Put('A');
+    x.Put('B');
+    x.Put('C');
+    x.Put('D');
+
+    EXPECT_EQ(4u, x.GetSize());
+    EXPECT_EQ(4u, x.GetLength());
+    EXPECT_STREQ("ABCD", x.GetString());
+
+    // StringBuffer y(x); // does not compile (!is_copy_constructible)
+    StringBuffer y(std::move(x));
+    EXPECT_EQ(0u, x.GetSize());
+    EXPECT_EQ(0u, x.GetLength());
+    EXPECT_EQ(4u, y.GetSize());
+    EXPECT_EQ(4u, y.GetLength());
+    EXPECT_STREQ("ABCD", y.GetString());
+
+    // StringBuffer z = y; // does not compile (!is_copy_assignable)
+    StringBuffer z = std::move(y);
+    EXPECT_EQ(0u, y.GetSize());
+    EXPECT_EQ(0u, y.GetLength());
+    EXPECT_EQ(4u, z.GetSize());
+    EXPECT_EQ(4u, z.GetLength());
+    EXPECT_STREQ("ABCD", z.GetString());
+}
+
+// Move assignment transfers the contents and leaves the source empty.
+TEST(StringBuffer, MoveAssignment) {
+    StringBuffer x;
+    x.Put('A');
+    x.Put('B');
+    x.Put('C');
+    x.Put('D');
+
+    EXPECT_EQ(4u, x.GetSize());
+    EXPECT_EQ(4u, x.GetLength());
+    EXPECT_STREQ("ABCD", x.GetString());
+
+    StringBuffer y;
+    // y = x; // does not compile (!is_copy_assignable)
+    y = std::move(x);
+    // Assert the same post-move state as the MoveConstructor test above:
+    // source fully emptied, target owning all four characters.
+    EXPECT_EQ(0u, x.GetSize());
+    EXPECT_EQ(0u, x.GetLength());
+    EXPECT_EQ(4u, y.GetSize());
+    EXPECT_EQ(4u, y.GetLength());
+    EXPECT_STREQ("ABCD", y.GetString());
+}
+
+#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS
+
+#ifdef __clang__
+RAPIDJSON_DIAG_POP
+#endif
diff --git a/test/unittest/strtodtest.cpp b/test/unittest/strtodtest.cpp
new file mode 100644
index 0000000..807f887
--- /dev/null
+++ b/test/unittest/strtodtest.cpp
@@ -0,0 +1,132 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+
+#include "rapidjson/internal/strtod.h"
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(unreachable-code)
+#endif
+
+#define BIGINTEGER_LITERAL(s) BigInteger(s, sizeof(s) - 1)
+
+using namespace rapidjson::internal;
+
+TEST(Strtod, CheckApproximationCase) {
+    static const int kSignificandSize = 52;
+    static const int kExponentBias = 0x3FF;
+    static const uint64_t kExponentMask = RAPIDJSON_UINT64_C2(0x7FF00000, 0x00000000);
+    static const uint64_t kSignificandMask = RAPIDJSON_UINT64_C2(0x000FFFFF, 0xFFFFFFFF);
+    static const uint64_t kHiddenBit = RAPIDJSON_UINT64_C2(0x00100000, 0x00000000);
+
+    // http://www.exploringbinary.com/using-integers-to-check-a-floating-point-approximation/
+    // Let b = 0x1.465a72e467d88p-149
+    //       = 5741268244528520 x 2^-201
+    union {
+        double d;
+        uint64_t u;
+    }u;
+    u.u = 0x465a72e467d88 | ((static_cast<uint64_t>(-149 + kExponentBias)) << kSignificandSize);
+    const double b = u.d;
+    const uint64_t bInt = (u.u & kSignificandMask) | kHiddenBit;
+    const int bExp = static_cast<int>(((u.u & kExponentMask) >> kSignificandSize) - kExponentBias - kSignificandSize);
+    EXPECT_DOUBLE_EQ(1.7864e-45, b);
+    EXPECT_EQ(RAPIDJSON_UINT64_C2(0x001465a7, 0x2e467d88), bInt);
+    EXPECT_EQ(-201, bExp);
+
+    // Let d = 17864 x 10^-49
+    const char dInt[] = "17864";
+    const int dExp = -49;
+
+    // Let h = 2^(bExp-1)
+    const int hExp = bExp - 1;
+    EXPECT_EQ(-202, hExp);
+
+    int dS_Exp2 = 0;
+    int dS_Exp5 = 0;
+    int bS_Exp2 = 0;
+    int bS_Exp5 = 0;
+    int hS_Exp2 = 0;
+    int hS_Exp5 = 0;
+
+    // Adjust for decimal exponent
+    if (dExp >= 0) {
+        dS_Exp2 += dExp;
+        dS_Exp5 += dExp;
+    }
+    else {
+        bS_Exp2 -= dExp;
+        bS_Exp5 -= dExp;
+        hS_Exp2 -= dExp;
+        hS_Exp5 -= dExp;
+    }
+
+    // Adjust for binary exponent
+    if (bExp >= 0)
+        bS_Exp2 += bExp;
+    else {
+        dS_Exp2 -= bExp;
+        hS_Exp2 -= bExp;
+    }
+
+    // Adjust for half ulp exponent
+    if (hExp >= 0)
+        hS_Exp2 += hExp;
+    else {
+        dS_Exp2 -= hExp;
+        bS_Exp2 -= hExp;
+    }
+
+    // Remove common power of two factor from all three scaled values
+    int common_Exp2 = (std::min)(dS_Exp2, (std::min)(bS_Exp2, hS_Exp2));
+    dS_Exp2 -= common_Exp2;
+    bS_Exp2 -= common_Exp2;
+    hS_Exp2 -= common_Exp2;
+
+    EXPECT_EQ(153, dS_Exp2);
+    EXPECT_EQ(0, dS_Exp5);
+    EXPECT_EQ(1, bS_Exp2);
+    EXPECT_EQ(49, bS_Exp5);
+    EXPECT_EQ(0, hS_Exp2);
+    EXPECT_EQ(49, hS_Exp5);
+
+    BigInteger dS = BIGINTEGER_LITERAL(dInt);
+    dS.MultiplyPow5(static_cast<unsigned>(dS_Exp5)) <<= static_cast<size_t>(dS_Exp2);
+
+    BigInteger bS(bInt);
+    bS.MultiplyPow5(static_cast<unsigned>(bS_Exp5)) <<= static_cast<size_t>(bS_Exp2);
+
+    BigInteger hS(1);
+    hS.MultiplyPow5(static_cast<unsigned>(hS_Exp5)) <<= static_cast<size_t>(hS_Exp2);
+
+    EXPECT_TRUE(BIGINTEGER_LITERAL("203970822259994138521801764465966248930731085529088") == dS);
+    EXPECT_TRUE(BIGINTEGER_LITERAL("203970822259994122305215569213032722473144531250000") == bS);
+    EXPECT_TRUE(BIGINTEGER_LITERAL("17763568394002504646778106689453125") == hS);
+
+    EXPECT_EQ(1, dS.Compare(bS));
+    
+    BigInteger delta(0);
+    EXPECT_FALSE(dS.Difference(bS, &delta));
+    EXPECT_TRUE(BIGINTEGER_LITERAL("16216586195252933526457586554279088") == delta);
+    EXPECT_TRUE(bS.Difference(dS, &delta));
+    EXPECT_TRUE(BIGINTEGER_LITERAL("16216586195252933526457586554279088") == delta);
+
+    EXPECT_EQ(-1, delta.Compare(hS));
+}
+
+#ifdef __clang__
+RAPIDJSON_DIAG_POP
+#endif
diff --git a/test/unittest/unittest.cpp b/test/unittest/unittest.cpp
new file mode 100644
index 0000000..b754563
--- /dev/null
+++ b/test/unittest/unittest.cpp
@@ -0,0 +1,51 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+#include "rapidjson/rapidjson.h"
+
+#ifdef __clang__
+#pragma GCC diagnostic push
+#if __has_warning("-Wdeprecated")
+#pragma GCC diagnostic ignored "-Wdeprecated"
+#endif
+#endif
+
+AssertException::~AssertException() throw() {}
+
+#ifdef __clang__
+#pragma GCC diagnostic pop
+#endif
+
+int main(int argc, char **argv) {
+    ::testing::InitGoogleTest(&argc, argv);
+
+    std::cout << "RapidJSON v" << RAPIDJSON_VERSION_STRING << std::endl;
+
+#ifdef _MSC_VER
+    _CrtMemState memoryState = { 0 };
+    (void)memoryState;
+    _CrtMemCheckpoint(&memoryState);
+    //_CrtSetBreakAlloc(X);
+    //void *testWhetherMemoryLeakDetectionWorks = malloc(1);
+#endif
+
+    int ret = RUN_ALL_TESTS();
+
+#ifdef _MSC_VER
+    // Current gtest constantly leaks 2 blocks at exit
+    _CrtMemDumpAllObjectsSince(&memoryState);
+#endif
+    return ret;
+}
diff --git a/test/unittest/unittest.h b/test/unittest/unittest.h
new file mode 100644
index 0000000..84c1b73
--- /dev/null
+++ b/test/unittest/unittest.h
@@ -0,0 +1,140 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#ifndef UNITTEST_H_
+#define UNITTEST_H_
+
+// gtest indirectly included inttypes.h, without __STDC_CONSTANT_MACROS.
+#ifndef __STDC_CONSTANT_MACROS
+#ifdef __clang__
+#pragma GCC diagnostic push
+#if __has_warning("-Wreserved-id-macro")
+#pragma GCC diagnostic ignored "-Wreserved-id-macro"
+#endif
+#endif
+
+#  define __STDC_CONSTANT_MACROS 1 // required by C++ standard
+
+#ifdef __clang__
+#pragma GCC diagnostic pop
+#endif
+#endif
+
+#ifdef _MSC_VER
+#define _CRTDBG_MAP_ALLOC
+#include <crtdbg.h>
+#pragma warning(disable : 4996) // 'function': was declared deprecated
+#endif
+
+#if defined(__clang__) || defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2))
+#if defined(__clang__) || (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
+#pragma GCC diagnostic push
+#endif
+#pragma GCC diagnostic ignored "-Weffc++"
+#endif
+
+#include "gtest/gtest.h"
+#include <stdexcept>
+
+#if defined(__clang__) || defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
+#pragma GCC diagnostic pop
+#endif
+
+#ifdef __clang__
+// All TEST() macro generated this warning, disable globally
+#pragma GCC diagnostic ignored "-Wglobal-constructors"
+#endif
+
+template <typename Ch>
+inline unsigned StrLen(const Ch* s) {
+    const Ch* p = s;
+    while (*p) p++;
+    return unsigned(p - s);
+}
+
+template<typename Ch>
+inline int StrCmp(const Ch* s1, const Ch* s2) {
+    while(*s1 && (*s1 == *s2)) { s1++; s2++; }
+    return static_cast<unsigned>(*s1) < static_cast<unsigned>(*s2) ? -1 : static_cast<unsigned>(*s1) > static_cast<unsigned>(*s2);
+}
+
+template <typename Ch>
+inline Ch* StrDup(const Ch* str) {
+    size_t bufferSize = sizeof(Ch) * (StrLen(str) + 1);
+    Ch* buffer = static_cast<Ch*>(malloc(bufferSize));
+    memcpy(buffer, str, bufferSize);
+    return buffer;
+}
+
+inline FILE* TempFile(char *filename) {
+#if defined(__WIN32__) || defined(_MSC_VER)
+    filename = tmpnam(filename);
+
+    // For Visual Studio, tmpnam() adds a backslash in front. Remove it.
+    if (filename[0] == '\\')
+        for (int i = 0; filename[i] != '\0'; i++)
+            filename[i] = filename[i + 1];
+        
+    return fopen(filename, "wb");
+#else
+    strcpy(filename, "/tmp/fileXXXXXX");
+    int fd = mkstemp(filename);
+    return fdopen(fd, "w");
+#endif
+}
+
+// Use exception for catching assert
+#ifdef _MSC_VER
+#pragma warning(disable : 4127)
+#endif
+
+#ifdef __clang__
+#pragma GCC diagnostic push
+#if __has_warning("-Wdeprecated")
+#pragma GCC diagnostic ignored "-Wdeprecated"
+#endif
+#endif
+
+class AssertException : public std::logic_error {
+public:
+    AssertException(const char* w) : std::logic_error(w) {}
+    AssertException(const AssertException& rhs) : std::logic_error(rhs) {}
+    virtual ~AssertException() throw();
+};
+
+#ifdef __clang__
+#pragma GCC diagnostic pop
+#endif
+
+// Not using noexcept for testing RAPIDJSON_ASSERT()
+#define RAPIDJSON_HAS_CXX11_NOEXCEPT 0
+
+#ifndef RAPIDJSON_ASSERT
+#define RAPIDJSON_ASSERT(x) (!(x) ? throw AssertException(RAPIDJSON_STRINGIFY(x)) : (void)0u)
+#endif
+
+class Random {
+public:
+    Random(unsigned seed = 0) : mSeed(seed) {}
+
+    unsigned operator()() {
+        mSeed = 214013 * mSeed + 2531011;
+        return mSeed;
+    }
+
+private:
+    unsigned mSeed;
+};
+
+#endif // UNITTEST_H_
diff --git a/test/unittest/valuetest.cpp b/test/unittest/valuetest.cpp
new file mode 100644
index 0000000..4a16f7d
--- /dev/null
+++ b/test/unittest/valuetest.cpp
@@ -0,0 +1,1851 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+#include "rapidjson/document.h"
+#include <algorithm>
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(c++98-compat)
+#endif
+
+using namespace rapidjson;
+
+TEST(Value, Size) {
+    if (sizeof(SizeType) == 4) {
+#if RAPIDJSON_48BITPOINTER_OPTIMIZATION
+        EXPECT_EQ(16u, sizeof(Value));
+#elif RAPIDJSON_64BIT
+        EXPECT_EQ(24u, sizeof(Value));
+#else
+        EXPECT_EQ(16u, sizeof(Value));
+#endif
+    }
+}
+
+TEST(Value, DefaultConstructor) {
+    Value x;
+    EXPECT_EQ(kNullType, x.GetType());
+    EXPECT_TRUE(x.IsNull());
+
+    //std::cout << "sizeof(Value): " << sizeof(x) << std::endl;
+}
+
+// Should not pass compilation
+//TEST(Value, copy_constructor) {
+//  Value x(1234);
+//  Value y = x;
+//}
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+
+#if 0 // Many old compilers do not support these. Turn it off temporarily.
+
+#include <type_traits>
+
+TEST(Value, Traits) {
+    typedef GenericValue<UTF8<>, CrtAllocator> Value;
+    static_assert(std::is_constructible<Value>::value, "");
+    static_assert(std::is_default_constructible<Value>::value, "");
+#ifndef _MSC_VER
+    static_assert(!std::is_copy_constructible<Value>::value, "");
+#endif
+    static_assert(std::is_move_constructible<Value>::value, "");
+
+#ifndef _MSC_VER
+    static_assert(std::is_nothrow_constructible<Value>::value, "");
+    static_assert(std::is_nothrow_default_constructible<Value>::value, "");
+    static_assert(!std::is_nothrow_copy_constructible<Value>::value, "");
+    static_assert(std::is_nothrow_move_constructible<Value>::value, "");
+#endif
+
+    static_assert(std::is_assignable<Value,Value>::value, "");
+#ifndef _MSC_VER
+    static_assert(!std::is_copy_assignable<Value>::value, "");
+#endif
+    static_assert(std::is_move_assignable<Value>::value, "");
+
+#ifndef _MSC_VER
+    static_assert(std::is_nothrow_assignable<Value, Value>::value, "");
+#endif
+    static_assert(!std::is_nothrow_copy_assignable<Value>::value, "");
+#ifndef _MSC_VER
+    static_assert(std::is_nothrow_move_assignable<Value>::value, "");
+#endif
+
+    static_assert(std::is_destructible<Value>::value, "");
+#ifndef _MSC_VER
+    static_assert(std::is_nothrow_destructible<Value>::value, "");
+#endif
+}
+
+#endif
+
+TEST(Value, MoveConstructor) {
+    typedef GenericValue<UTF8<>, CrtAllocator> V;
+    V::AllocatorType allocator;
+
+    V x((V(kArrayType)));
+    x.Reserve(4u, allocator);
+    x.PushBack(1, allocator).PushBack(2, allocator).PushBack(3, allocator).PushBack(4, allocator);
+    EXPECT_TRUE(x.IsArray());
+    EXPECT_EQ(4u, x.Size());
+
+    // Value y(x); // does not compile (!is_copy_constructible)
+    V y(std::move(x));
+    EXPECT_TRUE(x.IsNull());
+    EXPECT_TRUE(y.IsArray());
+    EXPECT_EQ(4u, y.Size());
+
+    // Value z = y; // does not compile (!is_copy_assignable)
+    V z = std::move(y);
+    EXPECT_TRUE(y.IsNull());
+    EXPECT_TRUE(z.IsArray());
+    EXPECT_EQ(4u, z.Size());
+}
+
+#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS
+
+TEST(Value, AssignmentOperator) {
+    Value x(1234);
+    Value y;
+    y = x;
+    EXPECT_TRUE(x.IsNull());    // move semantic
+    EXPECT_EQ(1234, y.GetInt());
+
+    y = 5678;
+    EXPECT_TRUE(y.IsInt());
+    EXPECT_EQ(5678, y.GetInt());
+
+    x = "Hello";
+    EXPECT_TRUE(x.IsString());
+    EXPECT_STREQ(x.GetString(),"Hello");
+
+    y = StringRef(x.GetString(),x.GetStringLength());
+    EXPECT_TRUE(y.IsString());
+    EXPECT_EQ(y.GetString(),x.GetString());
+    EXPECT_EQ(y.GetStringLength(),x.GetStringLength());
+
+    static char mstr[] = "mutable";
+    // y = mstr; // should not compile
+    y = StringRef(mstr);
+    EXPECT_TRUE(y.IsString());
+    EXPECT_EQ(y.GetString(),mstr);
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+    // C++11 move assignment
+    x = Value("World");
+    EXPECT_TRUE(x.IsString());
+    EXPECT_STREQ("World", x.GetString());
+
+    x = std::move(y);
+    EXPECT_TRUE(y.IsNull());
+    EXPECT_TRUE(x.IsString());
+    EXPECT_EQ(x.GetString(), mstr);
+
+    y = std::move(Value().SetInt(1234));
+    EXPECT_TRUE(y.IsInt());
+    EXPECT_EQ(1234, y);
+#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS
+}
+
+template <typename A, typename B> 
+void TestEqual(const A& a, const B& b) {
+    EXPECT_TRUE (a == b);
+    EXPECT_FALSE(a != b);
+    EXPECT_TRUE (b == a);
+    EXPECT_FALSE(b != a);
+}
+
+template <typename A, typename B> 
+void TestUnequal(const A& a, const B& b) {
+    EXPECT_FALSE(a == b);
+    EXPECT_TRUE (a != b);
+    EXPECT_FALSE(b == a);
+    EXPECT_TRUE (b != a);
+}
+
+TEST(Value, EqualtoOperator) {
+    Value::AllocatorType allocator;
+    Value x(kObjectType);
+    x.AddMember("hello", "world", allocator)
+        .AddMember("t", Value(true).Move(), allocator)
+        .AddMember("f", Value(false).Move(), allocator)
+        .AddMember("n", Value(kNullType).Move(), allocator)
+        .AddMember("i", 123, allocator)
+        .AddMember("pi", 3.14, allocator)
+        .AddMember("a", Value(kArrayType).Move().PushBack(1, allocator).PushBack(2, allocator).PushBack(3, allocator), allocator);
+
+    // Test templated operator==() and operator!=()
+    TestEqual(x["hello"], "world");
+    const char* cc = "world";
+    TestEqual(x["hello"], cc);
+    char* c = strdup("world");
+    TestEqual(x["hello"], c);
+    free(c);
+
+    TestEqual(x["t"], true);
+    TestEqual(x["f"], false);
+    TestEqual(x["i"], 123);
+    TestEqual(x["pi"], 3.14);
+
+    // Test operator==() (including different allocators)
+    CrtAllocator crtAllocator;
+    GenericValue<UTF8<>, CrtAllocator> y;
+    GenericDocument<UTF8<>, CrtAllocator> z(&crtAllocator);
+    y.CopyFrom(x, crtAllocator);
+    z.CopyFrom(y, z.GetAllocator());
+    TestEqual(x, y);
+    TestEqual(y, z);
+    TestEqual(z, x);
+
+    // Swapping member order should be fine.
+    EXPECT_TRUE(y.RemoveMember("t"));
+    TestUnequal(x, y);
+    TestUnequal(z, y);
+    EXPECT_TRUE(z.RemoveMember("t"));
+    TestUnequal(x, z);
+    TestEqual(y, z);
+    y.AddMember("t", false, crtAllocator);
+    z.AddMember("t", false, z.GetAllocator());
+    TestUnequal(x, y);
+    TestUnequal(z, x);
+    y["t"] = true;
+    z["t"] = true;
+    TestEqual(x, y);
+    TestEqual(y, z);
+    TestEqual(z, x);
+
+    // Swapping element order is not OK
+    x["a"][0].Swap(x["a"][1]);
+    TestUnequal(x, y);
+    x["a"][0].Swap(x["a"][1]);
+    TestEqual(x, y);
+
+    // Array of different size
+    x["a"].PushBack(4, allocator);
+    TestUnequal(x, y);
+    x["a"].PopBack();
+    TestEqual(x, y);
+
+    // Issue #129: compare Uint64
+    x.SetUint64(RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0xFFFFFFF0));
+    y.SetUint64(RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0xFFFFFFFF));
+    TestUnequal(x, y);
+}
+
+template <typename Value>
+void TestCopyFrom() {
+    typename Value::AllocatorType a;
+    Value v1(1234);
+    Value v2(v1, a); // deep copy constructor
+    EXPECT_TRUE(v1.GetType() == v2.GetType());
+    EXPECT_EQ(v1.GetInt(), v2.GetInt());
+
+    v1.SetString("foo");
+    v2.CopyFrom(v1, a);
+    EXPECT_TRUE(v1.GetType() == v2.GetType());
+    EXPECT_STREQ(v1.GetString(), v2.GetString());
+    EXPECT_EQ(v1.GetString(), v2.GetString()); // string NOT copied
+
+    v1.SetString("bar", a); // copy string
+    v2.CopyFrom(v1, a);
+    EXPECT_TRUE(v1.GetType() == v2.GetType());
+    EXPECT_STREQ(v1.GetString(), v2.GetString());
+    EXPECT_NE(v1.GetString(), v2.GetString()); // string copied
+
+
+    v1.SetArray().PushBack(1234, a);
+    v2.CopyFrom(v1, a);
+    EXPECT_TRUE(v2.IsArray());
+    EXPECT_EQ(v1.Size(), v2.Size());
+
+    v1.PushBack(Value().SetString("foo", a), a); // push string copy
+    EXPECT_TRUE(v1.Size() != v2.Size());
+    v2.CopyFrom(v1, a);
+    EXPECT_TRUE(v1.Size() == v2.Size());
+    EXPECT_STREQ(v1[1].GetString(), v2[1].GetString());
+    EXPECT_NE(v1[1].GetString(), v2[1].GetString()); // string got copied
+}
+
+TEST(Value, CopyFrom) {
+    TestCopyFrom<Value>();
+    TestCopyFrom<GenericValue<UTF8<>, CrtAllocator> >();
+}
+
+TEST(Value, Swap) {
+    Value v1(1234);
+    Value v2(kObjectType);
+
+    EXPECT_EQ(&v1, &v1.Swap(v2));
+    EXPECT_TRUE(v1.IsObject());
+    EXPECT_TRUE(v2.IsInt());
+    EXPECT_EQ(1234, v2.GetInt());
+
+    // testing std::swap compatibility
+    using std::swap;
+    swap(v1, v2);
+    EXPECT_TRUE(v1.IsInt());
+    EXPECT_TRUE(v2.IsObject());
+}
+
+TEST(Value, Null) {
+    // Default constructor
+    Value x;
+    EXPECT_EQ(kNullType, x.GetType());
+    EXPECT_TRUE(x.IsNull());
+
+    EXPECT_FALSE(x.IsTrue());
+    EXPECT_FALSE(x.IsFalse());
+    EXPECT_FALSE(x.IsNumber());
+    EXPECT_FALSE(x.IsString());
+    EXPECT_FALSE(x.IsObject());
+    EXPECT_FALSE(x.IsArray());
+
+    // Constructor with type
+    Value y(kNullType);
+    EXPECT_TRUE(y.IsNull());
+
+    // SetNull();
+    Value z(true);
+    z.SetNull();
+    EXPECT_TRUE(z.IsNull());
+}
+
+TEST(Value, True) {
+    // Constructor with bool
+    Value x(true);
+    EXPECT_EQ(kTrueType, x.GetType());
+    EXPECT_TRUE(x.GetBool());
+    EXPECT_TRUE(x.IsBool());
+    EXPECT_TRUE(x.IsTrue());
+
+    EXPECT_FALSE(x.IsNull());
+    EXPECT_FALSE(x.IsFalse());
+    EXPECT_FALSE(x.IsNumber());
+    EXPECT_FALSE(x.IsString());
+    EXPECT_FALSE(x.IsObject());
+    EXPECT_FALSE(x.IsArray());
+
+    // Constructor with type
+    Value y(kTrueType);
+    EXPECT_TRUE(y.IsTrue());
+
+    // SetBool()
+    Value z;
+    z.SetBool(true);
+    EXPECT_TRUE(z.IsTrue());
+
+    // Templated functions
+    EXPECT_TRUE(z.Is<bool>());
+    EXPECT_TRUE(z.Get<bool>());
+    EXPECT_FALSE(z.Set<bool>(false).Get<bool>());
+    EXPECT_TRUE(z.Set(true).Get<bool>());
+}
+
+TEST(Value, False) {
+    // Constructor with bool
+    Value x(false);
+    EXPECT_EQ(kFalseType, x.GetType());
+    EXPECT_TRUE(x.IsBool());
+    EXPECT_TRUE(x.IsFalse());
+
+    EXPECT_FALSE(x.IsNull());
+    EXPECT_FALSE(x.IsTrue());
+    EXPECT_FALSE(x.GetBool());
+    //EXPECT_FALSE((bool)x);
+    EXPECT_FALSE(x.IsNumber());
+    EXPECT_FALSE(x.IsString());
+    EXPECT_FALSE(x.IsObject());
+    EXPECT_FALSE(x.IsArray());
+
+    // Constructor with type
+    Value y(kFalseType);
+    EXPECT_TRUE(y.IsFalse());
+
+    // SetBool()
+    Value z;
+    z.SetBool(false);
+    EXPECT_TRUE(z.IsFalse());
+}
+
+TEST(Value, Int) {
+    // Constructor with int
+    Value x(1234);
+    EXPECT_EQ(kNumberType, x.GetType());
+    EXPECT_EQ(1234, x.GetInt());
+    EXPECT_EQ(1234u, x.GetUint());
+    EXPECT_EQ(1234, x.GetInt64());
+    EXPECT_EQ(1234u, x.GetUint64());
+    EXPECT_NEAR(1234.0, x.GetDouble(), 0.0);
+    //EXPECT_EQ(1234, (int)x);
+    //EXPECT_EQ(1234, (unsigned)x);
+    //EXPECT_EQ(1234, (int64_t)x);
+    //EXPECT_EQ(1234, (uint64_t)x);
+    //EXPECT_EQ(1234, (double)x);
+    EXPECT_TRUE(x.IsNumber());
+    EXPECT_TRUE(x.IsInt());
+    EXPECT_TRUE(x.IsUint());
+    EXPECT_TRUE(x.IsInt64());
+    EXPECT_TRUE(x.IsUint64());
+
+    EXPECT_FALSE(x.IsDouble());
+    EXPECT_FALSE(x.IsFloat());
+    EXPECT_FALSE(x.IsNull());
+    EXPECT_FALSE(x.IsBool());
+    EXPECT_FALSE(x.IsFalse());
+    EXPECT_FALSE(x.IsTrue());
+    EXPECT_FALSE(x.IsString());
+    EXPECT_FALSE(x.IsObject());
+    EXPECT_FALSE(x.IsArray());
+
+    Value nx(-1234);
+    EXPECT_EQ(-1234, nx.GetInt());
+    EXPECT_EQ(-1234, nx.GetInt64());
+    EXPECT_TRUE(nx.IsInt());
+    EXPECT_TRUE(nx.IsInt64());
+    EXPECT_FALSE(nx.IsUint());
+    EXPECT_FALSE(nx.IsUint64());
+
+    // Constructor with type
+    Value y(kNumberType);
+    EXPECT_TRUE(y.IsNumber());
+    EXPECT_TRUE(y.IsInt());
+    EXPECT_EQ(0, y.GetInt());
+
+    // SetInt()
+    Value z;
+    z.SetInt(1234);
+    EXPECT_EQ(1234, z.GetInt());
+
+    // operator=(int)
+    z = 5678;
+    EXPECT_EQ(5678, z.GetInt());
+
+    // Templated functions
+    EXPECT_TRUE(z.Is<int>());
+    EXPECT_EQ(5678, z.Get<int>());
+    EXPECT_EQ(5679, z.Set(5679).Get<int>());
+    EXPECT_EQ(5680, z.Set<int>(5680).Get<int>());
+
+#ifdef _MSC_VER
+    // long as int on MSC platforms
+    RAPIDJSON_STATIC_ASSERT(sizeof(long) == sizeof(int));
+    z.SetInt(2222);
+    EXPECT_TRUE(z.Is<long>());
+    EXPECT_EQ(2222l, z.Get<long>());
+    EXPECT_EQ(3333l, z.Set(3333l).Get<long>());
+    EXPECT_EQ(4444l, z.Set<long>(4444l).Get<long>());
+    EXPECT_TRUE(z.IsInt());
+#endif
+}
+
+TEST(Value, Uint) {
+    // Constructor with int
+    Value x(1234u);
+    EXPECT_EQ(kNumberType, x.GetType());
+    EXPECT_EQ(1234, x.GetInt());
+    EXPECT_EQ(1234u, x.GetUint());
+    EXPECT_EQ(1234, x.GetInt64());
+    EXPECT_EQ(1234u, x.GetUint64());
+    EXPECT_TRUE(x.IsNumber());
+    EXPECT_TRUE(x.IsInt());
+    EXPECT_TRUE(x.IsUint());
+    EXPECT_TRUE(x.IsInt64());
+    EXPECT_TRUE(x.IsUint64());
+    EXPECT_NEAR(1234.0, x.GetDouble(), 0.0);   // Number can always be cast as double but !IsDouble().
+
+    EXPECT_FALSE(x.IsDouble());
+    EXPECT_FALSE(x.IsFloat());
+    EXPECT_FALSE(x.IsNull());
+    EXPECT_FALSE(x.IsBool());
+    EXPECT_FALSE(x.IsFalse());
+    EXPECT_FALSE(x.IsTrue());
+    EXPECT_FALSE(x.IsString());
+    EXPECT_FALSE(x.IsObject());
+    EXPECT_FALSE(x.IsArray());
+
+    // SetUint()
+    Value z;
+    z.SetUint(1234);
+    EXPECT_EQ(1234u, z.GetUint());
+
+    // operator=(unsigned)
+    z = 5678u;
+    EXPECT_EQ(5678u, z.GetUint());
+
+    z = 2147483648u;    // 2^31, cannot cast as int
+    EXPECT_EQ(2147483648u, z.GetUint());
+    EXPECT_FALSE(z.IsInt());
+    EXPECT_TRUE(z.IsInt64());   // Issue 41: Incorrect parsing of unsigned int number types
+
+    // Templated functions
+    EXPECT_TRUE(z.Is<unsigned>());
+    EXPECT_EQ(2147483648u, z.Get<unsigned>());
+    EXPECT_EQ(2147483649u, z.Set(2147483649u).Get<unsigned>());
+    EXPECT_EQ(2147483650u, z.Set<unsigned>(2147483650u).Get<unsigned>());
+
+#ifdef _MSC_VER
+    // unsigned long as unsigned on MSC platforms
+    RAPIDJSON_STATIC_ASSERT(sizeof(unsigned long) == sizeof(unsigned));
+    z.SetUint(2222);
+    EXPECT_TRUE(z.Is<unsigned long>());
+    EXPECT_EQ(2222ul, z.Get<unsigned long>());
+    EXPECT_EQ(3333ul, z.Set(3333ul).Get<unsigned long>());
+    EXPECT_EQ(4444ul, z.Set<unsigned long>(4444ul).Get<unsigned long>());
+    EXPECT_TRUE(x.IsUint());
+#endif
+}
+
+TEST(Value, Int64) {
+    // Constructor with int
+    Value x(int64_t(1234));
+    EXPECT_EQ(kNumberType, x.GetType());
+    EXPECT_EQ(1234, x.GetInt());
+    EXPECT_EQ(1234u, x.GetUint());
+    EXPECT_EQ(1234, x.GetInt64());
+    EXPECT_EQ(1234u, x.GetUint64());
+    EXPECT_TRUE(x.IsNumber());
+    EXPECT_TRUE(x.IsInt());
+    EXPECT_TRUE(x.IsUint());
+    EXPECT_TRUE(x.IsInt64());
+    EXPECT_TRUE(x.IsUint64());
+
+    EXPECT_FALSE(x.IsDouble());
+    EXPECT_FALSE(x.IsFloat());
+    EXPECT_FALSE(x.IsNull());
+    EXPECT_FALSE(x.IsBool());
+    EXPECT_FALSE(x.IsFalse());
+    EXPECT_FALSE(x.IsTrue());
+    EXPECT_FALSE(x.IsString());
+    EXPECT_FALSE(x.IsObject());
+    EXPECT_FALSE(x.IsArray());
+
+    Value nx(int64_t(-1234));
+    EXPECT_EQ(-1234, nx.GetInt());
+    EXPECT_EQ(-1234, nx.GetInt64());
+    EXPECT_TRUE(nx.IsInt());
+    EXPECT_TRUE(nx.IsInt64());
+    EXPECT_FALSE(nx.IsUint());
+    EXPECT_FALSE(nx.IsUint64());
+
+    // SetInt64()
+    Value z;
+    z.SetInt64(1234);
+    EXPECT_EQ(1234, z.GetInt64());
+
+    z.SetInt64(2147483648u);   // 2^31, cannot cast as int
+    EXPECT_FALSE(z.IsInt());
+    EXPECT_TRUE(z.IsUint());
+    EXPECT_NEAR(2147483648.0, z.GetDouble(), 0.0);
+
+    z.SetInt64(int64_t(4294967295u) + 1);   // 2^32, cannot cast as uint
+    EXPECT_FALSE(z.IsInt());
+    EXPECT_FALSE(z.IsUint());
+    EXPECT_NEAR(4294967296.0, z.GetDouble(), 0.0);
+
+    z.SetInt64(-int64_t(2147483648u) - 1);   // -2^31-1, cannot cast as int
+    EXPECT_FALSE(z.IsInt());
+    EXPECT_NEAR(-2147483649.0, z.GetDouble(), 0.0);
+
+    int64_t i = static_cast<int64_t>(RAPIDJSON_UINT64_C2(0x80000000, 00000000));
+    z.SetInt64(i);
+    EXPECT_DOUBLE_EQ(-9223372036854775808.0, z.GetDouble());
+
+    // Templated functions
+    EXPECT_TRUE(z.Is<int64_t>());
+    EXPECT_EQ(i, z.Get<int64_t>());
+#if 0 // signed integer underflow is undefined behaviour
+    EXPECT_EQ(i - 1, z.Set(i - 1).Get<int64_t>());
+    EXPECT_EQ(i - 2, z.Set<int64_t>(i - 2).Get<int64_t>());
+#endif
+}
+
+TEST(Value, Uint64) {
+    // Constructor with int
+    Value x(uint64_t(1234));
+    EXPECT_EQ(kNumberType, x.GetType());
+    EXPECT_EQ(1234, x.GetInt());
+    EXPECT_EQ(1234u, x.GetUint());
+    EXPECT_EQ(1234, x.GetInt64());
+    EXPECT_EQ(1234u, x.GetUint64());
+    EXPECT_TRUE(x.IsNumber());
+    EXPECT_TRUE(x.IsInt());
+    EXPECT_TRUE(x.IsUint());
+    EXPECT_TRUE(x.IsInt64());
+    EXPECT_TRUE(x.IsUint64());
+
+    EXPECT_FALSE(x.IsDouble());
+    EXPECT_FALSE(x.IsFloat());
+    EXPECT_FALSE(x.IsNull());
+    EXPECT_FALSE(x.IsBool());
+    EXPECT_FALSE(x.IsFalse());
+    EXPECT_FALSE(x.IsTrue());
+    EXPECT_FALSE(x.IsString());
+    EXPECT_FALSE(x.IsObject());
+    EXPECT_FALSE(x.IsArray());
+
+    // SetUint64()
+    Value z;
+    z.SetUint64(1234);
+    EXPECT_EQ(1234u, z.GetUint64());
+
+    z.SetUint64(uint64_t(2147483648u));  // 2^31, cannot cast as int
+    EXPECT_FALSE(z.IsInt());
+    EXPECT_TRUE(z.IsUint());
+    EXPECT_TRUE(z.IsInt64());
+
+    z.SetUint64(uint64_t(4294967295u) + 1);  // 2^32, cannot cast as uint
+    EXPECT_FALSE(z.IsInt());
+    EXPECT_FALSE(z.IsUint());
+    EXPECT_TRUE(z.IsInt64());
+
+    uint64_t u = RAPIDJSON_UINT64_C2(0x80000000, 0x00000000);
+    z.SetUint64(u);    // 2^63 cannot cast as int64
+    EXPECT_FALSE(z.IsInt64());
+    EXPECT_EQ(u, z.GetUint64()); // Issue 48
+    EXPECT_DOUBLE_EQ(9223372036854775808.0, z.GetDouble());
+
+    // Templated functions
+    EXPECT_TRUE(z.Is<uint64_t>());
+    EXPECT_EQ(u, z.Get<uint64_t>());
+    EXPECT_EQ(u + 1, z.Set(u + 1).Get<uint64_t>());
+    EXPECT_EQ(u + 2, z.Set<uint64_t>(u + 2).Get<uint64_t>());
+}
+
+TEST(Value, Double) {
+    // Constructor with double
+    Value x(12.34);
+    EXPECT_EQ(kNumberType, x.GetType());
+    EXPECT_NEAR(12.34, x.GetDouble(), 0.0);
+    EXPECT_TRUE(x.IsNumber());
+    EXPECT_TRUE(x.IsDouble());
+
+    EXPECT_FALSE(x.IsInt());
+    EXPECT_FALSE(x.IsNull());
+    EXPECT_FALSE(x.IsBool());
+    EXPECT_FALSE(x.IsFalse());
+    EXPECT_FALSE(x.IsTrue());
+    EXPECT_FALSE(x.IsString());
+    EXPECT_FALSE(x.IsObject());
+    EXPECT_FALSE(x.IsArray());
+
+    // SetDouble()
+    Value z;
+    z.SetDouble(12.34);
+    EXPECT_NEAR(12.34, z.GetDouble(), 0.0);
+
+    z = 56.78;
+    EXPECT_NEAR(56.78, z.GetDouble(), 0.0);
+
+    // Templated functions
+    EXPECT_TRUE(z.Is<double>());
+    EXPECT_EQ(56.78, z.Get<double>());
+    EXPECT_EQ(57.78, z.Set(57.78).Get<double>());
+    EXPECT_EQ(58.78, z.Set<double>(58.78).Get<double>());
+}
+
+TEST(Value, Float) {
+    // Constructor with double
+    Value x(12.34f);
+    EXPECT_EQ(kNumberType, x.GetType());
+    EXPECT_NEAR(12.34f, x.GetFloat(), 0.0);
+    EXPECT_TRUE(x.IsNumber());
+    EXPECT_TRUE(x.IsDouble());
+    EXPECT_TRUE(x.IsFloat());
+
+    EXPECT_FALSE(x.IsInt());
+    EXPECT_FALSE(x.IsNull());
+    EXPECT_FALSE(x.IsBool());
+    EXPECT_FALSE(x.IsFalse());
+    EXPECT_FALSE(x.IsTrue());
+    EXPECT_FALSE(x.IsString());
+    EXPECT_FALSE(x.IsObject());
+    EXPECT_FALSE(x.IsArray());
+
+    // SetFloat()
+    Value z;
+    z.SetFloat(12.34f);
+    EXPECT_NEAR(12.34f, z.GetFloat(), 0.0f);
+
+    // Issue 573
+    z.SetInt(0);
+    EXPECT_EQ(0.0f, z.GetFloat());
+
+    z = 56.78f;
+    EXPECT_NEAR(56.78f, z.GetFloat(), 0.0f);
+
+    // Templated functions
+    EXPECT_TRUE(z.Is<float>());
+    EXPECT_EQ(56.78f, z.Get<float>());
+    EXPECT_EQ(57.78f, z.Set(57.78f).Get<float>());
+    EXPECT_EQ(58.78f, z.Set<float>(58.78f).Get<float>());
+}
+
+TEST(Value, IsLosslessDouble) {
+    EXPECT_TRUE(Value(0.0).IsLosslessDouble());
+    EXPECT_TRUE(Value(12.34).IsLosslessDouble());
+    EXPECT_TRUE(Value(-123).IsLosslessDouble());
+    EXPECT_TRUE(Value(2147483648u).IsLosslessDouble()); // 2^31, exactly representable in double
+    EXPECT_TRUE(Value(-static_cast<int64_t>(RAPIDJSON_UINT64_C2(0x40000000, 0x00000000))).IsLosslessDouble()); // -2^62
+#if !(defined(_MSC_VER) && _MSC_VER < 1800) // VC2010 has problem with this uint64-to-double conversion
+    EXPECT_TRUE(Value(RAPIDJSON_UINT64_C2(0xA0000000, 0x00000000)).IsLosslessDouble());
+#endif
+
+    EXPECT_FALSE(Value(static_cast<int64_t>(RAPIDJSON_UINT64_C2(0x7FFFFFFF, 0xFFFFFFFF))).IsLosslessDouble()); // INT64_MAX
+    EXPECT_FALSE(Value(-static_cast<int64_t>(RAPIDJSON_UINT64_C2(0x7FFFFFFF, 0xFFFFFFFF))).IsLosslessDouble()); // -INT64_MAX
+    EXPECT_TRUE(Value(-static_cast<int64_t>(RAPIDJSON_UINT64_C2(0x7FFFFFFF, 0xFFFFFFFF)) - 1).IsLosslessDouble()); // INT64_MIN (a power of two, exact in double)
+    EXPECT_FALSE(Value(RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0xFFFFFFFF)).IsLosslessDouble()); // UINT64_MAX
+
+    EXPECT_TRUE(Value(3.4028234e38f).IsLosslessDouble()); // FLT_MAX
+    EXPECT_TRUE(Value(-3.4028234e38f).IsLosslessDouble()); // -FLT_MAX
+    EXPECT_TRUE(Value(1.17549435e-38f).IsLosslessDouble()); // FLT_MIN
+    EXPECT_TRUE(Value(-1.17549435e-38f).IsLosslessDouble()); // -FLT_MIN
+    EXPECT_TRUE(Value(1.7976931348623157e+308).IsLosslessDouble()); // DBL_MAX
+    EXPECT_TRUE(Value(-1.7976931348623157e+308).IsLosslessDouble()); // -DBL_MAX
+    EXPECT_TRUE(Value(2.2250738585072014e-308).IsLosslessDouble()); // DBL_MIN
+    EXPECT_TRUE(Value(-2.2250738585072014e-308).IsLosslessDouble()); // -DBL_MIN
+}
+
+TEST(Value, IsLosslessFloat) {
+    EXPECT_TRUE(Value(12.25).IsLosslessFloat());       // exactly representable in float
+    EXPECT_TRUE(Value(-123).IsLosslessFloat());
+    EXPECT_TRUE(Value(2147483648u).IsLosslessFloat()); // 2^31, exact in float
+    EXPECT_TRUE(Value(3.4028234e38f).IsLosslessFloat());  // FLT_MAX
+    EXPECT_TRUE(Value(-3.4028234e38f).IsLosslessFloat()); // -FLT_MAX
+    EXPECT_FALSE(Value(3.4028235e38).IsLosslessFloat());  // double just above FLT_MAX
+    EXPECT_FALSE(Value(0.3).IsLosslessFloat());           // 0.3 loses precision when narrowed to float
+}
+
+TEST(Value, String) {
+    // Construction with const string
+    Value x("Hello", 5); // literal
+    EXPECT_EQ(kStringType, x.GetType());
+    EXPECT_TRUE(x.IsString());
+    EXPECT_STREQ("Hello", x.GetString());
+    EXPECT_EQ(5u, x.GetStringLength());
+
+    EXPECT_FALSE(x.IsNumber());
+    EXPECT_FALSE(x.IsNull());
+    EXPECT_FALSE(x.IsBool());
+    EXPECT_FALSE(x.IsFalse());
+    EXPECT_FALSE(x.IsTrue());
+    EXPECT_FALSE(x.IsObject());
+    EXPECT_FALSE(x.IsArray());
+
+    static const char cstr[] = "World"; // const array
+    Value(cstr).Swap(x);
+    EXPECT_TRUE(x.IsString());
+    EXPECT_EQ(x.GetString(), cstr); // same pointer: no copy was made
+    EXPECT_EQ(x.GetStringLength(), sizeof(cstr)-1);
+
+    static char mstr[] = "Howdy"; // non-const array
+    // Value(mstr).Swap(x); // should not compile
+    Value(StringRef(mstr)).Swap(x);
+    EXPECT_TRUE(x.IsString());
+    EXPECT_EQ(x.GetString(), mstr);
+    EXPECT_EQ(x.GetStringLength(), sizeof(mstr)-1);
+    strncpy(mstr,"Hello", sizeof(mstr)); // mutate the referenced buffer; value sees the change
+    EXPECT_STREQ(x.GetString(), "Hello");
+
+    const char* pstr = cstr;
+    //Value(pstr).Swap(x); // should not compile
+    Value(StringRef(pstr)).Swap(x);
+    EXPECT_TRUE(x.IsString());
+    EXPECT_EQ(x.GetString(), cstr);
+    EXPECT_EQ(x.GetStringLength(), sizeof(cstr)-1);
+
+    char* mpstr = mstr;
+    Value(StringRef(mpstr,sizeof(mstr)-1)).Swap(x);
+    EXPECT_TRUE(x.IsString());
+    EXPECT_EQ(x.GetString(), mstr);
+    EXPECT_EQ(x.GetStringLength(), 5u);
+    EXPECT_STREQ(x.GetString(), "Hello");
+
+    // Constructor with copy string
+    MemoryPoolAllocator<> allocator;
+    Value c(x.GetString(), x.GetStringLength(), allocator);
+    EXPECT_NE(x.GetString(), c.GetString()); // copied: distinct pointer
+    EXPECT_EQ(x.GetStringLength(), c.GetStringLength());
+    EXPECT_STREQ(x.GetString(), c.GetString());
+    //x.SetString("World");
+    x.SetString("World", 5);
+    EXPECT_STREQ("Hello", c.GetString()); // copy is unaffected by changing x
+    EXPECT_EQ(5u, c.GetStringLength());
+
+    // Constructor with type
+    Value y(kStringType);
+    EXPECT_TRUE(y.IsString());
+    EXPECT_STREQ("", y.GetString());    // Empty string should be "" instead of 0 (issue 226)
+    EXPECT_EQ(0u, y.GetStringLength());
+
+    // SetConstString()
+    Value z;
+    z.SetString("Hello");
+    EXPECT_TRUE(x.IsString()); // NOTE(review): asserts on x, but z was just set — possibly intended z; confirm upstream
+    z.SetString("Hello", 5);
+    EXPECT_STREQ("Hello", z.GetString());
+    EXPECT_STREQ("Hello", z.GetString()); // (duplicate of the previous assertion)
+    EXPECT_EQ(5u, z.GetStringLength());
+
+    z.SetString("Hello");
+    EXPECT_TRUE(z.IsString());
+    EXPECT_STREQ("Hello", z.GetString());
+
+    //z.SetString(mstr); // should not compile
+    //z.SetString(pstr); // should not compile
+    z.SetString(StringRef(mstr));
+    EXPECT_TRUE(z.IsString());
+    EXPECT_STREQ(z.GetString(), mstr);
+
+    z.SetString(cstr);
+    EXPECT_TRUE(z.IsString());
+    EXPECT_EQ(cstr, z.GetString());
+
+    z = cstr;
+    EXPECT_TRUE(z.IsString());
+    EXPECT_EQ(cstr, z.GetString());
+
+    // SetString()
+    char s[] = "World";
+    Value w;
+    w.SetString(s, static_cast<SizeType>(strlen(s)), allocator);
+    s[0] = '\0'; // clobber the source; w must hold its own copy
+    EXPECT_STREQ("World", w.GetString());
+    EXPECT_EQ(5u, w.GetStringLength());
+
+    // templated functions
+    EXPECT_TRUE(z.Is<const char*>());
+    EXPECT_STREQ(cstr, z.Get<const char*>());
+    EXPECT_STREQ("Apple", z.Set<const char*>("Apple").Get<const char*>());
+
+#if RAPIDJSON_HAS_STDSTRING
+    {
+        std::string str = "Hello World";
+        str[5] = '\0';
+        EXPECT_STREQ(str.data(),"Hello"); // embedded '\0'
+        EXPECT_EQ(str.size(), 11u);
+
+        // no copy
+        Value vs0(StringRef(str));
+        EXPECT_TRUE(vs0.IsString());
+        EXPECT_EQ(vs0.GetString(), str.data());
+        EXPECT_EQ(vs0.GetStringLength(), str.size());
+        TestEqual(vs0, str);
+
+        // do copy
+        Value vs1(str, allocator);
+        EXPECT_TRUE(vs1.IsString());
+        EXPECT_NE(vs1.GetString(), str.data());
+        EXPECT_NE(vs1.GetString(), str); // not equal due to embedded '\0'
+        EXPECT_EQ(vs1.GetStringLength(), str.size());
+        TestEqual(vs1, str);
+
+        // SetString
+        str = "World";
+        vs0.SetNull().SetString(str, allocator);
+        EXPECT_TRUE(vs0.IsString());
+        EXPECT_STREQ(vs0.GetString(), str.c_str());
+        EXPECT_EQ(vs0.GetStringLength(), str.size());
+        TestEqual(str, vs0);
+        TestUnequal(str, vs1);
+
+        // vs1 = str; // should not compile
+        vs1 = StringRef(str);
+        TestEqual(str, vs1);
+        TestEqual(vs0, vs1);
+
+        // Templated function.
+        EXPECT_TRUE(vs0.Is<std::string>());
+        EXPECT_EQ(str, vs0.Get<std::string>());
+        vs0.Set<std::string>(std::string("Apple"), allocator);
+        EXPECT_EQ(std::string("Apple"), vs0.Get<std::string>());
+        vs0.Set(std::string("Orange"), allocator);
+        EXPECT_EQ(std::string("Orange"), vs0.Get<std::string>());
+    }
+#endif // RAPIDJSON_HAS_STDSTRING
+}
+
+// Issue 226: Value of string type should not point to NULL
+TEST(Value, SetStringNull) {
+
+    MemoryPoolAllocator<> allocator;
+    const char* nullPtr = 0;
+    {
+        // Construction with string type creates empty string
+        Value v(kStringType);
+        EXPECT_NE(v.GetString(), nullPtr); // non-null string returned
+        EXPECT_EQ(v.GetStringLength(), 0u);
+
+        // Construction from/setting to null without length not allowed
+        EXPECT_THROW(Value(StringRef(nullPtr)), AssertException);
+        EXPECT_THROW(Value(StringRef(nullPtr), allocator), AssertException);
+        EXPECT_THROW(v.SetString(nullPtr, allocator), AssertException);
+
+        // Non-empty length with null string is not allowed
+        EXPECT_THROW(v.SetString(nullPtr, 17u), AssertException);
+        EXPECT_THROW(v.SetString(nullPtr, 42u, allocator), AssertException);
+
+        // Setting to null string with empty length is allowed
+        v.SetString(nullPtr, 0u); // null pointer + zero length is treated as an empty string
+        EXPECT_NE(v.GetString(), nullPtr); // non-null string returned
+        EXPECT_EQ(v.GetStringLength(), 0u);
+
+        v.SetNull();
+        v.SetString(nullPtr, 0u, allocator);
+        EXPECT_NE(v.GetString(), nullPtr); // non-null string returned
+        EXPECT_EQ(v.GetStringLength(), 0u);
+    }
+    // Construction with null string and empty length is allowed
+    {
+        Value v(nullPtr,0u);
+        EXPECT_NE(v.GetString(), nullPtr); // non-null string returned
+        EXPECT_EQ(v.GetStringLength(), 0u);
+    }
+    {
+        Value v(nullPtr, 0u, allocator);
+        EXPECT_NE(v.GetString(), nullPtr); // non-null string returned
+        EXPECT_EQ(v.GetStringLength(), 0u);
+    }
+}
+
+// Shared array test suite: exercised with both Value (kArrayType) and the
+// Value::Array helper wrapper (see TEST(Value, Array) / TEST(Value, ArrayHelper)).
+template <typename T, typename Allocator>
+static void TestArray(T& x, Allocator& allocator) {
+    const T& y = x; // const view, for testing the const accessors
+
+    // PushBack()
+    Value v;
+    x.PushBack(v, allocator);
+    v.SetBool(true);
+    x.PushBack(v, allocator);
+    v.SetBool(false);
+    x.PushBack(v, allocator);
+    v.SetInt(123);
+    x.PushBack(v, allocator);
+    //x.PushBack((const char*)"foo", allocator); // should not compile
+    x.PushBack("foo", allocator);
+
+    EXPECT_FALSE(x.Empty());
+    EXPECT_EQ(5u, x.Size());
+    EXPECT_FALSE(y.Empty());
+    EXPECT_EQ(5u, y.Size());
+    EXPECT_TRUE(x[SizeType(0)].IsNull());
+    EXPECT_TRUE(x[1].IsTrue());
+    EXPECT_TRUE(x[2].IsFalse());
+    EXPECT_TRUE(x[3].IsInt());
+    EXPECT_EQ(123, x[3].GetInt());
+    EXPECT_TRUE(y[SizeType(0)].IsNull());
+    EXPECT_TRUE(y[1].IsTrue());
+    EXPECT_TRUE(y[2].IsFalse());
+    EXPECT_TRUE(y[3].IsInt());
+    EXPECT_EQ(123, y[3].GetInt());
+    EXPECT_TRUE(y[4].IsString());
+    EXPECT_STREQ("foo", y[4].GetString());
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+    // PushBack(GenericValue&&, Allocator&);
+    {
+        Value y2(kArrayType);
+        y2.PushBack(Value(true), allocator);
+        y2.PushBack(std::move(Value(kArrayType).PushBack(Value(1), allocator).PushBack("foo", allocator)), allocator);
+        EXPECT_EQ(2u, y2.Size());
+        EXPECT_TRUE(y2[0].IsTrue());
+        EXPECT_TRUE(y2[1].IsArray());
+        EXPECT_EQ(2u, y2[1].Size());
+        EXPECT_TRUE(y2[1][0].IsInt());
+        EXPECT_TRUE(y2[1][1].IsString());
+    }
+#endif
+
+    // iterator
+    typename T::ValueIterator itr = x.Begin();
+    EXPECT_TRUE(itr != x.End());
+    EXPECT_TRUE(itr->IsNull());
+    ++itr;
+    EXPECT_TRUE(itr != x.End());
+    EXPECT_TRUE(itr->IsTrue());
+    ++itr;
+    EXPECT_TRUE(itr != x.End());
+    EXPECT_TRUE(itr->IsFalse());
+    ++itr;
+    EXPECT_TRUE(itr != x.End());
+    EXPECT_TRUE(itr->IsInt());
+    EXPECT_EQ(123, itr->GetInt());
+    ++itr;
+    EXPECT_TRUE(itr != x.End());
+    EXPECT_TRUE(itr->IsString());
+    EXPECT_STREQ("foo", itr->GetString());
+
+    // const iterator
+    typename T::ConstValueIterator citr = y.Begin();
+    EXPECT_TRUE(citr != y.End());
+    EXPECT_TRUE(citr->IsNull());
+    ++citr;
+    EXPECT_TRUE(citr != y.End());
+    EXPECT_TRUE(citr->IsTrue());
+    ++citr;
+    EXPECT_TRUE(citr != y.End());
+    EXPECT_TRUE(citr->IsFalse());
+    ++citr;
+    EXPECT_TRUE(citr != y.End());
+    EXPECT_TRUE(citr->IsInt());
+    EXPECT_EQ(123, citr->GetInt());
+    ++citr;
+    EXPECT_TRUE(citr != y.End());
+    EXPECT_TRUE(citr->IsString());
+    EXPECT_STREQ("foo", citr->GetString());
+
+    // PopBack()
+    x.PopBack();
+    EXPECT_EQ(4u, x.Size());
+    EXPECT_TRUE(y[SizeType(0)].IsNull());
+    EXPECT_TRUE(y[1].IsTrue());
+    EXPECT_TRUE(y[2].IsFalse());
+    EXPECT_TRUE(y[3].IsInt());
+
+    // Clear()
+    x.Clear();
+    EXPECT_TRUE(x.Empty());
+    EXPECT_EQ(0u, x.Size());
+    EXPECT_TRUE(y.Empty());
+    EXPECT_EQ(0u, y.Size());
+
+    // Erase(ValueIterator)
+
+    // Use array of array to ensure removed elements' destructor is called.
+    // [[0],[1],[2],...]
+    for (int i = 0; i < 10; i++)
+        x.PushBack(Value(kArrayType).PushBack(i, allocator).Move(), allocator);
+
+    // Erase the first
+    itr = x.Erase(x.Begin());
+    EXPECT_EQ(x.Begin(), itr);
+    EXPECT_EQ(9u, x.Size());
+    for (int i = 0; i < 9; i++)
+        EXPECT_EQ(i + 1, x[static_cast<SizeType>(i)][0].GetInt());
+
+    // Erase the last
+    itr = x.Erase(x.End() - 1);
+    EXPECT_EQ(x.End(), itr);
+    EXPECT_EQ(8u, x.Size());
+    for (int i = 0; i < 8; i++)
+        EXPECT_EQ(i + 1, x[static_cast<SizeType>(i)][0].GetInt());
+
+    // Erase the middle
+    itr = x.Erase(x.Begin() + 4);
+    EXPECT_EQ(x.Begin() + 4, itr);
+    EXPECT_EQ(7u, x.Size());
+    for (int i = 0; i < 4; i++)
+        EXPECT_EQ(i + 1, x[static_cast<SizeType>(i)][0].GetInt());
+    for (int i = 4; i < 7; i++)
+        EXPECT_EQ(i + 2, x[static_cast<SizeType>(i)][0].GetInt());
+
+    // Erase(ValueIterator, ValueIterator)
+    // Exhaustive test with all 0 <= first < n, first <= last <= n cases
+    const unsigned n = 10;
+    for (unsigned first = 0; first < n; first++) {
+        for (unsigned last = first; last <= n; last++) {
+            x.Clear();
+            for (unsigned i = 0; i < n; i++)
+                x.PushBack(Value(kArrayType).PushBack(i, allocator).Move(), allocator);
+
+            itr = x.Erase(x.Begin() + first, x.Begin() + last);
+            if (last == n)
+                EXPECT_EQ(x.End(), itr);
+            else
+                EXPECT_EQ(x.Begin() + first, itr);
+
+            size_t removeCount = last - first;
+            EXPECT_EQ(n - removeCount, x.Size());
+            for (unsigned i = 0; i < first; i++)
+                EXPECT_EQ(i, x[i][0].GetUint());
+            for (unsigned i = first; i < n - removeCount; i++)
+                EXPECT_EQ(i + removeCount, x[static_cast<SizeType>(i)][0].GetUint());
+        }
+    }
+}
+
+TEST(Value, Array) {
+    Value x(kArrayType);
+    const Value& y = x;
+    Value::AllocatorType allocator;
+
+    EXPECT_EQ(kArrayType, x.GetType());
+    EXPECT_TRUE(x.IsArray());
+    EXPECT_TRUE(x.Empty());
+    EXPECT_EQ(0u, x.Size());
+    EXPECT_TRUE(y.IsArray());
+    EXPECT_TRUE(y.Empty());
+    EXPECT_EQ(0u, y.Size());
+
+    EXPECT_FALSE(x.IsNull());
+    EXPECT_FALSE(x.IsBool());
+    EXPECT_FALSE(x.IsFalse());
+    EXPECT_FALSE(x.IsTrue());
+    EXPECT_FALSE(x.IsString());
+    EXPECT_FALSE(x.IsObject());
+
+    TestArray(x, allocator); // run the shared array test suite on a plain Value
+
+    // Working in gcc without C++11, but VS2013 cannot compile. To be diagnosed.
+    // http://en.wikipedia.org/wiki/Erase-remove_idiom
+    x.Clear();
+    for (int i = 0; i < 10; i++)
+        if (i % 2 == 0)
+            x.PushBack(i, allocator);
+        else
+            x.PushBack(Value(kNullType).Move(), allocator);
+
+    const Value null(kNullType); // sentinel matched by std::remove
+    x.Erase(std::remove(x.Begin(), x.End(), null), x.End());
+    EXPECT_EQ(5u, x.Size());
+    for (int i = 0; i < 5; i++)
+        EXPECT_EQ(i * 2, x[static_cast<SizeType>(i)]);
+
+    // SetArray()
+    Value z;
+    z.SetArray();
+    EXPECT_TRUE(z.IsArray());
+    EXPECT_TRUE(z.Empty());
+}
+
+TEST(Value, ArrayHelper) {
+    Value::AllocatorType allocator;
+    {
+        Value x(kArrayType);
+        Value::Array a = x.GetArray();
+        TestArray(a, allocator); // the Array wrapper passes the same suite as Value
+    }
+
+    {
+        Value x(kArrayType);
+        Value::Array a = x.GetArray();
+        a.PushBack(1, allocator);
+
+        Value::Array a2(a); // copy constructor
+        EXPECT_EQ(1u, a2.Size());
+
+        Value::Array a3 = a;
+        EXPECT_EQ(1u, a3.Size());
+
+        Value::ConstArray y = static_cast<const Value&>(x).GetArray();
+        (void)y;
+        // y.PushBack(1, allocator); // should not compile
+
+        // Templated functions
+        x.Clear();
+        EXPECT_TRUE(x.Is<Value::Array>());
+        EXPECT_TRUE(x.Is<Value::ConstArray>());
+        a.PushBack(1, allocator);
+        EXPECT_EQ(1, x.Get<Value::Array>()[0].GetInt());
+        EXPECT_EQ(1, x.Get<Value::ConstArray>()[0].GetInt());
+
+        Value x2;
+        x2.Set<Value::Array>(a);
+        EXPECT_TRUE(x.IsArray());   // IsArray() is invariant after moving.
+        EXPECT_EQ(1, x2.Get<Value::Array>()[0].GetInt());
+    }
+
+    {
+        Value y(kArrayType);
+        y.PushBack(123, allocator);
+
+        Value x(y.GetArray());      // Construct value from array.
+        EXPECT_TRUE(x.IsArray());
+        EXPECT_EQ(123, x[0].GetInt());
+        EXPECT_TRUE(y.IsArray());   // Invariant
+        EXPECT_TRUE(y.Empty());
+    }
+
+    {
+        Value x(kArrayType);
+        Value y(kArrayType);
+        y.PushBack(123, allocator);
+        x.PushBack(y.GetArray(), allocator);    // Implicit constructor to convert Array to GenericValue
+
+        EXPECT_EQ(1u, x.Size());
+        EXPECT_EQ(123, x[0][0].GetInt());
+        EXPECT_TRUE(y.IsArray());
+        EXPECT_TRUE(y.Empty());
+    }
+}
+
+#if RAPIDJSON_HAS_CXX11_RANGE_FOR
+TEST(Value, ArrayHelperRangeFor) {
+    Value::AllocatorType allocator;
+    Value x(kArrayType);
+
+    for (int i = 0; i < 10; i++)
+        x.PushBack(i, allocator);
+
+    {
+        int i = 0;
+        for (auto& v : x.GetArray()) { // mutable range-for over the Array wrapper
+            EXPECT_EQ(i, v.GetInt());
+            i++;
+        }
+        EXPECT_EQ(i, 10);
+    }
+    {
+        int i = 0;
+        for (const auto& v : const_cast<const Value&>(x).GetArray()) { // const range-for
+            EXPECT_EQ(i, v.GetInt());
+            i++;
+        }
+        EXPECT_EQ(i, 10);
+    }
+
+    // Array a = x.GetArray();
+    // Array ca = const_cast<const Value&>(x).GetArray();
+}
+#endif
+
+// Shared object test suite: exercised with both Value (kObjectType) and the
+// Value::Object helper wrapper (see TEST(Value, Object) / TEST(Value, ObjectHelper)).
+template <typename T, typename Allocator>
+static void TestObject(T& x, Allocator& allocator) {
+    const T& y = x; // const version
+
+    // AddMember()
+    x.AddMember("A", "Apple", allocator);
+    EXPECT_FALSE(x.ObjectEmpty());
+    EXPECT_EQ(1u, x.MemberCount());
+
+    Value value("Banana", 6);
+    x.AddMember("B", "Banana", allocator);
+    EXPECT_EQ(2u, x.MemberCount());
+
+    // AddMember<T>(StringRefType, T, Allocator)
+    {
+        Value o(kObjectType);
+        o.AddMember("true", true, allocator);
+        o.AddMember("false", false, allocator);
+        o.AddMember("int", -1, allocator);
+        o.AddMember("uint", 1u, allocator);
+        o.AddMember("int64", int64_t(-4294967296), allocator);
+        o.AddMember("uint64", uint64_t(4294967296), allocator);
+        o.AddMember("double", 3.14, allocator);
+        o.AddMember("string", "Jelly", allocator);
+
+        EXPECT_TRUE(o["true"].GetBool());
+        EXPECT_FALSE(o["false"].GetBool());
+        EXPECT_EQ(-1, o["int"].GetInt());
+        EXPECT_EQ(1u, o["uint"].GetUint());
+        EXPECT_EQ(int64_t(-4294967296), o["int64"].GetInt64());
+        EXPECT_EQ(uint64_t(4294967296), o["uint64"].GetUint64());
+        EXPECT_STREQ("Jelly",o["string"].GetString());
+        EXPECT_EQ(8u, o.MemberCount());
+    }
+
+    // AddMember<T>(Value&, T, Allocator)
+    {
+        Value o(kObjectType);
+
+        Value n("s");
+        o.AddMember(n, "string", allocator);
+        EXPECT_EQ(1u, o.MemberCount());
+
+        Value count("#");
+        o.AddMember(count, o.MemberCount(), allocator);
+        EXPECT_EQ(2u, o.MemberCount());
+    }
+
+#if RAPIDJSON_HAS_STDSTRING
+    {
+        // AddMember(StringRefType, const std::string&, Allocator)
+        Value o(kObjectType);
+        o.AddMember("b", std::string("Banana"), allocator);
+        EXPECT_STREQ("Banana", o["b"].GetString());
+
+        // RemoveMember(const std::string&)
+        o.RemoveMember(std::string("b"));
+        EXPECT_TRUE(o.ObjectEmpty());
+    }
+#endif
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+    // AddMember(GenericValue&&, ...) variants
+    {
+        Value o(kObjectType);
+        o.AddMember(Value("true"), Value(true), allocator);
+        o.AddMember(Value("false"), Value(false).Move(), allocator);    // value is lvalue ref
+        o.AddMember(Value("int").Move(), Value(-1), allocator);         // name is lvalue ref
+        o.AddMember("uint", std::move(Value().SetUint(1u)), allocator); // name is literal, value is rvalue
+        EXPECT_TRUE(o["true"].GetBool());
+        EXPECT_FALSE(o["false"].GetBool());
+        EXPECT_EQ(-1, o["int"].GetInt());
+        EXPECT_EQ(1u, o["uint"].GetUint());
+        EXPECT_EQ(4u, o.MemberCount());
+    }
+#endif
+
+    // Tests a member with null character
+    Value name;
+    const Value C0D("C\0D", 3); // name with an embedded '\0'
+    name.SetString(C0D.GetString(), 3);
+    value.SetString("CherryD", 7);
+    x.AddMember(name, value, allocator);
+
+    // HasMember()
+    EXPECT_TRUE(x.HasMember("A"));
+    EXPECT_TRUE(x.HasMember("B"));
+    EXPECT_TRUE(y.HasMember("A"));
+    EXPECT_TRUE(y.HasMember("B"));
+
+#if RAPIDJSON_HAS_STDSTRING
+    EXPECT_TRUE(x.HasMember(std::string("A")));
+#endif
+
+    name.SetString("C\0D");
+    EXPECT_TRUE(x.HasMember(name));
+    EXPECT_TRUE(y.HasMember(name));
+
+    GenericValue<UTF8<>, CrtAllocator> othername("A"); // name with a different allocator type
+    EXPECT_TRUE(x.HasMember(othername));
+    EXPECT_TRUE(y.HasMember(othername));
+    othername.SetString("C\0D");
+    EXPECT_TRUE(x.HasMember(othername));
+    EXPECT_TRUE(y.HasMember(othername));
+
+    // operator[]
+    EXPECT_STREQ("Apple", x["A"].GetString());
+    EXPECT_STREQ("Banana", x["B"].GetString());
+    EXPECT_STREQ("CherryD", x[C0D].GetString());
+    EXPECT_STREQ("CherryD", x[othername].GetString());
+    EXPECT_THROW(x["nonexist"], AssertException);
+
+    // const operator[]
+    EXPECT_STREQ("Apple", y["A"].GetString());
+    EXPECT_STREQ("Banana", y["B"].GetString());
+    EXPECT_STREQ("CherryD", y[C0D].GetString());
+
+#if RAPIDJSON_HAS_STDSTRING
+    EXPECT_STREQ("Apple", x["A"].GetString()); // NOTE(review): duplicates the line above; possibly intended x[std::string("A")] — confirm upstream
+    EXPECT_STREQ("Apple", y[std::string("A")].GetString());
+#endif
+
+    // member iterator
+    Value::MemberIterator itr = x.MemberBegin();
+    EXPECT_TRUE(itr != x.MemberEnd());
+    EXPECT_STREQ("A", itr->name.GetString());
+    EXPECT_STREQ("Apple", itr->value.GetString());
+    ++itr;
+    EXPECT_TRUE(itr != x.MemberEnd());
+    EXPECT_STREQ("B", itr->name.GetString());
+    EXPECT_STREQ("Banana", itr->value.GetString());
+    ++itr;
+    EXPECT_TRUE(itr != x.MemberEnd());
+    EXPECT_TRUE(memcmp(itr->name.GetString(), "C\0D", 4) == 0);
+    EXPECT_STREQ("CherryD", itr->value.GetString());
+    ++itr;
+    EXPECT_FALSE(itr != x.MemberEnd());
+
+    // const member iterator
+    Value::ConstMemberIterator citr = y.MemberBegin();
+    EXPECT_TRUE(citr != y.MemberEnd());
+    EXPECT_STREQ("A", citr->name.GetString());
+    EXPECT_STREQ("Apple", citr->value.GetString());
+    ++citr;
+    EXPECT_TRUE(citr != y.MemberEnd());
+    EXPECT_STREQ("B", citr->name.GetString());
+    EXPECT_STREQ("Banana", citr->value.GetString());
+    ++citr;
+    EXPECT_TRUE(citr != y.MemberEnd());
+    EXPECT_TRUE(memcmp(citr->name.GetString(), "C\0D", 4) == 0);
+    EXPECT_STREQ("CherryD", citr->value.GetString());
+    ++citr;
+    EXPECT_FALSE(citr != y.MemberEnd());
+
+    // member iterator conversions/relations
+    itr  = x.MemberBegin();
+    citr = x.MemberBegin(); // const conversion
+    TestEqual(itr, citr);
+    EXPECT_TRUE(itr < x.MemberEnd());
+    EXPECT_FALSE(itr > y.MemberEnd());
+    EXPECT_TRUE(citr < x.MemberEnd());
+    EXPECT_FALSE(citr > y.MemberEnd());
+    ++citr;
+    TestUnequal(itr, citr);
+    EXPECT_FALSE(itr < itr);
+    EXPECT_TRUE(itr < citr);
+    EXPECT_FALSE(itr > itr);
+    EXPECT_TRUE(citr > itr);
+    EXPECT_EQ(1, citr - x.MemberBegin());
+    EXPECT_EQ(0, itr - y.MemberBegin());
+    itr += citr - x.MemberBegin();
+    EXPECT_EQ(1, itr - y.MemberBegin());
+    TestEqual(citr, itr);
+    EXPECT_TRUE(itr <= citr);
+    EXPECT_TRUE(citr <= itr);
+    itr++;
+    EXPECT_TRUE(itr >= citr);
+    EXPECT_FALSE(citr >= itr);
+
+    // RemoveMember()
+    EXPECT_TRUE(x.RemoveMember("A"));
+    EXPECT_FALSE(x.HasMember("A"));
+
+    EXPECT_TRUE(x.RemoveMember("B"));
+    EXPECT_FALSE(x.HasMember("B"));
+
+    EXPECT_FALSE(x.RemoveMember("nonexist"));
+
+    EXPECT_TRUE(x.RemoveMember(othername));
+    EXPECT_FALSE(x.HasMember(name));
+
+    EXPECT_TRUE(x.MemberBegin() == x.MemberEnd());
+
+    // EraseMember(ConstMemberIterator)
+
+    // Use array members to ensure removed elements' destructor is called.
+    // { "a": [0], "b": [1], "c": [2], ... }
+    const char keys[][2] = { "a", "b", "c", "d", "e", "f", "g", "h", "i", "j" };
+    for (int i = 0; i < 10; i++)
+        x.AddMember(keys[i], Value(kArrayType).PushBack(i, allocator), allocator);
+
+    // MemberCount, iterator difference
+    EXPECT_EQ(x.MemberCount(), SizeType(x.MemberEnd() - x.MemberBegin()));
+
+    // Erase the first
+    itr = x.EraseMember(x.MemberBegin());
+    EXPECT_FALSE(x.HasMember(keys[0]));
+    EXPECT_EQ(x.MemberBegin(), itr);
+    EXPECT_EQ(9u, x.MemberCount());
+    for (; itr != x.MemberEnd(); ++itr) {
+        size_t i = static_cast<size_t>((itr - x.MemberBegin())) + 1;
+        EXPECT_STREQ(itr->name.GetString(), keys[i]);
+        EXPECT_EQ(static_cast<int>(i), itr->value[0].GetInt());
+    }
+
+    // Erase the last
+    itr = x.EraseMember(x.MemberEnd() - 1);
+    EXPECT_FALSE(x.HasMember(keys[9]));
+    EXPECT_EQ(x.MemberEnd(), itr);
+    EXPECT_EQ(8u, x.MemberCount());
+    for (; itr != x.MemberEnd(); ++itr) {
+        size_t i = static_cast<size_t>(itr - x.MemberBegin()) + 1;
+        EXPECT_STREQ(itr->name.GetString(), keys[i]);
+        EXPECT_EQ(static_cast<int>(i), itr->value[0].GetInt());
+    }
+
+    // Erase the middle
+    itr = x.EraseMember(x.MemberBegin() + 4);
+    EXPECT_FALSE(x.HasMember(keys[5]));
+    EXPECT_EQ(x.MemberBegin() + 4, itr);
+    EXPECT_EQ(7u, x.MemberCount());
+    for (; itr != x.MemberEnd(); ++itr) {
+        size_t i = static_cast<size_t>(itr - x.MemberBegin());
+        i += (i < 4) ? 1 : 2;
+        EXPECT_STREQ(itr->name.GetString(), keys[i]);
+        EXPECT_EQ(static_cast<int>(i), itr->value[0].GetInt());
+    }
+
+    // EraseMember(ConstMemberIterator, ConstMemberIterator)
+    // Exhaustive test with all 0 <= first < n, first <= last <= n cases
+    const unsigned n = 10;
+    for (unsigned first = 0; first < n; first++) {
+        for (unsigned last = first; last <= n; last++) {
+            x.RemoveAllMembers();
+            for (unsigned i = 0; i < n; i++)
+                x.AddMember(keys[i], Value(kArrayType).PushBack(i, allocator), allocator);
+
+            itr = x.EraseMember(x.MemberBegin() + static_cast<int>(first), x.MemberBegin() + static_cast<int>(last));
+            if (last == n)
+                EXPECT_EQ(x.MemberEnd(), itr);
+            else
+                EXPECT_EQ(x.MemberBegin() + static_cast<int>(first), itr);
+
+            size_t removeCount = last - first;
+            EXPECT_EQ(n - removeCount, x.MemberCount());
+            for (unsigned i = 0; i < first; i++)
+                EXPECT_EQ(i, x[keys[i]][0].GetUint());
+            for (unsigned i = first; i < n - removeCount; i++)
+                EXPECT_EQ(i + removeCount, x[keys[i+removeCount]][0].GetUint());
+        }
+    }
+
+    // RemoveAllMembers()
+    x.RemoveAllMembers();
+    EXPECT_TRUE(x.ObjectEmpty());
+    EXPECT_EQ(0u, x.MemberCount());
+}
+
+TEST(Value, Object) {
+    Value x(kObjectType);
+    const Value& y = x; // const version
+    Value::AllocatorType allocator;
+
+    EXPECT_EQ(kObjectType, x.GetType());
+    EXPECT_TRUE(x.IsObject());
+    EXPECT_TRUE(x.ObjectEmpty());
+    EXPECT_EQ(0u, x.MemberCount());
+    EXPECT_EQ(kObjectType, y.GetType());
+    EXPECT_TRUE(y.IsObject());
+    EXPECT_TRUE(y.ObjectEmpty());
+    EXPECT_EQ(0u, y.MemberCount());
+
+    TestObject(x, allocator); // run the shared object test suite on a plain Value
+
+    // SetObject()
+    Value z;
+    z.SetObject();
+    EXPECT_TRUE(z.IsObject());
+}
+
+TEST(Value, ObjectHelper) {
+    Value::AllocatorType allocator;
+    {
+        Value x(kObjectType);
+        Value::Object o = x.GetObject();
+        TestObject(o, allocator); // the Object wrapper passes the same suite as Value
+    }
+
+    {
+        Value x(kObjectType);
+        Value::Object o = x.GetObject();
+        o.AddMember("1", 1, allocator);
+
+        Value::Object o2(o); // copy constructor
+        EXPECT_EQ(1u, o2.MemberCount());
+
+        Value::Object o3 = o;
+        EXPECT_EQ(1u, o3.MemberCount());
+
+        Value::ConstObject y = static_cast<const Value&>(x).GetObject();
+        (void)y;
+        // y.AddMember("1", 1, allocator); // should not compile
+
+        // Templated functions
+        x.RemoveAllMembers();
+        EXPECT_TRUE(x.Is<Value::Object>());
+        EXPECT_TRUE(x.Is<Value::ConstObject>());
+        o.AddMember("1", 1, allocator);
+        EXPECT_EQ(1, x.Get<Value::Object>()["1"].GetInt());
+        EXPECT_EQ(1, x.Get<Value::ConstObject>()["1"].GetInt());
+
+        Value x2;
+        x2.Set<Value::Object>(o);
+        EXPECT_TRUE(x.IsObject());   // IsObject() is invariant after moving
+        EXPECT_EQ(1, x2.Get<Value::Object>()["1"].GetInt());
+    }
+
+    {
+        Value x(kObjectType);
+        x.AddMember("a", "apple", allocator);
+        Value y(x.GetObject()); // construct a Value from the Object wrapper
+        EXPECT_STREQ("apple", y["a"].GetString());
+        EXPECT_TRUE(x.IsObject());  // Invariant
+    }
+
+    {
+        Value x(kObjectType);
+        x.AddMember("a", "apple", allocator);
+        Value y(kObjectType);
+        y.AddMember("fruits", x.GetObject(), allocator); // implicit conversion of Object to GenericValue
+        EXPECT_STREQ("apple", y["fruits"]["a"].GetString());
+        EXPECT_TRUE(x.IsObject());  // Invariant
+    }
+}
+
+#if RAPIDJSON_HAS_CXX11_RANGE_FOR
+TEST(Value, ObjectHelperRangeFor) {
+    Value::AllocatorType allocator;
+    Value x(kObjectType);
+
+    for (int i = 0; i < 10; i++) {
+        char name[10];
+        Value n(name, static_cast<SizeType>(sprintf(name, "%d", i)), allocator); // copies name into the allocator
+        x.AddMember(n, i, allocator);
+    }
+
+    {
+        int i = 0;
+        for (auto& m : x.GetObject()) { // mutable range-for over the Object wrapper
+            char name[10];
+            sprintf(name, "%d", i);
+            EXPECT_STREQ(name, m.name.GetString());
+            EXPECT_EQ(i, m.value.GetInt());
+            i++;
+        }
+        EXPECT_EQ(i, 10);
+    }
+    {
+        int i = 0;
+        for (const auto& m : const_cast<const Value&>(x).GetObject()) { // const range-for
+            char name[10];
+            sprintf(name, "%d", i);
+            EXPECT_STREQ(name, m.name.GetString());
+            EXPECT_EQ(i, m.value.GetInt());
+            i++;
+        }
+        EXPECT_EQ(i, 10);
+    }
+
+    // Object a = x.GetObject();
+    // Object ca = const_cast<const Value&>(x).GetObject();
+}
+#endif
+
+TEST(Value, EraseMember_String) {
+    Value::AllocatorType allocator;
+    Value x(kObjectType);
+    x.AddMember("A", "Apple", allocator);
+    x.AddMember("B", "Banana", allocator);
+
+    EXPECT_TRUE(x.EraseMember("B"));
+    EXPECT_FALSE(x.HasMember("B"));
+
+    EXPECT_FALSE(x.EraseMember("nonexist"));
+
+    GenericValue<UTF8<>, CrtAllocator> othername("A"); // name with a different allocator type
+    EXPECT_TRUE(x.EraseMember(othername));
+    EXPECT_FALSE(x.HasMember("A"));
+
+    EXPECT_TRUE(x.MemberBegin() == x.MemberEnd());
+}
+
+TEST(Value, BigNestedArray) {
+    MemoryPoolAllocator<> allocator;
+    Value x(kArrayType);
+    static const SizeType  n = 200; // 200x200 = 40000 total elements
+
+    for (SizeType i = 0; i < n; i++) {
+        Value y(kArrayType);
+        for (SizeType  j = 0; j < n; j++) {
+            Value number(static_cast<int>(i * n + j));
+            y.PushBack(number, allocator);
+        }
+        x.PushBack(y, allocator);
+    }
+
+    // Verify every element survived the nested construction.
+    for (SizeType i = 0; i < n; i++)
+        for (SizeType j = 0; j < n; j++) {
+            EXPECT_TRUE(x[i][j].IsInt());
+            EXPECT_EQ(static_cast<int>(i * n + j), x[i][j].GetInt());
+        }
+}
+
+TEST(Value, BigNestedObject) {
+    MemoryPoolAllocator<> allocator;
+    Value x(kObjectType);
+    static const SizeType n = 200; // 200x200 = 40000 total members
+
+    for (SizeType i = 0; i < n; i++) {
+        char name1[10];
+        sprintf(name1, "%d", i);
+
+        // Value name(name1); // should not compile
+        Value name(name1, static_cast<SizeType>(strlen(name1)), allocator);
+        Value object(kObjectType);
+
+        for (SizeType j = 0; j < n; j++) {
+            char name2[10];
+            sprintf(name2, "%d", j);
+
+            Value name3(name2, static_cast<SizeType>(strlen(name2)), allocator);
+            Value number(static_cast<int>(i * n + j));
+            object.AddMember(name3, number, allocator);
+        }
+
+        // x.AddMember(name1, object, allocator); // should not compile
+        x.AddMember(name, object, allocator);
+    }
+
+    for (SizeType i = 0; i < n; i++) {
+        char name1[10];
+        sprintf(name1, "%d", i);
+
+        for (SizeType j = 0; j < n; j++) {
+            char name2[10];
+            sprintf(name2, "%d", j);
+            x[name1]; // NOTE(review): result unused — presumably just exercising operator[] lookup
+            EXPECT_EQ(static_cast<int>(i * n + j), x[name1][name2].GetInt());
+        }
+    }
+}
+
+// Issue 18: Error removing last element of object
+// http://code.google.com/p/rapidjson/issues/detail?id=18
+TEST(Value, RemoveLastElement) {
+    rapidjson::Document doc;
+    rapidjson::Document::AllocatorType& allocator = doc.GetAllocator();
+    rapidjson::Value objVal(rapidjson::kObjectType);
+    objVal.AddMember("var1", 123, allocator);
+    objVal.AddMember("var2", "444", allocator);
+    objVal.AddMember("var3", 555, allocator);
+    EXPECT_TRUE(objVal.HasMember("var3"));
+    objVal.RemoveMember("var3");    // Assertion here in r61
+    EXPECT_FALSE(objVal.HasMember("var3"));
+}
+
+// Issue 38:    Segmentation fault with CrtAllocator
+TEST(Document, CrtAllocator) {
+    typedef GenericValue<UTF8<>, CrtAllocator> V;
+
+    V::AllocatorType allocator;
+    V o(kObjectType);
+    o.AddMember("x", 1, allocator); // Should not call destructor on uninitialized name/value of newly allocated members.
+
+    V a(kArrayType);
+    a.PushBack(1, allocator);   // Should not call destructor on uninitialized Value of newly allocated elements.
+}
+
+static void TestShortStringOptimization(const char* str) {
+    const rapidjson::SizeType len = static_cast<rapidjson::SizeType>(strlen(str));
+	
+    rapidjson::Document doc;
+    rapidjson::Value val;
+    val.SetString(str, len, doc.GetAllocator());
+	
+	EXPECT_EQ(val.GetStringLength(), len);
+	EXPECT_STREQ(val.GetString(), str);
+}
+
+TEST(Value, AllocateShortString) {
+	TestShortStringOptimization("");                 // edge case: empty string
+	TestShortStringOptimization("12345678");         // regular case for short strings: 8 chars
+	TestShortStringOptimization("12345678901");      // edge case: 11 chars in 32-bit mode (=> short string)
+	TestShortStringOptimization("123456789012");     // edge case: 12 chars in 32-bit mode (=> regular string)
+	TestShortStringOptimization("123456789012345");  // edge case: 15 chars in 64-bit mode (=> short string)
+	TestShortStringOptimization("1234567890123456"); // edge case: 16 chars in 64-bit mode (=> regular string)
+}
+
+template <int e>
+struct TerminateHandler {
+    bool Null() { return e != 0; }
+    bool Bool(bool) { return e != 1; }
+    bool Int(int) { return e != 2; }
+    bool Uint(unsigned) { return e != 3; }
+    bool Int64(int64_t) { return e != 4; }
+    bool Uint64(uint64_t) { return e != 5; }
+    bool Double(double) { return e != 6; }
+    bool RawNumber(const char*, SizeType, bool) { return e != 7; }
+    bool String(const char*, SizeType, bool) { return e != 8; }
+    bool StartObject() { return e != 9; }
+    bool Key(const char*, SizeType, bool)  { return e != 10; }
+    bool EndObject(SizeType) { return e != 11; }
+    bool StartArray() { return e != 12; }
+    bool EndArray(SizeType) { return e != 13; }
+};
+
+#define TEST_TERMINATION(e, json)\
+{\
+    Document d; \
+    EXPECT_FALSE(d.Parse(json).HasParseError()); \
+    Reader reader; \
+    TerminateHandler<e> h;\
+    EXPECT_FALSE(d.Accept(h));\
+}
+
+TEST(Value, AcceptTerminationByHandler) {
+    TEST_TERMINATION(0, "[null]");
+    TEST_TERMINATION(1, "[true]");
+    TEST_TERMINATION(1, "[false]");
+    TEST_TERMINATION(2, "[-1]");
+    TEST_TERMINATION(3, "[2147483648]");
+    TEST_TERMINATION(4, "[-1234567890123456789]");
+    TEST_TERMINATION(5, "[9223372036854775808]");
+    TEST_TERMINATION(6, "[0.5]");
+    // RawNumber() is never called
+    TEST_TERMINATION(8, "[\"a\"]");
+    TEST_TERMINATION(9, "[{}]");
+    TEST_TERMINATION(10, "[{\"a\":1}]");
+    TEST_TERMINATION(11, "[{}]");
+    TEST_TERMINATION(12, "{\"a\":[]}");
+    TEST_TERMINATION(13, "{\"a\":[]}");
+}
+
+struct ValueIntComparer {
+    bool operator()(const Value& lhs, const Value& rhs) const {
+        return lhs.GetInt() < rhs.GetInt();
+    }
+};
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+TEST(Value, Sorting) {
+    Value::AllocatorType allocator;
+    Value a(kArrayType);
+    a.PushBack(5, allocator);
+    a.PushBack(1, allocator);
+    a.PushBack(3, allocator);
+    std::sort(a.Begin(), a.End(), ValueIntComparer());
+    EXPECT_EQ(1, a[0].GetInt());
+    EXPECT_EQ(3, a[1].GetInt());
+    EXPECT_EQ(5, a[2].GetInt());
+}
+#endif
+
+// http://stackoverflow.com/questions/35222230/
+
+static void MergeDuplicateKey(Value& v, Value::AllocatorType& a) {
+    if (v.IsObject()) {
+        // Convert all key:value into key:[value]
+        for (Value::MemberIterator itr = v.MemberBegin(); itr != v.MemberEnd(); ++itr)
+            itr->value = Value(kArrayType).Move().PushBack(itr->value, a);
+        
+        // Merge arrays if key is duplicated
+        for (Value::MemberIterator itr = v.MemberBegin(); itr != v.MemberEnd();) {
+            Value::MemberIterator itr2 = v.FindMember(itr->name);
+            if (itr != itr2) {
+                itr2->value.PushBack(itr->value[0], a);
+                itr = v.EraseMember(itr);
+            }
+            else
+                ++itr;
+        }
+
+        // Convert key:[values] back to key:value if there is only one value
+        for (Value::MemberIterator itr = v.MemberBegin(); itr != v.MemberEnd(); ++itr) {
+            if (itr->value.Size() == 1)
+                itr->value = itr->value[0];
+            MergeDuplicateKey(itr->value, a); // Recursion on the value
+        }
+    }
+    else if (v.IsArray())
+        for (Value::ValueIterator itr = v.Begin(); itr != v.End(); ++itr)
+            MergeDuplicateKey(*itr, a);
+}
+
+TEST(Value, MergeDuplicateKey) {
+    Document d;
+    d.Parse(
+        "{"
+        "    \"key1\": {"
+        "        \"a\": \"asdf\","
+        "        \"b\": \"foo\","
+        "        \"b\": \"bar\","
+        "        \"c\": \"fdas\""
+        "    }"
+        "}");
+
+    Document d2;
+    d2.Parse(
+        "{"
+        "    \"key1\": {"
+        "        \"a\": \"asdf\","
+        "        \"b\": ["
+        "            \"foo\","
+        "            \"bar\""
+        "        ],"
+        "        \"c\": \"fdas\""
+        "    }"
+        "}");
+
+    EXPECT_NE(d2, d);
+    MergeDuplicateKey(d, d.GetAllocator());
+    EXPECT_EQ(d2, d);
+}
+
+#ifdef __clang__
+RAPIDJSON_DIAG_POP
+#endif
diff --git a/test/unittest/writertest.cpp b/test/unittest/writertest.cpp
new file mode 100644
index 0000000..232b03d
--- /dev/null
+++ b/test/unittest/writertest.cpp
@@ -0,0 +1,598 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+// 
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed 
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+// specific language governing permissions and limitations under the License.
+
+#include "unittest.h"
+
+#include "rapidjson/document.h"
+#include "rapidjson/reader.h"
+#include "rapidjson/writer.h"
+#include "rapidjson/stringbuffer.h"
+#include "rapidjson/memorybuffer.h"
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(c++98-compat)
+#endif
+
+using namespace rapidjson;
+
+TEST(Writer, Compact) {
+    StringStream s("{ \"hello\" : \"world\", \"t\" : true , \"f\" : false, \"n\": null, \"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3] } ");
+    StringBuffer buffer;
+    Writer<StringBuffer> writer(buffer);
+    buffer.ShrinkToFit();
+    Reader reader;
+    reader.Parse<0>(s, writer);
+    EXPECT_STREQ("{\"hello\":\"world\",\"t\":true,\"f\":false,\"n\":null,\"i\":123,\"pi\":3.1416,\"a\":[1,2,3]}", buffer.GetString());
+    EXPECT_EQ(77u, buffer.GetSize());
+    EXPECT_TRUE(writer.IsComplete());
+}
+
+// json -> parse -> writer -> json
+#define TEST_ROUNDTRIP(json) \
+    { \
+        StringStream s(json); \
+        StringBuffer buffer; \
+        Writer<StringBuffer> writer(buffer); \
+        Reader reader; \
+        reader.Parse<kParseFullPrecisionFlag>(s, writer); \
+        EXPECT_STREQ(json, buffer.GetString()); \
+        EXPECT_TRUE(writer.IsComplete()); \
+    }
+
+TEST(Writer, Root) {
+    TEST_ROUNDTRIP("null");
+    TEST_ROUNDTRIP("true");
+    TEST_ROUNDTRIP("false");
+    TEST_ROUNDTRIP("0");
+    TEST_ROUNDTRIP("\"foo\"");
+    TEST_ROUNDTRIP("[]");
+    TEST_ROUNDTRIP("{}");
+}
+
+TEST(Writer, Int) {
+    TEST_ROUNDTRIP("[-1]");
+    TEST_ROUNDTRIP("[-123]");
+    TEST_ROUNDTRIP("[-2147483648]");
+}
+
+TEST(Writer, UInt) {
+    TEST_ROUNDTRIP("[0]");
+    TEST_ROUNDTRIP("[1]");
+    TEST_ROUNDTRIP("[123]");
+    TEST_ROUNDTRIP("[2147483647]");
+    TEST_ROUNDTRIP("[4294967295]");
+}
+
+TEST(Writer, Int64) {
+    TEST_ROUNDTRIP("[-1234567890123456789]");
+    TEST_ROUNDTRIP("[-9223372036854775808]");
+}
+
+TEST(Writer, Uint64) {
+    TEST_ROUNDTRIP("[1234567890123456789]");
+    TEST_ROUNDTRIP("[9223372036854775807]");
+}
+
+TEST(Writer, String) {
+    TEST_ROUNDTRIP("[\"Hello\"]");
+    TEST_ROUNDTRIP("[\"Hello\\u0000World\"]");
+    TEST_ROUNDTRIP("[\"\\\"\\\\/\\b\\f\\n\\r\\t\"]");
+
+#if RAPIDJSON_HAS_STDSTRING
+    {
+        StringBuffer buffer;
+        Writer<StringBuffer> writer(buffer);
+        writer.String(std::string("Hello\n"));
+        EXPECT_STREQ("\"Hello\\n\"", buffer.GetString());
+    }
+#endif
+}
+
+TEST(Writer, Issue_889) {
+    char buf[100] = "Hello";  // mutable buffer: exercises the non-const char* String() overload (issue #889)
+
+    StringBuffer buffer;
+    Writer<StringBuffer> writer(buffer);
+    writer.StartArray();
+    writer.String(buf);
+    writer.EndArray();
+
+    EXPECT_STREQ("[\"Hello\"]", buffer.GetString());
+    EXPECT_TRUE(writer.IsComplete());
+}
+
+TEST(Writer, ScanWriteUnescapedString) {
+    const char json[] = "[\" \\\"0123456789ABCDEF\"]";
+    //                       ^ scanning stops here.
+    char buffer2[sizeof(json) + 32];
+
+    // Use different offset to test different alignments
+    for (int i = 0; i < 32; i++) {
+        char* p = buffer2 + i;
+        memcpy(p, json, sizeof(json));
+        TEST_ROUNDTRIP(p);
+    }
+}
+
+TEST(Writer, Double) {
+    TEST_ROUNDTRIP("[1.2345,1.2345678,0.123456789012,1234567.8]");
+    TEST_ROUNDTRIP("0.0");
+    TEST_ROUNDTRIP("-0.0"); // Issue #289
+    TEST_ROUNDTRIP("1e30");
+    TEST_ROUNDTRIP("1.0");
+    TEST_ROUNDTRIP("5e-324"); // Min subnormal positive double
+    TEST_ROUNDTRIP("2.225073858507201e-308"); // Max subnormal positive double
+    TEST_ROUNDTRIP("2.2250738585072014e-308"); // Min normal positive double
+    TEST_ROUNDTRIP("1.7976931348623157e308"); // Max double
+
+}
+
+// UTF8 -> TargetEncoding -> UTF8
+template <typename TargetEncoding>
+void TestTranscode(const char* json) {
+    StringStream s(json);
+    GenericStringBuffer<TargetEncoding> buffer;
+    Writer<GenericStringBuffer<TargetEncoding>, UTF8<>, TargetEncoding> writer(buffer);
+    Reader reader;
+    reader.Parse(s, writer);
+
+    StringBuffer buffer2;
+    Writer<StringBuffer> writer2(buffer2);
+    GenericReader<TargetEncoding, UTF8<> > reader2;
+    GenericStringStream<TargetEncoding> s2(buffer.GetString());
+    reader2.Parse(s2, writer2);
+
+    EXPECT_STREQ(json, buffer2.GetString());
+}
+
+TEST(Writer, Transcode) {
+    const char json[] = "{\"hello\":\"world\",\"t\":true,\"f\":false,\"n\":null,\"i\":123,\"pi\":3.1416,\"a\":[1,2,3],\"dollar\":\"\x24\",\"cents\":\"\xC2\xA2\",\"euro\":\"\xE2\x82\xAC\",\"gclef\":\"\xF0\x9D\x84\x9E\"}";
+
+    // UTF8 -> UTF8 -> UTF8 (identity round-trip)
+    TestTranscode<UTF8<> >(json);
+
+    // UTF8 -> ASCII -> UTF8
+    TestTranscode<ASCII<> >(json);
+
+    // UTF8 -> UTF16 -> UTF8
+    TestTranscode<UTF16<> >(json);
+
+    // UTF8 -> UTF32 -> UTF8
+    TestTranscode<UTF32<> >(json);
+
+    // UTF8 -> AutoUTF -> UTF8
+    UTFType types[] = { kUTF8, kUTF16LE, kUTF16BE, kUTF32LE, kUTF32BE };
+    for (size_t i = 0; i < sizeof(types) / sizeof(types[0]); i++) {
+        StringStream s(json);
+        MemoryBuffer buffer;
+        AutoUTFOutputStream<unsigned, MemoryBuffer> os(buffer, types[i], true);
+        Writer<AutoUTFOutputStream<unsigned, MemoryBuffer>, UTF8<>, AutoUTF<unsigned> > writer(os);
+        Reader reader;
+        reader.Parse(s, writer);
+
+        StringBuffer buffer2;
+        Writer<StringBuffer> writer2(buffer2);
+        GenericReader<AutoUTF<unsigned>, UTF8<> > reader2;
+        MemoryStream s2(buffer.GetBuffer(), buffer.GetSize());
+        AutoUTFInputStream<unsigned, MemoryStream> is(s2);
+        reader2.Parse(is, writer2);
+
+        EXPECT_STREQ(json, buffer2.GetString());
+    }
+
+}
+
+#include <sstream>
+
+class OStreamWrapper {
+public:
+    typedef char Ch;
+
+    OStreamWrapper(std::ostream& os) : os_(os) {}
+
+    Ch Peek() const { assert(false); return '\0'; }
+    Ch Take() { assert(false); return '\0'; }
+    size_t Tell() const { return 0; }
+
+    Ch* PutBegin() { assert(false); return 0; }
+    void Put(Ch c) { os_.put(c); }
+    void Flush() { os_.flush(); }
+    size_t PutEnd(Ch*) { assert(false); return 0; }
+
+private:
+    OStreamWrapper(const OStreamWrapper&);
+    OStreamWrapper& operator=(const OStreamWrapper&);
+
+    std::ostream& os_;
+};
+
+TEST(Writer, OStreamWrapper) {
+    StringStream s("{ \"hello\" : \"world\", \"t\" : true , \"f\" : false, \"n\": null, \"i\":123, \"pi\": 3.1416, \"a\":[1, 2, 3], \"u64\": 1234567890123456789, \"i64\":-1234567890123456789 } ");
+    
+    std::stringstream ss;
+    OStreamWrapper os(ss);
+    
+    Writer<OStreamWrapper> writer(os);
+
+    Reader reader;
+    reader.Parse<0>(s, writer);
+    
+    std::string actual = ss.str();
+    EXPECT_STREQ("{\"hello\":\"world\",\"t\":true,\"f\":false,\"n\":null,\"i\":123,\"pi\":3.1416,\"a\":[1,2,3],\"u64\":1234567890123456789,\"i64\":-1234567890123456789}", actual.c_str());
+}
+
+TEST(Writer, AssertRootMayBeAnyValue) {
+#define T(x)\
+    {\
+        StringBuffer buffer;\
+        Writer<StringBuffer> writer(buffer);\
+        EXPECT_TRUE(x);\
+    }
+    T(writer.Bool(false));
+    T(writer.Bool(true));
+    T(writer.Null());
+    T(writer.Int(0));
+    T(writer.Uint(0));
+    T(writer.Int64(0));
+    T(writer.Uint64(0));
+    T(writer.Double(0));
+    T(writer.String("foo"));
+#undef T
+}
+
+TEST(Writer, AssertIncorrectObjectLevel) {
+    StringBuffer buffer;
+    Writer<StringBuffer> writer(buffer);
+    writer.StartObject();
+    writer.EndObject();
+    ASSERT_THROW(writer.EndObject(), AssertException);
+}
+
+TEST(Writer, AssertIncorrectArrayLevel) {
+    StringBuffer buffer;
+    Writer<StringBuffer> writer(buffer);
+    writer.StartArray();
+    writer.EndArray();
+    ASSERT_THROW(writer.EndArray(), AssertException);
+}
+
+TEST(Writer, AssertIncorrectEndObject) {
+    StringBuffer buffer;
+    Writer<StringBuffer> writer(buffer);
+    writer.StartObject();
+    ASSERT_THROW(writer.EndArray(), AssertException);
+}
+
+TEST(Writer, AssertIncorrectEndArray) {
+    StringBuffer buffer;
+    Writer<StringBuffer> writer(buffer);
+    writer.StartArray();  // was StartObject() — copy-paste from AssertIncorrectEndObject above
+    ASSERT_THROW(writer.EndObject(), AssertException);
+}
+
+TEST(Writer, AssertObjectKeyNotString) {
+#define T(x)\
+    {\
+        StringBuffer buffer;\
+        Writer<StringBuffer> writer(buffer);\
+        writer.StartObject();\
+        ASSERT_THROW(x, AssertException); \
+    }
+    T(writer.Bool(false));
+    T(writer.Bool(true));
+    T(writer.Null());
+    T(writer.Int(0));
+    T(writer.Uint(0));
+    T(writer.Int64(0));
+    T(writer.Uint64(0));
+    T(writer.Double(0));
+    T(writer.StartObject());
+    T(writer.StartArray());
+#undef T
+}
+
+TEST(Writer, AssertMultipleRoot) {
+    StringBuffer buffer;
+    Writer<StringBuffer> writer(buffer);
+
+    writer.StartObject();
+    writer.EndObject();
+    ASSERT_THROW(writer.StartObject(), AssertException);
+
+    writer.Reset(buffer);
+    writer.Null();
+    ASSERT_THROW(writer.Int(0), AssertException);
+
+    writer.Reset(buffer);
+    writer.String("foo");
+    ASSERT_THROW(writer.StartArray(), AssertException);
+
+    writer.Reset(buffer);
+    writer.StartArray();
+    writer.EndArray();
+    //ASSERT_THROW(writer.Double(3.14), AssertException);
+}
+
+TEST(Writer, RootObjectIsComplete) {
+    StringBuffer buffer;
+    Writer<StringBuffer> writer(buffer);
+    EXPECT_FALSE(writer.IsComplete());
+    writer.StartObject();
+    EXPECT_FALSE(writer.IsComplete());
+    writer.String("foo");
+    EXPECT_FALSE(writer.IsComplete());
+    writer.Int(1);
+    EXPECT_FALSE(writer.IsComplete());
+    writer.EndObject();
+    EXPECT_TRUE(writer.IsComplete());
+}
+
+TEST(Writer, RootArrayIsComplete) {
+    StringBuffer buffer;
+    Writer<StringBuffer> writer(buffer);
+    EXPECT_FALSE(writer.IsComplete());
+    writer.StartArray();
+    EXPECT_FALSE(writer.IsComplete());
+    writer.String("foo");
+    EXPECT_FALSE(writer.IsComplete());
+    writer.Int(1);
+    EXPECT_FALSE(writer.IsComplete());
+    writer.EndArray();
+    EXPECT_TRUE(writer.IsComplete());
+}
+
+TEST(Writer, RootValueIsComplete) {
+#define T(x)\
+    {\
+        StringBuffer buffer;\
+        Writer<StringBuffer> writer(buffer);\
+        EXPECT_FALSE(writer.IsComplete()); \
+        x; \
+        EXPECT_TRUE(writer.IsComplete()); \
+    }
+    T(writer.Null());
+    T(writer.Bool(true));
+    T(writer.Bool(false));
+    T(writer.Int(0));
+    T(writer.Uint(0));
+    T(writer.Int64(0));
+    T(writer.Uint64(0));
+    T(writer.Double(0));
+    T(writer.String(""));
+#undef T
+}
+
+TEST(Writer, InvalidEncoding) {
+    // Fail in decoding invalid UTF-8 sequence http://www.cl.cam.ac.uk/~mgk25/ucs/examples/UTF-8-test.txt
+    {
+        GenericStringBuffer<UTF16<> > buffer;
+        Writer<GenericStringBuffer<UTF16<> >, UTF8<>, UTF16<> > writer(buffer);
+        writer.StartArray();
+        EXPECT_FALSE(writer.String("\xfe"));
+        EXPECT_FALSE(writer.String("\xff"));
+        EXPECT_FALSE(writer.String("\xfe\xfe\xff\xff"));
+        writer.EndArray();
+    }
+
+    // Fail in encoding
+    {
+        StringBuffer buffer;
+        Writer<StringBuffer, UTF32<> > writer(buffer);
+        static const UTF32<>::Ch s[] = { 0x110000, 0 }; // Out of U+0000 to U+10FFFF
+        EXPECT_FALSE(writer.String(s));
+    }
+
+    // Fail in unicode escaping in ASCII output
+    {
+        StringBuffer buffer;
+        Writer<StringBuffer, UTF32<>, ASCII<> > writer(buffer);
+        static const UTF32<>::Ch s[] = { 0x110000, 0 }; // Out of U+0000 to U+10FFFF
+        EXPECT_FALSE(writer.String(s));
+    }
+}
+
+TEST(Writer, ValidateEncoding) {
+    {
+        StringBuffer buffer;
+        Writer<StringBuffer, UTF8<>, UTF8<>, CrtAllocator, kWriteValidateEncodingFlag> writer(buffer);
+        writer.StartArray();
+        EXPECT_TRUE(writer.String("\x24"));             // Dollar sign U+0024
+        EXPECT_TRUE(writer.String("\xC2\xA2"));         // Cents sign U+00A2
+        EXPECT_TRUE(writer.String("\xE2\x82\xAC"));     // Euro sign U+20AC
+        EXPECT_TRUE(writer.String("\xF0\x9D\x84\x9E")); // G clef sign U+1D11E
+        EXPECT_TRUE(writer.String("\x01"));             // SOH control U+0001
+        EXPECT_TRUE(writer.String("\x1B"));             // Escape control U+001B
+        writer.EndArray();
+        EXPECT_STREQ("[\"\x24\",\"\xC2\xA2\",\"\xE2\x82\xAC\",\"\xF0\x9D\x84\x9E\",\"\\u0001\",\"\\u001B\"]", buffer.GetString());
+    }
+
+    // Fail in decoding invalid UTF-8 sequence http://www.cl.cam.ac.uk/~mgk25/ucs/examples/UTF-8-test.txt
+    {
+        StringBuffer buffer;
+        Writer<StringBuffer, UTF8<>, UTF8<>, CrtAllocator, kWriteValidateEncodingFlag> writer(buffer);
+        writer.StartArray();
+        EXPECT_FALSE(writer.String("\xfe"));
+        EXPECT_FALSE(writer.String("\xff"));
+        EXPECT_FALSE(writer.String("\xfe\xfe\xff\xff"));
+        writer.EndArray();
+    }
+}
+
+TEST(Writer, InvalidEventSequence) {
+    // {]
+    {
+        StringBuffer buffer;
+        Writer<StringBuffer> writer(buffer);
+        writer.StartObject();
+        EXPECT_THROW(writer.EndArray(), AssertException);
+        EXPECT_FALSE(writer.IsComplete());
+    }
+
+    // [}
+    {
+        StringBuffer buffer;
+        Writer<StringBuffer> writer(buffer);
+        writer.StartArray();
+        EXPECT_THROW(writer.EndObject(), AssertException);
+        EXPECT_FALSE(writer.IsComplete());
+    }
+
+    // { 1: 
+    {
+        StringBuffer buffer;
+        Writer<StringBuffer> writer(buffer);
+        writer.StartObject();
+        EXPECT_THROW(writer.Int(1), AssertException);
+        EXPECT_FALSE(writer.IsComplete());
+    }
+
+    // { 'a' }
+    {
+        StringBuffer buffer;
+        Writer<StringBuffer> writer(buffer);
+        writer.StartObject();
+        writer.Key("a");
+        EXPECT_THROW(writer.EndObject(), AssertException);
+        EXPECT_FALSE(writer.IsComplete());
+    }
+
+    // { 'a':'b','c' }
+    {
+        StringBuffer buffer;
+        Writer<StringBuffer> writer(buffer);
+        writer.StartObject();
+        writer.Key("a");
+        writer.String("b");
+        writer.Key("c");
+        EXPECT_THROW(writer.EndObject(), AssertException);
+        EXPECT_FALSE(writer.IsComplete());
+    }
+}
+
+TEST(Writer, NaN) {
+    double nan = std::numeric_limits<double>::quiet_NaN();
+
+    EXPECT_TRUE(internal::Double(nan).IsNan());
+    StringBuffer buffer;
+    {
+        Writer<StringBuffer> writer(buffer);
+        EXPECT_FALSE(writer.Double(nan));
+    }
+    {
+        Writer<StringBuffer, UTF8<>, UTF8<>, CrtAllocator, kWriteNanAndInfFlag> writer(buffer);
+        EXPECT_TRUE(writer.Double(nan));
+        EXPECT_STREQ("NaN", buffer.GetString());
+    }
+    GenericStringBuffer<UTF16<> > buffer2;
+    Writer<GenericStringBuffer<UTF16<> > > writer2(buffer2);
+    EXPECT_FALSE(writer2.Double(nan));
+}
+
+TEST(Writer, Inf) {
+    double inf = std::numeric_limits<double>::infinity();
+
+    EXPECT_TRUE(internal::Double(inf).IsInf());
+    StringBuffer buffer;
+    {
+        Writer<StringBuffer> writer(buffer);
+        EXPECT_FALSE(writer.Double(inf));
+    }
+    {
+        Writer<StringBuffer> writer(buffer);
+        EXPECT_FALSE(writer.Double(-inf));
+    }
+    {
+        Writer<StringBuffer, UTF8<>, UTF8<>, CrtAllocator, kWriteNanAndInfFlag> writer(buffer);
+        EXPECT_TRUE(writer.Double(inf));
+    }
+    {
+        Writer<StringBuffer, UTF8<>, UTF8<>, CrtAllocator, kWriteNanAndInfFlag> writer(buffer);
+        EXPECT_TRUE(writer.Double(-inf));
+    }
+    EXPECT_STREQ("Infinity-Infinity", buffer.GetString());
+}
+
+TEST(Writer, RawValue) {
+    StringBuffer buffer;
+    Writer<StringBuffer> writer(buffer);
+    writer.StartObject();
+    writer.Key("a");
+    writer.Int(1);
+    writer.Key("raw");
+    const char json[] = "[\"Hello\\nWorld\", 123.456]";
+    writer.RawValue(json, strlen(json), kArrayType);
+    writer.EndObject();
+    EXPECT_TRUE(writer.IsComplete());
+    EXPECT_STREQ("{\"a\":1,\"raw\":[\"Hello\\nWorld\", 123.456]}", buffer.GetString());
+}
+
+TEST(Writer, RawValue_Issue1152) {
+    {
+        GenericStringBuffer<UTF32<> > sb;
+        Writer<GenericStringBuffer<UTF32<> >, UTF8<>, UTF32<> > writer(sb);
+        writer.RawValue("null", 4, kNullType);
+        EXPECT_TRUE(writer.IsComplete());
+        const unsigned *out = sb.GetString();
+        EXPECT_EQ(static_cast<unsigned>('n'), out[0]);
+        EXPECT_EQ(static_cast<unsigned>('u'), out[1]);
+        EXPECT_EQ(static_cast<unsigned>('l'), out[2]);
+        EXPECT_EQ(static_cast<unsigned>('l'), out[3]);
+        EXPECT_EQ(static_cast<unsigned>(0  ), out[4]);
+    }
+
+    {
+        GenericStringBuffer<UTF8<> > sb;
+        Writer<GenericStringBuffer<UTF8<> >, UTF16<>, UTF8<> > writer(sb);
+        writer.RawValue(L"null", 4, kNullType);
+        EXPECT_TRUE(writer.IsComplete());
+        EXPECT_STREQ("null", sb.GetString());
+    }
+
+    {
+        // Fail in transcoding
+        GenericStringBuffer<UTF16<> > buffer;
+        Writer<GenericStringBuffer<UTF16<> >, UTF8<>, UTF16<> > writer(buffer);
+        EXPECT_FALSE(writer.RawValue("\"\xfe\"", 3, kStringType));
+    }
+
+    {
+        // Fail in encoding validation
+        StringBuffer buffer;
+        Writer<StringBuffer, UTF8<>, UTF8<>, CrtAllocator, kWriteValidateEncodingFlag> writer(buffer);
+        EXPECT_FALSE(writer.RawValue("\"\xfe\"", 3, kStringType));
+    }
+}
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+static Writer<StringBuffer> WriterGen(StringBuffer &target) {
+    Writer<StringBuffer> writer(target);
+    writer.StartObject();
+    writer.Key("a");
+    writer.Int(1);
+    return writer;
+}
+
+TEST(Writer, MoveCtor) {
+    StringBuffer buffer;
+    Writer<StringBuffer> writer(WriterGen(buffer));
+    writer.EndObject();
+    EXPECT_TRUE(writer.IsComplete());
+    EXPECT_STREQ("{\"a\":1}", buffer.GetString());
+}
+#endif
+
+#ifdef __clang__
+RAPIDJSON_DIAG_POP
+#endif
diff --git a/test/valgrind.supp b/test/valgrind.supp
new file mode 100644
index 0000000..1fed18b
--- /dev/null
+++ b/test/valgrind.supp
@@ -0,0 +1,17 @@
+{
+	Suppress wcslen valgrind report 1
+	Memcheck:Cond
+	fun:__wcslen_sse2
+}
+
+{
+    Suppress wcslen valgrind report 2
+    Memcheck:Addr8
+    fun:__wcslen_sse2
+}
+
+{
+    Suppress wcslen valgrind report 3
+    Memcheck:Value8
+    fun:__wcslen_sse2
+}
diff --git a/thirdparty/gtest b/thirdparty/gtest
new file mode 160000
index 0000000..ba96d0b
--- /dev/null
+++ b/thirdparty/gtest
@@ -0,0 +1 @@
+Subproject commit ba96d0b1161f540656efdaed035b3c062b60e006
diff --git a/travis-doxygen.sh b/travis-doxygen.sh
new file mode 100755
index 0000000..38e4eb6
--- /dev/null
+++ b/travis-doxygen.sh
@@ -0,0 +1,121 @@
+#!/bin/bash
+# Update Doxygen documentation after push to 'master'.
+# Author: @pah
+
+set -e
+
+DOXYGEN_VER=doxygen-1.8.15
+DOXYGEN_TAR=${DOXYGEN_VER}.linux.bin.tar.gz
+DOXYGEN_URL="http://doxygen.nl/files/${DOXYGEN_TAR}"
+
+: ${GITHUB_REPO:="Tencent/rapidjson"}
+GITHUB_HOST="github.com"
+GITHUB_CLONE="git://${GITHUB_HOST}/${GITHUB_REPO}"
+GITHUB_URL="https://${GITHUB_HOST}/${GITHUB_PUSH-${GITHUB_REPO}}"
+
+# if not set, ignore password
+#GIT_ASKPASS="${TRAVIS_BUILD_DIR}/gh_ignore_askpass.sh"
+
+skip() {
+	echo "$@" 1>&2
+	echo "Exiting..." 1>&2
+	exit 0
+}
+
+abort() {
+	echo "Error: $@" 1>&2
+	echo "Exiting..." 1>&2
+	exit 1
+}
+
+# TRAVIS_BUILD_DIR not set, exiting
+[ -d "${TRAVIS_BUILD_DIR-/nonexistent}" ] || \
+	abort '${TRAVIS_BUILD_DIR} not set or nonexistent.'
+
+# check for pull-requests
+[ "${TRAVIS_PULL_REQUEST}" = "false" ] || \
+	skip "Not running Doxygen for pull-requests."
+
+# check for branch name
+[ "${TRAVIS_BRANCH}" = "master" ] || \
+	skip "Running Doxygen only for updates on 'master' branch (current: ${TRAVIS_BRANCH})."
+
+# check for job number
+# [ "${TRAVIS_JOB_NUMBER}" = "${TRAVIS_BUILD_NUMBER}.1" ] || \
+# 	skip "Running Doxygen only on first job of build ${TRAVIS_BUILD_NUMBER} (current: ${TRAVIS_JOB_NUMBER})."
+
+# install doxygen binary distribution
+doxygen_install()
+{
+	wget -O - "${DOXYGEN_URL}" | \
+		tar xz -C "${TMPDIR-/tmp}" "${DOXYGEN_VER}/bin/doxygen"
+	export PATH="${TMPDIR-/tmp}/${DOXYGEN_VER}/bin:$PATH"
+}
+
+doxygen_run()
+{
+	cd "${TRAVIS_BUILD_DIR}";
+	doxygen ${TRAVIS_BUILD_DIR}/build/doc/Doxyfile;
+	doxygen ${TRAVIS_BUILD_DIR}/build/doc/Doxyfile.zh-cn;
+}
+
+gh_pages_prepare()
+{
+	cd "${TRAVIS_BUILD_DIR}/build/doc";
+	[ ! -d "html" ] || \
+		abort "Doxygen target directory already exists."
+	git --version
+	git clone --single-branch -b gh-pages "${GITHUB_CLONE}" html
+	cd html
+	# setup git config (with defaults)
+	git config user.name "${GIT_NAME-travis}"
+	git config user.email "${GIT_EMAIL-"travis@localhost"}"
+	# clean working dir
+	rm -f .git/index
+	git clean -df
+}
+
+gh_pages_commit() {
+	cd "${TRAVIS_BUILD_DIR}/build/doc/html";
+	echo "rapidjson.org" > CNAME
+	git add --all;
+	git diff-index --quiet HEAD || git commit -m "Automatic doxygen build";
+}
+
+gh_setup_askpass() {
+	cat > ${GIT_ASKPASS} <<EOF
+#!/bin/bash
+echo
+exit 0
+EOF
+	chmod a+x "$GIT_ASKPASS"
+}
+
+gh_pages_push() {
+	# check for secure variables
+	[ "${TRAVIS_SECURE_ENV_VARS}" = "true" ] || \
+		skip "Secure variables not available, not updating GitHub pages."
+	# check for GitHub access token
+	[ "${GH_TOKEN+set}" = set ] || \
+		skip "GitHub access token not available, not updating GitHub pages."
+	[ "${#GH_TOKEN}" -eq 40 ] || \
+		abort "GitHub token invalid: found ${#GH_TOKEN} characters, expected 40."
+
+	cd "${TRAVIS_BUILD_DIR}/build/doc/html";
+	# setup credentials (hide in "set -x" mode)
+	git remote set-url --push origin "${GITHUB_URL}"
+	git config credential.helper 'store'
+	# ( set +x ; git config credential.username "${GH_TOKEN}" )
+	( set +x ; [ -f ${HOME}/.git-credentials ] || \
+			( echo "https://${GH_TOKEN}:@${GITHUB_HOST}" > ${HOME}/.git-credentials ; \
+			 chmod go-rw ${HOME}/.git-credentials ) )
+	# push to GitHub
+	git push origin gh-pages
+}
+
+doxygen_install
+gh_pages_prepare
+doxygen_run
+gh_pages_commit
+gh_pages_push
+