Diffstat (limited to 'contrib/serf')
-rw-r--r--  contrib/serf/CHANGES | 214
-rw-r--r--  contrib/serf/LICENSE | 201
-rw-r--r--  contrib/serf/NOTICE | 2
-rw-r--r--  contrib/serf/README | 80
-rw-r--r--  contrib/serf/SConstruct | 438
-rw-r--r--  contrib/serf/auth/auth.c | 472
-rw-r--r--  contrib/serf/auth/auth.h | 123
-rw-r--r--  contrib/serf/auth/auth_basic.c | 177
-rw-r--r--  contrib/serf/auth/auth_digest.c | 533
-rw-r--r--  contrib/serf/auth/auth_spnego.c | 556
-rw-r--r--  contrib/serf/auth/auth_spnego.h | 115
-rw-r--r--  contrib/serf/auth/auth_spnego_gss.c | 224
-rw-r--r--  contrib/serf/auth/auth_spnego_sspi.c | 297
-rw-r--r--  contrib/serf/buckets/aggregate_buckets.c | 488
-rw-r--r--  contrib/serf/buckets/allocator.c | 434
-rw-r--r--  contrib/serf/buckets/barrier_buckets.c | 97
-rw-r--r--  contrib/serf/buckets/buckets.c | 640
-rw-r--r--  contrib/serf/buckets/bwtp_buckets.c | 596
-rw-r--r--  contrib/serf/buckets/chunk_buckets.c | 235
-rw-r--r--  contrib/serf/buckets/dechunk_buckets.c | 199
-rw-r--r--  contrib/serf/buckets/deflate_buckets.c | 384
-rw-r--r--  contrib/serf/buckets/file_buckets.c | 117
-rw-r--r--  contrib/serf/buckets/headers_buckets.c | 431
-rw-r--r--  contrib/serf/buckets/iovec_buckets.c | 169
-rw-r--r--  contrib/serf/buckets/limit_buckets.c | 127
-rw-r--r--  contrib/serf/buckets/mmap_buckets.c | 140
-rw-r--r--  contrib/serf/buckets/request_buckets.c | 223
-rw-r--r--  contrib/serf/buckets/response_body_buckets.c | 135
-rw-r--r--  contrib/serf/buckets/response_buckets.c | 464
-rw-r--r--  contrib/serf/buckets/simple_buckets.c | 159
-rw-r--r--  contrib/serf/buckets/socket_buckets.c | 125
-rw-r--r--  contrib/serf/buckets/ssl_buckets.c | 1775
-rwxr-xr-x  contrib/serf/build/check.py | 56
-rwxr-xr-x  contrib/serf/build/gen_def.py | 76
-rw-r--r--  contrib/serf/build/serf.pc.in | 13
-rw-r--r--  contrib/serf/context.c | 379
-rw-r--r--  contrib/serf/design-guide.txt | 152
-rw-r--r--  contrib/serf/incoming.c | 176
-rw-r--r--  contrib/serf/outgoing.c | 1683
-rw-r--r--  contrib/serf/serf.h | 1117
-rw-r--r--  contrib/serf/serf_bucket_types.h | 688
-rw-r--r--  contrib/serf/serf_bucket_util.h | 294
-rw-r--r--  contrib/serf/serf_private.h | 455
-rw-r--r--  contrib/serf/ssltunnel.c | 178
44 files changed, 15637 insertions, 0 deletions
diff --git a/contrib/serf/CHANGES b/contrib/serf/CHANGES
new file mode 100644
index 0000000..1aa1a0b
--- /dev/null
+++ b/contrib/serf/CHANGES
@@ -0,0 +1,214 @@
+Serf 1.3.0 [2013-07-23, from /tags/1.3.0]
+ Fix issue 83: use PATH rather than URI within an ssltunnel (r1952)
+ Fix issue 108: improved error reporting from the underlying socket (r1951)
+ NEW: Switch to the SCons build system; retire serfmake, serf.mak, autotools
+ Improved Basic and Digest authentication:
+ - remember credentials on a per-server basis
+ - properly manage authentication realms
+ - continue functioning when a server sets KeepAlive: off
+ Windows: add support for NTLM authentication
+ Improved RFC 2617 compliance: always use strongest authentication (r1968,1971)
+ Fixed bugs with proxy authentication and SSL tunneling through a proxy
+ Fixed bugs in the response parser (r2032,r2036)
+ SSL connection performance improvements
+ Huge expansion of the test suite
+
+
+Serf 1.2.1 [2013-06-03, from /tags/1.2.1, r1906]
+ Fix issue 95: add gssapi switches to configure (r1864, r1900)
+ Fix issue 97: skip mmap bucket if APR_HAS_MMAP is undefined (r1877)
+ Fix issue 100: building against an old Windows Platform SDK (r1881)
+ Fix issue 102: digest authentication failures (r1885)
+ Improve error return values in SSPI authentication (r1804)
+ Ensure serf-1.pc is constructed by serfmake (r1865)
+ Optimize SPNego authentication processing (r1868)
+ Reject certs that application does not like (r1794)
+ Fix possible endless loop in serf_linebuf_fetch() (r1816)
+ Windows build: dereference INTDIR in serf.mak (r1882)
+
+
+Serf 1.2.0 [2013-02-22, from /tags/1.2.0, r1726]
+ Fixed issue 94: Serf can enter an infinite loop when server aborts conn.
+ Fixed issue 91: Serf doesn't handle an incoming 408 Timeout Request
+ Fixed issue 80: Serf is not handling Negotiate authentication correctly
+ Fixed issue 77: Endless loop if server doesn't accept Negotiate authn
+ Fixed issue 93: cleanup-after-fork interferes with parent (r1714)
+ Fixed most of issue 89: Support REAL SPNEGO authentication
+ Enable Negotiate/Kerberos support for proxy servers.
+ Return error when C-L, chunked, gzip encoded response bodies were
+ truncated (due to aborted connection) (r1688)
+ Add a logging mechanism that can be enabled at compile-time.
+ Don't lookup server address if a proxy was configured. (r1706)
+ Fix an off-by-one in buffer sizing (r1695)
+ Disable SSL compression by default + API to enable it (r1692)
+ New serf_connection_get_latency() for estimated network latency (r1689)
+ New error code and RFC compliance for the HTTPS tunnel (r1701, r1644)
+ Handle EINTR when a user suspends and then backgrounds the app (r1708)
+ Minor fixes and test suite improvements.
+
+
+Serf 1.1.1 [2012-10-04, from /tags/1.1.1, r1657]
+ Fixed issue 86: ensure requeued requests are correctly handled.
+ This fixes:
+ - infinite loop with multiple connection resets or SIGPIPE errors
+ - "connection" hang where we would not re-queue requests that are
+ held after we re-connect
+ Fixed issue 74: test_all goes in an endless loop
+ Fix memleak when conn. is closed explicitly/due to pool cleanups (r1623)
+ Windows: Fix https connection aborts (r1628..-30,-33,-34,-37)
+ Add new error codes for the SSL bucket
+
+
+Serf 1.1.0 [2012-06-07, from /tags/1.1.0, r1617]
+ New: serf_bucket_request_set_CL() for C-L based, non-chunked requests
+ New: serf_ssl_server_cert_chain_callback_set() for full-chain validation
+
+
+Serf 1.0.3 [2012-03-20, from /tags/1.0.3, r1586]
+ Map more OpenSSL errors into SERF_SSL_CERT_UNKNOWNCA (r1573)
+
+
+Serf 1.0.2
+ Not released.
+
+
+Serf 1.0.1 [2012-02-15, from /tags/1.0.1, r1569]
+ FreeBSD fixes in the test suite (r1560, r1565)
+ Minor build fixes
+
+
+Serf 1.0.0 [2011-07-15, from /tags/1.0.0, r1540]
+ Fixed issue 38: enable builds using non-GNU make
+ Fixed issue 49: support SSL tunnels for HTTPS via a proxy
+ Fixed issue 56: allow Subject Alternative Name, and enable SNI
+ Fixed issue 61: include order dependencies
+ Fixed issue 66: improved error reporting when creating install dirs
+ Fixed issue 71: handle ECONNREFUSED on Windows
+ Fixed issue 79: destroy the APR allocator, if we create one
+ Fixed issue 81: build failed on APR 0.9.x
+ Major performance improvements and bug fixes for SSL buckets/handling (r1462)
+ Add a new "iovec" bucket type (r1434)
+ Minimize network packet writes based on ra_serf analysis (r1467, r1471)
+ Fix out of order issue with multiple priority requests (r1469)
+ Work around broken WSAPoll() impl on Windows introduced in APR 1.4.0 (r1506)
+ Fix 100% CPU usage with many pipelined requests (r1456)
+ Corrected contents of build/serf.def; it now includes bucket types (r1512)
+ Removed "snapshot" feature from buckets (r1503)
+ Various improvements to the test system
+ Various memory leak fixes
+
+
+Serf 0.7.2 [2011-03-12, from /tags/0.7.2, r1452]
+ Actually disable Nagle when creating a connection (r1441)
+ Return error when app asks for HTTPS over proxy connection (r1433)
+
+
+Serf 0.7.1 [2011-01-25, from /tags/0.7.1, r1432]
+ Fix memory leak when using SSL (r1408, r1416)
+ Fix build for blank apr-util directory (r1421)
+
+
+Serf 0.7.0 [2010-08-25, from /tags/0.7.0, r1407]
+ Fix double free abort when destroying request buckets
+ Fix test server in unit test framework to avoid random test failures
+ Allow older Serf programs which don't use the new authn framework to still
+ handle authn without forcing them to switch to the new framework. (r1401)
+ Remove the SERF_DECLARE macros, preferring a .DEF file for Windows
+ Barrier buckets now pass read_iovec to their wrapped bucket
+ Fix HTTP header parsing to allow for empty header values
+
+
+Serf 0.6.1 [2010-05-14, from /tags/0.6.1, r1370]
+ Generally: this release fixes problems with the 0.4.0 packaging
+ Small compilation fix in outgoing.c for Windows builds
+
+
+Serf 0.6.0
+ Not released.
+
+
+Serf 0.5.0
+ Not released.
+
+
+Serf 0.4.0
+ WITHDRAWN: this release misstated itself as 0.5.0; use a later release
+
+ Provide authn framework, supporting Basic, Digest, Kerberos (SSPI, GSS),
+ along with proxy authn using Basic or Digest
+ Added experimental listener framework, along with test_server.c
+ Improvements and fixes to SSL support, including connection setup changes
+ Experimental support for unrequested, arriving ("async") responses
+ Experimental BWTP support using the async arrival feature
+ Headers are combined on read (not write), to ease certain classes of parsing
+ Experimental feature on aggregate buckets for a callback-on-empty
+ Fix the bucket allocator for when APR is using its pool debugging features
+ Proxy support in the serf_get testing utility
+ Fix to include the port number in the Host header
+ serf_get propagates errors from the response, instead of aborting (Issue 52)
+ Added serf_lib_version() for runtime version tests
+
+
+Serf 0.3.1 [2010-02-14, from /tags/0.3.1, r1322]
+ Fix loss of error on request->setup() callback. (Issue 47)
+ Support APR 2.x. (Issue 48)
+ Fixed slowdown in aggregate bucket with millions of child buckets
+ Avoid hang in apr_pollset_poll() by unclosed connections after fork()
+
+
+Serf 0.3.0 [2009-01-26, from /tags/0.3.0, r1217]
+ Support LTFLAGS override as a config-time env. variable (Issue 44)
+ Fix CUTest test harness compilation on Solaris (Issue 43)
+ Fix small race condition in OpenSSL initialization (Issue 39)
+ Handle content streams larger than 4GB on 32-bit OSes (Issue 41)
+ Fix test_ssl.c compilation with mingw+msys
+ Fix conn close segfault by explicitly closing conn when pool is destroyed
+ Expose the depth of the SSL certificate so the validator can use that info
+ Fix socket address family issue when opening a connection to a proxy
+ Provide new API to take snapshots of buckets
+ Implement snapshot API for simple and aggregate buckets
+ Build with bundled apr and apr-util VPATH builds
+ Build with bundled OpenSSL builds
+
+
+Serf 0.2.0 [2008-06-06, from /tags/0.2.0, r1189]
+ Enable use of external event loop: serf_create_context_ex
+ Enable adding new requests at the beginning of the request queue
+ Handle 'Connection:close' headers
+ Enable limiting the number of outstanding requests
+ Add readline function to simple buckets
+ Concatenate repeated headers using comma as separator, as per RFC 2616,
+ section 4.2. (Issue 29)
+ Add proxy server support
+ Add progress feedback support. (Issue 11)
+ Provide new API to simplify use of proxy and progress feedback support
+ Add callback to validate SSL server certificates. (Issue 31)
+ Add new test framework
+ Send current version string in the test programs (Issue 21)
+ Bugfixes:
+ Fix segfault with epoll when removing a NULL socket
+ Reset OpenSSL thread-safety callbacks when apr_terminate() called
+ Do not remove the socket from the pollset on pool cleanup
+ Do not issue double close on skt w/second one being close(-1) (Issue 33)
+
+
+Serf 0.1.2 [2007-06-18, from /tags/0.1.2, r1115]
+ Enable thread-safety with OpenSSL (Issue 19)
+ Teach serfmake to install headers into include/serf-0
+ Be more tolerant when servers close the connection without telling us
+ Do not open the connection until we have requests to deliver
+ Fix serfmake to produce the library that corresponds to the minor version
+ Fix a memory leak with the socket bucket (Issue 14)
+ Fix uninitialized branch in serf_spider (Issue 15)
+
+
+Serf 0.1.1 [2007-05-12, from /tags/0.1.1, r1105]
+ Add SSL client certificate support
+ Implement optimized iovec reads for header buckets
+ Fix up 'make clean' and 'make distclean' (Issues 9, 10)
+ Add SERF_VERSION_AT_LEAST macro
+ Remove abort() calls (Issue 13)
+
+
+Serf 0.1.0 [2006-12-14, from /tags/0.1.0, r1087]
+ Initial packaged release
diff --git a/contrib/serf/LICENSE b/contrib/serf/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/contrib/serf/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/contrib/serf/NOTICE b/contrib/serf/NOTICE
new file mode 100644
index 0000000..3f59805
--- /dev/null
+++ b/contrib/serf/NOTICE
@@ -0,0 +1,2 @@
+This product includes software developed by
+The Apache Software Foundation (http://www.apache.org/).
diff --git a/contrib/serf/README b/contrib/serf/README
new file mode 100644
index 0000000..215ce2e
--- /dev/null
+++ b/contrib/serf/README
@@ -0,0 +1,80 @@
+Welcome to serf, a high-performance asynchronous HTTP client library.
+
+The serf library is a C-based HTTP client library built upon the Apache
+Portable Runtime (APR) library. It multiplexes connections, running the
+read/write communication asynchronously. Memory copies and transformations are
+kept to a minimum to provide high performance operation.
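+
+A minimal sketch of the calling pattern follows (an editorial
+illustration, not part of the serf distribution; see serf.h and the
+test/ programs for complete examples). It assumes a response handler
+elsewhere sets *done once the last queued request has completed:
+
+    #include <apr_pools.h>
+    #include "serf.h"
+
+    /* Drive the serf event loop until the application signals completion
+       or the context reports a hard error. */
+    static apr_status_t drive_until_done(serf_context_t *ctx,
+                                         const int *done,
+                                         apr_pool_t *pool)
+    {
+        while (!*done) {
+            apr_status_t status =
+                serf_context_run(ctx, SERF_DURATION_FOREVER, pool);
+
+            if (APR_STATUS_IS_TIMEUP(status))
+                continue;       /* nothing ready yet; poll again */
+            if (status)
+                return status;  /* hard error from the event loop */
+        }
+        return APR_SUCCESS;
+    }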
+
+ * Status: http://code.google.com/p/serf/wiki/
+ * Site: http://code.google.com/p/serf/
+ * Code: http://serf.googlecode.com/svn/
+ * Issues: http://code.google.com/p/serf/issues/list
+ * Mail: serf-dev@googlegroups.com
+ * People: Justin Erenkrantz, Greg Stein
+
+----
+
+1. INSTALL
+
+1.1. SCons build system
+
+serf uses SCons 2.x for its build system. If it is not already
+installed, install it system-wide; if you do not have the permissions
+to do that, you can download and install the "local" version into your
+home directory. When installed privately, simply create a symlink named
+'scons' somewhere in your PATH pointing to /path/to/scons/scons.py.
+
+Fetch the scons-local package:
+ http://prdownloads.sourceforge.net/scons/scons-local-2.0.1.tar.gz
+
+
+1.2 Building serf
+
+To build serf:
+
+$ scons APR=/path/to/apr APU=/path/to/apu OPENSSL=/openssl/base PREFIX=/path/to/prefix
+
+The switches are recorded into .saved_config, so they only need to be
+specified the first time scons is run.
+
+PREFIX should specify where serf should be installed. PREFIX defaults to
+/usr/local.
+
+The default for the other three switches (APR, APU, OPENSSL) is /usr.
+
+The build system looks for apr-1-config at $APR/bin/apr-1-config, or
+APR may point directly to the apr-1-config script itself. The same
+applies to APU and apu-1-config.
+
+OPENSSL should specify the root of the install (e.g. /opt/local). The
+headers will be found at OPENSSL/include and the libraries at OPENSSL/lib.
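+
+For example (the paths below are illustrative only; substitute the
+install locations on your machine):
+
+$ scons APR=/usr/local/apr APU=/usr/local/apr OPENSSL=/opt/local PREFIX=$HOME/serf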
+
+If you wish to use VPATH-style builds (where objects are created in a
+distinct directory from the source), you can use:
+
+$ scons -Y /path/to/serf/source
+
+At any point, the current settings can be examined:
+
+$ scons --help
+
+
+1.3 Running the test suite
+
+$ scons check
+
+
+1.4 Installing serf
+
+$ scons install
+
+Note that the PREFIX variable should have been specified in a previous
+invocation of scons (and saved into .saved_config), or it can be
+specified on the install command line:
+
+$ scons PREFIX=/some/path install
+
+
+1.5 Cleaning up the build
+
+$ scons -c
diff --git a/contrib/serf/SConstruct b/contrib/serf/SConstruct
new file mode 100644
index 0000000..3f1bd73
--- /dev/null
+++ b/contrib/serf/SConstruct
@@ -0,0 +1,438 @@
+# -*- python -*-
+#
+# Copyright 2011-2012 Justin Erenkrantz and Greg Stein
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import sys
+import os
+import re
+
+HEADER_FILES = ['serf.h',
+ 'serf_bucket_types.h',
+ 'serf_bucket_util.h',
+ ]
+
+# where we save the configuration variables
+SAVED_CONFIG = '.saved_config'
+
+# Helper for list-type build variables; does no validation on the input
+def _converter(val):
+ """Convert a comma-separated string into a list ('none' means an empty list)."""
+ if val == 'none':
+ val = []
+ else:
+ val = val.split(',')
+ return val
+
+def RawListVariable(key, help, default):
+ """
+ The input parameters describe a 'raw string list' option. This function
+ accepts a comma-separated string and converts it to a list.
+ """
+ return (key, '%s' % (help), default, None, lambda val: _converter(val))
+
+# default directories
+if sys.platform == 'win32':
+ default_libdir='..'
+ default_prefix='Debug'
+else:
+ default_libdir='/usr'
+ default_prefix='/usr/local'
+
+opts = Variables(files=[SAVED_CONFIG])
+opts.AddVariables(
+ PathVariable('PREFIX',
+ 'Directory to install under',
+ default_prefix,
+ PathVariable.PathIsDir),
+ PathVariable('APR',
+ "Path to apr-1-config, or to APR's install area",
+ default_libdir,
+ PathVariable.PathAccept),
+ PathVariable('APU',
+ "Path to apu-1-config, or to APR's install area",
+ default_libdir,
+ PathVariable.PathAccept),
+ PathVariable('OPENSSL',
+ "Path to OpenSSL's install area",
+ default_libdir,
+ PathVariable.PathIsDir),
+ PathVariable('ZLIB',
+ "Path to zlib's install area",
+ default_libdir,
+ PathVariable.PathIsDir),
+ PathVariable('GSSAPI',
+ "Path to GSSAPI's install area",
+ None,
+ None),
+ BoolVariable('DEBUG',
+ "Enable debugging info and strict compile warnings",
+ False),
+ BoolVariable('APR_STATIC',
+ "Enable using a static compiled APR",
+ False),
+ RawListVariable('CC', "Command name or path of the C compiler", None),
+ RawListVariable('CFLAGS', "Extra flags for the C compiler (comma separated)",
+ None),
+ RawListVariable('LIBS', "Extra libraries passed to the linker, "
+ "e.g. -l<library> (comma separated)", None),
+ RawListVariable('LINKFLAGS', "Extra flags for the linker (comma separated)",
+ None),
+ RawListVariable('CPPFLAGS', "Extra flags for the C preprocessor "
+ "(comma separated)", None),
+ )
+
+if sys.platform == 'win32':
+ opts.AddVariables(
+ # By default SCons builds for the host platform on Windows, when using
+ # a supported compiler (E.g. VS2010/VS2012). Allow overriding
+
+ # Note that Scons 1.3 only supports this on Windows and only when
+ # constructing Environment(). Later changes to TARGET_ARCH are ignored
+ EnumVariable('TARGET_ARCH',
+ "Platform to build for (x86|x64|win32|x86_64)",
+ 'x86',
+ allowed_values=('x86', 'x86_64', 'ia64'),
+ map={'X86' : 'x86',
+ 'win32': 'x86',
+ 'Win32': 'x86',
+ 'x64' : 'x86_64',
+ 'X64' : 'x86_64'
+ }),
+
+ EnumVariable('MSVC_VERSION',
+ "Visual C++ to use for building (E.g. 11.0, 9.0)",
+ None,
+ allowed_values=('12.0', '11.0', '10.0', '9.0', '8.0', '6.0')
+ ),
+
+ # We always documented that we handle an install layout, but in fact we
+ # hardcoded source layouts. Allow disabling this behavior.
+ # ### Fix default?
+ BoolVariable('SOURCE_LAYOUT',
+ "Assume a source layout instead of install layout",
+ True),
+ )
+
+env = Environment(variables=opts,
+ tools=('default', 'textfile',),
+ CPPPATH=['.', ],
+ )
+
+env.Append(BUILDERS = {
+ 'GenDef' :
+ Builder(action = sys.executable + ' build/gen_def.py $SOURCES > $TARGET',
+ suffix='.def', src_suffix='.h')
+ })
+
+match = re.search('SERF_MAJOR_VERSION ([0-9]+).*'
+ 'SERF_MINOR_VERSION ([0-9]+).*'
+ 'SERF_PATCH_VERSION ([0-9]+)',
+ env.File('serf.h').get_contents(),
+ re.DOTALL)
+MAJOR, MINOR, PATCH = [int(x) for x in match.groups()]
+env.Append(MAJOR=str(MAJOR))
+
+# Calling external programs is okay if we're not cleaning or printing help.
+# (cleaning: no sense in fetching information; help: we may not know where
+# they are)
+CALLOUT_OKAY = not (env.GetOption('clean') or env.GetOption('help'))
+
+
+# HANDLING OF OPTION VARIABLES
+
+unknown = opts.UnknownVariables()
+if unknown:
+ print 'Unknown variables:', ', '.join(unknown.keys())
+ Exit(1)
+
+apr = str(env['APR'])
+apu = str(env['APU'])
+zlib = str(env['ZLIB'])
+gssapi = env.get('GSSAPI', None)
+
+if gssapi and os.path.isdir(gssapi):
+ krb5_config = os.path.join(gssapi, 'bin', 'krb5-config')
+ if os.path.isfile(krb5_config):
+ gssapi = krb5_config
+ env['GSSAPI'] = krb5_config
+
+debug = env.get('DEBUG', None)
+aprstatic = env.get('APR_STATIC', None)
+
+Help(opts.GenerateHelpText(env))
+opts.Save(SAVED_CONFIG, env)
+
+
+# PLATFORM-SPECIFIC BUILD TWEAKS
+
+thisdir = os.getcwd()
+libdir = '$PREFIX/lib'
+incdir = '$PREFIX/include/serf-$MAJOR'
+
+LIBNAME = 'libserf-${MAJOR}'
+if sys.platform != 'win32':
+ LIBNAMESTATIC = LIBNAME
+else:
+ LIBNAMESTATIC = 'serf-${MAJOR}'
+
+env.Append(RPATH=libdir,
+ PDB='${TARGET.filebase}.pdb')
+
+if sys.platform == 'darwin':
+# linkflags.append('-Wl,-install_name,@executable_path/%s.dylib' % (LIBNAME,))
+ env.Append(LINKFLAGS='-Wl,-install_name,%s/%s.dylib' % (thisdir, LIBNAME,))
+ # 'man ld' says positive non-zero for the first number, so we add one.
+ # Mac's interpretation of compatibility is the same as our MINOR version.
+ env.Append(LINKFLAGS='-Wl,-compatibility_version,%d' % (MINOR+1,))
+ env.Append(LINKFLAGS='-Wl,-current_version,%d.%d' % (MINOR+1, PATCH,))
+
+if sys.platform != 'win32':
+ ### gcc only. figure out appropriate test / better way to check these
+ ### flags, and check for gcc.
+ env.Append(CFLAGS='-std=c89')
+ env.Append(CCFLAGS=[
+ '-Wdeclaration-after-statement',
+ '-Wmissing-prototypes',
+ ])
+
+ ### -Wall is not available on Solaris
+ if sys.platform != 'sunos5':
+ env.Append(CCFLAGS='-Wall')
+
+ if debug:
+ env.Append(CCFLAGS='-g')
+ env.Append(CPPDEFINES=['DEBUG', '_DEBUG'])
+ else:
+ env.Append(CCFLAGS='-O2')
+ env.Append(CPPDEFINES='NDEBUG')
+
+ ### works for Mac OS. probably needs to change
+ env.Append(LIBS=['ssl', 'crypto', 'z', ])
+
+ if sys.platform == 'sunos5':
+ env.Append(LIBS='m')
+else:
+ # Warning level 4, no unused argument warnings
+ env.Append(CCFLAGS=['/W4', '/wd4100'])
+
+ # Choose runtime and optimization
+ if debug:
+ # Disable optimizations for debugging, use debug DLL runtime
+ env.Append(CCFLAGS=['/Od', '/MDd'])
+ env.Append(CPPDEFINES=['DEBUG', '_DEBUG'])
+ else:
+ # Optimize for speed, use DLL runtime
+ env.Append(CCFLAGS=['/O2', '/MD'])
+ env.Append(CPPDEFINES='NDEBUG')
+
+# PLAN THE BUILD
+SHARED_SOURCES = []
+if sys.platform == 'win32':
+ env.GenDef(['serf.h','serf_bucket_types.h', 'serf_bucket_util.h'])
+ SHARED_SOURCES.append(['serf.def'])
+
+SOURCES = Glob('*.c') + Glob('buckets/*.c') + Glob('auth/*.c')
+
+lib_static = env.StaticLibrary(LIBNAMESTATIC, SOURCES)
+lib_shared = env.SharedLibrary(LIBNAME, SOURCES + SHARED_SOURCES)
+
+if aprstatic:
+ env.Append(CPPDEFINES=['APR_DECLARE_STATIC', 'APU_DECLARE_STATIC'])
+
+if sys.platform == 'win32':
+ env.Append(LIBS=['user32.lib', 'advapi32.lib', 'gdi32.lib', 'ws2_32.lib',
+ 'crypt32.lib', 'mswsock.lib', 'rpcrt4.lib', 'secur32.lib'])
+
+ # Get apr/apu information into our build
+ env.Append(CPPDEFINES=['WIN32','WIN32_LEAN_AND_MEAN','NOUSER',
+ 'NOGDI', 'NONLS','NOCRYPT'])
+
+ if env.get('TARGET_ARCH', None) == 'x86_64':
+ env.Append(CPPDEFINES=['WIN64'])
+
+ if aprstatic:
+ apr_libs='apr-1.lib'
+ apu_libs='aprutil-1.lib'
+ else:
+ apr_libs='libapr-1.lib'
+ apu_libs='libaprutil-1.lib'
+
+ env.Append(LIBS=[apr_libs, apu_libs])
+ if not env.get('SOURCE_LAYOUT', None):
+ env.Append(LIBPATH=['$APR/lib', '$APU/lib'],
+ CPPPATH=['$APR/include/apr-1', '$APU/include/apr-1'])
+ elif aprstatic:
+ env.Append(LIBPATH=['$APR/LibR','$APU/LibR'],
+ CPPPATH=['$APR/include', '$APU/include'])
+ else:
+ env.Append(LIBPATH=['$APR/Release','$APU/Release'],
+ CPPPATH=['$APR/include', '$APU/include'])
+
+ # zlib
+ env.Append(LIBS='zlib.lib')
+ if not env.get('SOURCE_LAYOUT', None):
+ env.Append(CPPPATH='$ZLIB/include',
+ LIBPATH='$ZLIB/lib')
+ else:
+ env.Append(CPPPATH='$ZLIB',
+ LIBPATH='$ZLIB')
+
+ # openssl
+ env.Append(LIBS=['libeay32.lib', 'ssleay32.lib'])
+ if not env.get('SOURCE_LAYOUT', None):
+ env.Append(CPPPATH='$OPENSSL/include/openssl',
+ LIBPATH='$OPENSSL/lib')
+ elif 0: # opensslstatic:
+ env.Append(CPPPATH='$OPENSSL/inc32',
+ LIBPATH='$OPENSSL/out32')
+ else:
+ env.Append(CPPPATH='$OPENSSL/inc32',
+ LIBPATH='$OPENSSL/out32dll')
+else:
+ if os.path.isdir(apr):
+ apr = os.path.join(apr, 'bin', 'apr-1-config')
+ env['APR'] = apr
+ if os.path.isdir(apu):
+ apu = os.path.join(apu, 'bin', 'apu-1-config')
+ env['APU'] = apu
+
+ ### we should use --cc, but that is giving some scons error about an implicit
+ ### dependency upon gcc. probably ParseConfig doesn't know what to do with
+ ### the apr-1-config output
+ if CALLOUT_OKAY:
+ env.ParseConfig('$APR --cflags --cppflags --ldflags --includes'
+ ' --link-ld --libs')
+ env.ParseConfig('$APU --ldflags --includes --link-ld --libs')
+
+ ### there is probably a better way to run/capture output.
+ ### env.ParseConfig() may be handy for getting this stuff into the build
+ if CALLOUT_OKAY:
+ apr_libs = os.popen(env.subst('$APR --link-libtool --libs')).read().strip()
+ apu_libs = os.popen(env.subst('$APU --link-libtool --libs')).read().strip()
+ else:
+ apr_libs = ''
+ apu_libs = ''
+
+ env.Append(CPPPATH='$OPENSSL/include')
+ env.Append(LIBPATH='$OPENSSL/lib')
+
+
+# If built with gssapi, get its information and define SERF_HAVE_GSSAPI
+if gssapi and CALLOUT_OKAY:
+ env.ParseConfig('$GSSAPI --libs gssapi')
+ env.Append(CPPDEFINES='SERF_HAVE_GSSAPI')
+if sys.platform == 'win32':
+ env.Append(CPPDEFINES=['SERF_HAVE_SSPI'])
+
+# On Solaris, the -R values that APR describes never make it into actual
+# RPATH flags. We'll manually map all directories in LIBPATH into new
+# flags to set RPATH values.
+if sys.platform == 'sunos5':
+ for d in env['LIBPATH']:
+ env.Append(RPATH=d)
+
+# Set up the construction of serf-*.pc
+# TODO: add gssapi libs
+pkgconfig = env.Textfile('serf-%d.pc' % (MAJOR,),
+ env.File('build/serf.pc.in'),
+ SUBST_DICT = {
+ '@MAJOR@': str(MAJOR),
+ '@PREFIX@': '$PREFIX',
+ '@INCLUDE_SUBDIR@': 'serf-%d' % (MAJOR,),
+ '@VERSION@': '%d.%d.%d' % (MAJOR, MINOR, PATCH),
+ '@LIBS@': '%s %s -lz' % (apu_libs, apr_libs),
+ })
+
+env.Default(lib_static, lib_shared, pkgconfig)
+
+if CALLOUT_OKAY:
+ conf = Configure(env)
+
+ ### some configuration stuff
+
+ env = conf.Finish()
+
+
+# INSTALLATION STUFF
+
+install_static = env.Install(libdir, lib_static)
+install_shared = env.Install(libdir, lib_shared)
+
+if sys.platform == 'darwin':
+ install_shared_path = install_shared[0].abspath
+ env.AddPostAction(install_shared, ('install_name_tool -id %s %s'
+ % (install_shared_path,
+ install_shared_path)))
+ ### construct shared lib symlinks. this also means install the lib
+ ### as libserf-2.1.0.0.dylib, then add the symlinks.
+ ### note: see InstallAs
+
+env.Alias('install-lib', [install_static, install_shared,
+ ])
+env.Alias('install-inc', env.Install(incdir, HEADER_FILES))
+env.Alias('install-pc', env.Install(os.path.join(libdir, 'pkgconfig'),
+ pkgconfig))
+env.Alias('install', ['install-lib', 'install-inc', 'install-pc', ])
+
+
+# TESTS
+### maybe move to a separate scons file in the test/ subdir?
+
+tenv = env.Clone()
+
+TEST_PROGRAMS = [ 'serf_get', 'serf_response', 'serf_request', 'serf_spider',
+ 'test_all', 'serf_bwtp' ]
+if sys.platform == 'win32':
+ TEST_EXES = [ os.path.join('test', '%s.exe' % (prog)) for prog in TEST_PROGRAMS ]
+else:
+ TEST_EXES = [ os.path.join('test', '%s' % (prog)) for prog in TEST_PROGRAMS ]
+
+env.AlwaysBuild(env.Alias('check', TEST_EXES, sys.executable + ' build/check.py',
+ ENV={'PATH' : os.environ['PATH']}))
+
+# Find the (dynamic) library in this directory
+tenv.Replace(RPATH=thisdir)
+tenv.Prepend(LIBS=[LIBNAMESTATIC, ],
+ LIBPATH=[thisdir, ])
+
+testall_files = [
+ 'test/test_all.c',
+ 'test/CuTest.c',
+ 'test/test_util.c',
+ 'test/test_context.c',
+ 'test/test_buckets.c',
+ 'test/test_auth.c',
+ 'test/mock_buckets.c',
+ 'test/test_ssl.c',
+ 'test/server/test_server.c',
+ 'test/server/test_sslserver.c',
+ ]
+
+for proggie in TEST_EXES:
+ if 'test_all' in proggie:
+ tenv.Program(proggie, testall_files )
+ else:
+ tenv.Program(target = proggie, source = [proggie.replace('.exe','') + '.c'])
+
+
+# HANDLE CLEANING
+
+if env.GetOption('clean'):
+ # When we're cleaning, we want the dependency tree to include "everything"
+ # that could be built. Thus, include all of the tests.
+ env.Default('check')
diff --git a/contrib/serf/auth/auth.c b/contrib/serf/auth/auth.c
new file mode 100644
index 0000000..6b7d395
--- /dev/null
+++ b/contrib/serf/auth/auth.c
@@ -0,0 +1,472 @@
+/* Copyright 2009 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "serf.h"
+#include "serf_private.h"
+#include "auth.h"
+
+#include <apr.h>
+#include <apr_base64.h>
+#include <apr_strings.h>
+#include <apr_lib.h>
+
+static apr_status_t
+default_auth_response_handler(peer_t peer,
+ int code,
+ serf_connection_t *conn,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ apr_pool_t *pool)
+{
+ return APR_SUCCESS;
+}
+
+/* These authentication schemes are in order of decreasing security; the
+ topmost scheme the server supports will be used first.
+
+ Each set of handlers should support both server (401) and proxy (407)
+ authentication.
+
+ Use lower case for the scheme names to enable case insensitive matching.
+ */
+static const serf__authn_scheme_t serf_authn_schemes[] = {
+#ifdef SERF_HAVE_SPNEGO
+ {
+ "Negotiate",
+ "negotiate",
+ SERF_AUTHN_NEGOTIATE,
+ serf__init_spnego,
+ serf__init_spnego_connection,
+ serf__handle_spnego_auth,
+ serf__setup_request_spnego_auth,
+ serf__validate_response_spnego_auth,
+ },
+#ifdef WIN32
+ {
+ "NTLM",
+ "ntlm",
+ SERF_AUTHN_NTLM,
+ serf__init_spnego,
+ serf__init_spnego_connection,
+ serf__handle_spnego_auth,
+ serf__setup_request_spnego_auth,
+ serf__validate_response_spnego_auth,
+ },
+#endif /* #ifdef WIN32 */
+#endif /* SERF_HAVE_SPNEGO */
+ {
+ "Digest",
+ "digest",
+ SERF_AUTHN_DIGEST,
+ serf__init_digest,
+ serf__init_digest_connection,
+ serf__handle_digest_auth,
+ serf__setup_request_digest_auth,
+ serf__validate_response_digest_auth,
+ },
+ {
+ "Basic",
+ "basic",
+ SERF_AUTHN_BASIC,
+ serf__init_basic,
+ serf__init_basic_connection,
+ serf__handle_basic_auth,
+ serf__setup_request_basic_auth,
+ default_auth_response_handler,
+ },
+ /* ADD NEW AUTHENTICATION IMPLEMENTATIONS HERE (as they're written) */
+
+ /* sentinel */
+ { 0 }
+};
+
+
+/* Reads and discards all bytes in the response body. */
+static apr_status_t discard_body(serf_bucket_t *response)
+{
+ apr_status_t status;
+ const char *data;
+ apr_size_t len;
+
+ while (1) {
+ status = serf_bucket_read(response, SERF_READ_ALL_AVAIL, &data, &len);
+
+ if (status) {
+ return status;
+ }
+
+ /* feed me */
+ }
+}
+
+/**
+ * handle_auth_headers matches the Authenticate headers collected from the
+ * response (WWW- or Proxy-, depending on what's needed) against the
+ * compiled-in schemes, in order of decreasing security, and dispatches to
+ * the first handler that succeeds.
+ *
+ * Returns APR_SUCCESS if a scheme handled the authentication; otherwise an
+ * error status such as SERF_ERROR_AUTHN_NOT_SUPPORTED is returned.
+ */
+static int handle_auth_headers(int code,
+ void *baton,
+ apr_hash_t *hdrs,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ apr_pool_t *pool)
+{
+ const serf__authn_scheme_t *scheme;
+ serf_connection_t *conn = request->conn;
+ serf_context_t *ctx = conn->ctx;
+ apr_status_t status;
+
+ status = SERF_ERROR_AUTHN_NOT_SUPPORTED;
+
+ /* Find the matching authentication handler.
+ Note that we don't reuse the auth scheme stored in the context,
+ as that may have changed. (ex. fallback from ntlm to basic.) */
+ for (scheme = serf_authn_schemes; scheme->name != 0; ++scheme) {
+ const char *auth_hdr;
+ serf__auth_handler_func_t handler;
+ serf__authn_info_t *authn_info;
+
+ if (! (ctx->authn_types & scheme->type))
+ continue;
+
+ serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
+ "Client supports: %s\n", scheme->name);
+
+ auth_hdr = apr_hash_get(hdrs, scheme->key, APR_HASH_KEY_STRING);
+
+ if (!auth_hdr)
+ continue;
+
+ /* Found a matching scheme */
+ status = APR_SUCCESS;
+
+ handler = scheme->handle_func;
+
+ serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
+ "... matched: %s\n", scheme->name);
+
+ if (code == 401) {
+ authn_info = serf__get_authn_info_for_server(conn);
+ } else {
+ authn_info = &ctx->proxy_authn_info;
+ }
+ /* If this is the first time we use this scheme on this context and/or
+ this connection, make sure to initialize the authentication handler
+ first. */
+ if (authn_info->scheme != scheme) {
+ status = scheme->init_ctx_func(code, ctx, ctx->pool);
+ if (!status) {
+ status = scheme->init_conn_func(scheme, code, conn,
+ conn->pool);
+ if (!status)
+ authn_info->scheme = scheme;
+ else
+ authn_info->scheme = NULL;
+ }
+ }
+
+ if (!status) {
+ const char *auth_attr = strchr(auth_hdr, ' ');
+ if (auth_attr) {
+ auth_attr++;
+ }
+
+ status = handler(code, request, response,
+ auth_hdr, auth_attr, baton, ctx->pool);
+ }
+
+ if (status == APR_SUCCESS)
+ break;
+
+ /* No success authenticating with this scheme, try the next.
+ If no more authn schemes are found the status of this scheme will be
+ returned.
+ */
+ serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
+ "%s authentication failed.\n", scheme->name);
+ }
+
+ return status;
+}
+
+/**
+ * Baton passed to the store_header_in_dict callback function
+ */
+typedef struct {
+ const char *header;
+ apr_pool_t *pool;
+ apr_hash_t *hdrs;
+} auth_baton_t;
+
+static int store_header_in_dict(void *baton,
+ const char *key,
+ const char *header)
+{
+ auth_baton_t *ab = baton;
+ const char *auth_attr;
+ char *auth_name, *c;
+
+ /* We're only interested in xxxx-Authenticate headers. */
+ if (strcmp(key, ab->header) != 0)
+ return 0;
+
+ /* Extract the authentication scheme name. */
+ auth_attr = strchr(header, ' ');
+ if (auth_attr) {
+ auth_name = apr_pstrmemdup(ab->pool, header, auth_attr - header);
+ }
+ else
+ auth_name = apr_pstrmemdup(ab->pool, header, strlen(header));
+
+ /* Convert scheme name to lower case to enable case insensitive matching. */
+ for (c = auth_name; *c != '\0'; c++)
+ *c = (char)apr_tolower(*c);
+
+ apr_hash_set(ab->hdrs, auth_name, APR_HASH_KEY_STRING,
+ apr_pstrdup(ab->pool, header));
+
+ return 0;
+}
+
+/* Dispatch authentication handling. This function matches the possible
+ authentication mechanisms with those available. Server and proxy
+ authentication are evaluated separately. */
+static apr_status_t dispatch_auth(int code,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ void *baton,
+ apr_pool_t *pool)
+{
+ serf_bucket_t *hdrs;
+
+ if (code == 401 || code == 407) {
+ auth_baton_t ab = { 0 };
+ const char *auth_hdr;
+
+ ab.hdrs = apr_hash_make(pool);
+ ab.pool = pool;
+
+ /* Before iterating over all authn headers, check if there are any. */
+ if (code == 401)
+ ab.header = "WWW-Authenticate";
+ else
+ ab.header = "Proxy-Authenticate";
+
+ hdrs = serf_bucket_response_get_headers(response);
+ auth_hdr = serf_bucket_headers_get(hdrs, ab.header);
+
+ if (!auth_hdr) {
+ return SERF_ERROR_AUTHN_FAILED;
+ }
+ serf__log_skt(AUTH_VERBOSE, __FILE__, request->conn->skt,
+ "%s authz required. Response header(s): %s\n",
+ code == 401 ? "Server" : "Proxy", auth_hdr);
+
+
+ /* Store all WWW- or Proxy-Authenticate headers in a dictionary.
+
+ Note: it is possible to have multiple Authenticate headers. We do
+ not want to combine them (per normal header combination rules) as that
+ would make it hard to parse. Instead, we want to individually parse
+ and handle each header in the response, looking for one that we can
+ work with.
+ */
+ serf_bucket_headers_do(hdrs,
+ store_header_in_dict,
+ &ab);
+
+ /* Iterate over all authentication schemes, in order of decreasing
+ security. Try to find an authentication scheme the server supports. */
+ return handle_auth_headers(code, baton, ab.hdrs,
+ request, response, pool);
+ }
+
+ return APR_SUCCESS;
+}
+
+/* Read the headers of the response and try the available
+ handlers if authentication or validation is needed. */
+apr_status_t serf__handle_auth_response(int *consumed_response,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ void *baton,
+ apr_pool_t *pool)
+{
+ apr_status_t status;
+ serf_status_line sl;
+
+ *consumed_response = 0;
+
+ /* TODO: the response bucket was created by the application, not at all
+ guaranteed that this is of type response_bucket!! */
+ status = serf_bucket_response_status(response, &sl);
+ if (SERF_BUCKET_READ_ERROR(status)) {
+ return status;
+ }
+ if (!sl.version && (APR_STATUS_IS_EOF(status) ||
+ APR_STATUS_IS_EAGAIN(status))) {
+ return status;
+ }
+
+ status = serf_bucket_response_wait_for_headers(response);
+ if (status) {
+ if (!APR_STATUS_IS_EOF(status)) {
+ return status;
+ }
+
+ /* If status is APR_EOF, there were no headers to read.
+ This can be ok in some situations, and it definitely
+ means there's no authentication requested now. */
+ return APR_SUCCESS;
+ }
+
+ if (sl.code == 401 || sl.code == 407) {
+ /* Authentication requested. */
+
+ /* Don't bother handling the authentication request if the response
+ wasn't received completely yet. Serf will call serf__handle_auth_response
+ again when more data is received. */
+ status = discard_body(response);
+ *consumed_response = 1;
+
+ /* Discard all response body before processing authentication. */
+ if (!APR_STATUS_IS_EOF(status)) {
+ return status;
+ }
+
+ status = dispatch_auth(sl.code, request, response, baton, pool);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+
+ /* Requeue the request with the necessary auth headers. */
+ /* ### Application doesn't know about this request! */
+ if (request->ssltunnel) {
+ serf__ssltunnel_request_create(request->conn,
+ request->setup,
+ request->setup_baton);
+ } else {
+ serf_connection_priority_request_create(request->conn,
+ request->setup,
+ request->setup_baton);
+ }
+
+ return APR_EOF;
+ } else {
+ serf__validate_response_func_t validate_resp;
+ serf_connection_t *conn = request->conn;
+ serf_context_t *ctx = conn->ctx;
+ serf__authn_info_t *authn_info;
+ apr_status_t resp_status = APR_SUCCESS;
+
+
+ /* Validate the response server authn headers. */
+ authn_info = serf__get_authn_info_for_server(conn);
+ if (authn_info->scheme) {
+ validate_resp = authn_info->scheme->validate_response_func;
+ resp_status = validate_resp(HOST, sl.code, conn, request, response,
+ pool);
+ }
+
+ /* Validate the response proxy authn headers. */
+ authn_info = &ctx->proxy_authn_info;
+ if (!resp_status && authn_info->scheme) {
+ validate_resp = authn_info->scheme->validate_response_func;
+ resp_status = validate_resp(PROXY, sl.code, conn, request, response,
+ pool);
+ }
+
+ if (resp_status) {
+ /* If there was an error in the final step of the authentication,
+ consider the response body as invalid and discard it. */
+ status = discard_body(response);
+ *consumed_response = 1;
+ if (!APR_STATUS_IS_EOF(status)) {
+ return status;
+ }
+ /* The whole body was discarded, now return our error. */
+ return resp_status;
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
+/**
+ * base64 encode the authentication data and build an authentication
+ * header in this format:
+ * [SCHEME] [BASE64 of auth DATA]
+ */
+void serf__encode_auth_header(const char **header,
+ const char *scheme,
+ const char *data, apr_size_t data_len,
+ apr_pool_t *pool)
+{
+ apr_size_t encoded_len, scheme_len;
+ char *ptr;
+
+ encoded_len = apr_base64_encode_len(data_len);
+ scheme_len = strlen(scheme);
+
+ ptr = apr_palloc(pool, encoded_len + scheme_len + 1);
+ *header = ptr;
+
+ apr_cpystrn(ptr, scheme, scheme_len + 1);
+ ptr += scheme_len;
+ *ptr++ = ' ';
+
+ apr_base64_encode(ptr, data, data_len);
+}
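+
+/* Editorial illustration (not part of the original source): with the
+ * well-known RFC 2617 example credentials "Aladdin:open sesame" and the
+ * "Basic" scheme, the function above would yield
+ *
+ *     serf__encode_auth_header(&hdr, "Basic", "Aladdin:open sesame", 19, pool);
+ *     => hdr points to "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=="
+ *
+ * i.e. the scheme name, a single space, then the base64 of the raw data.
+ */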
+
+const char *serf__construct_realm(peer_t peer,
+ serf_connection_t *conn,
+ const char *realm_name,
+ apr_pool_t *pool)
+{
+ if (peer == HOST) {
+ return apr_psprintf(pool, "<%s://%s:%d> %s",
+ conn->host_info.scheme,
+ conn->host_info.hostname,
+ conn->host_info.port,
+ realm_name);
+ } else {
+ serf_context_t *ctx = conn->ctx;
+
+ return apr_psprintf(pool, "<http://%s:%d> %s",
+ ctx->proxy_address->hostname,
+ ctx->proxy_address->port,
+ realm_name);
+ }
+}
+
+serf__authn_info_t *serf__get_authn_info_for_server(serf_connection_t *conn)
+{
+ serf_context_t *ctx = conn->ctx;
+ serf__authn_info_t *authn_info;
+
+ authn_info = apr_hash_get(ctx->server_authn_info, conn->host_url,
+ APR_HASH_KEY_STRING);
+
+ if (!authn_info) {
+ authn_info = apr_pcalloc(ctx->pool, sizeof(serf__authn_info_t));
+ apr_hash_set(ctx->server_authn_info,
+ apr_pstrdup(ctx->pool, conn->host_url),
+ APR_HASH_KEY_STRING, authn_info);
+ }
+
+ return authn_info;
+}
diff --git a/contrib/serf/auth/auth.h b/contrib/serf/auth/auth.h
new file mode 100644
index 0000000..ea14115
--- /dev/null
+++ b/contrib/serf/auth/auth.h
@@ -0,0 +1,123 @@
+/* Copyright 2009 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AUTH_H
+#define AUTH_H
+
+#include "auth_spnego.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void serf__encode_auth_header(const char **header, const char *protocol,
+ const char *data, apr_size_t data_len,
+ apr_pool_t *pool);
+
+/* Prefixes the realm_name with a string containing scheme, hostname and port
+ of the connection, for providing it to the application. */
+const char *serf__construct_realm(peer_t peer,
+ serf_connection_t *conn,
+ const char *realm_name,
+ apr_pool_t *pool);
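+
+/* For example (illustrative values only): realm_name "WallyWorld" on a
+   connection to https://svn.example.com:443 yields the realm string
+   "<https://svn.example.com:443> WallyWorld". */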
+
+/** Basic authentication **/
+apr_status_t serf__init_basic(int code,
+ serf_context_t *ctx,
+ apr_pool_t *pool);
+apr_status_t serf__init_basic_connection(const serf__authn_scheme_t *scheme,
+ int code,
+ serf_connection_t *conn,
+ apr_pool_t *pool);
+apr_status_t serf__handle_basic_auth(int code,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ const char *auth_hdr,
+ const char *auth_attr,
+ void *baton,
+ apr_pool_t *pool);
+apr_status_t serf__setup_request_basic_auth(peer_t peer,
+ int code,
+ serf_connection_t *conn,
+ serf_request_t *request,
+ const char *method,
+ const char *uri,
+ serf_bucket_t *hdrs_bkt);
+
+/** Digest authentication **/
+apr_status_t serf__init_digest(int code,
+ serf_context_t *ctx,
+ apr_pool_t *pool);
+apr_status_t serf__init_digest_connection(const serf__authn_scheme_t *scheme,
+ int code,
+ serf_connection_t *conn,
+ apr_pool_t *pool);
+apr_status_t serf__handle_digest_auth(int code,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ const char *auth_hdr,
+ const char *auth_attr,
+ void *baton,
+ apr_pool_t *pool);
+apr_status_t serf__setup_request_digest_auth(peer_t peer,
+ int code,
+ serf_connection_t *conn,
+ serf_request_t *request,
+ const char *method,
+ const char *uri,
+ serf_bucket_t *hdrs_bkt);
+apr_status_t serf__validate_response_digest_auth(peer_t peer,
+ int code,
+ serf_connection_t *conn,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ apr_pool_t *pool);
+
+#ifdef SERF_HAVE_SPNEGO
+/** Kerberos authentication **/
+apr_status_t serf__init_spnego(int code,
+ serf_context_t *ctx,
+ apr_pool_t *pool);
+apr_status_t serf__init_spnego_connection(const serf__authn_scheme_t *scheme,
+ int code,
+ serf_connection_t *conn,
+ apr_pool_t *pool);
+apr_status_t serf__handle_spnego_auth(int code,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ const char *auth_hdr,
+ const char *auth_attr,
+ void *baton,
+ apr_pool_t *pool);
+apr_status_t serf__setup_request_spnego_auth(peer_t peer,
+ int code,
+ serf_connection_t *conn,
+ serf_request_t *request,
+ const char *method,
+ const char *uri,
+ serf_bucket_t *hdrs_bkt);
+apr_status_t serf__validate_response_spnego_auth(peer_t peer,
+ int code,
+ serf_connection_t *conn,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ apr_pool_t *pool);
+#endif /* SERF_HAVE_SPNEGO */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !AUTH_H */
diff --git a/contrib/serf/auth/auth_basic.c b/contrib/serf/auth/auth_basic.c
new file mode 100644
index 0000000..53b5d95
--- /dev/null
+++ b/contrib/serf/auth/auth_basic.c
@@ -0,0 +1,177 @@
+/* Copyright 2009 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*** Basic authentication ***/
+
+#include <serf.h>
+#include <serf_private.h>
+#include <auth/auth.h>
+
+#include <apr.h>
+#include <apr_base64.h>
+#include <apr_strings.h>
+
+/* Stores the context information related to Basic authentication.
+ This information is stored in the per server cache in the serf context. */
+typedef struct basic_authn_info_t {
+ const char *header;
+ const char *value;
+} basic_authn_info_t;
+
+apr_status_t
+serf__handle_basic_auth(int code,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ const char *auth_hdr,
+ const char *auth_attr,
+ void *baton,
+ apr_pool_t *pool)
+{
+ const char *tmp;
+ apr_size_t tmp_len;
+ serf_connection_t *conn = request->conn;
+ serf_context_t *ctx = conn->ctx;
+ serf__authn_info_t *authn_info;
+ basic_authn_info_t *basic_info;
+ apr_status_t status;
+ apr_pool_t *cred_pool;
+ char *username, *password, *realm_name;
+ const char *eq, *realm;
+
+ /* Can't do Basic authentication if there's no callback to get
+ username & password. */
+ if (!ctx->cred_cb) {
+ return SERF_ERROR_AUTHN_FAILED;
+ }
+
+ if (code == 401) {
+ authn_info = serf__get_authn_info_for_server(conn);
+ } else {
+ authn_info = &ctx->proxy_authn_info;
+ }
+ basic_info = authn_info->baton;
+
+    realm_name = NULL;
+    eq = strchr(auth_attr, '=');
+
+    if (eq && strncasecmp(auth_attr, "realm", 5) == 0) {
+        realm_name = apr_pstrdup(pool, eq + 1);
+        if (realm_name[0] == '\"') {
+            apr_size_t realm_len;
+
+            realm_len = strlen(realm_name);
+            if (realm_name[realm_len - 1] == '\"') {
+                realm_name[realm_len - 1] = '\0';
+                realm_name++;
+            }
+        }
+    }
+
+    /* Bail out here so REALM is never used uninitialized when the challenge
+       carries no realm attribute. */
+    if (!realm_name) {
+        return SERF_ERROR_AUTHN_MISSING_ATTRIBUTE;
+    }
+
+    realm = serf__construct_realm(code == 401 ? HOST : PROXY,
+                                  conn, realm_name,
+                                  pool);
+
+ /* Ask the application for credentials */
+ apr_pool_create(&cred_pool, pool);
+ status = serf__provide_credentials(ctx,
+ &username, &password,
+ request, baton,
+ code, authn_info->scheme->name,
+ realm, cred_pool);
+ if (status) {
+ apr_pool_destroy(cred_pool);
+ return status;
+ }
+
+ tmp = apr_pstrcat(conn->pool, username, ":", password, NULL);
+ tmp_len = strlen(tmp);
+ apr_pool_destroy(cred_pool);
+
+ serf__encode_auth_header(&basic_info->value,
+ authn_info->scheme->name,
+ tmp, tmp_len, pool);
+ basic_info->header = (code == 401) ? "Authorization" : "Proxy-Authorization";
+
+ return APR_SUCCESS;
+}
+
+apr_status_t
+serf__init_basic(int code,
+ serf_context_t *ctx,
+ apr_pool_t *pool)
+{
+ return APR_SUCCESS;
+}
+
+/* For Basic authentication we expect all authn info to be the same for all
+   connections in the context to the same server (same realm, username,
+   password). Therefore we can keep the header value in the per-server cache
+   in the serf context instead of per connection.
+   TODO: we currently don't cache this info per realm, so each time a request
+   'switches realms', we have to ask the application for new credentials. */
+apr_status_t
+serf__init_basic_connection(const serf__authn_scheme_t *scheme,
+ int code,
+ serf_connection_t *conn,
+ apr_pool_t *pool)
+{
+ serf_context_t *ctx = conn->ctx;
+ serf__authn_info_t *authn_info;
+
+ if (code == 401) {
+ authn_info = serf__get_authn_info_for_server(conn);
+ } else {
+ authn_info = &ctx->proxy_authn_info;
+ }
+
+ if (!authn_info->baton) {
+ authn_info->baton = apr_pcalloc(pool, sizeof(basic_authn_info_t));
+ }
+
+ return APR_SUCCESS;
+}
+
+apr_status_t
+serf__setup_request_basic_auth(peer_t peer,
+ int code,
+ serf_connection_t *conn,
+ serf_request_t *request,
+ const char *method,
+ const char *uri,
+ serf_bucket_t *hdrs_bkt)
+{
+ serf_context_t *ctx = conn->ctx;
+ serf__authn_info_t *authn_info;
+ basic_authn_info_t *basic_info;
+
+ if (peer == HOST) {
+ authn_info = serf__get_authn_info_for_server(conn);
+ } else {
+ authn_info = &ctx->proxy_authn_info;
+ }
+ basic_info = authn_info->baton;
+
+ if (basic_info && basic_info->header && basic_info->value) {
+ serf_bucket_headers_setn(hdrs_bkt, basic_info->header,
+ basic_info->value);
+ return APR_SUCCESS;
+ }
+
+ return SERF_ERROR_AUTHN_FAILED;
+}
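As an aside, the value cached by serf__handle_basic_auth() above is the standard RFC 2617 Basic credential: the scheme name followed by base64("username:password"). A minimal sketch of that computation using plain APR, assuming serf__encode_auth_header() yields an equivalent "Basic <base64>" string; the helper name below is illustrative, not upstream API:

    #include <string.h>
    #include <apr_pools.h>
    #include <apr_strings.h>
    #include <apr_base64.h>

    /* Build e.g. "Basic dXNlcjpwYXNz" for user "user", password "pass". */
    static const char *make_basic_value(const char *user, const char *pass,
                                        apr_pool_t *pool)
    {
        const char *creds = apr_pstrcat(pool, user, ":", pass, NULL);
        int enc_len = apr_base64_encode_len((int)strlen(creds));
        char *encoded = apr_palloc(pool, enc_len);

        apr_base64_encode(encoded, creds, (int)strlen(creds));
        return apr_pstrcat(pool, "Basic ", encoded, NULL);
    }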
diff --git a/contrib/serf/auth/auth_digest.c b/contrib/serf/auth/auth_digest.c
new file mode 100644
index 0000000..7403386
--- /dev/null
+++ b/contrib/serf/auth/auth_digest.c
@@ -0,0 +1,533 @@
+/* Copyright 2009 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*** Digest authentication ***/
+
+#include <serf.h>
+#include <serf_private.h>
+#include <auth/auth.h>
+
+#include <apr.h>
+#include <apr_base64.h>
+#include <apr_strings.h>
+#include <apr_uuid.h>
+#include <apr_md5.h>
+
+/** Digest authentication, implements RFC 2617. **/
+
+/* TODO: add support for the domain attribute. This defines the protection
+ space, so that serf can decide per URI if it should reuse the cached
+ credentials for the server, or not. */
+
+/* Stores the context information related to Digest authentication.
+ This information is stored in the per server cache in the serf context. */
+typedef struct digest_authn_info_t {
+ /* nonce-count for digest authentication */
+ unsigned int digest_nc;
+
+ const char *header;
+
+ const char *ha1;
+
+ const char *realm;
+ const char *cnonce;
+ const char *nonce;
+ const char *opaque;
+ const char *algorithm;
+ const char *qop;
+ const char *username;
+
+ apr_pool_t *pool;
+} digest_authn_info_t;
+
+static char
+int_to_hex(int v)
+{
+ return (v < 10) ? '0' + v : 'a' + (v - 10);
+}
+
+/**
+ * Convert the binary MD5 digest HASHVAL to its hexadecimal string
+ * representation.
+ *
+ * The returned string will be allocated in the POOL and be null-terminated.
+ */
+static const char *
+hex_encode(const unsigned char *hashval,
+ apr_pool_t *pool)
+{
+ int i;
+ char *hexval = apr_palloc(pool, (APR_MD5_DIGESTSIZE * 2) + 1);
+ for (i = 0; i < APR_MD5_DIGESTSIZE; i++) {
+ hexval[2 * i] = int_to_hex((hashval[i] >> 4) & 0xf);
+ hexval[2 * i + 1] = int_to_hex(hashval[i] & 0xf);
+ }
+ hexval[APR_MD5_DIGESTSIZE * 2] = '\0';
+ return hexval;
+}
+
+/**
+ * Returns a string of random hexadecimal characters, derived from a freshly
+ * generated UUID (formatted as 00112233-4455-6677-8899-AABBCCDDEEFF).
+ *
+ * The returned string will be allocated in the POOL and be null-terminated.
+ */
+static const char *
+random_cnonce(apr_pool_t *pool)
+{
+ apr_uuid_t uuid;
+ char *buf = apr_palloc(pool, APR_UUID_FORMATTED_LENGTH + 1);
+
+ apr_uuid_get(&uuid);
+ apr_uuid_format(buf, &uuid);
+
+ return hex_encode((unsigned char*)buf, pool);
+}
+
+static const char *
+build_digest_ha1(const char *username,
+ const char *password,
+ const char *realm_name,
+ apr_pool_t *pool)
+{
+ const char *tmp;
+ unsigned char ha1[APR_MD5_DIGESTSIZE];
+ apr_status_t status;
+
+ /* calculate ha1:
+ MD5 hash of the combined user name, authentication realm and password */
+ tmp = apr_psprintf(pool, "%s:%s:%s",
+ username,
+ realm_name,
+ password);
+ status = apr_md5(ha1, tmp, strlen(tmp));
+
+ return hex_encode(ha1, pool);
+}
+
+static const char *
+build_digest_ha2(const char *uri,
+ const char *method,
+ const char *qop,
+ apr_pool_t *pool)
+{
+ if (!qop || strcmp(qop, "auth") == 0) {
+ const char *tmp;
+ unsigned char ha2[APR_MD5_DIGESTSIZE];
+ apr_status_t status;
+
+ /* calculate ha2:
+ MD5 hash of the combined method and URI */
+ tmp = apr_psprintf(pool, "%s:%s",
+ method,
+ uri);
+ status = apr_md5(ha2, tmp, strlen(tmp));
+
+ return hex_encode(ha2, pool);
+ } else {
+ /* TODO: auth-int isn't supported! */
+ }
+
+ return NULL;
+}
+
+static const char *
+build_auth_header(digest_authn_info_t *digest_info,
+ const char *path,
+ const char *method,
+ apr_pool_t *pool)
+{
+ char *hdr;
+ const char *ha2;
+ const char *response;
+ unsigned char response_hdr[APR_MD5_DIGESTSIZE];
+ const char *response_hdr_hex;
+ apr_status_t status;
+
+ ha2 = build_digest_ha2(path, method, digest_info->qop, pool);
+
+ hdr = apr_psprintf(pool,
+ "Digest realm=\"%s\","
+ " username=\"%s\","
+ " nonce=\"%s\","
+ " uri=\"%s\"",
+ digest_info->realm, digest_info->username,
+ digest_info->nonce,
+ path);
+
+ if (digest_info->qop) {
+ if (! digest_info->cnonce)
+ digest_info->cnonce = random_cnonce(digest_info->pool);
+
+ hdr = apr_psprintf(pool, "%s, nc=%08x, cnonce=\"%s\", qop=\"%s\"",
+ hdr,
+ digest_info->digest_nc,
+ digest_info->cnonce,
+ digest_info->qop);
+
+ /* Build the response header:
+ MD5 hash of the combined HA1 result, server nonce (nonce),
+ request counter (nc), client nonce (cnonce),
+ quality of protection code (qop) and HA2 result. */
+ response = apr_psprintf(pool, "%s:%s:%08x:%s:%s:%s",
+ digest_info->ha1, digest_info->nonce,
+ digest_info->digest_nc,
+ digest_info->cnonce, digest_info->qop, ha2);
+ } else {
+ /* Build the response header:
+ MD5 hash of the combined HA1 result, server nonce (nonce)
+ and HA2 result. */
+ response = apr_psprintf(pool, "%s:%s:%s",
+ digest_info->ha1, digest_info->nonce, ha2);
+ }
+
+ status = apr_md5(response_hdr, response, strlen(response));
+ response_hdr_hex = hex_encode(response_hdr, pool);
+
+ hdr = apr_psprintf(pool, "%s, response=\"%s\"", hdr, response_hdr_hex);
+
+ if (digest_info->opaque) {
+ hdr = apr_psprintf(pool, "%s, opaque=\"%s\"", hdr,
+ digest_info->opaque);
+ }
+ if (digest_info->algorithm) {
+ hdr = apr_psprintf(pool, "%s, algorithm=\"%s\"", hdr,
+ digest_info->algorithm);
+ }
+
+ return hdr;
+}
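As an aside, the value assembled by build_auth_header() above is the RFC 2617 request-digest: for qop=auth it is MD5(HA1 ":" nonce ":" nc ":" cnonce ":" qop ":" HA2), hex-encoded. A small standalone sketch of that computation with APR, illustrative only and not upstream code:

    #include <string.h>
    #include <apr_pools.h>
    #include <apr_md5.h>
    #include <apr_strings.h>

    /* MD5 of DATA as a lowercase hex string, allocated in POOL. */
    static const char *md5_hex(const char *data, apr_pool_t *pool)
    {
        unsigned char digest[APR_MD5_DIGESTSIZE];
        char *hex = apr_palloc(pool, 2 * APR_MD5_DIGESTSIZE + 1);
        int i;

        apr_md5(digest, data, strlen(data));
        for (i = 0; i < APR_MD5_DIGESTSIZE; i++)
            apr_snprintf(hex + 2 * i, 3, "%02x", digest[i]);
        return hex;
    }

    /* request-digest for the qop=auth case. */
    static const char *digest_response(const char *ha1, const char *nonce,
                                       unsigned int nc, const char *cnonce,
                                       const char *ha2, apr_pool_t *pool)
    {
        const char *a = apr_psprintf(pool, "%s:%s:%08x:%s:auth:%s",
                                     ha1, nonce, nc, cnonce, ha2);
        return md5_hex(a, pool);
    }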
+
+apr_status_t
+serf__handle_digest_auth(int code,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ const char *auth_hdr,
+ const char *auth_attr,
+ void *baton,
+ apr_pool_t *pool)
+{
+ char *attrs;
+ char *nextkv;
+ const char *realm, *realm_name = NULL;
+ const char *nonce = NULL;
+ const char *algorithm = NULL;
+ const char *qop = NULL;
+ const char *opaque = NULL;
+ const char *key;
+ serf_connection_t *conn = request->conn;
+ serf_context_t *ctx = conn->ctx;
+ serf__authn_info_t *authn_info;
+ digest_authn_info_t *digest_info;
+ apr_status_t status;
+ apr_pool_t *cred_pool;
+ char *username, *password;
+
+ /* Can't do Digest authentication if there's no callback to get
+ username & password. */
+ if (!ctx->cred_cb) {
+ return SERF_ERROR_AUTHN_FAILED;
+ }
+
+ if (code == 401) {
+ authn_info = serf__get_authn_info_for_server(conn);
+ } else {
+ authn_info = &ctx->proxy_authn_info;
+ }
+ digest_info = authn_info->baton;
+
+    /* Need a copy because we're going to write NUL characters into the string. */
+ attrs = apr_pstrdup(pool, auth_attr);
+
+ /* We're expecting a list of key=value pairs, separated by a comma.
+ Ex. realm="SVN Digest",
+ nonce="f+zTl/leBAA=e371bd3070adfb47b21f5fc64ad8cc21adc371a5",
+ algorithm=MD5, qop="auth" */
+ for ( ; (key = apr_strtok(attrs, ",", &nextkv)) != NULL; attrs = NULL) {
+ char *val;
+
+ val = strchr(key, '=');
+ if (val == NULL)
+ continue;
+ *val++ = '\0';
+
+ /* skip leading spaces */
+ while (*key && *key == ' ')
+ key++;
+
+ /* If the value is quoted, then remove the quotes. */
+ if (*val == '"') {
+ apr_size_t last = strlen(val) - 1;
+
+ if (val[last] == '"') {
+ val[last] = '\0';
+ val++;
+ }
+ }
+
+ if (strcmp(key, "realm") == 0)
+ realm_name = val;
+ else if (strcmp(key, "nonce") == 0)
+ nonce = val;
+ else if (strcmp(key, "algorithm") == 0)
+ algorithm = val;
+ else if (strcmp(key, "qop") == 0)
+ qop = val;
+ else if (strcmp(key, "opaque") == 0)
+ opaque = val;
+
+ /* Ignore all unsupported attributes. */
+ }
+
+ if (!realm_name) {
+ return SERF_ERROR_AUTHN_MISSING_ATTRIBUTE;
+ }
+
+ realm = serf__construct_realm(code == 401 ? HOST : PROXY,
+ conn, realm_name,
+ pool);
+
+ /* Ask the application for credentials */
+ apr_pool_create(&cred_pool, pool);
+ status = serf__provide_credentials(ctx,
+ &username, &password,
+ request, baton,
+ code, authn_info->scheme->name,
+ realm, cred_pool);
+ if (status) {
+ apr_pool_destroy(cred_pool);
+ return status;
+ }
+
+ digest_info->header = (code == 401) ? "Authorization" :
+ "Proxy-Authorization";
+
+ /* Store the digest authentication parameters in the context cached for
+ this server in the serf context, so we can use it to create the
+ Authorization header when setting up requests on the same or different
+ connections (e.g. in case of KeepAlive off on the server).
+ TODO: we currently don't cache this info per realm, so each time a request
+ 'switches realms', we have to ask the application for new credentials. */
+ digest_info->pool = conn->pool;
+ digest_info->qop = apr_pstrdup(digest_info->pool, qop);
+ digest_info->nonce = apr_pstrdup(digest_info->pool, nonce);
+ digest_info->cnonce = NULL;
+ digest_info->opaque = apr_pstrdup(digest_info->pool, opaque);
+ digest_info->algorithm = apr_pstrdup(digest_info->pool, algorithm);
+ digest_info->realm = apr_pstrdup(digest_info->pool, realm_name);
+ digest_info->username = apr_pstrdup(digest_info->pool, username);
+ digest_info->digest_nc++;
+
+ digest_info->ha1 = build_digest_ha1(username, password, digest_info->realm,
+ digest_info->pool);
+
+ apr_pool_destroy(cred_pool);
+
+    /* If the handshake is finished, tell serf it can send as many requests as
+       it likes. */
+ serf_connection_set_max_outstanding_requests(conn, 0);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t
+serf__init_digest(int code,
+ serf_context_t *ctx,
+ apr_pool_t *pool)
+{
+ return APR_SUCCESS;
+}
+
+apr_status_t
+serf__init_digest_connection(const serf__authn_scheme_t *scheme,
+ int code,
+ serf_connection_t *conn,
+ apr_pool_t *pool)
+{
+ serf_context_t *ctx = conn->ctx;
+ serf__authn_info_t *authn_info;
+
+ if (code == 401) {
+ authn_info = serf__get_authn_info_for_server(conn);
+ } else {
+ authn_info = &ctx->proxy_authn_info;
+ }
+
+ if (!authn_info->baton) {
+ authn_info->baton = apr_pcalloc(pool, sizeof(digest_authn_info_t));
+ }
+
+ /* Make serf send the initial requests one by one */
+ serf_connection_set_max_outstanding_requests(conn, 1);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t
+serf__setup_request_digest_auth(peer_t peer,
+ int code,
+ serf_connection_t *conn,
+ serf_request_t *request,
+ const char *method,
+ const char *uri,
+ serf_bucket_t *hdrs_bkt)
+{
+ serf_context_t *ctx = conn->ctx;
+ serf__authn_info_t *authn_info;
+ digest_authn_info_t *digest_info;
+ apr_status_t status = APR_SUCCESS;
+
+ if (peer == HOST) {
+ authn_info = serf__get_authn_info_for_server(conn);
+ } else {
+ authn_info = &ctx->proxy_authn_info;
+ }
+ digest_info = authn_info->baton;
+
+ if (digest_info && digest_info->realm) {
+ const char *value;
+ const char *path;
+
+ /* TODO: per request pool? */
+
+        /* For the request 'CONNECT serf.googlecode.com:443', the uri should
+           also be serf.googlecode.com:443. apr_uri_parse can't handle this,
+           so special-case it. */
+ if (strcmp(method, "CONNECT") == 0)
+ path = uri;
+ else {
+ apr_uri_t parsed_uri;
+
+ /* Extract path from uri. */
+ status = apr_uri_parse(conn->pool, uri, &parsed_uri);
+ if (status)
+ return status;
+
+ path = parsed_uri.path;
+ }
+
+ /* Build a new Authorization header. */
+ digest_info->header = (peer == HOST) ? "Authorization" :
+ "Proxy-Authorization";
+ value = build_auth_header(digest_info, path, method,
+ conn->pool);
+
+ serf_bucket_headers_setn(hdrs_bkt, digest_info->header,
+ value);
+ digest_info->digest_nc++;
+
+ /* Store the uri of this request on the serf_request_t object, to make
+ it available when validating the Authentication-Info header of the
+ matching response. */
+ request->auth_baton = path;
+ }
+
+ return status;
+}
+
+apr_status_t
+serf__validate_response_digest_auth(peer_t peer,
+ int code,
+ serf_connection_t *conn,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ apr_pool_t *pool)
+{
+ const char *key;
+ char *auth_attr;
+ char *nextkv;
+ const char *rspauth = NULL;
+ const char *qop = NULL;
+ const char *nc_str = NULL;
+ serf_bucket_t *hdrs;
+ serf_context_t *ctx = conn->ctx;
+
+ hdrs = serf_bucket_response_get_headers(response);
+
+    /* Need a copy because we're going to write NUL characters into the string. */
+ if (peer == HOST)
+ auth_attr = apr_pstrdup(pool,
+ serf_bucket_headers_get(hdrs, "Authentication-Info"));
+ else
+ auth_attr = apr_pstrdup(pool,
+ serf_bucket_headers_get(hdrs, "Proxy-Authentication-Info"));
+
+ /* If there's no Authentication-Info header there's nothing to validate. */
+ if (! auth_attr)
+ return APR_SUCCESS;
+
+ /* We're expecting a list of key=value pairs, separated by a comma.
+ Ex. rspauth="8a4b8451084b082be6b105e2b7975087",
+ cnonce="346531653132652d303033392d3435", nc=00000007,
+ qop=auth */
+ for ( ; (key = apr_strtok(auth_attr, ",", &nextkv)) != NULL; auth_attr = NULL) {
+ char *val;
+
+ val = strchr(key, '=');
+ if (val == NULL)
+ continue;
+ *val++ = '\0';
+
+ /* skip leading spaces */
+ while (*key && *key == ' ')
+ key++;
+
+ /* If the value is quoted, then remove the quotes. */
+ if (*val == '"') {
+ apr_size_t last = strlen(val) - 1;
+
+ if (val[last] == '"') {
+ val[last] = '\0';
+ val++;
+ }
+ }
+
+ if (strcmp(key, "rspauth") == 0)
+ rspauth = val;
+ else if (strcmp(key, "qop") == 0)
+ qop = val;
+ else if (strcmp(key, "nc") == 0)
+ nc_str = val;
+ }
+
+ if (rspauth) {
+ const char *ha2, *tmp, *resp_hdr_hex;
+ unsigned char resp_hdr[APR_MD5_DIGESTSIZE];
+ const char *req_uri = request->auth_baton;
+ serf__authn_info_t *authn_info;
+ digest_authn_info_t *digest_info;
+
+ if (peer == HOST) {
+ authn_info = serf__get_authn_info_for_server(conn);
+ } else {
+ authn_info = &ctx->proxy_authn_info;
+ }
+ digest_info = authn_info->baton;
+
+ ha2 = build_digest_ha2(req_uri, "", qop, pool);
+ tmp = apr_psprintf(pool, "%s:%s:%s:%s:%s:%s",
+ digest_info->ha1, digest_info->nonce, nc_str,
+ digest_info->cnonce, digest_info->qop, ha2);
+ apr_md5(resp_hdr, tmp, strlen(tmp));
+ resp_hdr_hex = hex_encode(resp_hdr, pool);
+
+ /* Incorrect response-digest in Authentication-Info header. */
+ if (strcmp(rspauth, resp_hdr_hex) != 0) {
+ return SERF_ERROR_AUTHN_FAILED;
+ }
+ }
+
+ return APR_SUCCESS;
+}
diff --git a/contrib/serf/auth/auth_spnego.c b/contrib/serf/auth/auth_spnego.c
new file mode 100644
index 0000000..4d22ec1
--- /dev/null
+++ b/contrib/serf/auth/auth_spnego.c
@@ -0,0 +1,556 @@
+/* Copyright 2009 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "auth_spnego.h"
+
+#ifdef SERF_HAVE_SPNEGO
+
+/** These functions implement SPNEGO-based Kerberos and NTLM authentication,
+ * using either GSS-API (RFC 2743) or SSPI on Windows.
+ * The HTTP message exchange is documented in RFC 4559.
+ **/
+
+#include <serf.h>
+#include <serf_private.h>
+#include <auth/auth.h>
+
+#include <apr.h>
+#include <apr_base64.h>
+#include <apr_strings.h>
+
+/** TODO:
+ ** - send session key directly on new connections where we already know
+ ** the server requires Kerberos authn.
+ ** - Add a way for serf to give detailed error information back to the
+ ** application.
+ **/
+
+/* Authentication over HTTP using Kerberos
+ *
+ * Kerberos involves three servers:
+ * - Authentication Server (AS): verifies users during login
+ * - Ticket-Granting Server (TGS): issues proof of identity tickets
+ * - HTTP server (S)
+ *
+ * Steps:
+ * 0. User logs in to the AS and receives a TGS ticket. On workstations
+ * where the login program doesn't support Kerberos, the user can use
+ * 'kinit'.
+ *
+ * 1. C --> S: GET
+ *
+ * C <-- S: 401 Authentication Required
+ * WWW-Authenticate: Negotiate
+ *
+ * -> app contacts the TGS to request a session key for the HTTP service
+ * @ target host. The returned session key is encrypted with the HTTP
+ * service's secret key, so we can safely send it to the server.
+ *
+ * 2. C --> S: GET
+ * Authorization: Negotiate <Base64 encoded session key>
+ * gss_api_ctx->state = gss_api_auth_in_progress;
+ *
+ * C <-- S: 200 OK
+ * WWW-Authenticate: Negotiate <Base64 encoded server
+ * authentication data>
+ *
+ *    -> The server returned an (optional) key to prove itself to us. We check this
+ * key with the TGS again. If it checks out, we can return the response
+ * body to the application.
+ *
+ * Note: It's possible that the server returns 401 again in step 2, if the
+ *          Kerberos context isn't complete yet. This means there is a 3rd
+ *          step where we'll send a request with an Authorization header to the
+ * server. Some (simple) tests with mod_auth_kerb and MIT Kerberos 5 show
+ * this never happens.
+ *
+ * Depending on the type of HTTP server, this handshake is required for either
+ * every new connection, or for every new request! For more info see the next
+ * comment on authn_persistence_state_t.
+ *
+ * Note: Step 1 of the handshake will only happen on the first connection.
+ *       Once we know the server requires Kerberos authentication, the initial
+ *       requests on the other connections will include a session key, so we
+ *       start at step 2 in the handshake.
+ * ### TODO: Not implemented yet!
+ */
+
+/* Current state of the authentication of the current request. */
+typedef enum {
+ gss_api_auth_not_started,
+ gss_api_auth_in_progress,
+ gss_api_auth_completed,
+} gss_api_auth_state;
+
+/**
+ authn_persistence_state_t: state that indicates if we are talking with a
+ server that requires authentication only of the first request (stateful),
+ or of each request (stateless).
+
+ INIT: Begin state. Authenticating the first request on this connection.
+ UNDECIDED: we haven't identified the server yet, assume STATEFUL for now.
+   Pipeline mode disabled, requests are sent only after the response to the
+   previous request arrived.
+ STATELESS: we know the server requires authentication for each request.
+   On all new requests add the Authorization header with an initial SPNEGO
+   token (created per request).
+   To keep things simple, keep the connection in one by one mode.
+   (otherwise we'd have to keep a queue of gssapi context objects to match
+   the Negotiate header of the response with the session initiated by the
+   matching request).
+   This state is a final state.
+ STATEFUL: alright, we have authenticated the connection and for the server
+   that is enough. Don't add an Authorization header to new requests.
+   Serf will switch to pipelined mode.
+   This state is not a final state, although in practical scenarios it will
+   be. When we receive a 40x response from the server, we switch to STATELESS
+   mode. (An illustrative sketch of these transitions follows the enum below.)
+
+ We start in state init for the first request until it is authenticated.
+
+ The rest of the state machine starts with the arrival of the response to the
+ second request, and then goes on with each response:
+
+ --------
+ | INIT | C --> S: GET request in response to 40x of the server
+ -------- add [Proxy]-Authorization header
+ |
+ |
+ ------------
+ | UNDECIDED| C --> S: GET request, assume stateful,
+ ------------ no [Proxy]-Authorization header
+ |
+ |
+ |------------------------------------------------
+ | |
+ | C <-- S: 40x Authentication | C <-- S: 200 OK
+ | Required |
+ | |
+ v v
+ ------------- ------------
+ ->| STATELESS |<------------------------------| STATEFUL |<--
+ | ------------- C <-- S: 40x ------------ |
+ * | | Authentication | | 200 OK
+ | / Required | |
+ ----- -----/
+
+ **/
+typedef enum {
+ pstate_init,
+ pstate_undecided,
+ pstate_stateless,
+ pstate_stateful,
+} authn_persistence_state_t;
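The state diagram above reduces to a small transition function. An illustrative sketch only: it reuses the enum just defined and assumes the HTTP status code alone drives the transition; the real logic lives in do_auth() and serf__validate_response_spnego_auth() below.

    static authn_persistence_state_t
    next_pstate(authn_persistence_state_t pstate, int response_code)
    {
        int challenged = (response_code == 401 || response_code == 407);

        switch (pstate) {
        case pstate_init:
            /* First request authenticated; server behaviour still unknown. */
            return pstate_undecided;
        case pstate_undecided:
        case pstate_stateful:
            /* A new 40x challenge means the server wants authn per request. */
            return challenged ? pstate_stateless : pstate_stateful;
        case pstate_stateless:
        default:
            return pstate_stateless;   /* final state */
        }
    }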
+
+
+/* HTTP Service name, used to get the session key. */
+#define KRB_HTTP_SERVICE "HTTP"
+
+/* Stores the context information related to Kerberos authentication. */
+typedef struct
+{
+ apr_pool_t *pool;
+
+ /* GSSAPI context */
+ serf__spnego_context_t *gss_ctx;
+
+ /* Current state of the authentication cycle. */
+ gss_api_auth_state state;
+
+ /* Current persistence state. */
+ authn_persistence_state_t pstate;
+
+ const char *header;
+ const char *value;
+} gss_authn_info_t;
+
+/* On the initial 401 response of the server, request a session key from
+ the Kerberos KDC to pass to the server, proving that we are who we
+ claim to be. The session key can only be used with the HTTP service
+ on the target host. */
+static apr_status_t
+gss_api_get_credentials(char *token, apr_size_t token_len,
+ const char *hostname,
+ const char **buf, apr_size_t *buf_len,
+ gss_authn_info_t *gss_info)
+{
+ serf__spnego_buffer_t input_buf;
+ serf__spnego_buffer_t output_buf;
+ apr_status_t status = APR_SUCCESS;
+
+    /* If the server sent us a token, pass it to the SPNEGO layer
+       (serf__spnego_init_sec_context) for validation. */
+ if (token) {
+ input_buf.value = token;
+ input_buf.length = token_len;
+ } else {
+ input_buf.value = 0;
+ input_buf.length = 0;
+ }
+
+ /* Establish a security context to the server. */
+ status = serf__spnego_init_sec_context(
+ gss_info->gss_ctx,
+ KRB_HTTP_SERVICE, hostname,
+ &input_buf,
+ &output_buf,
+ gss_info->pool,
+ gss_info->pool
+ );
+
+ switch(status) {
+ case APR_SUCCESS:
+ gss_info->state = gss_api_auth_completed;
+ break;
+ case APR_EAGAIN:
+ gss_info->state = gss_api_auth_in_progress;
+ status = APR_SUCCESS;
+ break;
+ default:
+ return status;
+ }
+
+ /* Return the session key to our caller. */
+ *buf = output_buf.value;
+ *buf_len = output_buf.length;
+
+ return status;
+}
+
+/* do_auth is invoked in two situations:
+ - when a response from a server is received that contains an authn header
+ (either from a 40x or 2xx response)
+ - when a request is prepared on a connection with stateless authentication.
+
+ Read the header sent by the server (if any), invoke the gssapi authn
+ code and use the resulting Server Ticket on the next request to the
+ server. */
+static apr_status_t
+do_auth(peer_t peer,
+ int code,
+ gss_authn_info_t *gss_info,
+ serf_connection_t *conn,
+ const char *auth_hdr,
+ apr_pool_t *pool)
+{
+ serf_context_t *ctx = conn->ctx;
+ serf__authn_info_t *authn_info;
+ const char *tmp = NULL;
+ char *token = NULL;
+ apr_size_t tmp_len = 0, token_len = 0;
+ apr_status_t status;
+
+ if (peer == HOST) {
+ authn_info = serf__get_authn_info_for_server(conn);
+ } else {
+ authn_info = &ctx->proxy_authn_info;
+ }
+
+ /* Is this a response from a host/proxy? auth_hdr should always be set. */
+ if (code && auth_hdr) {
+ const char *space = NULL;
+ /* The server will return a token as attribute to the Negotiate key.
+ Negotiate YGwGCSqGSIb3EgECAgIAb10wW6ADAgEFoQMCAQ+iTzBNoAMCARCiRgREa6
+ mouMBAMFqKVdTGtfpZNXKzyw4Yo1paphJdIA3VOgncaoIlXxZLnkHiIHS2v65pVvrp
+ bRIyjF8xve9HxpnNIucCY9c=
+
+ Read this base64 value, decode it and validate it so we're sure the
+ server is who we expect it to be. */
+ space = strchr(auth_hdr, ' ');
+
+ if (space) {
+ token = apr_palloc(pool, apr_base64_decode_len(space + 1));
+ token_len = apr_base64_decode(token, space + 1);
+ }
+ } else {
+ /* This is a new request, not a retry in response to a 40x of the
+ host/proxy.
+ Only add the Authorization header if we know the server requires
+ per-request authentication (stateless). */
+ if (gss_info->pstate != pstate_stateless)
+ return APR_SUCCESS;
+ }
+
+ switch(gss_info->pstate) {
+ case pstate_init:
+ /* Nothing to do here */
+ break;
+ case pstate_undecided: /* Fall through */
+ case pstate_stateful:
+ {
+ /* Switch to stateless mode, from now on handle authentication
+ of each request with a new gss context. This is easiest to
+ manage when sending requests one by one. */
+ serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
+ "Server requires per-request SPNEGO authn, "
+ "switching to stateless mode.\n");
+
+ gss_info->pstate = pstate_stateless;
+ serf_connection_set_max_outstanding_requests(conn, 1);
+ break;
+ }
+ case pstate_stateless:
+ /* Nothing to do here */
+ break;
+ }
+
+ /* If the server didn't provide us with a token, start with a new initial
+ step in the SPNEGO authentication. */
+ if (!token) {
+ serf__spnego_reset_sec_context(gss_info->gss_ctx);
+ gss_info->state = gss_api_auth_not_started;
+ }
+
+ if (peer == HOST) {
+ status = gss_api_get_credentials(token, token_len,
+ conn->host_info.hostname,
+ &tmp, &tmp_len,
+ gss_info);
+ } else {
+ char *proxy_host;
+ apr_getnameinfo(&proxy_host, conn->ctx->proxy_address, 0);
+ status = gss_api_get_credentials(token, token_len, proxy_host,
+ &tmp, &tmp_len,
+ gss_info);
+ }
+ if (status)
+ return status;
+
+ /* On the next request, add an Authorization header. */
+ if (tmp_len) {
+ serf__encode_auth_header(&gss_info->value, authn_info->scheme->name,
+ tmp,
+ tmp_len,
+ pool);
+ gss_info->header = (peer == HOST) ?
+ "Authorization" : "Proxy-Authorization";
+ }
+
+ return APR_SUCCESS;
+}
+
+apr_status_t
+serf__init_spnego(int code,
+ serf_context_t *ctx,
+ apr_pool_t *pool)
+{
+ return APR_SUCCESS;
+}
+
+/* A new connection is created to a server that's known to use
+ Kerberos. */
+apr_status_t
+serf__init_spnego_connection(const serf__authn_scheme_t *scheme,
+ int code,
+ serf_connection_t *conn,
+ apr_pool_t *pool)
+{
+ gss_authn_info_t *gss_info;
+ apr_status_t status;
+
+ gss_info = apr_pcalloc(conn->pool, sizeof(*gss_info));
+ gss_info->pool = conn->pool;
+ gss_info->state = gss_api_auth_not_started;
+ gss_info->pstate = pstate_init;
+ status = serf__spnego_create_sec_context(&gss_info->gss_ctx, scheme,
+ gss_info->pool, pool);
+
+ if (status) {
+ return status;
+ }
+
+ if (code == 401) {
+ conn->authn_baton = gss_info;
+ } else {
+ conn->proxy_authn_baton = gss_info;
+ }
+
+ /* Make serf send the initial requests one by one */
+ serf_connection_set_max_outstanding_requests(conn, 1);
+
+ serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
+ "Initialized Kerberos context for this connection.\n");
+
+ return APR_SUCCESS;
+}
+
+/* A 40x response was received, handle the authentication. */
+apr_status_t
+serf__handle_spnego_auth(int code,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ const char *auth_hdr,
+ const char *auth_attr,
+ void *baton,
+ apr_pool_t *pool)
+{
+ serf_connection_t *conn = request->conn;
+ gss_authn_info_t *gss_info = (code == 401) ? conn->authn_baton :
+ conn->proxy_authn_baton;
+
+ return do_auth(code == 401 ? HOST : PROXY,
+ code,
+ gss_info,
+ request->conn,
+ auth_hdr,
+ pool);
+}
+
+/* Setup the authn headers on this request message. */
+apr_status_t
+serf__setup_request_spnego_auth(peer_t peer,
+ int code,
+ serf_connection_t *conn,
+ serf_request_t *request,
+ const char *method,
+ const char *uri,
+ serf_bucket_t *hdrs_bkt)
+{
+ gss_authn_info_t *gss_info = (peer == HOST) ? conn->authn_baton :
+ conn->proxy_authn_baton;
+
+ /* If we have an ongoing authentication handshake, the handler of the
+ previous response will have created the authn headers for this request
+ already. */
+ if (gss_info && gss_info->header && gss_info->value) {
+ serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
+ "Set Negotiate authn header on retried request.\n");
+
+ serf_bucket_headers_setn(hdrs_bkt, gss_info->header,
+ gss_info->value);
+
+ /* We should send each token only once. */
+ gss_info->header = NULL;
+ gss_info->value = NULL;
+
+ return APR_SUCCESS;
+ }
+
+ switch (gss_info->pstate) {
+ case pstate_init:
+ /* We shouldn't normally arrive here, do nothing. */
+ break;
+ case pstate_undecided: /* fall through */
+ serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
+ "Assume for now that the server supports persistent "
+ "SPNEGO authentication.\n");
+ /* Nothing to do here. */
+ break;
+ case pstate_stateful:
+ serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
+ "SPNEGO on this connection is persistent, "
+ "don't set authn header on next request.\n");
+ /* Nothing to do here. */
+ break;
+ case pstate_stateless:
+ {
+ apr_status_t status;
+
+ /* Authentication on this connection is known to be stateless.
+ Add an initial Negotiate token for the server, to bypass the
+ 40x response we know we'll otherwise receive.
+ (RFC 4559 section 4.2) */
+ serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
+ "Add initial Negotiate header to request.\n");
+
+ status = do_auth(peer,
+ code,
+ gss_info,
+ conn,
+ 0l, /* no response authn header */
+ conn->pool);
+ if (status)
+ return status;
+
+ serf_bucket_headers_setn(hdrs_bkt, gss_info->header,
+ gss_info->value);
+ /* We should send each token only once. */
+ gss_info->header = NULL;
+ gss_info->value = NULL;
+ break;
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
+/* Function is called when 2xx responses are received. Normally we don't
+ * have to do anything, except for the first response after the
+ * authentication handshake. This specific response includes authentication
+ * data which should be validated by the client (mutual authentication).
+ */
+apr_status_t
+serf__validate_response_spnego_auth(peer_t peer,
+ int code,
+ serf_connection_t *conn,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ apr_pool_t *pool)
+{
+ gss_authn_info_t *gss_info;
+ const char *auth_hdr_name;
+
+ /* TODO: currently this function is only called when a response includes
+ an Authenticate header. This header is optional. If the server does
+ not provide this header on the first 2xx response, we will not promote
+ the connection from undecided to stateful. This won't break anything,
+ but means we stay in non-pipelining mode. */
+ serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
+ "Validate Negotiate response header.\n");
+
+ if (peer == HOST) {
+ gss_info = conn->authn_baton;
+ auth_hdr_name = "WWW-Authenticate";
+ } else {
+ gss_info = conn->proxy_authn_baton;
+ auth_hdr_name = "Proxy-Authenticate";
+ }
+
+ if (gss_info->state != gss_api_auth_completed) {
+ serf_bucket_t *hdrs;
+ const char *auth_hdr_val;
+ apr_status_t status;
+
+ hdrs = serf_bucket_response_get_headers(response);
+ auth_hdr_val = serf_bucket_headers_get(hdrs, auth_hdr_name);
+
+ status = do_auth(peer, code, gss_info, conn, auth_hdr_val, pool);
+ if (status)
+ return status;
+ }
+
+ if (gss_info->state == gss_api_auth_completed) {
+ switch(gss_info->pstate) {
+ case pstate_init:
+ /* Authentication of the first request is done. */
+ gss_info->pstate = pstate_undecided;
+ break;
+ case pstate_undecided:
+            /* The server didn't request authentication even though we
+               didn't add an Authorization header to the previous request.
+               That means it supports persistent authentication. */
+ gss_info->pstate = pstate_stateful;
+ serf_connection_set_max_outstanding_requests(conn, 0);
+ break;
+ default:
+ /* Nothing to do here. */
+ break;
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
+#endif /* SERF_HAVE_SPNEGO */
diff --git a/contrib/serf/auth/auth_spnego.h b/contrib/serf/auth/auth_spnego.h
new file mode 100644
index 0000000..5af3b09
--- /dev/null
+++ b/contrib/serf/auth/auth_spnego.h
@@ -0,0 +1,115 @@
+/* Copyright 2010 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AUTH_SPNEGO_H
+#define AUTH_SPNEGO_H
+
+#include <apr.h>
+#include <apr_pools.h>
+#include "serf.h"
+#include "serf_private.h"
+
+#if defined(SERF_HAVE_SSPI)
+#define SERF_HAVE_SPNEGO
+#define SERF_USE_SSPI
+#elif defined(SERF_HAVE_GSSAPI)
+#define SERF_HAVE_SPNEGO
+#define SERF_USE_GSSAPI
+#endif
+
+#ifdef SERF_HAVE_SPNEGO
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct serf__spnego_context_t serf__spnego_context_t;
+
+typedef struct serf__spnego_buffer_t {
+ apr_size_t length;
+ void *value;
+} serf__spnego_buffer_t;
+
+/* Create outbound security context.
+ *
+ * All temporary allocations will be performed in SCRATCH_POOL, while the
+ * security context will be allocated in RESULT_POOL and will be destroyed
+ * automatically on RESULT_POOL cleanup.
+ *
+ */
+apr_status_t
+serf__spnego_create_sec_context(serf__spnego_context_t **ctx_p,
+ const serf__authn_scheme_t *scheme,
+ apr_pool_t *result_pool,
+ apr_pool_t *scratch_pool);
+
+/* Initialize outbound security context.
+ *
+ * The function is used to build a security context between the client
+ * application and a remote peer.
+ *
+ * CTX is pointer to existing context created using
+ * serf__spnego_create_sec_context() function.
+ *
+ * SERVICE is the name of the Kerberos service, usually 'HTTP'. HOSTNAME is
+ * the canonical name of the destination server; the caller should resolve the
+ * server's alias to its canonical name.
+ *
+ * INPUT_BUF is a pointer to a structure describing the input token, if any.
+ * It should be zero-length on the first call.
+ *
+ * OUTPUT_BUF will be populated with a pointer to output data that should be
+ * sent to the destination server. This buffer will be automatically freed on
+ * RESULT_POOL cleanup.
+ *
+ * All temporary allocations will be performed in SCRATCH_POOL.
+ *
+ * Return value:
+ * - APR_EAGAIN The client must send the output token to the server and wait
+ * for a return token.
+ *
+ * - APR_SUCCESS The security context was successfully initialized. There is no
+ * need for another serf__spnego_init_sec_context call. If the function returns
+ * an output token, that is, if the OUTPUT_BUF is of nonzero length, that
+ * token must be sent to the server.
+ *
+ * Other return values indicate an error.
+ */
+apr_status_t
+serf__spnego_init_sec_context(serf__spnego_context_t *ctx,
+ const char *service,
+ const char *hostname,
+ serf__spnego_buffer_t *input_buf,
+ serf__spnego_buffer_t *output_buf,
+ apr_pool_t *result_pool,
+ apr_pool_t *scratch_pool
+ );
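The APR_EAGAIN/APR_SUCCESS contract described above implies a simple driver loop on the caller's side. A hedged sketch; send_token() and wait_for_token() are hypothetical stand-ins for the HTTP round trip, and ctx, hostname and the pools are assumed to exist in the surrounding code:

    serf__spnego_buffer_t in = { 0, NULL };   /* empty on the first call */
    serf__spnego_buffer_t out;
    apr_status_t status;

    do {
        status = serf__spnego_init_sec_context(ctx, "HTTP", hostname,
                                               &in, &out,
                                               result_pool, scratch_pool);
        if (status != APR_SUCCESS && status != APR_EAGAIN)
            return status;                        /* hard failure */

        if (out.length)
            send_token(out.value, out.length);    /* hypothetical helper */

        if (status == APR_EAGAIN)
            wait_for_token(&in);                  /* hypothetical helper */
    } while (status == APR_EAGAIN);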
+
+/*
+ * Reset a previously created security context so we can start with a new one.
+ *
+ * This is triggered when the server requires per-request authentication,
+ * where each request requires a new security context.
+ */
+apr_status_t
+serf__spnego_reset_sec_context(serf__spnego_context_t *ctx);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* SERF_HAVE_SPNEGO */
+
+#endif /* !AUTH_SPNEGO_H */
diff --git a/contrib/serf/auth/auth_spnego_gss.c b/contrib/serf/auth/auth_spnego_gss.c
new file mode 100644
index 0000000..aa3ebc6
--- /dev/null
+++ b/contrib/serf/auth/auth_spnego_gss.c
@@ -0,0 +1,224 @@
+/* Copyright 2009 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "serf.h"
+#include "serf_private.h"
+#include "auth_spnego.h"
+
+#ifdef SERF_USE_GSSAPI
+#include <apr_strings.h>
+#include <gssapi/gssapi.h>
+
+
+/* This module can support all authentication mechanisms as provided by
+ the GSS-API implementation, but for now it only supports SPNEGO for
+ Negotiate.
+ SPNEGO can delegate authentication to Kerberos if supported by the
+ host. */
+
+#ifndef GSS_SPNEGO_MECHANISM
+static gss_OID_desc spnego_mech_oid = { 6, "\x2b\x06\x01\x05\x05\x02" };
+#define GSS_SPNEGO_MECHANISM &spnego_mech_oid
+#endif
+
+struct serf__spnego_context_t
+{
+ /* GSSAPI context */
+ gss_ctx_id_t gss_ctx;
+
+ /* Mechanism used to authenticate. */
+ gss_OID gss_mech;
+};
+
+static void
+log_error(int verbose_flag, const char *filename,
+ serf__spnego_context_t *ctx,
+ OM_uint32 err_maj_stat,
+ OM_uint32 err_min_stat,
+ const char *msg)
+{
+ OM_uint32 maj_stat, min_stat;
+ gss_buffer_desc stat_buff;
+ OM_uint32 msg_ctx = 0;
+
+ if (verbose_flag) {
+ maj_stat = gss_display_status(&min_stat,
+ err_maj_stat,
+ GSS_C_GSS_CODE,
+ ctx->gss_mech,
+ &msg_ctx,
+ &stat_buff);
+ if (maj_stat == GSS_S_COMPLETE ||
+ maj_stat == GSS_S_FAILURE) {
+ maj_stat = gss_display_status(&min_stat,
+ err_min_stat,
+ GSS_C_MECH_CODE,
+ ctx->gss_mech,
+ &msg_ctx,
+ &stat_buff);
+ }
+
+ serf__log(verbose_flag, filename,
+ "%s (%x,%d): %s\n", msg,
+ err_maj_stat, err_min_stat, stat_buff.value);
+ }
+}
+
+/* Cleans the GSS context object, when the pool used to create it gets
+ cleared or destroyed. */
+static apr_status_t
+cleanup_ctx(void *data)
+{
+ serf__spnego_context_t *ctx = data;
+
+ if (ctx->gss_ctx != GSS_C_NO_CONTEXT) {
+ OM_uint32 gss_min_stat, gss_maj_stat;
+
+ gss_maj_stat = gss_delete_sec_context(&gss_min_stat, &ctx->gss_ctx,
+ GSS_C_NO_BUFFER);
+ if(GSS_ERROR(gss_maj_stat)) {
+ log_error(AUTH_VERBOSE, __FILE__, ctx,
+ gss_maj_stat, gss_min_stat,
+ "Error cleaning up GSS security context");
+ return SERF_ERROR_AUTHN_FAILED;
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t
+cleanup_sec_buffer(void *data)
+{
+ OM_uint32 min_stat;
+ gss_buffer_desc *gss_buf = data;
+
+ gss_release_buffer(&min_stat, gss_buf);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t
+serf__spnego_create_sec_context(serf__spnego_context_t **ctx_p,
+ const serf__authn_scheme_t *scheme,
+ apr_pool_t *result_pool,
+ apr_pool_t *scratch_pool)
+{
+ serf__spnego_context_t *ctx;
+
+ ctx = apr_pcalloc(result_pool, sizeof(*ctx));
+
+ ctx->gss_ctx = GSS_C_NO_CONTEXT;
+ ctx->gss_mech = GSS_SPNEGO_MECHANISM;
+
+ apr_pool_cleanup_register(result_pool, ctx,
+ cleanup_ctx,
+ apr_pool_cleanup_null);
+
+ *ctx_p = ctx;
+
+ return APR_SUCCESS;
+}
+
+apr_status_t
+serf__spnego_reset_sec_context(serf__spnego_context_t *ctx)
+{
+ OM_uint32 dummy_stat;
+
+ if (ctx->gss_ctx)
+ (void)gss_delete_sec_context(&dummy_stat, &ctx->gss_ctx,
+ GSS_C_NO_BUFFER);
+ ctx->gss_ctx = GSS_C_NO_CONTEXT;
+
+ return APR_SUCCESS;
+}
+
+apr_status_t
+serf__spnego_init_sec_context(serf__spnego_context_t *ctx,
+ const char *service,
+ const char *hostname,
+ serf__spnego_buffer_t *input_buf,
+ serf__spnego_buffer_t *output_buf,
+ apr_pool_t *result_pool,
+ apr_pool_t *scratch_pool
+ )
+{
+ gss_buffer_desc gss_input_buf = GSS_C_EMPTY_BUFFER;
+ gss_buffer_desc *gss_output_buf_p;
+ OM_uint32 gss_min_stat, gss_maj_stat;
+ gss_name_t host_gss_name;
+ gss_buffer_desc bufdesc;
+ gss_OID dummy; /* unused */
+
+ /* Get the name for the HTTP service at the target host. */
+ /* TODO: should be shared between multiple requests. */
+ bufdesc.value = apr_pstrcat(scratch_pool, service, "@", hostname, NULL);
+ bufdesc.length = strlen(bufdesc.value);
+ serf__log(AUTH_VERBOSE, __FILE__, "Get principal for %s\n", bufdesc.value);
+ gss_maj_stat = gss_import_name (&gss_min_stat, &bufdesc,
+ GSS_C_NT_HOSTBASED_SERVICE,
+ &host_gss_name);
+ if(GSS_ERROR(gss_maj_stat)) {
+ log_error(AUTH_VERBOSE, __FILE__, ctx,
+ gss_maj_stat, gss_min_stat,
+ "Error converting principal name to GSS internal format ");
+ return SERF_ERROR_AUTHN_FAILED;
+ }
+
+    /* If the server sent us a token, pass it to gss_init_sec_context for
+       validation. */
+ gss_input_buf.value = input_buf->value;
+ gss_input_buf.length = input_buf->length;
+
+ gss_output_buf_p = apr_pcalloc(result_pool, sizeof(*gss_output_buf_p));
+
+ /* Establish a security context to the server. */
+ gss_maj_stat = gss_init_sec_context
+ (&gss_min_stat, /* minor_status */
+ GSS_C_NO_CREDENTIAL, /* XXXXX claimant_cred_handle */
+ &ctx->gss_ctx, /* gssapi context handle */
+ host_gss_name, /* HTTP@server name */
+ ctx->gss_mech, /* mech_type (SPNEGO) */
+ GSS_C_MUTUAL_FLAG, /* ensure the peer authenticates itself */
+ 0, /* default validity period */
+ GSS_C_NO_CHANNEL_BINDINGS, /* do not use channel bindings */
+ &gss_input_buf, /* server token, initially empty */
+ &dummy, /* actual mech type */
+ gss_output_buf_p, /* output_token */
+ NULL, /* ret_flags */
+ NULL /* not interested in remaining validity */
+ );
+
+ apr_pool_cleanup_register(result_pool, gss_output_buf_p,
+ cleanup_sec_buffer,
+ apr_pool_cleanup_null);
+
+ output_buf->value = gss_output_buf_p->value;
+ output_buf->length = gss_output_buf_p->length;
+
+ switch(gss_maj_stat) {
+ case GSS_S_COMPLETE:
+ return APR_SUCCESS;
+ case GSS_S_CONTINUE_NEEDED:
+ return APR_EAGAIN;
+ default:
+ log_error(AUTH_VERBOSE, __FILE__, ctx,
+ gss_maj_stat, gss_min_stat,
+ "Error during Kerberos handshake");
+ return SERF_ERROR_AUTHN_FAILED;
+ }
+}
+
+#endif /* SERF_USE_GSSAPI */
diff --git a/contrib/serf/auth/auth_spnego_sspi.c b/contrib/serf/auth/auth_spnego_sspi.c
new file mode 100644
index 0000000..ef13428
--- /dev/null
+++ b/contrib/serf/auth/auth_spnego_sspi.c
@@ -0,0 +1,297 @@
+/* Copyright 2010 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "auth_spnego.h"
+#include "serf.h"
+#include "serf_private.h"
+
+#ifdef SERF_USE_SSPI
+#include <apr.h>
+#include <apr_strings.h>
+
+#define SECURITY_WIN32
+#include <sspi.h>
+
+/* SEC_E_MUTUAL_AUTH_FAILED is not defined in Windows Platform SDK 5.0. */
+#ifndef SEC_E_MUTUAL_AUTH_FAILED
+#define SEC_E_MUTUAL_AUTH_FAILED _HRESULT_TYPEDEF_(0x80090363L)
+#endif
+
+struct serf__spnego_context_t
+{
+ CredHandle sspi_credentials;
+ CtxtHandle sspi_context;
+ BOOL initalized;
+ apr_pool_t *pool;
+
+ /* Service Principal Name (SPN) used for authentication. */
+ const char *target_name;
+
+ /* One of SERF_AUTHN_* authentication types.*/
+ int authn_type;
+};
+
+/* Map a SECURITY_STATUS from SSPI to an APR error code. Some error codes are
+ * mapped to our own codes and some to Win32 error codes:
+ * http://support.microsoft.com/kb/113996
+ */
+static apr_status_t
+map_sspi_status(SECURITY_STATUS sspi_status)
+{
+ switch(sspi_status)
+ {
+ case SEC_E_INSUFFICIENT_MEMORY:
+ return APR_FROM_OS_ERROR(ERROR_NO_SYSTEM_RESOURCES);
+ case SEC_E_INVALID_HANDLE:
+ return APR_FROM_OS_ERROR(ERROR_INVALID_HANDLE);
+ case SEC_E_UNSUPPORTED_FUNCTION:
+ return APR_FROM_OS_ERROR(ERROR_INVALID_FUNCTION);
+ case SEC_E_TARGET_UNKNOWN:
+ return APR_FROM_OS_ERROR(ERROR_BAD_NETPATH);
+ case SEC_E_INTERNAL_ERROR:
+ return APR_FROM_OS_ERROR(ERROR_INTERNAL_ERROR);
+ case SEC_E_SECPKG_NOT_FOUND:
+ case SEC_E_BAD_PKGID:
+ return APR_FROM_OS_ERROR(ERROR_NO_SUCH_PACKAGE);
+ case SEC_E_NO_IMPERSONATION:
+ return APR_FROM_OS_ERROR(ERROR_CANNOT_IMPERSONATE);
+ case SEC_E_NO_AUTHENTICATING_AUTHORITY:
+ return APR_FROM_OS_ERROR(ERROR_NO_LOGON_SERVERS);
+ case SEC_E_UNTRUSTED_ROOT:
+ return APR_FROM_OS_ERROR(ERROR_TRUST_FAILURE);
+ case SEC_E_WRONG_PRINCIPAL:
+ return APR_FROM_OS_ERROR(ERROR_WRONG_TARGET_NAME);
+ case SEC_E_MUTUAL_AUTH_FAILED:
+ return APR_FROM_OS_ERROR(ERROR_MUTUAL_AUTH_FAILED);
+ case SEC_E_TIME_SKEW:
+ return APR_FROM_OS_ERROR(ERROR_TIME_SKEW);
+ default:
+ return SERF_ERROR_AUTHN_FAILED;
+ }
+}
+
+/* Cleans the SSPI context object, when the pool used to create it gets
+ cleared or destroyed. */
+static apr_status_t
+cleanup_ctx(void *data)
+{
+ serf__spnego_context_t *ctx = data;
+
+ if (SecIsValidHandle(&ctx->sspi_context)) {
+ DeleteSecurityContext(&ctx->sspi_context);
+ SecInvalidateHandle(&ctx->sspi_context);
+ }
+
+    if (SecIsValidHandle(&ctx->sspi_credentials)) {
+        /* Release the credentials handle, not the (already deleted) context. */
+        FreeCredentialsHandle(&ctx->sspi_credentials);
+        SecInvalidateHandle(&ctx->sspi_credentials);
+    }
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t
+cleanup_sec_buffer(void *data)
+{
+ FreeContextBuffer(data);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t
+serf__spnego_create_sec_context(serf__spnego_context_t **ctx_p,
+ const serf__authn_scheme_t *scheme,
+ apr_pool_t *result_pool,
+ apr_pool_t *scratch_pool)
+{
+ SECURITY_STATUS sspi_status;
+ serf__spnego_context_t *ctx;
+ const char *sspi_package;
+
+ ctx = apr_pcalloc(result_pool, sizeof(*ctx));
+
+ SecInvalidateHandle(&ctx->sspi_context);
+ SecInvalidateHandle(&ctx->sspi_credentials);
+ ctx->initalized = FALSE;
+ ctx->pool = result_pool;
+ ctx->target_name = NULL;
+ ctx->authn_type = scheme->type;
+
+ apr_pool_cleanup_register(result_pool, ctx,
+ cleanup_ctx,
+ apr_pool_cleanup_null);
+
+ if (ctx->authn_type == SERF_AUTHN_NEGOTIATE)
+ sspi_package = "Negotiate";
+ else
+ sspi_package = "NTLM";
+
+ sspi_status = AcquireCredentialsHandle(
+ NULL, sspi_package, SECPKG_CRED_OUTBOUND,
+ NULL, NULL, NULL, NULL,
+ &ctx->sspi_credentials, NULL);
+
+ if (FAILED(sspi_status)) {
+ return map_sspi_status(sspi_status);
+ }
+
+ *ctx_p = ctx;
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t
+get_canonical_hostname(const char **canonname,
+ const char *hostname,
+ apr_pool_t *pool)
+{
+ struct addrinfo hints;
+ struct addrinfo *addrinfo;
+
+ ZeroMemory(&hints, sizeof(hints));
+ hints.ai_flags = AI_CANONNAME;
+
+ if (getaddrinfo(hostname, NULL, &hints, &addrinfo)) {
+ return apr_get_netos_error();
+ }
+
+ if (addrinfo) {
+ *canonname = apr_pstrdup(pool, addrinfo->ai_canonname);
+ }
+ else {
+ *canonname = apr_pstrdup(pool, hostname);
+ }
+
+ freeaddrinfo(addrinfo);
+ return APR_SUCCESS;
+}
+
+apr_status_t
+serf__spnego_reset_sec_context(serf__spnego_context_t *ctx)
+{
+ if (SecIsValidHandle(&ctx->sspi_context)) {
+ DeleteSecurityContext(&ctx->sspi_context);
+ SecInvalidateHandle(&ctx->sspi_context);
+ }
+
+ ctx->initalized = FALSE;
+
+ return APR_SUCCESS;
+}
+
+apr_status_t
+serf__spnego_init_sec_context(serf__spnego_context_t *ctx,
+ const char *service,
+ const char *hostname,
+ serf__spnego_buffer_t *input_buf,
+ serf__spnego_buffer_t *output_buf,
+ apr_pool_t *result_pool,
+ apr_pool_t *scratch_pool
+ )
+{
+ SECURITY_STATUS status;
+ ULONG actual_attr;
+ SecBuffer sspi_in_buffer;
+ SecBufferDesc sspi_in_buffer_desc;
+ SecBuffer sspi_out_buffer;
+ SecBufferDesc sspi_out_buffer_desc;
+ apr_status_t apr_status;
+ const char *canonname;
+
+ if (!ctx->initalized && ctx->authn_type == SERF_AUTHN_NEGOTIATE) {
+ apr_status = get_canonical_hostname(&canonname, hostname, scratch_pool);
+ if (apr_status) {
+ return apr_status;
+ }
+
+ ctx->target_name = apr_pstrcat(scratch_pool, service, "/", canonname,
+ NULL);
+
+ serf__log(AUTH_VERBOSE, __FILE__,
+ "Using SPN '%s' for '%s'\n", ctx->target_name, hostname);
+ }
+ else if (ctx->authn_type == SERF_AUTHN_NTLM)
+ {
+ /* Target name is not used for NTLM authentication. */
+ ctx->target_name = NULL;
+ }
+
+ /* Prepare input buffer description. */
+ sspi_in_buffer.BufferType = SECBUFFER_TOKEN;
+ sspi_in_buffer.pvBuffer = input_buf->value;
+ sspi_in_buffer.cbBuffer = input_buf->length;
+
+ sspi_in_buffer_desc.cBuffers = 1;
+ sspi_in_buffer_desc.pBuffers = &sspi_in_buffer;
+ sspi_in_buffer_desc.ulVersion = SECBUFFER_VERSION;
+
+ /* Output buffers. Output buffer will be allocated by system. */
+ sspi_out_buffer.BufferType = SECBUFFER_TOKEN;
+ sspi_out_buffer.pvBuffer = NULL;
+ sspi_out_buffer.cbBuffer = 0;
+
+ sspi_out_buffer_desc.cBuffers = 1;
+ sspi_out_buffer_desc.pBuffers = &sspi_out_buffer;
+ sspi_out_buffer_desc.ulVersion = SECBUFFER_VERSION;
+
+ status = InitializeSecurityContext(
+ &ctx->sspi_credentials,
+ ctx->initalized ? &ctx->sspi_context : NULL,
+ ctx->target_name,
+ ISC_REQ_ALLOCATE_MEMORY
+ | ISC_REQ_MUTUAL_AUTH
+ | ISC_REQ_CONFIDENTIALITY,
+ 0, /* Reserved1 */
+ SECURITY_NETWORK_DREP,
+ &sspi_in_buffer_desc,
+ 0, /* Reserved2 */
+ &ctx->sspi_context,
+ &sspi_out_buffer_desc,
+ &actual_attr,
+ NULL);
+
+ if (sspi_out_buffer.cbBuffer > 0) {
+ apr_pool_cleanup_register(result_pool, sspi_out_buffer.pvBuffer,
+ cleanup_sec_buffer,
+ apr_pool_cleanup_null);
+ }
+
+ ctx->initalized = TRUE;
+
+ /* Finish authentication if SSPI requires so. */
+ if (status == SEC_I_COMPLETE_NEEDED
+ || status == SEC_I_COMPLETE_AND_CONTINUE)
+ {
+ CompleteAuthToken(&ctx->sspi_context, &sspi_out_buffer_desc);
+ }
+
+ output_buf->value = sspi_out_buffer.pvBuffer;
+ output_buf->length = sspi_out_buffer.cbBuffer;
+
+ switch(status) {
+ case SEC_I_COMPLETE_AND_CONTINUE:
+ case SEC_I_CONTINUE_NEEDED:
+ return APR_EAGAIN;
+
+ case SEC_I_COMPLETE_NEEDED:
+ case SEC_E_OK:
+ return APR_SUCCESS;
+
+ default:
+ return map_sspi_status(status);
+ }
+}
+
+#endif /* SERF_USE_SSPI */
diff --git a/contrib/serf/buckets/aggregate_buckets.c b/contrib/serf/buckets/aggregate_buckets.c
new file mode 100644
index 0000000..2ae3fd5
--- /dev/null
+++ b/contrib/serf/buckets/aggregate_buckets.c
@@ -0,0 +1,488 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+
+/* Should be an APR_RING? */
+typedef struct bucket_list {
+ serf_bucket_t *bucket;
+ struct bucket_list *next;
+} bucket_list_t;
+
+typedef struct {
+ bucket_list_t *list; /* active buckets */
+ bucket_list_t *last; /* last bucket of the list */
+ bucket_list_t *done; /* we finished reading this; now pending a destroy */
+
+ serf_bucket_aggregate_eof_t hold_open;
+ void *hold_open_baton;
+
+ /* Does this bucket own its children? !0 if yes, 0 if not. */
+ int bucket_owner;
+} aggregate_context_t;
+
+
+static void cleanup_aggregate(aggregate_context_t *ctx,
+ serf_bucket_alloc_t *allocator)
+{
+ bucket_list_t *next_list;
+
+ /* If we finished reading a bucket during the previous read, then
+ * we can now toss that bucket.
+ */
+ while (ctx->done != NULL) {
+ next_list = ctx->done->next;
+
+ if (ctx->bucket_owner) {
+ serf_bucket_destroy(ctx->done->bucket);
+ }
+ serf_bucket_mem_free(allocator, ctx->done);
+
+ ctx->done = next_list;
+ }
+}
+
+void serf_bucket_aggregate_cleanup(
+ serf_bucket_t *bucket, serf_bucket_alloc_t *allocator)
+{
+ aggregate_context_t *ctx = bucket->data;
+
+ cleanup_aggregate(ctx, allocator);
+}
+
+static aggregate_context_t *create_aggregate(serf_bucket_alloc_t *allocator)
+{
+ aggregate_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+
+ ctx->list = NULL;
+ ctx->last = NULL;
+ ctx->done = NULL;
+ ctx->hold_open = NULL;
+ ctx->hold_open_baton = NULL;
+ ctx->bucket_owner = 1;
+
+ return ctx;
+}
+
+serf_bucket_t *serf_bucket_aggregate_create(
+ serf_bucket_alloc_t *allocator)
+{
+ aggregate_context_t *ctx;
+
+ ctx = create_aggregate(allocator);
+
+ return serf_bucket_create(&serf_bucket_type_aggregate, allocator, ctx);
+}
+
+serf_bucket_t *serf__bucket_stream_create(
+ serf_bucket_alloc_t *allocator,
+ serf_bucket_aggregate_eof_t fn,
+ void *baton)
+{
+ serf_bucket_t *bucket = serf_bucket_aggregate_create(allocator);
+ aggregate_context_t *ctx = bucket->data;
+
+ serf_bucket_aggregate_hold_open(bucket, fn, baton);
+
+ ctx->bucket_owner = 0;
+
+ return bucket;
+}
+
+
+static void serf_aggregate_destroy_and_data(serf_bucket_t *bucket)
+{
+ aggregate_context_t *ctx = bucket->data;
+ bucket_list_t *next_ctx;
+
+ while (ctx->list) {
+ if (ctx->bucket_owner) {
+ serf_bucket_destroy(ctx->list->bucket);
+ }
+ next_ctx = ctx->list->next;
+ serf_bucket_mem_free(bucket->allocator, ctx->list);
+ ctx->list = next_ctx;
+ }
+ cleanup_aggregate(ctx, bucket->allocator);
+
+ serf_default_destroy_and_data(bucket);
+}
+
+void serf_bucket_aggregate_become(serf_bucket_t *bucket)
+{
+ aggregate_context_t *ctx;
+
+ ctx = create_aggregate(bucket->allocator);
+
+ bucket->type = &serf_bucket_type_aggregate;
+ bucket->data = ctx;
+
+ /* The allocator remains the same. */
+}
+
+
+void serf_bucket_aggregate_prepend(
+ serf_bucket_t *aggregate_bucket,
+ serf_bucket_t *prepend_bucket)
+{
+ aggregate_context_t *ctx = aggregate_bucket->data;
+ bucket_list_t *new_list;
+
+ new_list = serf_bucket_mem_alloc(aggregate_bucket->allocator,
+ sizeof(*new_list));
+ new_list->bucket = prepend_bucket;
+ new_list->next = ctx->list;
+
+ ctx->list = new_list;
+}
+
+void serf_bucket_aggregate_append(
+ serf_bucket_t *aggregate_bucket,
+ serf_bucket_t *append_bucket)
+{
+ aggregate_context_t *ctx = aggregate_bucket->data;
+ bucket_list_t *new_list;
+
+ new_list = serf_bucket_mem_alloc(aggregate_bucket->allocator,
+ sizeof(*new_list));
+ new_list->bucket = append_bucket;
+ new_list->next = NULL;
+
+ /* If we use APR_RING, this is trivial. So, wait.
+ new_list->next = ctx->list;
+ ctx->list = new_list;
+ */
+ if (ctx->list == NULL) {
+ ctx->list = new_list;
+ ctx->last = new_list;
+ }
+ else {
+ ctx->last->next = new_list;
+ ctx->last = ctx->last->next;
+ }
+}
+
+void serf_bucket_aggregate_hold_open(serf_bucket_t *aggregate_bucket,
+ serf_bucket_aggregate_eof_t fn,
+ void *baton)
+{
+ aggregate_context_t *ctx = aggregate_bucket->data;
+ ctx->hold_open = fn;
+ ctx->hold_open_baton = baton;
+}
+
+void serf_bucket_aggregate_prepend_iovec(
+ serf_bucket_t *aggregate_bucket,
+ struct iovec *vecs,
+ int vecs_count)
+{
+ int i;
+
+ /* Add in reverse order. */
+ for (i = vecs_count - 1; i >= 0; i--) {
+ serf_bucket_t *new_bucket;
+
+ new_bucket = serf_bucket_simple_create(vecs[i].iov_base,
+ vecs[i].iov_len,
+ NULL, NULL,
+ aggregate_bucket->allocator);
+
+ serf_bucket_aggregate_prepend(aggregate_bucket, new_bucket);
+
+ }
+}
+
+void serf_bucket_aggregate_append_iovec(
+ serf_bucket_t *aggregate_bucket,
+ struct iovec *vecs,
+ int vecs_count)
+{
+ serf_bucket_t *new_bucket;
+
+ new_bucket = serf_bucket_iovec_create(vecs, vecs_count,
+ aggregate_bucket->allocator);
+
+ serf_bucket_aggregate_append(aggregate_bucket, new_bucket);
+}
+
+static apr_status_t read_aggregate(serf_bucket_t *bucket,
+ apr_size_t requested,
+ int vecs_size, struct iovec *vecs,
+ int *vecs_used)
+{
+ aggregate_context_t *ctx = bucket->data;
+ int cur_vecs_used;
+ apr_status_t status;
+
+ *vecs_used = 0;
+
+ if (!ctx->list) {
+ if (ctx->hold_open) {
+ return ctx->hold_open(ctx->hold_open_baton, bucket);
+ }
+ else {
+ return APR_EOF;
+ }
+ }
+
+ status = APR_SUCCESS;
+ while (requested) {
+ serf_bucket_t *head = ctx->list->bucket;
+
+ status = serf_bucket_read_iovec(head, requested, vecs_size, vecs,
+ &cur_vecs_used);
+
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ /* Add the number of vecs we read to our running total. */
+ *vecs_used += cur_vecs_used;
+
+ if (cur_vecs_used > 0 || status) {
+ bucket_list_t *next_list;
+
+ /* If we got SUCCESS (w/bytes) or EAGAIN, we want to return now
+ * as it isn't safe to read more without returning to our caller.
+ */
+ if (!status || APR_STATUS_IS_EAGAIN(status) || status == SERF_ERROR_WAIT_CONN) {
+ return status;
+ }
+
+ /* However, if we read EOF, we can stash this bucket in a
+ * to-be-freed list and move on to the next bucket. This ensures
+ * that the bucket stays alive (so as not to violate our read
+ * semantics). We'll destroy this list of buckets the next time
+ * we are asked to perform a read operation - thus ensuring the
+ * proper read lifetime.
+ */
+ next_list = ctx->list->next;
+ ctx->list->next = ctx->done;
+ ctx->done = ctx->list;
+ ctx->list = next_list;
+
+ /* If we have no more in our list, return EOF. */
+ if (!ctx->list) {
+ if (ctx->hold_open) {
+ return ctx->hold_open(ctx->hold_open_baton, bucket);
+ }
+ else {
+ return APR_EOF;
+ }
+ }
+
+ /* At this point, it is safe to read the next bucket - if we can. */
+
+ /* If the caller doesn't want ALL_AVAIL, decrement the size
+ * of the items we just read from the list.
+ */
+ if (requested != SERF_READ_ALL_AVAIL) {
+ int i;
+
+ for (i = 0; i < cur_vecs_used; i++)
+ requested -= vecs[i].iov_len;
+ }
+
+ /* Adjust our vecs to account for what we just read. */
+ vecs_size -= cur_vecs_used;
+ vecs += cur_vecs_used;
+
+ /* We reached our max. Oh well. */
+ if (!requested || !vecs_size) {
+ return APR_SUCCESS;
+ }
+ }
+ }
+
+ return status;
+}
+
+static apr_status_t serf_aggregate_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ aggregate_context_t *ctx = bucket->data;
+ struct iovec vec;
+ int vecs_used;
+ apr_status_t status;
+
+ cleanup_aggregate(ctx, bucket->allocator);
+
+ status = read_aggregate(bucket, requested, 1, &vec, &vecs_used);
+
+ if (!vecs_used) {
+ *len = 0;
+ }
+ else {
+ *data = vec.iov_base;
+ *len = vec.iov_len;
+ }
+
+ return status;
+}
+
+static apr_status_t serf_aggregate_read_iovec(serf_bucket_t *bucket,
+ apr_size_t requested,
+ int vecs_size,
+ struct iovec *vecs,
+ int *vecs_used)
+{
+ aggregate_context_t *ctx = bucket->data;
+
+ cleanup_aggregate(ctx, bucket->allocator);
+
+ return read_aggregate(bucket, requested, vecs_size, vecs, vecs_used);
+}
+
+static apr_status_t serf_aggregate_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ aggregate_context_t *ctx = bucket->data;
+ apr_status_t status;
+
+ cleanup_aggregate(ctx, bucket->allocator);
+
+ do {
+ serf_bucket_t *head;
+
+ *len = 0;
+
+ if (!ctx->list) {
+ if (ctx->hold_open) {
+ return ctx->hold_open(ctx->hold_open_baton, bucket);
+ }
+ else {
+ return APR_EOF;
+ }
+ }
+
+ head = ctx->list->bucket;
+
+ status = serf_bucket_readline(head, acceptable, found,
+ data, len);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ if (status == APR_EOF) {
+ bucket_list_t *next_list;
+
+ /* head bucket is empty; move it to the to-be-cleaned-up list. */
+ next_list = ctx->list->next;
+ ctx->list->next = ctx->done;
+ ctx->done = ctx->list;
+ ctx->list = next_list;
+
+ /* If we have no more in our list, return EOF. */
+ if (!ctx->list) {
+ if (ctx->hold_open) {
+ return ctx->hold_open(ctx->hold_open_baton, bucket);
+ }
+ else {
+ return APR_EOF;
+ }
+ }
+
+ /* we read something, so bail out and let the application read again. */
+ if (*len)
+ status = APR_SUCCESS;
+ }
+
+ /* continue with APR_SUCCESS or APR_EOF and no data read yet. */
+ } while (!*len && status != APR_EAGAIN);
+
+ return status;
+}
+
+static apr_status_t serf_aggregate_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ aggregate_context_t *ctx = bucket->data;
+ serf_bucket_t *head;
+ apr_status_t status;
+
+ cleanup_aggregate(ctx, bucket->allocator);
+
+ /* Peek the first bucket in the list, if any. */
+ if (!ctx->list) {
+ *len = 0;
+ if (ctx->hold_open) {
+ status = ctx->hold_open(ctx->hold_open_baton, bucket);
+ if (status == APR_EAGAIN)
+ status = APR_SUCCESS;
+ return status;
+ }
+ else {
+ return APR_EOF;
+ }
+ }
+
+ head = ctx->list->bucket;
+
+ status = serf_bucket_peek(head, data, len);
+
+ if (status == APR_EOF) {
+ if (ctx->list->next) {
+ status = APR_SUCCESS;
+ } else {
+ if (ctx->hold_open) {
+ status = ctx->hold_open(ctx->hold_open_baton, bucket);
+ if (status == APR_EAGAIN)
+ status = APR_SUCCESS;
+ return status;
+ }
+ }
+ }
+
+ return status;
+}
+
+static serf_bucket_t * serf_aggregate_read_bucket(
+ serf_bucket_t *bucket,
+ const serf_bucket_type_t *type)
+{
+ aggregate_context_t *ctx = bucket->data;
+ serf_bucket_t *found_bucket;
+
+ if (!ctx->list) {
+ return NULL;
+ }
+
+ if (ctx->list->bucket->type == type) {
+ /* Got the bucket. Consume it from our list. */
+ found_bucket = ctx->list->bucket;
+ ctx->list = ctx->list->next;
+ return found_bucket;
+ }
+
+ /* Call read_bucket on first one in our list. */
+ return serf_bucket_read_bucket(ctx->list->bucket, type);
+}
+
+
+const serf_bucket_type_t serf_bucket_type_aggregate = {
+ "AGGREGATE",
+ serf_aggregate_read,
+ serf_aggregate_readline,
+ serf_aggregate_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_aggregate_read_bucket,
+ serf_aggregate_peek,
+ serf_aggregate_destroy_and_data,
+};
diff --git a/contrib/serf/buckets/allocator.c b/contrib/serf/buckets/allocator.c
new file mode 100644
index 0000000..108167e
--- /dev/null
+++ b/contrib/serf/buckets/allocator.c
@@ -0,0 +1,434 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+
+#include <apr_pools.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+
+typedef struct node_header_t {
+ apr_size_t size;
+ union {
+ struct node_header_t *next; /* if size == 0 (freed/inactive) */
+ /* no data if size == STANDARD_NODE_SIZE */
+ apr_memnode_t *memnode; /* if size > STANDARD_NODE_SIZE */
+ } u;
+} node_header_t;
+
+/* The size of a node_header_t, properly aligned. Note that (normally)
+ * this macro will round the size to a multiple of 8 bytes. Keep this in
+ * mind when altering the node_header_t structure. Also, keep in mind that
+ * node_header_t is an overhead for every allocation performed through
+ * the serf_bucket_mem_alloc() function.
+ */
+#define SIZEOF_NODE_HEADER_T APR_ALIGN_DEFAULT(sizeof(node_header_t))
+
+
+/* STANDARD_NODE_SIZE is manually set to an allocation size that will
+ * capture most allocations performed via this API. It must be "large
+ * enough" to avoid lots of spillage to allocating directly from the
+ * apr_allocator associated with the bucket allocator. The apr_allocator
+ * has a minimum size of 8k, which can be expensive if you missed the
+ * STANDARD_NODE_SIZE by just a few bytes.
+ */
+/* ### we should define some rules or ways to determine how to derive
+ * ### a "good" value for this. probably log some stats on allocs, then
+ * ### analyze them for size "misses". then find the balance point between
+ * ### wasted space due to min-size allocator, and wasted-space due to
+ * ### size-spill to the 8k minimum.
+ */
+#define STANDARD_NODE_SIZE 128
+
+/* When allocating a block of memory from the allocator, we should go for
+ * an 8k block, minus the overhead that the allocator needs.
+ */
+#define ALLOC_AMT (8192 - APR_MEMNODE_T_SIZE)
+
+/* Define DEBUG_DOUBLE_FREE if you're interested in debugging double-free
+ * calls to serf_bucket_mem_free().
+ */
+#define DEBUG_DOUBLE_FREE
+
+
+typedef struct {
+ const serf_bucket_t *bucket;
+ apr_status_t last;
+} read_status_t;
+
+#define TRACK_BUCKET_COUNT 100 /* track N buckets' status */
+
+typedef struct {
+ int next_index; /* info[] is a ring. next bucket goes at this idx. */
+ int num_used;
+
+ read_status_t info[TRACK_BUCKET_COUNT];
+} track_state_t;
+
+
+struct serf_bucket_alloc_t {
+ apr_pool_t *pool;
+ apr_allocator_t *allocator;
+ int own_allocator;
+
+ serf_unfreed_func_t unfreed;
+ void *unfreed_baton;
+
+ apr_uint32_t num_alloc;
+
+ node_header_t *freelist; /* free STANDARD_NODE_SIZE blocks */
+ apr_memnode_t *blocks; /* blocks we allocated for subdividing */
+
+ track_state_t *track;
+};
+
+/* ==================================================================== */
+
+
+static apr_status_t allocator_cleanup(void *data)
+{
+ serf_bucket_alloc_t *allocator = data;
+
+ /* If we allocated anything, give it back. */
+ if (allocator->blocks) {
+ apr_allocator_free(allocator->allocator, allocator->blocks);
+ }
+
+ /* If we allocated our own allocator (?!), destroy it here. */
+ if (allocator->own_allocator) {
+ apr_allocator_destroy(allocator->allocator);
+ }
+
+ return APR_SUCCESS;
+}
+
+serf_bucket_alloc_t *serf_bucket_allocator_create(
+ apr_pool_t *pool,
+ serf_unfreed_func_t unfreed,
+ void *unfreed_baton)
+{
+ serf_bucket_alloc_t *allocator = apr_pcalloc(pool, sizeof(*allocator));
+
+ allocator->pool = pool;
+ allocator->allocator = apr_pool_allocator_get(pool);
+ if (allocator->allocator == NULL) {
+ /* This most likely means pools are running in debug mode; create our
+ * own allocator to manage the memory ourselves. */
+ apr_allocator_create(&allocator->allocator);
+ allocator->own_allocator = 1;
+ }
+ allocator->unfreed = unfreed;
+ allocator->unfreed_baton = unfreed_baton;
+
+#ifdef SERF_DEBUG_BUCKET_USE
+ {
+ track_state_t *track;
+
+ track = allocator->track = apr_palloc(pool, sizeof(*allocator->track));
+ track->next_index = 0;
+ track->num_used = 0;
+ }
+#endif
+
+ /* NOTE: On a fork/exec, the child won't bother cleaning up memory.
+ This is just fine... the memory will go away at exec.
+
+ NOTE: If the child will NOT perform an exec, then the parent or
+ the child will need to decide who cleans up any outstanding
+ connections/buckets (as appropriate). */
+ apr_pool_cleanup_register(pool, allocator,
+ allocator_cleanup, apr_pool_cleanup_null);
+
+ return allocator;
+}
+
+apr_pool_t *serf_bucket_allocator_get_pool(
+ const serf_bucket_alloc_t *allocator)
+{
+ return allocator->pool;
+}
+
+
+void *serf_bucket_mem_alloc(
+ serf_bucket_alloc_t *allocator,
+ apr_size_t size)
+{
+ node_header_t *node;
+
+ ++allocator->num_alloc;
+
+ size += SIZEOF_NODE_HEADER_T;
+ if (size <= STANDARD_NODE_SIZE) {
+ if (allocator->freelist) {
+ /* just pull a node off our freelist */
+ node = allocator->freelist;
+ allocator->freelist = node->u.next;
+#ifdef DEBUG_DOUBLE_FREE
+ /* When we free an item, we set its size to zero. Thus, when
+ * we return it to the caller, we must ensure the size is set
+ * properly.
+ */
+ node->size = STANDARD_NODE_SIZE;
+#endif
+ }
+ else {
+ apr_memnode_t *active = allocator->blocks;
+
+ if (active == NULL
+ || active->first_avail + STANDARD_NODE_SIZE >= active->endp) {
+ apr_memnode_t *head = allocator->blocks;
+
+ /* ran out of room. grab another block. */
+ active = apr_allocator_alloc(allocator->allocator, ALLOC_AMT);
+
+ /* System couldn't provide us with memory. */
+ if (active == NULL)
+ return NULL;
+
+ /* link the block into our tracking list */
+ allocator->blocks = active;
+ active->next = head;
+ }
+
+ node = (node_header_t *)active->first_avail;
+ node->size = STANDARD_NODE_SIZE;
+ active->first_avail += STANDARD_NODE_SIZE;
+ }
+ }
+ else {
+ apr_memnode_t *memnode = apr_allocator_alloc(allocator->allocator,
+ size);
+
+ if (memnode == NULL)
+ return NULL;
+
+ node = (node_header_t *)memnode->first_avail;
+ node->u.memnode = memnode;
+ node->size = size;
+ }
+
+ return ((char *)node) + SIZEOF_NODE_HEADER_T;
+}
+
+
+void *serf_bucket_mem_calloc(
+ serf_bucket_alloc_t *allocator,
+ apr_size_t size)
+{
+ void *mem;
+ mem = serf_bucket_mem_alloc(allocator, size);
+ if (mem == NULL)
+ return NULL;
+ memset(mem, 0, size);
+ return mem;
+}
+
+
+void serf_bucket_mem_free(
+ serf_bucket_alloc_t *allocator,
+ void *block)
+{
+ node_header_t *node;
+
+ --allocator->num_alloc;
+
+ node = (node_header_t *)((char *)block - SIZEOF_NODE_HEADER_T);
+
+ if (node->size == STANDARD_NODE_SIZE) {
+ /* put the node onto our free list */
+ node->u.next = allocator->freelist;
+ allocator->freelist = node;
+
+#ifdef DEBUG_DOUBLE_FREE
+ /* note that this thing was freed. */
+ node->size = 0;
+ }
+ else if (node->size == 0) {
+ /* damn thing was freed already. */
+ abort();
+#endif
+ }
+ else {
+#ifdef DEBUG_DOUBLE_FREE
+ /* note that this thing was freed. */
+ node->size = 0;
+#endif
+
+ /* now free it */
+ apr_allocator_free(allocator->allocator, node->u.memnode);
+ }
+}
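+
+/* Usage sketch (illustrative; `pool` stands for an existing apr_pool_t and
+ * is an assumption of this example, not something defined in this file):
+ *
+ *   serf_bucket_alloc_t *alloc =
+ *       serf_bucket_allocator_create(pool, NULL, NULL);
+ *
+ *   void *mem = serf_bucket_mem_alloc(alloc, 64);
+ *   serf_bucket_mem_free(alloc, mem);
+ *
+ * A 64-byte request (plus its node header) fits in a STANDARD_NODE_SIZE
+ * node, so it is served from - and later returned to - the allocator's
+ * freelist rather than hitting the underlying apr_allocator.
+ */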
+
+
+/* ==================================================================== */
+
+
+#ifdef SERF_DEBUG_BUCKET_USE
+
+static read_status_t *find_read_status(
+ track_state_t *track,
+ const serf_bucket_t *bucket,
+ int create_rs)
+{
+ read_status_t *rs;
+
+ if (track->num_used) {
+ int count = track->num_used;
+ int idx = track->next_index;
+
+ /* Search backwards. In all likelihood, the bucket which just got
+ * read was read very recently.
+ */
+ while (count-- > 0) {
+ if (!idx--) {
+ /* assert: track->num_used == TRACK_BUCKET_COUNT */
+ idx = track->num_used - 1;
+ }
+ if ((rs = &track->info[idx])->bucket == bucket) {
+ return rs;
+ }
+ }
+ }
+
+ /* Only create a new read_status_t when asked. */
+ if (!create_rs)
+ return NULL;
+
+ if (track->num_used < TRACK_BUCKET_COUNT) {
+ /* We're still filling up the ring. */
+ ++track->num_used;
+ }
+
+ rs = &track->info[track->next_index];
+ rs->bucket = bucket;
+ rs->last = APR_SUCCESS; /* ### the right initial value? */
+
+ if (++track->next_index == TRACK_BUCKET_COUNT)
+ track->next_index = 0;
+
+ return rs;
+}
+
+#endif /* SERF_DEBUG_BUCKET_USE */
+
+
+apr_status_t serf_debug__record_read(
+ const serf_bucket_t *bucket,
+ apr_status_t status)
+{
+#ifndef SERF_DEBUG_BUCKET_USE
+ return status;
+#else
+
+ track_state_t *track = bucket->allocator->track;
+ read_status_t *rs = find_read_status(track, bucket, 1);
+
+ /* Validate that the previous status value allowed for another read. */
+ if (APR_STATUS_IS_EAGAIN(rs->last) /* ### or APR_EOF? */) {
+ /* Somebody read when they weren't supposed to. Bail. */
+ abort();
+ }
+
+ /* Save the current status for later. */
+ rs->last = status;
+
+ return status;
+#endif
+}
+
+
+void serf_debug__entered_loop(serf_bucket_alloc_t *allocator)
+{
+#ifdef SERF_DEBUG_BUCKET_USE
+
+ track_state_t *track = allocator->track;
+ read_status_t *rs = &track->info[0];
+
+ for ( ; track->num_used; --track->num_used, ++rs ) {
+ if (rs->last == APR_SUCCESS) {
+ /* Somebody should have read this bucket again. */
+ abort();
+ }
+
+ /* ### other status values? */
+ }
+
+ /* num_used was reset. also need to reset the next index. */
+ track->next_index = 0;
+
+#endif
+}
+
+
+void serf_debug__closed_conn(serf_bucket_alloc_t *allocator)
+{
+#ifdef SERF_DEBUG_BUCKET_USE
+
+ /* Just reset the number used so that we don't examine the info[] */
+ allocator->track->num_used = 0;
+ allocator->track->next_index = 0;
+
+#endif
+}
+
+
+void serf_debug__bucket_destroy(const serf_bucket_t *bucket)
+{
+#ifdef SERF_DEBUG_BUCKET_USE
+
+ track_state_t *track = bucket->allocator->track;
+ read_status_t *rs = find_read_status(track, bucket, 0);
+
+ if (rs != NULL && rs->last != APR_EOF) {
+ /* The bucket was destroyed before it was read to completion. */
+
+ /* Special exception for socket buckets. If a connection remains
+ * open, they are not read to completion.
+ */
+ if (SERF_BUCKET_IS_SOCKET(bucket))
+ return;
+
+ /* Ditto for SSL Decrypt buckets. */
+ if (SERF_BUCKET_IS_SSL_DECRYPT(bucket))
+ return;
+
+ /* Ditto for SSL Encrypt buckets. */
+ if (SERF_BUCKET_IS_SSL_ENCRYPT(bucket))
+ return;
+
+ /* Ditto for barrier buckets. */
+ if (SERF_BUCKET_IS_BARRIER(bucket))
+ return;
+
+
+ abort();
+ }
+
+#endif
+}
+
+
+void serf_debug__bucket_alloc_check(
+ serf_bucket_alloc_t *allocator)
+{
+#ifdef SERF_DEBUG_BUCKET_USE
+ if (allocator->num_alloc != 0) {
+ abort();
+ }
+#endif
+}
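+
+/* Note (sketch of an assumed build setup): the serf_debug__* hooks above
+ * compile to no-ops unless SERF_DEBUG_BUCKET_USE is defined at build time,
+ * e.g. by adding -DSERF_DEBUG_BUCKET_USE to CFLAGS. With it enabled, a
+ * bucket that is read again after EAGAIN, left unread after APR_SUCCESS,
+ * or destroyed before EOF trips an abort() in the checks above.
+ */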
+
diff --git a/contrib/serf/buckets/barrier_buckets.c b/contrib/serf/buckets/barrier_buckets.c
new file mode 100644
index 0000000..eb410ee
--- /dev/null
+++ b/contrib/serf/buckets/barrier_buckets.c
@@ -0,0 +1,97 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+
+typedef struct {
+ serf_bucket_t *stream;
+} barrier_context_t;
+
+
+serf_bucket_t *serf_bucket_barrier_create(
+ serf_bucket_t *stream,
+ serf_bucket_alloc_t *allocator)
+{
+ barrier_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->stream = stream;
+
+ return serf_bucket_create(&serf_bucket_type_barrier, allocator, ctx);
+}
+
+static apr_status_t serf_barrier_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ barrier_context_t *ctx = bucket->data;
+
+ return serf_bucket_read(ctx->stream, requested, data, len);
+}
+
+static apr_status_t serf_barrier_read_iovec(serf_bucket_t *bucket,
+ apr_size_t requested,
+ int vecs_size, struct iovec *vecs,
+ int *vecs_used)
+{
+ barrier_context_t *ctx = bucket->data;
+
+ return serf_bucket_read_iovec(ctx->stream, requested, vecs_size, vecs,
+ vecs_used);
+}
+
+static apr_status_t serf_barrier_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ barrier_context_t *ctx = bucket->data;
+
+ return serf_bucket_readline(ctx->stream, acceptable, found, data, len);
+}
+
+static apr_status_t serf_barrier_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ barrier_context_t *ctx = bucket->data;
+
+ return serf_bucket_peek(ctx->stream, data, len);
+}
+
+static void serf_barrier_destroy(serf_bucket_t *bucket)
+{
+ /* The intent of this bucket is not to let our wrapped buckets be
+ * destroyed. */
+
+ /* The option is for us to go ahead and 'eat' this bucket now,
+ * or just ignore the deletion entirely.
+ */
+ serf_default_destroy_and_data(bucket);
+}
+
+const serf_bucket_type_t serf_bucket_type_barrier = {
+ "BARRIER",
+ serf_barrier_read,
+ serf_barrier_readline,
+ serf_barrier_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_barrier_peek,
+ serf_barrier_destroy,
+};
diff --git a/contrib/serf/buckets/buckets.c b/contrib/serf/buckets/buckets.c
new file mode 100644
index 0000000..88e256b
--- /dev/null
+++ b/contrib/serf/buckets/buckets.c
@@ -0,0 +1,640 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+#include "serf_private.h"
+
+serf_bucket_t *serf_bucket_create(
+ const serf_bucket_type_t *type,
+ serf_bucket_alloc_t *allocator,
+ void *data)
+{
+ serf_bucket_t *bkt = serf_bucket_mem_alloc(allocator, sizeof(*bkt));
+
+ bkt->type = type;
+ bkt->data = data;
+ bkt->allocator = allocator;
+
+ return bkt;
+}
+
+
+apr_status_t serf_default_read_iovec(
+ serf_bucket_t *bucket,
+ apr_size_t requested,
+ int vecs_size,
+ struct iovec *vecs,
+ int *vecs_used)
+{
+ const char *data;
+ apr_size_t len;
+
+ /* Read some data from the bucket.
+ *
+ * Because we're an internal 'helper' to the bucket, we can't call the
+ * normal serf_bucket_read() call because the debug allocator tracker will
+ * end up marking the bucket as read *twice* - once for us and once for
+ * our caller - which is reading the same bucket. This leads to premature
+ * abort()s if we ever see EAGAIN. Instead, we'll go directly to the
+ * vtable and bypass the debug tracker.
+ */
+ apr_status_t status = bucket->type->read(bucket, requested, &data, &len);
+
+ /* assert that vecs_size >= 1 ? */
+
+ /* Return that data as a single iovec. */
+ if (len) {
+ vecs[0].iov_base = (void *)data; /* loses the 'const' */
+ vecs[0].iov_len = len;
+ *vecs_used = 1;
+ }
+ else {
+ *vecs_used = 0;
+ }
+
+ return status;
+}
+
+
+apr_status_t serf_default_read_for_sendfile(
+ serf_bucket_t *bucket,
+ apr_size_t requested,
+ apr_hdtr_t *hdtr,
+ apr_file_t **file,
+ apr_off_t *offset,
+ apr_size_t *len)
+{
+ /* Read a bunch of stuff into the headers.
+ *
+ * See serf_default_read_iovec as to why we call into the vtable
+ * directly.
+ */
+ apr_status_t status = bucket->type->read_iovec(bucket, requested,
+ hdtr->numheaders,
+ hdtr->headers,
+ &hdtr->numheaders);
+
+ /* There isn't a file, and there are no trailers. */
+ *file = NULL;
+ hdtr->numtrailers = 0;
+
+ return status;
+}
+
+
+serf_bucket_t *serf_default_read_bucket(
+ serf_bucket_t *bucket,
+ const serf_bucket_type_t *type)
+{
+ return NULL;
+}
+
+
+void serf_default_destroy(serf_bucket_t *bucket)
+{
+#ifdef SERF_DEBUG_BUCKET_USE
+ serf_debug__bucket_destroy(bucket);
+#endif
+
+ serf_bucket_mem_free(bucket->allocator, bucket);
+}
+
+
+void serf_default_destroy_and_data(serf_bucket_t *bucket)
+{
+ serf_bucket_mem_free(bucket->allocator, bucket->data);
+ serf_default_destroy(bucket);
+}
+
+
+/* ==================================================================== */
+
+
+char *serf_bstrmemdup(serf_bucket_alloc_t *allocator,
+ const char *str,
+ apr_size_t size)
+{
+ char *newstr = serf_bucket_mem_alloc(allocator, size + 1);
+ memcpy(newstr, str, size);
+ newstr[size] = '\0';
+ return newstr;
+}
+
+
+void *serf_bmemdup(serf_bucket_alloc_t *allocator,
+ const void *mem,
+ apr_size_t size)
+{
+ void *newmem = serf_bucket_mem_alloc(allocator, size);
+ memcpy(newmem, mem, size);
+ return newmem;
+}
+
+
+char *serf_bstrdup(serf_bucket_alloc_t *allocator,
+ const char *str)
+{
+ apr_size_t size = strlen(str) + 1;
+ char *newstr = serf_bucket_mem_alloc(allocator, size);
+ memcpy(newstr, str, size);
+ return newstr;
+}
+
+char *serf_bstrcatv(serf_bucket_alloc_t *allocator, struct iovec *vec,
+ int vecs, apr_size_t *bytes_written)
+{
+ int i;
+ apr_size_t new_len = 0;
+ char *c, *newstr;
+
+ for (i = 0; i < vecs; i++) {
+ new_len += vec[i].iov_len;
+ }
+
+ /* It's up to the caller to free this memory later. */
+ newstr = serf_bucket_mem_alloc(allocator, new_len);
+
+ c = newstr;
+ for (i = 0; i < vecs; i++) {
+ memcpy(c, vec[i].iov_base, vec[i].iov_len);
+ c += vec[i].iov_len;
+ }
+
+ if (bytes_written) {
+ *bytes_written = c - newstr;
+ }
+
+ return newstr;
+}
+
+/* ==================================================================== */
+
+
+static void find_crlf(const char **data, apr_size_t *len, int *found)
+{
+ const char *start = *data;
+ const char *end = start + *len;
+
+ while (start < end) {
+ const char *cr = memchr(start, '\r', *len);
+
+ if (cr == NULL) {
+ break;
+ }
+ ++cr;
+
+ if (cr < end && cr[0] == '\n') {
+ *len -= cr + 1 - start;
+ *data = cr + 1;
+ *found = SERF_NEWLINE_CRLF;
+ return;
+ }
+ if (cr == end) {
+ *len = 0;
+ *data = end;
+ *found = SERF_NEWLINE_CRLF_SPLIT;
+ return;
+ }
+
+ /* It was a bare CR without an LF. Just move past it. */
+ *len -= cr - start;
+ start = cr;
+ }
+
+ *data = start + *len;
+ *len -= *data - start;
+ *found = SERF_NEWLINE_NONE;
+}
+
+
+void serf_util_readline(
+ const char **data,
+ apr_size_t *len,
+ int acceptable,
+ int *found)
+{
+ const char *start;
+ const char *cr;
+ const char *lf;
+ int want_cr;
+ int want_crlf;
+ int want_lf;
+
+ /* If _only_ CRLF is acceptable, then the scanning needs a loop to
+ * skip false hits on CR characters. Use a separate function.
+ */
+ if (acceptable == SERF_NEWLINE_CRLF) {
+ find_crlf(data, len, found);
+ return;
+ }
+
+ start = *data;
+ cr = lf = NULL;
+ want_cr = acceptable & SERF_NEWLINE_CR;
+ want_crlf = acceptable & SERF_NEWLINE_CRLF;
+ want_lf = acceptable & SERF_NEWLINE_LF;
+
+ if (want_cr || want_crlf) {
+ cr = memchr(start, '\r', *len);
+ }
+ if (want_lf) {
+ lf = memchr(start, '\n', *len);
+ }
+
+ if (cr != NULL) {
+ if (lf != NULL) {
+ if (cr + 1 == lf)
+ *found = want_crlf ? SERF_NEWLINE_CRLF : SERF_NEWLINE_CR;
+ else if (want_cr && cr < lf)
+ *found = SERF_NEWLINE_CR;
+ else
+ *found = SERF_NEWLINE_LF;
+ }
+ else if (cr == start + *len - 1) {
+ /* the CR occurred in the last byte of the buffer. this could be
+ * a CRLF split across the data boundary.
+ * ### FIX THIS LOGIC? does caller need to detect?
+ */
+ *found = want_crlf ? SERF_NEWLINE_CRLF_SPLIT : SERF_NEWLINE_CR;
+ }
+ else if (want_cr)
+ *found = SERF_NEWLINE_CR;
+ else /* want_crlf */
+ *found = SERF_NEWLINE_NONE;
+ }
+ else if (lf != NULL)
+ *found = SERF_NEWLINE_LF;
+ else
+ *found = SERF_NEWLINE_NONE;
+
+ switch (*found) {
+ case SERF_NEWLINE_LF:
+ *data = lf + 1;
+ break;
+ case SERF_NEWLINE_CR:
+ case SERF_NEWLINE_CRLF:
+ case SERF_NEWLINE_CRLF_SPLIT:
+ *data = cr + 1 + (*found == SERF_NEWLINE_CRLF);
+ break;
+ case SERF_NEWLINE_NONE:
+ *data += *len;
+ break;
+ default:
+ /* Not reachable */
+ return;
+ }
+
+ *len -= *data - start;
+}
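+
+/* Worked example (illustrative; the buffer contents are assumed):
+ *
+ *   const char *data = "Host: example.org\r\nUser-Agent: x";
+ *   apr_size_t len = strlen(data);
+ *   int found;
+ *
+ *   serf_util_readline(&data, &len, SERF_NEWLINE_ANY, &found);
+ *
+ * Afterwards `found` is SERF_NEWLINE_CRLF, `data` points at "User-Agent: x",
+ * and `len` has been reduced by the consumed line plus its CRLF terminator.
+ */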
+
+
+/* ==================================================================== */
+
+
+void serf_databuf_init(serf_databuf_t *databuf)
+{
+ /* nothing is sitting in the buffer */
+ databuf->remaining = 0;
+
+ /* avoid thinking we have hit EOF */
+ databuf->status = APR_SUCCESS;
+}
+
+/* Ensure the buffer is prepared for reading. Will return APR_SUCCESS,
+ * APR_EOF, or some failure code. *len is only set for EOF. */
+static apr_status_t common_databuf_prep(serf_databuf_t *databuf,
+ apr_size_t *len)
+{
+ apr_size_t readlen;
+ apr_status_t status;
+
+ /* if there is data in the buffer, then we're happy. */
+ if (databuf->remaining > 0)
+ return APR_SUCCESS;
+
+ /* if we already hit EOF, then keep returning that. */
+ if (APR_STATUS_IS_EOF(databuf->status)) {
+ /* *data = NULL; ?? */
+ *len = 0;
+ return APR_EOF;
+ }
+
+ /* refill the buffer */
+ status = (*databuf->read)(databuf->read_baton, sizeof(databuf->buf),
+ databuf->buf, &readlen);
+ if (SERF_BUCKET_READ_ERROR(status)) {
+ return status;
+ }
+
+ databuf->current = databuf->buf;
+ databuf->remaining = readlen;
+ databuf->status = status;
+
+ return APR_SUCCESS;
+}
+
+
+apr_status_t serf_databuf_read(
+ serf_databuf_t *databuf,
+ apr_size_t requested,
+ const char **data,
+ apr_size_t *len)
+{
+ apr_status_t status = common_databuf_prep(databuf, len);
+ if (status)
+ return status;
+
+ /* peg the requested amount to what we have remaining */
+ if (requested == SERF_READ_ALL_AVAIL || requested > databuf->remaining)
+ requested = databuf->remaining;
+
+ /* return the values */
+ *data = databuf->current;
+ *len = requested;
+
+ /* adjust our internal state to note we've consumed some data */
+ databuf->current += requested;
+ databuf->remaining -= requested;
+
+ /* If we read everything, then we need to return whatever the data
+ * read returned to us. This is going to be APR_EOF or APR_EAGAIN.
+ * If we have NOT read everything, then return APR_SUCCESS to indicate
+ * that we're ready to return some more if asked.
+ */
+ return databuf->remaining ? APR_SUCCESS : databuf->status;
+}
+
+
+apr_status_t serf_databuf_readline(
+ serf_databuf_t *databuf,
+ int acceptable,
+ int *found,
+ const char **data,
+ apr_size_t *len)
+{
+ apr_status_t status = common_databuf_prep(databuf, len);
+ if (status)
+ return status;
+
+ /* the returned line will start at the current position. */
+ *data = databuf->current;
+
+ /* read a line from the buffer, and adjust the various pointers. */
+ serf_util_readline(&databuf->current, &databuf->remaining, acceptable,
+ found);
+
+ /* the length matches the amount consumed by the readline */
+ *len = databuf->current - *data;
+
+ /* see serf_databuf_read's return condition */
+ return databuf->remaining ? APR_SUCCESS : databuf->status;
+}
+
+
+apr_status_t serf_databuf_peek(
+ serf_databuf_t *databuf,
+ const char **data,
+ apr_size_t *len)
+{
+ apr_status_t status = common_databuf_prep(databuf, len);
+ if (status)
+ return status;
+
+ /* return everything we have */
+ *data = databuf->current;
+ *len = databuf->remaining;
+
+ /* If the last read returned EOF, then the peek should return the same.
+ * The other possibility in databuf->status is APR_EAGAIN, which we
+ * should never return. Thus, just return APR_SUCCESS for non-EOF cases.
+ */
+ if (APR_STATUS_IS_EOF(databuf->status))
+ return APR_EOF;
+ return APR_SUCCESS;
+}
+
+
+/* ==================================================================== */
+
+
+void serf_linebuf_init(serf_linebuf_t *linebuf)
+{
+ linebuf->state = SERF_LINEBUF_EMPTY;
+ linebuf->used = 0;
+}
+
+
+apr_status_t serf_linebuf_fetch(
+ serf_linebuf_t *linebuf,
+ serf_bucket_t *bucket,
+ int acceptable)
+{
+ /* If we had a complete line, then assume the caller has used it, so
+ * we can now reset the state.
+ */
+ if (linebuf->state == SERF_LINEBUF_READY) {
+ linebuf->state = SERF_LINEBUF_EMPTY;
+
+ /* Reset the used count, too, so we don't have to test the state
+ * before using this value.
+ */
+ linebuf->used = 0;
+ }
+
+ while (1) {
+ apr_status_t status;
+ const char *data;
+ apr_size_t len;
+
+ if (linebuf->state == SERF_LINEBUF_CRLF_SPLIT) {
+ /* On the previous read, we received just a CR. The LF might
+ * be present, but the bucket couldn't see it. We need to
+ * examine a single character to determine how to handle the
+ * split CRLF.
+ */
+
+ status = serf_bucket_peek(bucket, &data, &len);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ if (len > 0) {
+ if (*data == '\n') {
+ /* We saw the second part of CRLF. We don't need to
+ * save that character, so do an actual read to suck
+ * up that character.
+ */
+ /* ### check status */
+ (void) serf_bucket_read(bucket, 1, &data, &len);
+ }
+ /* else:
+ * We saw the first character of the next line. Thus,
+ * the current line is terminated by the CR. Just
+ * ignore whatever we peeked at. The next reader will
+ * see it and handle it as appropriate.
+ */
+
+ /* Whatever was read, the line is now ready for use. */
+ linebuf->state = SERF_LINEBUF_READY;
+ } else {
+ /* no data available, try again later. */
+ return APR_EAGAIN;
+ }
+ }
+ else {
+ int found;
+
+ status = serf_bucket_readline(bucket, acceptable, &found,
+ &data, &len);
+ if (SERF_BUCKET_READ_ERROR(status)) {
+ return status;
+ }
+ /* Some bucket types (socket) might need an extra read to find
+ out EOF state, so they'll return no data in that read. This
+ means we're done reading; return what we got. */
+ if (APR_STATUS_IS_EOF(status) && len == 0) {
+ return status;
+ }
+ if (linebuf->used + len > sizeof(linebuf->line)) {
+ /* ### need a "line too long" error */
+ return APR_EGENERAL;
+ }
+
+ /* Note: our logic doesn't change for SERF_LINEBUF_PARTIAL. That
+ * only affects how we fill the buffer. It is a communication to
+ * our caller on whether the line is ready or not.
+ */
+
+ /* If we didn't see a newline, then we should mark the line
+ * buffer as partially complete.
+ */
+ if (found == SERF_NEWLINE_NONE) {
+ linebuf->state = SERF_LINEBUF_PARTIAL;
+ }
+ else if (found == SERF_NEWLINE_CRLF_SPLIT) {
+ linebuf->state = SERF_LINEBUF_CRLF_SPLIT;
+
+ /* Toss the partial CR. We won't ever need it. */
+ --len;
+ }
+ else {
+ /* We got a newline (of some form). We don't need it
+ * in the line buffer, so back up the length. Then
+ * mark the line as ready.
+ */
+ len -= 1 + (found == SERF_NEWLINE_CRLF);
+
+ linebuf->state = SERF_LINEBUF_READY;
+ }
+
+ /* ### it would be nice to avoid this copy if at all possible,
+ ### and just return the data/len pair to the caller. we're
+ ### keeping it simple for now. */
+ memcpy(&linebuf->line[linebuf->used], data, len);
+ linebuf->used += len;
+ }
+
+ /* If we saw anything besides "success. please read again", then
+ * we should return that status. If the line was completed, then
+ * we should also return.
+ */
+ if (status || linebuf->state == SERF_LINEBUF_READY)
+ return status;
+
+ /* We got APR_SUCCESS and the line buffer is not complete. Let's
+ * loop to read some more data.
+ */
+ }
+ /* NOTREACHED */
+}
+
+/* Logging functions.
+ Use with one of the [COMP]_VERBOSE defines so that the compiler knows to
+ optimize this code out when no logging is needed. */
+static void log_time()
+{
+ apr_time_exp_t tm;
+
+ apr_time_exp_lt(&tm, apr_time_now());
+ fprintf(stderr, "[%d-%02d-%02dT%02d:%02d:%02d.%06d%+03d] ",
+ 1900 + tm.tm_year, 1 + tm.tm_mon, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec, tm.tm_usec,
+ tm.tm_gmtoff/3600);
+}
+
+void serf__log(int verbose_flag, const char *filename, const char *fmt, ...)
+{
+ va_list argp;
+
+ if (verbose_flag) {
+ log_time();
+
+ if (filename)
+ fprintf(stderr, "%s: ", filename);
+
+ va_start(argp, fmt);
+ vfprintf(stderr, fmt, argp);
+ va_end(argp);
+ }
+}
+
+void serf__log_nopref(int verbose_flag, const char *fmt, ...)
+{
+ va_list argp;
+
+ if (verbose_flag) {
+ va_start(argp, fmt);
+ vfprintf(stderr, fmt, argp);
+ va_end(argp);
+ }
+}
+
+void serf__log_skt(int verbose_flag, const char *filename, apr_socket_t *skt,
+ const char *fmt, ...)
+{
+ va_list argp;
+
+ if (verbose_flag) {
+ apr_sockaddr_t *sa;
+ log_time();
+
+ if (skt) {
+ /* Log local and remote ip address:port */
+ fprintf(stderr, "[l:");
+ if (apr_socket_addr_get(&sa, APR_LOCAL, skt) == APR_SUCCESS) {
+ char buf[32];
+ apr_sockaddr_ip_getbuf(buf, 32, sa);
+ fprintf(stderr, "%s:%d", buf, sa->port);
+ }
+ fprintf(stderr, " r:");
+ if (apr_socket_addr_get(&sa, APR_REMOTE, skt) == APR_SUCCESS) {
+ char buf[32];
+ apr_sockaddr_ip_getbuf(buf, 32, sa);
+ fprintf(stderr, "%s:%d", buf, sa->port);
+ }
+ fprintf(stderr, "] ");
+ }
+
+ if (filename)
+ fprintf(stderr, "%s: ", filename);
+
+ va_start(argp, fmt);
+ vfprintf(stderr, fmt, argp);
+ va_end(argp);
+ }
+}
+
diff --git a/contrib/serf/buckets/bwtp_buckets.c b/contrib/serf/buckets/bwtp_buckets.c
new file mode 100644
index 0000000..7ef3047
--- /dev/null
+++ b/contrib/serf/buckets/bwtp_buckets.c
@@ -0,0 +1,596 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+#include <apr_strings.h>
+#include <apr_lib.h>
+#include <apr_date.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+#include "serf_bucket_types.h"
+
+#include <stdlib.h>
+
+/* This is an implementation of the Bidirectional Web Transfer Protocol (BWTP)
+ * See:
+ * http://bwtp.wikidot.com/
+ */
+
+typedef struct {
+ int channel;
+ int open;
+ int type; /* 0 = header, 1 = message */ /* TODO enum? */
+ const char *phrase;
+ serf_bucket_t *headers;
+
+ char req_line[1000];
+} frame_context_t;
+
+typedef struct {
+ serf_bucket_t *stream;
+ serf_bucket_t *body; /* Pointer to the stream wrapping the body. */
+ serf_bucket_t *headers; /* holds parsed headers */
+
+ enum {
+ STATE_STATUS_LINE, /* reading status line */
+ STATE_HEADERS, /* reading headers */
+ STATE_BODY, /* reading body */
+ STATE_DONE /* we've sent EOF */
+ } state;
+
+ /* Buffer for accumulating a line from the response. */
+ serf_linebuf_t linebuf;
+
+ int type; /* 0 = header, 1 = message */ /* TODO enum? */
+ int channel;
+ char *phrase;
+ apr_size_t length;
+} incoming_context_t;
+
+
+serf_bucket_t *serf_bucket_bwtp_channel_close(
+ int channel,
+ serf_bucket_alloc_t *allocator)
+{
+ frame_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->type = 0;
+ ctx->open = 0;
+ ctx->channel = channel;
+ ctx->phrase = "CLOSED";
+ ctx->headers = serf_bucket_headers_create(allocator);
+
+ return serf_bucket_create(&serf_bucket_type_bwtp_frame, allocator, ctx);
+}
+
+serf_bucket_t *serf_bucket_bwtp_channel_open(
+ int channel,
+ const char *uri,
+ serf_bucket_alloc_t *allocator)
+{
+ frame_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->type = 0;
+ ctx->open = 1;
+ ctx->channel = channel;
+ ctx->phrase = uri;
+ ctx->headers = serf_bucket_headers_create(allocator);
+
+ return serf_bucket_create(&serf_bucket_type_bwtp_frame, allocator, ctx);
+}
+
+serf_bucket_t *serf_bucket_bwtp_header_create(
+ int channel,
+ const char *phrase,
+ serf_bucket_alloc_t *allocator)
+{
+ frame_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->type = 0;
+ ctx->open = 0;
+ ctx->channel = channel;
+ ctx->phrase = phrase;
+ ctx->headers = serf_bucket_headers_create(allocator);
+
+ return serf_bucket_create(&serf_bucket_type_bwtp_frame, allocator, ctx);
+}
+
+serf_bucket_t *serf_bucket_bwtp_message_create(
+ int channel,
+ serf_bucket_t *body,
+ serf_bucket_alloc_t *allocator)
+{
+ frame_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->type = 1;
+ ctx->open = 0;
+ ctx->channel = channel;
+ ctx->phrase = "MESSAGE";
+ ctx->headers = serf_bucket_headers_create(allocator);
+
+ return serf_bucket_create(&serf_bucket_type_bwtp_frame, allocator, ctx);
+}
+
+int serf_bucket_bwtp_frame_get_channel(
+ serf_bucket_t *bucket)
+{
+ if (SERF_BUCKET_IS_BWTP_FRAME(bucket)) {
+ frame_context_t *ctx = bucket->data;
+
+ return ctx->channel;
+ }
+ else if (SERF_BUCKET_IS_BWTP_INCOMING_FRAME(bucket)) {
+ incoming_context_t *ctx = bucket->data;
+
+ return ctx->channel;
+ }
+
+ return -1;
+}
+
+int serf_bucket_bwtp_frame_get_type(
+ serf_bucket_t *bucket)
+{
+ if (SERF_BUCKET_IS_BWTP_FRAME(bucket)) {
+ frame_context_t *ctx = bucket->data;
+
+ return ctx->type;
+ }
+ else if (SERF_BUCKET_IS_BWTP_INCOMING_FRAME(bucket)) {
+ incoming_context_t *ctx = bucket->data;
+
+ return ctx->type;
+ }
+
+ return -1;
+}
+
+const char *serf_bucket_bwtp_frame_get_phrase(
+ serf_bucket_t *bucket)
+{
+ if (SERF_BUCKET_IS_BWTP_FRAME(bucket)) {
+ frame_context_t *ctx = bucket->data;
+
+ return ctx->phrase;
+ }
+ else if (SERF_BUCKET_IS_BWTP_INCOMING_FRAME(bucket)) {
+ incoming_context_t *ctx = bucket->data;
+
+ return ctx->phrase;
+ }
+
+ return NULL;
+}
+
+serf_bucket_t *serf_bucket_bwtp_frame_get_headers(
+ serf_bucket_t *bucket)
+{
+ if (SERF_BUCKET_IS_BWTP_FRAME(bucket)) {
+ frame_context_t *ctx = bucket->data;
+
+ return ctx->headers;
+ }
+ else if (SERF_BUCKET_IS_BWTP_INCOMING_FRAME(bucket)) {
+ incoming_context_t *ctx = bucket->data;
+
+ return ctx->headers;
+ }
+
+ return NULL;
+}
+
+static int count_size(void *baton, const char *key, const char *value)
+{
+ apr_size_t *c = baton;
+ /* TODO Deal with folding. Yikes. */
+
+ /* Add in ": " and CRLF - so an extra four bytes. */
+ *c += strlen(key) + strlen(value) + 4;
+
+ return 0;
+}
+
+static apr_size_t calc_header_size(serf_bucket_t *hdrs)
+{
+ apr_size_t size = 0;
+
+ serf_bucket_headers_do(hdrs, count_size, &size);
+
+ return size;
+}
+
+static void serialize_data(serf_bucket_t *bucket)
+{
+ frame_context_t *ctx = bucket->data;
+ serf_bucket_t *new_bucket;
+ apr_size_t req_len;
+
+ /* Serialize the request-line and headers into one mother string,
+ * and wrap a bucket around it.
+ */
+ req_len = apr_snprintf(ctx->req_line, sizeof(ctx->req_line),
+ "%s %d " "%" APR_UINT64_T_HEX_FMT " %s%s\r\n",
+ (ctx->type ? "BWM" : "BWH"),
+ ctx->channel, calc_header_size(ctx->headers),
+ (ctx->open ? "OPEN " : ""),
+ ctx->phrase);
+ new_bucket = serf_bucket_simple_copy_create(ctx->req_line, req_len,
+ bucket->allocator);
+
+ /* Build up the new bucket structure.
+ *
+ * Note that self needs to become an aggregate bucket so that a
+ * pointer to self still represents the "right" data.
+ */
+ serf_bucket_aggregate_become(bucket);
+
+ /* Insert the two buckets. */
+ serf_bucket_aggregate_append(bucket, new_bucket);
+ serf_bucket_aggregate_append(bucket, ctx->headers);
+
+ /* Our private context is no longer needed, and is not referred to by
+ * any existing bucket. Toss it.
+ */
+ serf_bucket_mem_free(bucket->allocator, ctx);
+}
+
+static apr_status_t serf_bwtp_frame_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ /* Serialize our private data into a new aggregate bucket. */
+ serialize_data(bucket);
+
+ /* Delegate to the "new" aggregate bucket to do the read. */
+ return serf_bucket_read(bucket, requested, data, len);
+}
+
+static apr_status_t serf_bwtp_frame_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ /* Serialize our private data into a new aggregate bucket. */
+ serialize_data(bucket);
+
+ /* Delegate to the "new" aggregate bucket to do the readline. */
+ return serf_bucket_readline(bucket, acceptable, found, data, len);
+}
+
+static apr_status_t serf_bwtp_frame_read_iovec(serf_bucket_t *bucket,
+ apr_size_t requested,
+ int vecs_size,
+ struct iovec *vecs,
+ int *vecs_used)
+{
+ /* Serialize our private data into a new aggregate bucket. */
+ serialize_data(bucket);
+
+ /* Delegate to the "new" aggregate bucket to do the read. */
+ return serf_bucket_read_iovec(bucket, requested,
+ vecs_size, vecs, vecs_used);
+}
+
+static apr_status_t serf_bwtp_frame_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ /* Serialize our private data into a new aggregate bucket. */
+ serialize_data(bucket);
+
+ /* Delegate to the "new" aggregate bucket to do the peek. */
+ return serf_bucket_peek(bucket, data, len);
+}
+
+const serf_bucket_type_t serf_bucket_type_bwtp_frame = {
+ "BWTP-FRAME",
+ serf_bwtp_frame_read,
+ serf_bwtp_frame_readline,
+ serf_bwtp_frame_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_bwtp_frame_peek,
+ serf_default_destroy_and_data,
+};
+
+
+serf_bucket_t *serf_bucket_bwtp_incoming_frame_create(
+ serf_bucket_t *stream,
+ serf_bucket_alloc_t *allocator)
+{
+ incoming_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->stream = stream;
+ ctx->body = NULL;
+ ctx->headers = serf_bucket_headers_create(allocator);
+ ctx->state = STATE_STATUS_LINE;
+ ctx->length = 0;
+ ctx->channel = -1;
+ ctx->phrase = NULL;
+
+ serf_linebuf_init(&ctx->linebuf);
+
+ return serf_bucket_create(&serf_bucket_type_bwtp_incoming_frame, allocator, ctx);
+}
+
+static void bwtp_incoming_destroy_and_data(serf_bucket_t *bucket)
+{
+ incoming_context_t *ctx = bucket->data;
+
+ if (ctx->state != STATE_STATUS_LINE && ctx->phrase) {
+ serf_bucket_mem_free(bucket->allocator, (void*)ctx->phrase);
+ }
+
+ serf_bucket_destroy(ctx->stream);
+ if (ctx->body != NULL)
+ serf_bucket_destroy(ctx->body);
+ serf_bucket_destroy(ctx->headers);
+
+ serf_default_destroy_and_data(bucket);
+}
+
+static apr_status_t fetch_line(incoming_context_t *ctx, int acceptable)
+{
+ return serf_linebuf_fetch(&ctx->linebuf, ctx->stream, acceptable);
+}
+
+static apr_status_t parse_status_line(incoming_context_t *ctx,
+ serf_bucket_alloc_t *allocator)
+{
+ int res;
+ char *reason; /* ### stupid APR interface makes this non-const */
+
+ /* ctx->linebuf.line should be of form: BW* */
+ res = apr_date_checkmask(ctx->linebuf.line, "BW*");
+ if (!res) {
+ /* Not a BWTP response? Well, at least we won't understand it. */
+ return APR_EGENERAL;
+ }
+
+ if (ctx->linebuf.line[2] == 'H') {
+ ctx->type = 0;
+ }
+ else if (ctx->linebuf.line[2] == 'M') {
+ ctx->type = 1;
+ }
+ else {
+ ctx->type = -1;
+ }
+
+ ctx->channel = apr_strtoi64(ctx->linebuf.line + 3, &reason, 16);
+
+ /* Skip the space separating the channel from the length field. */
+ if (apr_isspace(*reason)) {
+ reason++;
+ }
+
+ ctx->length = apr_strtoi64(reason, &reason, 16);
+
+ /* Skip leading spaces for the reason string. */
+ if (reason - ctx->linebuf.line < ctx->linebuf.used) {
+ if (apr_isspace(*reason)) {
+ reason++;
+ }
+
+ ctx->phrase = serf_bstrmemdup(allocator, reason,
+ ctx->linebuf.used
+ - (reason - ctx->linebuf.line));
+ } else {
+ ctx->phrase = NULL;
+ }
+
+ return APR_SUCCESS;
+}
+
+/* This code should be replaced with header buckets. */
+static apr_status_t fetch_headers(serf_bucket_t *bkt, incoming_context_t *ctx)
+{
+ apr_status_t status;
+
+ /* RFC 2616 says that CRLF is the only line ending, but we can easily
+ * accept any kind of line ending.
+ */
+ status = fetch_line(ctx, SERF_NEWLINE_ANY);
+ if (SERF_BUCKET_READ_ERROR(status)) {
+ return status;
+ }
+ /* Something was read. Process it. */
+
+ if (ctx->linebuf.state == SERF_LINEBUF_READY && ctx->linebuf.used) {
+ const char *end_key;
+ const char *c;
+
+ end_key = c = memchr(ctx->linebuf.line, ':', ctx->linebuf.used);
+ if (!c) {
+ /* Bad headers? */
+ return APR_EGENERAL;
+ }
+
+ /* Skip over initial : and spaces. */
+ while (apr_isspace(*++c))
+ continue;
+
+ /* Always copy the headers (from the linebuf into new mem). */
+ /* ### we should be able to optimize some mem copies */
+ serf_bucket_headers_setx(
+ ctx->headers,
+ ctx->linebuf.line, end_key - ctx->linebuf.line, 1,
+ c, ctx->linebuf.line + ctx->linebuf.used - c, 1);
+ }
+
+ return status;
+}
+
+/* Perform one iteration of the state machine.
+ *
+ * Will return when one of the following conditions occurs:
+ * 1) a state change
+ * 2) an error
+ * 3) the stream is not ready or at EOF
+ * 4) APR_SUCCESS, meaning the machine can be run again immediately
+ */
+static apr_status_t run_machine(serf_bucket_t *bkt, incoming_context_t *ctx)
+{
+ apr_status_t status = APR_SUCCESS; /* initialize to avoid gcc warnings */
+
+ switch (ctx->state) {
+ case STATE_STATUS_LINE:
+ /* RFC 2616 says that CRLF is the only line ending, but we can easily
+ * accept any kind of line ending.
+ */
+ status = fetch_line(ctx, SERF_NEWLINE_ANY);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ if (ctx->linebuf.state == SERF_LINEBUF_READY && ctx->linebuf.used) {
+ /* The Status-Line is in the line buffer. Process it. */
+ status = parse_status_line(ctx, bkt->allocator);
+ if (status)
+ return status;
+
+ if (ctx->length) {
+ ctx->body =
+ serf_bucket_barrier_create(ctx->stream, bkt->allocator);
+ ctx->body = serf_bucket_limit_create(ctx->body, ctx->length,
+ bkt->allocator);
+ if (!ctx->type) {
+ ctx->state = STATE_HEADERS;
+ } else {
+ ctx->state = STATE_BODY;
+ }
+ } else {
+ ctx->state = STATE_DONE;
+ }
+ }
+ else {
+ /* The connection closed before we could get the next
+ * response. Treat the request as lost so that our upper
+ * end knows the server never tried to give us a response.
+ */
+ if (APR_STATUS_IS_EOF(status)) {
+ return SERF_ERROR_REQUEST_LOST;
+ }
+ }
+ break;
+ case STATE_HEADERS:
+ status = fetch_headers(ctx->body, ctx);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ /* If an empty line was read, then we hit the end of the headers.
+ * Move on to the body.
+ */
+ if (ctx->linebuf.state == SERF_LINEBUF_READY && !ctx->linebuf.used) {
+ /* Advance the state. */
+ ctx->state = STATE_DONE;
+ }
+ break;
+ case STATE_BODY:
+ /* Don't do anything. */
+ break;
+ case STATE_DONE:
+ return APR_EOF;
+ default:
+ /* Not reachable */
+ return APR_EGENERAL;
+ }
+
+ return status;
+}
+
+static apr_status_t wait_for_body(serf_bucket_t *bkt, incoming_context_t *ctx)
+{
+ apr_status_t status;
+
+ /* Keep reading and moving through states if we aren't at the BODY */
+ while (ctx->state != STATE_BODY) {
+ status = run_machine(bkt, ctx);
+
+ /* Anything other than APR_SUCCESS means that we cannot immediately
+ * read again (for now).
+ */
+ if (status)
+ return status;
+ }
+ /* in STATE_BODY */
+
+ return APR_SUCCESS;
+}
+
+apr_status_t serf_bucket_bwtp_incoming_frame_wait_for_headers(
+ serf_bucket_t *bucket)
+{
+ incoming_context_t *ctx = bucket->data;
+
+ return wait_for_body(bucket, ctx);
+}
+
+static apr_status_t bwtp_incoming_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ incoming_context_t *ctx = bucket->data;
+ apr_status_t rv;
+
+ rv = wait_for_body(bucket, ctx);
+ if (rv) {
+ /* It's not possible to have read anything yet! */
+ if (APR_STATUS_IS_EOF(rv) || APR_STATUS_IS_EAGAIN(rv)) {
+ *len = 0;
+ }
+ return rv;
+ }
+
+ rv = serf_bucket_read(ctx->body, requested, data, len);
+ if (APR_STATUS_IS_EOF(rv)) {
+ ctx->state = STATE_DONE;
+ }
+ return rv;
+}
+
+static apr_status_t bwtp_incoming_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ incoming_context_t *ctx = bucket->data;
+ apr_status_t rv;
+
+ rv = wait_for_body(bucket, ctx);
+ if (rv) {
+ return rv;
+ }
+
+ /* Delegate to the stream bucket to do the readline. */
+ return serf_bucket_readline(ctx->body, acceptable, found, data, len);
+}
+
+/* ### need to implement */
+#define bwtp_incoming_peek NULL
+
+const serf_bucket_type_t serf_bucket_type_bwtp_incoming_frame = {
+ "BWTP-INCOMING",
+ bwtp_incoming_read,
+ bwtp_incoming_readline,
+ serf_default_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ bwtp_incoming_peek,
+ bwtp_incoming_destroy_and_data,
+};
diff --git a/contrib/serf/buckets/chunk_buckets.c b/contrib/serf/buckets/chunk_buckets.c
new file mode 100644
index 0000000..7f25508
--- /dev/null
+++ b/contrib/serf/buckets/chunk_buckets.c
@@ -0,0 +1,235 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+#include <apr_strings.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+
+typedef struct {
+ enum {
+ STATE_FETCH,
+ STATE_CHUNK,
+ STATE_EOF
+ } state;
+
+ apr_status_t last_status;
+
+ serf_bucket_t *chunk;
+ serf_bucket_t *stream;
+
+ char chunk_hdr[20];
+} chunk_context_t;
+
+
+serf_bucket_t *serf_bucket_chunk_create(
+ serf_bucket_t *stream, serf_bucket_alloc_t *allocator)
+{
+ chunk_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->state = STATE_FETCH;
+ ctx->chunk = serf_bucket_aggregate_create(allocator);
+ ctx->stream = stream;
+
+ return serf_bucket_create(&serf_bucket_type_chunk, allocator, ctx);
+}
+
+#define CRLF "\r\n"
+
+static apr_status_t create_chunk(serf_bucket_t *bucket)
+{
+ chunk_context_t *ctx = bucket->data;
+ serf_bucket_t *simple_bkt;
+ apr_size_t chunk_len;
+ apr_size_t stream_len;
+ struct iovec vecs[66]; /* 64 + chunk trailer + EOF trailer = 66 */
+ int vecs_read;
+ int i;
+
+ if (ctx->state != STATE_FETCH) {
+ return APR_SUCCESS;
+ }
+
+ ctx->last_status =
+ serf_bucket_read_iovec(ctx->stream, SERF_READ_ALL_AVAIL,
+ 64, vecs, &vecs_read);
+
+ if (SERF_BUCKET_READ_ERROR(ctx->last_status)) {
+ /* Uh-oh. */
+ return ctx->last_status;
+ }
+
+ /* Count the length of the data we read. */
+ stream_len = 0;
+ for (i = 0; i < vecs_read; i++) {
+ stream_len += vecs[i].iov_len;
+ }
+
+ /* assert: stream_len in hex < sizeof(ctx->chunk_hdr) */
+
+ /* Inserting a 0 byte chunk indicates a terminator, which already happens
+ * during the EOF handler below. Adding another one here will cause the
+ * EOF chunk to be interpreted by the server as a new request. So,
+ * we'll only do this if we have something to write.
+ */
+ if (stream_len) {
+ /* Build the chunk header. */
+ chunk_len = apr_snprintf(ctx->chunk_hdr, sizeof(ctx->chunk_hdr),
+ "%" APR_UINT64_T_HEX_FMT CRLF,
+ (apr_uint64_t)stream_len);
+
+ /* Create a copy of the chunk header so we can have multiple chunks
+ * in the pipeline at the same time.
+ */
+ simple_bkt = serf_bucket_simple_copy_create(ctx->chunk_hdr, chunk_len,
+ bucket->allocator);
+ serf_bucket_aggregate_append(ctx->chunk, simple_bkt);
+
+ /* Insert the chunk footer. */
+ vecs[vecs_read].iov_base = CRLF;
+ vecs[vecs_read++].iov_len = sizeof(CRLF) - 1;
+ }
+
+ /* We've reached the end of the line for the stream. */
+ if (APR_STATUS_IS_EOF(ctx->last_status)) {
+ /* Insert the final, zero-length terminating chunk. */
+ vecs[vecs_read].iov_base = "0" CRLF CRLF;
+ vecs[vecs_read++].iov_len = sizeof("0" CRLF CRLF) - 1;
+
+ ctx->state = STATE_EOF;
+ }
+ else {
+ /* Okay, we can return data. */
+ ctx->state = STATE_CHUNK;
+ }
+
+ serf_bucket_aggregate_append_iovec(ctx->chunk, vecs, vecs_read);
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t serf_chunk_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ chunk_context_t *ctx = bucket->data;
+ apr_status_t status;
+
+ /* Before proceeding, we need to fetch some data from the stream. */
+ if (ctx->state == STATE_FETCH) {
+ status = create_chunk(bucket);
+ if (status) {
+ return status;
+ }
+ }
+
+ status = serf_bucket_read(ctx->chunk, requested, data, len);
+
+ /* Mask EOF from aggregate bucket. */
+ if (APR_STATUS_IS_EOF(status) && ctx->state == STATE_CHUNK) {
+ status = ctx->last_status;
+ ctx->state = STATE_FETCH;
+ }
+
+ return status;
+}
+
+static apr_status_t serf_chunk_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ chunk_context_t *ctx = bucket->data;
+ apr_status_t status;
+
+ status = serf_bucket_readline(ctx->chunk, acceptable, found, data, len);
+
+ /* Mask EOF from aggregate bucket. */
+ if (APR_STATUS_IS_EOF(status) && ctx->state == STATE_CHUNK) {
+ status = APR_EAGAIN;
+ ctx->state = STATE_FETCH;
+ }
+
+ return status;
+}
+
+static apr_status_t serf_chunk_read_iovec(serf_bucket_t *bucket,
+ apr_size_t requested,
+ int vecs_size,
+ struct iovec *vecs,
+ int *vecs_used)
+{
+ chunk_context_t *ctx = bucket->data;
+ apr_status_t status;
+
+ /* Before proceeding, we need to fetch some data from the stream. */
+ if (ctx->state == STATE_FETCH) {
+ status = create_chunk(bucket);
+ if (status) {
+ return status;
+ }
+ }
+
+ status = serf_bucket_read_iovec(ctx->chunk, requested, vecs_size, vecs,
+ vecs_used);
+
+ /* Mask EOF from aggregate bucket. */
+ if (APR_STATUS_IS_EOF(status) && ctx->state == STATE_CHUNK) {
+ status = ctx->last_status;
+ ctx->state = STATE_FETCH;
+ }
+
+ return status;
+}
+
+static apr_status_t serf_chunk_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ chunk_context_t *ctx = bucket->data;
+ apr_status_t status;
+
+ status = serf_bucket_peek(ctx->chunk, data, len);
+
+ /* Mask EOF from aggregate bucket. */
+ if (APR_STATUS_IS_EOF(status) && ctx->state == STATE_CHUNK) {
+ status = APR_EAGAIN;
+ }
+
+ return status;
+}
+
+static void serf_chunk_destroy(serf_bucket_t *bucket)
+{
+ chunk_context_t *ctx = bucket->data;
+
+ serf_bucket_destroy(ctx->stream);
+ serf_bucket_destroy(ctx->chunk);
+
+ serf_default_destroy_and_data(bucket);
+}
+
+const serf_bucket_type_t serf_bucket_type_chunk = {
+ "CHUNK",
+ serf_chunk_read,
+ serf_chunk_readline,
+ serf_chunk_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_chunk_peek,
+ serf_chunk_destroy,
+};
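
The chunk bucket above wraps an arbitrary body stream and emits HTTP/1.1 chunked framing: a hex size line, the data, a CRLF, and the terminating "0" chunk once the wrapped stream reports EOF. A minimal usage sketch, assuming an already-created serf_bucket_alloc_t and an in-memory body that never returns APR_EAGAIN; the dump_chunked() helper name is illustrative only:

#include <stdio.h>
#include "serf.h"

/* Wrap BODY in a chunk bucket and print the encoded stream, e.g.
 * "5\r\nhello\r\n0\r\n\r\n" for a 5-byte body. */
static apr_status_t dump_chunked(serf_bucket_t *body,
                                 serf_bucket_alloc_t *alloc)
{
    serf_bucket_t *chunked = serf_bucket_chunk_create(body, alloc);
    apr_status_t status;

    do {
        const char *data;
        apr_size_t len;

        status = serf_bucket_read(chunked, SERF_READ_ALL_AVAIL, &data, &len);
        if (SERF_BUCKET_READ_ERROR(status))
            break;
        fwrite(data, 1, len, stdout);
    } while (!APR_STATUS_IS_EOF(status));

    serf_bucket_destroy(chunked);   /* also destroys the wrapped body */
    return status;
}

A body created with serf_bucket_simple_create() satisfies the no-EAGAIN assumption; a socket-backed body would require the caller to retry when APR_EAGAIN is returned.
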
diff --git a/contrib/serf/buckets/dechunk_buckets.c b/contrib/serf/buckets/dechunk_buckets.c
new file mode 100644
index 0000000..eda1e22
--- /dev/null
+++ b/contrib/serf/buckets/dechunk_buckets.c
@@ -0,0 +1,199 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_strings.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+typedef struct {
+ serf_bucket_t *stream;
+
+ enum {
+ STATE_SIZE, /* reading the chunk size */
+ STATE_CHUNK, /* reading the chunk */
+ STATE_TERM, /* reading the chunk terminator */
+ STATE_DONE /* body is done; we've returned EOF */
+ } state;
+
+ /* Buffer for accumulating a chunk size. */
+ serf_linebuf_t linebuf;
+
+ /* How much of the chunk, or the terminator, do we have left to read? */
+ apr_int64_t body_left;
+
+} dechunk_context_t;
+
+
+serf_bucket_t *serf_bucket_dechunk_create(
+ serf_bucket_t *stream,
+ serf_bucket_alloc_t *allocator)
+{
+ dechunk_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->stream = stream;
+ ctx->state = STATE_SIZE;
+
+ serf_linebuf_init(&ctx->linebuf);
+
+ return serf_bucket_create(&serf_bucket_type_dechunk, allocator, ctx);
+}
+
+static void serf_dechunk_destroy_and_data(serf_bucket_t *bucket)
+{
+ dechunk_context_t *ctx = bucket->data;
+
+ serf_bucket_destroy(ctx->stream);
+
+ serf_default_destroy_and_data(bucket);
+}
+
+static apr_status_t serf_dechunk_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ dechunk_context_t *ctx = bucket->data;
+ apr_status_t status;
+
+ while (1) {
+ switch (ctx->state) {
+ case STATE_SIZE:
+
+ /* fetch a line terminated by CRLF */
+ status = serf_linebuf_fetch(&ctx->linebuf, ctx->stream,
+ SERF_NEWLINE_CRLF);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ /* if a line was read, then parse it. */
+ if (ctx->linebuf.state == SERF_LINEBUF_READY) {
+ /* NUL-terminate the line. if it filled the entire buffer,
+ then just assume the thing is too large. */
+ if (ctx->linebuf.used == sizeof(ctx->linebuf.line))
+ return APR_FROM_OS_ERROR(ERANGE);
+ ctx->linebuf.line[ctx->linebuf.used] = '\0';
+
+ /* convert from HEX digits. */
+ ctx->body_left = apr_strtoi64(ctx->linebuf.line, NULL, 16);
+ if (errno == ERANGE) {
+ return APR_FROM_OS_ERROR(ERANGE);
+ }
+
+ if (ctx->body_left == 0) {
+ /* Just read the last-chunk marker. We're DONE. */
+ ctx->state = STATE_DONE;
+ status = APR_EOF;
+ }
+ else {
+ /* Got a size, so we'll start reading the chunk now. */
+ ctx->state = STATE_CHUNK;
+ }
+
+ /* If we can read more, then go do so. */
+ if (!status)
+ continue;
+ }
+ /* assert: status != 0 */
+
+ /* Note that we didn't actually read anything, so our callers
+ * don't get confused.
+ */
+ *len = 0;
+
+ return status;
+
+ case STATE_CHUNK:
+
+ if (requested > ctx->body_left) {
+ requested = ctx->body_left;
+ }
+
+ /* Delegate to the stream bucket to do the read. */
+ status = serf_bucket_read(ctx->stream, requested, data, len);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ /* Some data was read, so decrement the amount left and see
+ * if we're done reading this chunk.
+ */
+ ctx->body_left -= *len;
+ if (!ctx->body_left) {
+ ctx->state = STATE_TERM;
+ ctx->body_left = 2; /* CRLF */
+ }
+
+ /* We need more data but there is no more available. */
+ if (ctx->body_left && APR_STATUS_IS_EOF(status)) {
+ return SERF_ERROR_TRUNCATED_HTTP_RESPONSE;
+ }
+
+ /* Return the data we just read. */
+ return status;
+
+ case STATE_TERM:
+ /* Delegate to the stream bucket to do the read. */
+ status = serf_bucket_read(ctx->stream, ctx->body_left, data, len);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ /* Some data was read, so decrement the amount left and see
+ * if we're done reading the chunk terminator.
+ */
+ ctx->body_left -= *len;
+
+ /* We need more data but there is no more available. */
+ if (ctx->body_left && APR_STATUS_IS_EOF(status))
+ return SERF_ERROR_TRUNCATED_HTTP_RESPONSE;
+
+ if (!ctx->body_left) {
+ ctx->state = STATE_SIZE;
+ }
+
+        /* Don't return the chunk-terminating CRLF to the caller! */
+ *len = 0;
+
+ if (status)
+ return status;
+
+ break;
+
+ case STATE_DONE:
+ /* Just keep returning EOF */
+ *len = 0;
+ return APR_EOF;
+
+ default:
+ /* Not reachable */
+ return APR_EGENERAL;
+ }
+ }
+ /* NOTREACHED */
+}
+
+/* ### need to implement */
+#define serf_dechunk_readline NULL
+#define serf_dechunk_peek NULL
+
+const serf_bucket_type_t serf_bucket_type_dechunk = {
+ "DECHUNK",
+ serf_dechunk_read,
+ serf_dechunk_readline,
+ serf_default_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_dechunk_peek,
+ serf_dechunk_destroy_and_data,
+};
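
The dechunk bucket is the inverse: it strips the size lines, the per-chunk CRLFs and the last-chunk marker, returning only body bytes and reporting a short stream as SERF_ERROR_TRUNCATED_HTTP_RESPONSE. A small sketch under the same in-memory assumptions; decode_chunked() and its buffers are hypothetical and error handling is minimal:

#include <string.h>
#include "serf.h"

/* Decode an in-memory chunked body. Returns the number of decoded bytes
 * copied into OUT (at most OUT_SIZE). */
static apr_size_t decode_chunked(const char *framed, apr_size_t framed_len,
                                 char *out, apr_size_t out_size,
                                 serf_bucket_alloc_t *alloc)
{
    serf_bucket_t *raw = serf_bucket_simple_create(framed, framed_len,
                                                   NULL, NULL, alloc);
    serf_bucket_t *dechunk = serf_bucket_dechunk_create(raw, alloc);
    apr_size_t total = 0;
    apr_status_t status;

    do {
        const char *data;
        apr_size_t len;

        status = serf_bucket_read(dechunk, out_size - total, &data, &len);
        if (SERF_BUCKET_READ_ERROR(status))
            break;
        memcpy(out + total, data, len);
        total += len;
    } while (!APR_STATUS_IS_EOF(status) && total < out_size);

    serf_bucket_destroy(dechunk);   /* also destroys the wrapped simple bucket */
    return total;
}

For example, decode_chunked("5\r\nhello\r\n0\r\n\r\n", 15, buf, sizeof(buf), alloc) copies the five bytes "hello" into buf.
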
diff --git a/contrib/serf/buckets/deflate_buckets.c b/contrib/serf/buckets/deflate_buckets.c
new file mode 100644
index 0000000..7a8e8e4
--- /dev/null
+++ b/contrib/serf/buckets/deflate_buckets.c
@@ -0,0 +1,384 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_strings.h>
+
+#include <zlib.h>
+
+/* This conditional isn't defined anywhere yet. */
+#ifdef HAVE_ZUTIL_H
+#include <zutil.h>
+#endif
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+/* magic header */
+static char deflate_magic[2] = { '\037', '\213' };
+#define DEFLATE_MAGIC_SIZE 10
+#define DEFLATE_VERIFY_SIZE 8
+#define DEFLATE_BUFFER_SIZE 8096
+
+static const int DEFLATE_WINDOW_SIZE = -15;
+static const int DEFLATE_MEMLEVEL = 9;
+
+typedef struct {
+ serf_bucket_t *stream;
+ serf_bucket_t *inflate_stream;
+
+ int format; /* Are we 'deflate' or 'gzip'? */
+
+ enum {
+ STATE_READING_HEADER, /* reading the gzip header */
+ STATE_HEADER, /* read the gzip header */
+ STATE_INIT, /* init'ing zlib functions */
+ STATE_INFLATE, /* inflating the content now */
+ STATE_READING_VERIFY, /* reading the final gzip CRC */
+ STATE_VERIFY, /* verifying the final gzip CRC */
+ STATE_FINISH, /* clean up after reading body */
+ STATE_DONE, /* body is done; we'll return EOF here */
+ } state;
+
+ z_stream zstream;
+ char hdr_buffer[DEFLATE_MAGIC_SIZE];
+ unsigned char buffer[DEFLATE_BUFFER_SIZE];
+ unsigned long crc;
+ int windowSize;
+ int memLevel;
+ int bufferSize;
+
+ /* How much of the chunk, or the terminator, do we have left to read? */
+ apr_size_t stream_left;
+
+ /* How much are we supposed to read? */
+ apr_size_t stream_size;
+
+ int stream_status; /* What was the last status we read? */
+
+} deflate_context_t;
+
+/* Decode a 4-byte little-endian value from the buffer. */
+static unsigned long getLong(unsigned char *string)
+{
+ return ((unsigned long)string[0])
+ | (((unsigned long)string[1]) << 8)
+ | (((unsigned long)string[2]) << 16)
+ | (((unsigned long)string[3]) << 24);
+}
+
+serf_bucket_t *serf_bucket_deflate_create(
+ serf_bucket_t *stream,
+ serf_bucket_alloc_t *allocator,
+ int format)
+{
+ deflate_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->stream = stream;
+ ctx->stream_status = APR_SUCCESS;
+ ctx->inflate_stream = serf_bucket_aggregate_create(allocator);
+ ctx->format = format;
+ ctx->crc = 0;
+ /* zstream must be NULL'd out. */
+ memset(&ctx->zstream, 0, sizeof(ctx->zstream));
+
+ switch (ctx->format) {
+ case SERF_DEFLATE_GZIP:
+ ctx->state = STATE_READING_HEADER;
+ break;
+ case SERF_DEFLATE_DEFLATE:
+ /* deflate doesn't have a header. */
+ ctx->state = STATE_INIT;
+ break;
+ default:
+ /* Not reachable */
+ return NULL;
+ }
+
+ /* Initial size of gzip header. */
+ ctx->stream_left = ctx->stream_size = DEFLATE_MAGIC_SIZE;
+
+ ctx->windowSize = DEFLATE_WINDOW_SIZE;
+ ctx->memLevel = DEFLATE_MEMLEVEL;
+ ctx->bufferSize = DEFLATE_BUFFER_SIZE;
+
+ return serf_bucket_create(&serf_bucket_type_deflate, allocator, ctx);
+}
+
+static void serf_deflate_destroy_and_data(serf_bucket_t *bucket)
+{
+ deflate_context_t *ctx = bucket->data;
+
+ if (ctx->state > STATE_INIT &&
+ ctx->state <= STATE_FINISH)
+ inflateEnd(&ctx->zstream);
+
+ /* We may have appended inflate_stream into the stream bucket.
+ * If so, avoid free'ing it twice.
+ */
+ if (ctx->inflate_stream) {
+ serf_bucket_destroy(ctx->inflate_stream);
+ }
+ serf_bucket_destroy(ctx->stream);
+
+ serf_default_destroy_and_data(bucket);
+}
+
+static apr_status_t serf_deflate_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ deflate_context_t *ctx = bucket->data;
+ unsigned long compCRC, compLen;
+ apr_status_t status;
+ const char *private_data;
+ apr_size_t private_len;
+ int zRC;
+
+ while (1) {
+ switch (ctx->state) {
+ case STATE_READING_HEADER:
+ case STATE_READING_VERIFY:
+ status = serf_bucket_read(ctx->stream, ctx->stream_left,
+ &private_data, &private_len);
+
+ if (SERF_BUCKET_READ_ERROR(status)) {
+ return status;
+ }
+
+ memcpy(ctx->hdr_buffer + (ctx->stream_size - ctx->stream_left),
+ private_data, private_len);
+
+ ctx->stream_left -= private_len;
+
+ if (ctx->stream_left == 0) {
+ ctx->state++;
+ if (APR_STATUS_IS_EAGAIN(status)) {
+ *len = 0;
+ return status;
+ }
+ }
+ else if (status) {
+ *len = 0;
+ return status;
+ }
+ break;
+ case STATE_HEADER:
+ if (ctx->hdr_buffer[0] != deflate_magic[0] ||
+ ctx->hdr_buffer[1] != deflate_magic[1]) {
+ return SERF_ERROR_DECOMPRESSION_FAILED;
+ }
+ if (ctx->hdr_buffer[3] != 0) {
+ return SERF_ERROR_DECOMPRESSION_FAILED;
+ }
+ ctx->state++;
+ break;
+ case STATE_VERIFY:
+ /* Do the checksum computation. */
+ compCRC = getLong((unsigned char*)ctx->hdr_buffer);
+ if (ctx->crc != compCRC) {
+ return SERF_ERROR_DECOMPRESSION_FAILED;
+ }
+ compLen = getLong((unsigned char*)ctx->hdr_buffer + 4);
+ if (ctx->zstream.total_out != compLen) {
+ return SERF_ERROR_DECOMPRESSION_FAILED;
+ }
+ ctx->state++;
+ break;
+ case STATE_INIT:
+ zRC = inflateInit2(&ctx->zstream, ctx->windowSize);
+ if (zRC != Z_OK) {
+ return SERF_ERROR_DECOMPRESSION_FAILED;
+ }
+ ctx->zstream.next_out = ctx->buffer;
+ ctx->zstream.avail_out = ctx->bufferSize;
+ ctx->state++;
+ break;
+ case STATE_FINISH:
+ inflateEnd(&ctx->zstream);
+ serf_bucket_aggregate_prepend(ctx->stream, ctx->inflate_stream);
+ ctx->inflate_stream = 0;
+ ctx->state++;
+ break;
+ case STATE_INFLATE:
+ /* Do we have anything already uncompressed to read? */
+ status = serf_bucket_read(ctx->inflate_stream, requested, data,
+ len);
+ if (SERF_BUCKET_READ_ERROR(status)) {
+ return status;
+ }
+ /* Hide EOF. */
+ if (APR_STATUS_IS_EOF(status)) {
+ status = ctx->stream_status;
+ if (APR_STATUS_IS_EOF(status)) {
+ /* We've read all of the data from our stream, but we
+ * need to continue to iterate until we flush
+ * out the zlib buffer.
+ */
+ status = APR_SUCCESS;
+ }
+ }
+ if (*len != 0) {
+ return status;
+ }
+
+ /* We tried; but we have nothing buffered. Fetch more. */
+
+ /* It is possible that we maxed out avail_out before
+ * exhausting avail_in; therefore, continue using the
+ * previous buffer. Otherwise, fetch more data from
+ * our stream bucket.
+ */
+ if (ctx->zstream.avail_in == 0) {
+ /* When we empty our inflated stream, we'll return this
+                 * status - this allows us to eventually pass up EAGAINs.
+ */
+ ctx->stream_status = serf_bucket_read(ctx->stream,
+ ctx->bufferSize,
+ &private_data,
+ &private_len);
+
+ if (SERF_BUCKET_READ_ERROR(ctx->stream_status)) {
+ return ctx->stream_status;
+ }
+
+ if (!private_len && APR_STATUS_IS_EAGAIN(ctx->stream_status)) {
+ *len = 0;
+ status = ctx->stream_status;
+ ctx->stream_status = APR_SUCCESS;
+ return status;
+ }
+
+ ctx->zstream.next_in = (unsigned char*)private_data;
+ ctx->zstream.avail_in = private_len;
+ }
+ zRC = Z_OK;
+ while (ctx->zstream.avail_in != 0) {
+ /* We're full, clear out our buffer, reset, and return. */
+ if (ctx->zstream.avail_out == 0) {
+ serf_bucket_t *tmp;
+ ctx->zstream.next_out = ctx->buffer;
+ private_len = ctx->bufferSize - ctx->zstream.avail_out;
+
+ ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer,
+ private_len);
+
+ /* FIXME: There probably needs to be a free func. */
+ tmp = SERF_BUCKET_SIMPLE_STRING_LEN((char *)ctx->buffer,
+ private_len,
+ bucket->allocator);
+ serf_bucket_aggregate_append(ctx->inflate_stream, tmp);
+ ctx->zstream.avail_out = ctx->bufferSize;
+ break;
+ }
+ zRC = inflate(&ctx->zstream, Z_NO_FLUSH);
+
+ if (zRC == Z_STREAM_END) {
+ serf_bucket_t *tmp;
+
+ private_len = ctx->bufferSize - ctx->zstream.avail_out;
+ ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer,
+ private_len);
+ /* FIXME: There probably needs to be a free func. */
+ tmp = SERF_BUCKET_SIMPLE_STRING_LEN((char *)ctx->buffer,
+ private_len,
+ bucket->allocator);
+ serf_bucket_aggregate_append(ctx->inflate_stream, tmp);
+
+ ctx->zstream.avail_out = ctx->bufferSize;
+
+ /* Push back the remaining data to be read. */
+ tmp = serf_bucket_aggregate_create(bucket->allocator);
+ serf_bucket_aggregate_prepend(tmp, ctx->stream);
+ ctx->stream = tmp;
+
+ /* We now need to take the remaining avail_in and
+ * throw it in ctx->stream so our next read picks it up.
+ */
+ tmp = SERF_BUCKET_SIMPLE_STRING_LEN(
+ (const char*)ctx->zstream.next_in,
+ ctx->zstream.avail_in,
+ bucket->allocator);
+ serf_bucket_aggregate_prepend(ctx->stream, tmp);
+
+ switch (ctx->format) {
+ case SERF_DEFLATE_GZIP:
+ ctx->stream_left = ctx->stream_size =
+ DEFLATE_VERIFY_SIZE;
+ ctx->state++;
+ break;
+ case SERF_DEFLATE_DEFLATE:
+ /* Deflate does not have a verify footer. */
+ ctx->state = STATE_FINISH;
+ break;
+ default:
+ /* Not reachable */
+ return APR_EGENERAL;
+ }
+
+ break;
+ }
+ if (zRC != Z_OK) {
+ return SERF_ERROR_DECOMPRESSION_FAILED;
+ }
+ }
+ /* Okay, we've inflated. Try to read. */
+ status = serf_bucket_read(ctx->inflate_stream, requested, data,
+ len);
+ /* Hide EOF. */
+ if (APR_STATUS_IS_EOF(status)) {
+ status = ctx->stream_status;
+ /* If our stream is finished too, return SUCCESS so
+ * we'll iterate one more time.
+ */
+ if (APR_STATUS_IS_EOF(status)) {
+ /* No more data to read from the stream, and everything
+ inflated. If all data was received correctly, state
+ should have been advanced to STATE_READING_VERIFY or
+ STATE_FINISH. If not, then the data was incomplete
+ and we have an error. */
+ if (ctx->state != STATE_INFLATE)
+ return APR_SUCCESS;
+ else
+ return SERF_ERROR_DECOMPRESSION_FAILED;
+ }
+ }
+ return status;
+ case STATE_DONE:
+ /* We're done inflating. Use our finished buffer. */
+ return serf_bucket_read(ctx->stream, requested, data, len);
+ default:
+ /* Not reachable */
+ return APR_EGENERAL;
+ }
+ }
+
+ /* NOTREACHED */
+}
+
+/* ### need to implement */
+#define serf_deflate_readline NULL
+#define serf_deflate_peek NULL
+
+const serf_bucket_type_t serf_bucket_type_deflate = {
+ "DEFLATE",
+ serf_deflate_read,
+ serf_deflate_readline,
+ serf_default_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_deflate_peek,
+ serf_deflate_destroy_and_data,
+};
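
The deflate bucket transparently inflates a gzip- or deflate-encoded stream while verifying the gzip CRC and length trailer. A sketch of draining such a stream, assuming gzip content; drain_deflated() is an illustrative name and the retry-on-EAGAIN loop stands in for a real event loop:

#include "serf.h"

/* Read and discard everything from a gzip-encoded BODY, retrying on
 * APR_EAGAIN (a real event loop would instead wait for the underlying
 * socket to become readable). */
static apr_status_t drain_deflated(serf_bucket_t *body,
                                   serf_bucket_alloc_t *alloc)
{
    serf_bucket_t *inflated =
        serf_bucket_deflate_create(body, alloc, SERF_DEFLATE_GZIP);
    apr_status_t status;

    do {
        const char *data;
        apr_size_t len;

        status = serf_bucket_read(inflated, SERF_READ_ALL_AVAIL, &data, &len);
        if (SERF_BUCKET_READ_ERROR(status))
            break;
        (void)data;   /* DATA/LEN hold inflated bytes; a real caller consumes them */
    } while (!APR_STATUS_IS_EOF(status));

    serf_bucket_destroy(inflated);   /* also destroys the wrapped body */
    return status;
}
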
diff --git a/contrib/serf/buckets/file_buckets.c b/contrib/serf/buckets/file_buckets.c
new file mode 100644
index 0000000..bd41cab
--- /dev/null
+++ b/contrib/serf/buckets/file_buckets.c
@@ -0,0 +1,117 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+typedef struct {
+ apr_file_t *file;
+
+ serf_databuf_t databuf;
+
+} file_context_t;
+
+
+static apr_status_t file_reader(void *baton, apr_size_t bufsize,
+ char *buf, apr_size_t *len)
+{
+ file_context_t *ctx = baton;
+
+ *len = bufsize;
+ return apr_file_read(ctx->file, buf, len);
+}
+
+serf_bucket_t *serf_bucket_file_create(
+ apr_file_t *file,
+ serf_bucket_alloc_t *allocator)
+{
+ file_context_t *ctx;
+#if APR_HAS_MMAP
+ apr_finfo_t finfo;
+ const char *file_path;
+
+ /* See if we'd be better off mmap'ing this file instead.
+ *
+ * Note that there is a failure case here that we purposely fall through:
+ * if a file is buffered, apr_mmap will reject it. On older versions of
+ * APR we have no way of detecting this up front, but apr_mmap_create
+ * will check for it and return APR_EBADF.
+ */
+ apr_file_name_get(&file_path, file);
+ apr_stat(&finfo, file_path, APR_FINFO_SIZE,
+ serf_bucket_allocator_get_pool(allocator));
+ if (APR_MMAP_CANDIDATE(finfo.size)) {
+ apr_status_t status;
+ apr_mmap_t *file_mmap;
+ status = apr_mmap_create(&file_mmap, file, 0, finfo.size,
+ APR_MMAP_READ,
+ serf_bucket_allocator_get_pool(allocator));
+
+ if (status == APR_SUCCESS) {
+ return serf_bucket_mmap_create(file_mmap, allocator);
+ }
+ }
+#endif
+
+ /* Oh, well. */
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->file = file;
+
+ serf_databuf_init(&ctx->databuf);
+ ctx->databuf.read = file_reader;
+ ctx->databuf.read_baton = ctx;
+
+ return serf_bucket_create(&serf_bucket_type_file, allocator, ctx);
+}
+
+static apr_status_t serf_file_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ file_context_t *ctx = bucket->data;
+
+ return serf_databuf_read(&ctx->databuf, requested, data, len);
+}
+
+static apr_status_t serf_file_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ file_context_t *ctx = bucket->data;
+
+ return serf_databuf_readline(&ctx->databuf, acceptable, found, data, len);
+}
+
+static apr_status_t serf_file_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ file_context_t *ctx = bucket->data;
+
+ return serf_databuf_peek(&ctx->databuf, data, len);
+}
+
+const serf_bucket_type_t serf_bucket_type_file = {
+ "FILE",
+ serf_file_read,
+ serf_file_readline,
+ serf_default_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_file_peek,
+ serf_default_destroy_and_data,
+};
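
A file bucket reads through a serf_databuf_t, but as shown above serf_bucket_file_create() silently upgrades to an mmap bucket when APR_HAS_MMAP is available and the file size qualifies. A sketch of creating one from a path, assuming POOL outlives the bucket; open_file_bucket() is an illustrative helper:

#include <apr_file_io.h>
#include "serf.h"

/* Open PATH read-only and wrap it in a file (or mmap) bucket. */
static apr_status_t open_file_bucket(serf_bucket_t **out, const char *path,
                                     serf_bucket_alloc_t *alloc,
                                     apr_pool_t *pool)
{
    apr_file_t *file;
    apr_status_t status = apr_file_open(&file, path, APR_READ,
                                        APR_OS_DEFAULT, pool);
    if (status)
        return status;

    *out = serf_bucket_file_create(file, alloc);
    return APR_SUCCESS;
}
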
diff --git a/contrib/serf/buckets/headers_buckets.c b/contrib/serf/buckets/headers_buckets.c
new file mode 100644
index 0000000..6401f7f
--- /dev/null
+++ b/contrib/serf/buckets/headers_buckets.c
@@ -0,0 +1,431 @@
+/* Copyright 2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+
+#include <apr_general.h> /* for strcasecmp() */
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+
+typedef struct header_list {
+ const char *header;
+ const char *value;
+
+ apr_size_t header_size;
+ apr_size_t value_size;
+
+ int alloc_flags;
+#define ALLOC_HEADER 0x0001 /* header lives in our allocator */
+#define ALLOC_VALUE 0x0002 /* value lives in our allocator */
+
+ struct header_list *next;
+} header_list_t;
+
+typedef struct {
+ header_list_t *list;
+
+ header_list_t *cur_read;
+ enum {
+ READ_START, /* haven't started reading yet */
+ READ_HEADER, /* reading cur_read->header */
+ READ_SEP, /* reading ": " */
+ READ_VALUE, /* reading cur_read->value */
+ READ_CRLF, /* reading "\r\n" */
+ READ_TERM, /* reading the final "\r\n" */
+ READ_DONE /* no more data to read */
+ } state;
+ apr_size_t amt_read; /* how much of the current state we've read */
+
+} headers_context_t;
+
+
+serf_bucket_t *serf_bucket_headers_create(
+ serf_bucket_alloc_t *allocator)
+{
+ headers_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->list = NULL;
+ ctx->state = READ_START;
+
+ return serf_bucket_create(&serf_bucket_type_headers, allocator, ctx);
+}
+
+void serf_bucket_headers_setx(
+ serf_bucket_t *bkt,
+ const char *header, apr_size_t header_size, int header_copy,
+ const char *value, apr_size_t value_size, int value_copy)
+{
+ headers_context_t *ctx = bkt->data;
+ header_list_t *iter = ctx->list;
+ header_list_t *hdr;
+
+#if 0
+ /* ### include this? */
+ if (ctx->cur_read) {
+ /* we started reading. can't change now. */
+ abort();
+ }
+#endif
+
+ hdr = serf_bucket_mem_alloc(bkt->allocator, sizeof(*hdr));
+ hdr->header_size = header_size;
+ hdr->value_size = value_size;
+ hdr->alloc_flags = 0;
+ hdr->next = NULL;
+
+ if (header_copy) {
+ hdr->header = serf_bstrmemdup(bkt->allocator, header, header_size);
+ hdr->alloc_flags |= ALLOC_HEADER;
+ }
+ else {
+ hdr->header = header;
+ }
+
+ if (value_copy) {
+ hdr->value = serf_bstrmemdup(bkt->allocator, value, value_size);
+ hdr->alloc_flags |= ALLOC_VALUE;
+ }
+ else {
+ hdr->value = value;
+ }
+
+ /* Add the new header at the end of the list. */
+ while (iter && iter->next) {
+ iter = iter->next;
+ }
+ if (iter)
+ iter->next = hdr;
+ else
+ ctx->list = hdr;
+}
+
+void serf_bucket_headers_set(
+ serf_bucket_t *headers_bucket,
+ const char *header,
+ const char *value)
+{
+ serf_bucket_headers_setx(headers_bucket,
+ header, strlen(header), 0,
+ value, strlen(value), 1);
+}
+
+void serf_bucket_headers_setc(
+ serf_bucket_t *headers_bucket,
+ const char *header,
+ const char *value)
+{
+ serf_bucket_headers_setx(headers_bucket,
+ header, strlen(header), 1,
+ value, strlen(value), 1);
+}
+
+void serf_bucket_headers_setn(
+ serf_bucket_t *headers_bucket,
+ const char *header,
+ const char *value)
+{
+ serf_bucket_headers_setx(headers_bucket,
+ header, strlen(header), 0,
+ value, strlen(value), 0);
+}
+
+const char *serf_bucket_headers_get(
+ serf_bucket_t *headers_bucket,
+ const char *header)
+{
+ headers_context_t *ctx = headers_bucket->data;
+ header_list_t *found = ctx->list;
+ const char *val = NULL;
+ int value_size = 0;
+ int val_alloc = 0;
+
+ while (found) {
+ if (strcasecmp(found->header, header) == 0) {
+ if (val) {
+ /* The header is already present. RFC 2616, section 4.2
+ indicates that we should append the new value, separated by
+ a comma. Reasoning: for headers whose values are known to
+ be comma-separated, that is clearly the correct behavior;
+ for others, the correct behavior is undefined anyway. */
+
+ /* The "+1" is for the comma; the +1 in the alloc
+ call is for the terminating '\0' */
+ apr_size_t new_size = found->value_size + value_size + 1;
+ char *new_val = serf_bucket_mem_alloc(headers_bucket->allocator,
+ new_size + 1);
+ memcpy(new_val, val, value_size);
+ new_val[value_size] = ',';
+ memcpy(new_val + value_size + 1, found->value,
+ found->value_size);
+ new_val[new_size] = '\0';
+ /* Copy the new value over the already existing value. */
+ if (val_alloc)
+ serf_bucket_mem_free(headers_bucket->allocator, (void*)val);
+ val_alloc |= ALLOC_VALUE;
+ val = new_val;
+ value_size = new_size;
+ }
+ else {
+ val = found->value;
+ value_size = found->value_size;
+ }
+ }
+ found = found->next;
+ }
+
+ return val;
+}
+
+void serf_bucket_headers_do(
+ serf_bucket_t *headers_bucket,
+ serf_bucket_headers_do_callback_fn_t func,
+ void *baton)
+{
+ headers_context_t *ctx = headers_bucket->data;
+ header_list_t *scan = ctx->list;
+
+ while (scan) {
+ if (func(baton, scan->header, scan->value) != 0) {
+ break;
+ }
+ scan = scan->next;
+ }
+}
+
+static void serf_headers_destroy_and_data(serf_bucket_t *bucket)
+{
+ headers_context_t *ctx = bucket->data;
+ header_list_t *scan = ctx->list;
+
+ while (scan) {
+ header_list_t *next_hdr = scan->next;
+
+ if (scan->alloc_flags & ALLOC_HEADER)
+ serf_bucket_mem_free(bucket->allocator, (void *)scan->header);
+ if (scan->alloc_flags & ALLOC_VALUE)
+ serf_bucket_mem_free(bucket->allocator, (void *)scan->value);
+ serf_bucket_mem_free(bucket->allocator, scan);
+
+ scan = next_hdr;
+ }
+
+ serf_default_destroy_and_data(bucket);
+}
+
+static void select_value(
+ headers_context_t *ctx,
+ const char **value,
+ apr_size_t *len)
+{
+ const char *v;
+ apr_size_t l;
+
+ if (ctx->state == READ_START) {
+ if (ctx->list == NULL) {
+ /* No headers. Move straight to the TERM state. */
+ ctx->state = READ_TERM;
+ }
+ else {
+ ctx->state = READ_HEADER;
+ ctx->cur_read = ctx->list;
+ }
+ ctx->amt_read = 0;
+ }
+
+ switch (ctx->state) {
+ case READ_HEADER:
+ v = ctx->cur_read->header;
+ l = ctx->cur_read->header_size;
+ break;
+ case READ_SEP:
+ v = ": ";
+ l = 2;
+ break;
+ case READ_VALUE:
+ v = ctx->cur_read->value;
+ l = ctx->cur_read->value_size;
+ break;
+ case READ_CRLF:
+ case READ_TERM:
+ v = "\r\n";
+ l = 2;
+ break;
+ case READ_DONE:
+ *len = 0;
+ return;
+ default:
+ /* Not reachable */
+ return;
+ }
+
+ *value = v + ctx->amt_read;
+ *len = l - ctx->amt_read;
+}
+
+/* the current data chunk has been read/consumed. move our internal state. */
+static apr_status_t consume_chunk(headers_context_t *ctx)
+{
+ /* move to the next state, resetting the amount read. */
+ ++ctx->state;
+ ctx->amt_read = 0;
+
+ /* just sent the terminator and moved to DONE. signal completion. */
+ if (ctx->state == READ_DONE)
+ return APR_EOF;
+
+ /* end of this header. move to the next one. */
+ if (ctx->state == READ_TERM) {
+ ctx->cur_read = ctx->cur_read->next;
+ if (ctx->cur_read != NULL) {
+            /* We've got another header to send. Reset the read state. */
+ ctx->state = READ_HEADER;
+ }
+ /* else leave in READ_TERM */
+ }
+
+ /* there is more data which can be read immediately. */
+ return APR_SUCCESS;
+}
+
+static apr_status_t serf_headers_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ headers_context_t *ctx = bucket->data;
+
+ select_value(ctx, data, len);
+
+ /* already done or returning the CRLF terminator? return EOF */
+ if (ctx->state == READ_DONE || ctx->state == READ_TERM)
+ return APR_EOF;
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t serf_headers_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ headers_context_t *ctx = bucket->data;
+ apr_size_t avail;
+
+ select_value(ctx, data, &avail);
+ if (ctx->state == READ_DONE) {
+ *len = avail;
+ return APR_EOF;
+ }
+
+ if (requested >= avail) {
+ /* return everything from this chunk */
+ *len = avail;
+
+ /* we consumed this chunk. advance the state. */
+ return consume_chunk(ctx);
+ }
+
+ /* return just the amount requested, and advance our pointer */
+ *len = requested;
+ ctx->amt_read += requested;
+
+ /* there is more that can be read immediately */
+ return APR_SUCCESS;
+}
+
+static apr_status_t serf_headers_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ headers_context_t *ctx = bucket->data;
+ apr_status_t status;
+
+ /* ### what behavior should we use here? APR_EGENERAL for now */
+ if ((acceptable & SERF_NEWLINE_CRLF) == 0)
+ return APR_EGENERAL;
+
+ /* get whatever is in this chunk */
+ select_value(ctx, data, len);
+ if (ctx->state == READ_DONE)
+ return APR_EOF;
+
+ /* we consumed this chunk. advance the state. */
+ status = consume_chunk(ctx);
+
+ /* the type of newline found is easy... */
+ *found = (ctx->state == READ_CRLF || ctx->state == READ_TERM)
+ ? SERF_NEWLINE_CRLF : SERF_NEWLINE_NONE;
+
+ return status;
+}
+
+static apr_status_t serf_headers_read_iovec(serf_bucket_t *bucket,
+ apr_size_t requested,
+ int vecs_size,
+ struct iovec *vecs,
+ int *vecs_used)
+{
+ apr_size_t avail = requested;
+ int i;
+
+ *vecs_used = 0;
+
+ for (i = 0; i < vecs_size; i++) {
+ const char *data;
+ apr_size_t len;
+ apr_status_t status;
+
+        /* Calling read() would not be a safe optimization in the general
+         * case, but it is here for the header bucket, which only frees its
+         * header keys and values when the entire bucket goes away - not on a
+ * per-read() basis as is normally the case.
+ */
+ status = serf_headers_read(bucket, avail, &data, &len);
+
+ if (len) {
+ vecs[*vecs_used].iov_base = (char*)data;
+ vecs[*vecs_used].iov_len = len;
+
+ (*vecs_used)++;
+
+ if (avail != SERF_READ_ALL_AVAIL) {
+ avail -= len;
+
+ /* If we reach 0, then read()'s status will suffice. */
+ if (avail == 0) {
+ return status;
+ }
+ }
+ }
+
+ if (status) {
+ return status;
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
+const serf_bucket_type_t serf_bucket_type_headers = {
+ "HEADERS",
+ serf_headers_read,
+ serf_headers_readline,
+ serf_headers_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_headers_peek,
+ serf_headers_destroy_and_data,
+};
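
The headers bucket stores key/value pairs with per-field copy semantics (setx/set/setc/setn) and, when read, serializes them as "Key: Value" CRLF pairs plus a terminating CRLF. A small sketch of the API, assuming an existing allocator; the header values are arbitrary examples:

#include <stdio.h>
#include "serf.h"

static int print_header(void *baton, const char *key, const char *value)
{
    printf("%s: %s\n", key, value);
    return 0;   /* returning non-zero stops the iteration */
}

/* Exercise the copy variants and the duplicate-merging of headers_get(). */
static void headers_demo(serf_bucket_alloc_t *alloc)
{
    serf_bucket_t *hdrs = serf_bucket_headers_create(alloc);

    serf_bucket_headers_setn(hdrs, "Host", "example.com");  /* copy neither */
    serf_bucket_headers_setc(hdrs, "Accept", "text/html");  /* copy both */
    serf_bucket_headers_setc(hdrs, "Accept", "text/plain");

    /* Duplicate headers come back comma-joined: "text/html,text/plain". */
    printf("Accept: %s\n", serf_bucket_headers_get(hdrs, "Accept"));

    serf_bucket_headers_do(hdrs, print_header, NULL);

    serf_bucket_destroy(hdrs);
}
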
diff --git a/contrib/serf/buckets/iovec_buckets.c b/contrib/serf/buckets/iovec_buckets.c
new file mode 100644
index 0000000..9ac1d8d
--- /dev/null
+++ b/contrib/serf/buckets/iovec_buckets.c
@@ -0,0 +1,169 @@
+/* Copyright 2011 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+
+typedef struct {
+ struct iovec *vecs;
+
+    /* Total number of buffers stored in the vecs array. */
+ int vecs_len;
+ /* Points to the first unread buffer. */
+ int current_vec;
+ /* First buffer offset. */
+ int offset;
+} iovec_context_t;
+
+serf_bucket_t *serf_bucket_iovec_create(
+ struct iovec vecs[],
+ int len,
+ serf_bucket_alloc_t *allocator)
+{
+ iovec_context_t *ctx;
+ int i;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->vecs = serf_bucket_mem_alloc(allocator, len * sizeof(struct iovec));
+ ctx->vecs_len = len;
+ ctx->current_vec = 0;
+ ctx->offset = 0;
+
+ /* copy all buffers to our iovec. */
+ for (i = 0; i < len; i++) {
+ ctx->vecs[i].iov_base = vecs[i].iov_base;
+ ctx->vecs[i].iov_len = vecs[i].iov_len;
+ }
+
+ return serf_bucket_create(&serf_bucket_type_iovec, allocator, ctx);
+}
+
+static apr_status_t serf_iovec_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ return APR_ENOTIMPL;
+}
+
+static apr_status_t serf_iovec_read_iovec(serf_bucket_t *bucket,
+ apr_size_t requested,
+ int vecs_size,
+ struct iovec *vecs,
+ int *vecs_used)
+{
+ iovec_context_t *ctx = bucket->data;
+
+ *vecs_used = 0;
+
+ /* copy the requested amount of buffers to the provided iovec. */
+ for (; ctx->current_vec < ctx->vecs_len; ctx->current_vec++) {
+ struct iovec vec = ctx->vecs[ctx->current_vec];
+ apr_size_t remaining;
+
+ if (requested != SERF_READ_ALL_AVAIL && requested <= 0)
+ break;
+ if (*vecs_used >= vecs_size)
+ break;
+
+ vecs[*vecs_used].iov_base = (char*)vec.iov_base + ctx->offset;
+ remaining = vec.iov_len - ctx->offset;
+
+        /* Fewer bytes requested than remaining in the current buffer. */
+ if (requested != SERF_READ_ALL_AVAIL && requested < remaining) {
+ vecs[*vecs_used].iov_len = requested;
+ ctx->offset += requested;
+ requested = 0;
+ (*vecs_used)++;
+ break;
+ } else {
+ /* Copy the complete buffer. */
+ vecs[*vecs_used].iov_len = remaining;
+ ctx->offset = 0;
+ if (requested != SERF_READ_ALL_AVAIL)
+ requested -= remaining;
+ (*vecs_used)++;
+ }
+ }
+
+ if (ctx->current_vec == ctx->vecs_len && !ctx->offset)
+ return APR_EOF;
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t serf_iovec_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ struct iovec vec[1];
+ apr_status_t status;
+ int vecs_used;
+
+ status = serf_iovec_read_iovec(bucket, requested, 1, vec, &vecs_used);
+
+ if (vecs_used) {
+ *data = vec[0].iov_base;
+ *len = vec[0].iov_len;
+ } else {
+ *len = 0;
+ }
+
+ return status;
+}
+
+static apr_status_t serf_iovec_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ iovec_context_t *ctx = bucket->data;
+
+ if (ctx->current_vec >= ctx->vecs_len) {
+ *len = 0;
+ return APR_EOF;
+ }
+
+ /* Return the first unread buffer, don't bother combining all
+ remaining data. */
+ *data = ctx->vecs[ctx->current_vec].iov_base;
+ *len = ctx->vecs[ctx->current_vec].iov_len;
+
+ if (ctx->current_vec + 1 == ctx->vecs_len)
+ return APR_EOF;
+
+ return APR_SUCCESS;
+}
+
+static void serf_iovec_destroy(serf_bucket_t *bucket)
+{
+ iovec_context_t *ctx = bucket->data;
+
+ serf_bucket_mem_free(bucket->allocator, ctx->vecs);
+ serf_default_destroy_and_data(bucket);
+}
+
+
+const serf_bucket_type_t serf_bucket_type_iovec = {
+ "IOVEC",
+ serf_iovec_read,
+ serf_iovec_readline,
+ serf_iovec_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_iovec_peek,
+ serf_iovec_destroy,
+};
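
An iovec bucket presents a caller-supplied array of buffers as a single readable bucket; only the iovec array itself is copied, not the buffer contents. A sketch, with the two string fragments being arbitrary examples:

#define APR_WANT_IOVEC
#include <apr_want.h>
#include "serf.h"

/* Expose two existing buffers as one readable bucket. The buffers must
 * stay alive while the bucket is being read. */
static serf_bucket_t *two_buffer_bucket(serf_bucket_alloc_t *alloc)
{
    static const char part1[] = "Hello, ";
    static const char part2[] = "world";
    struct iovec vecs[2];

    vecs[0].iov_base = (char *)part1;
    vecs[0].iov_len = sizeof(part1) - 1;
    vecs[1].iov_base = (char *)part2;
    vecs[1].iov_len = sizeof(part2) - 1;

    return serf_bucket_iovec_create(vecs, 2, alloc);
}
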
diff --git a/contrib/serf/buckets/limit_buckets.c b/contrib/serf/buckets/limit_buckets.c
new file mode 100644
index 0000000..70b7efb
--- /dev/null
+++ b/contrib/serf/buckets/limit_buckets.c
@@ -0,0 +1,127 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+#include "serf_private.h"
+
+typedef struct {
+ serf_bucket_t *stream;
+ apr_uint64_t remaining;
+} limit_context_t;
+
+
+serf_bucket_t *serf_bucket_limit_create(
+ serf_bucket_t *stream, apr_uint64_t len, serf_bucket_alloc_t *allocator)
+{
+ limit_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->stream = stream;
+ ctx->remaining = len;
+
+ return serf_bucket_create(&serf_bucket_type_limit, allocator, ctx);
+}
+
+static apr_status_t serf_limit_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ limit_context_t *ctx = bucket->data;
+ apr_status_t status;
+
+ if (!ctx->remaining) {
+ *len = 0;
+ return APR_EOF;
+ }
+
+ if (requested == SERF_READ_ALL_AVAIL || requested > ctx->remaining) {
+ if (ctx->remaining <= REQUESTED_MAX) {
+ requested = (apr_size_t) ctx->remaining;
+ } else {
+ requested = REQUESTED_MAX;
+ }
+ }
+
+ status = serf_bucket_read(ctx->stream, requested, data, len);
+
+ if (!SERF_BUCKET_READ_ERROR(status)) {
+ ctx->remaining -= *len;
+ }
+
+ /* If we have met our limit and don't have a status, return EOF. */
+ if (!ctx->remaining && !status) {
+ status = APR_EOF;
+ }
+
+ return status;
+}
+
+static apr_status_t serf_limit_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ limit_context_t *ctx = bucket->data;
+ apr_status_t status;
+
+ if (!ctx->remaining) {
+ *len = 0;
+ return APR_EOF;
+ }
+
+ status = serf_bucket_readline(ctx->stream, acceptable, found, data, len);
+
+ if (!SERF_BUCKET_READ_ERROR(status)) {
+ ctx->remaining -= *len;
+ }
+
+ /* If we have met our limit and don't have a status, return EOF. */
+ if (!ctx->remaining && !status) {
+ status = APR_EOF;
+ }
+
+ return status;
+}
+
+static apr_status_t serf_limit_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ limit_context_t *ctx = bucket->data;
+
+ return serf_bucket_peek(ctx->stream, data, len);
+}
+
+static void serf_limit_destroy(serf_bucket_t *bucket)
+{
+ limit_context_t *ctx = bucket->data;
+
+ serf_bucket_destroy(ctx->stream);
+
+ serf_default_destroy_and_data(bucket);
+}
+
+const serf_bucket_type_t serf_bucket_type_limit = {
+ "LIMIT",
+ serf_limit_read,
+ serf_limit_readline,
+ serf_default_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_limit_peek,
+ serf_limit_destroy,
+};
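
A limit bucket caps reads from a wrapped stream at a fixed byte count and then reports APR_EOF, which is how serf bounds bodies of known length. A sketch that copies at most max_len bytes out of a stream; read_first_bytes() is an illustrative helper that assumes OUT has room for max_len bytes and that the stream never returns APR_EAGAIN:

#include <string.h>
#include "serf.h"

/* Copy at most MAX_LEN bytes from STREAM into OUT by wrapping it in a
 * limit bucket. Returns the number of bytes copied. Destroying the
 * limit bucket also destroys STREAM, so the caller gives up ownership. */
static apr_size_t read_first_bytes(serf_bucket_t *stream,
                                   apr_uint64_t max_len, char *out,
                                   serf_bucket_alloc_t *alloc)
{
    serf_bucket_t *limited = serf_bucket_limit_create(stream, max_len, alloc);
    apr_size_t total = 0;
    apr_status_t status;

    do {
        const char *data;
        apr_size_t len;

        status = serf_bucket_read(limited, SERF_READ_ALL_AVAIL, &data, &len);
        if (SERF_BUCKET_READ_ERROR(status))
            break;
        memcpy(out + total, data, len);
        total += len;
    } while (!APR_STATUS_IS_EOF(status));

    serf_bucket_destroy(limited);
    return total;
}
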
diff --git a/contrib/serf/buckets/mmap_buckets.c b/contrib/serf/buckets/mmap_buckets.c
new file mode 100644
index 0000000..c96bce4
--- /dev/null
+++ b/contrib/serf/buckets/mmap_buckets.c
@@ -0,0 +1,140 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+#include <apr_mmap.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+#if APR_HAS_MMAP
+
+typedef struct {
+ apr_mmap_t *mmap;
+ void *current;
+ apr_off_t offset;
+ apr_off_t remaining;
+} mmap_context_t;
+
+
+serf_bucket_t *serf_bucket_mmap_create(
+ apr_mmap_t *file_mmap,
+ serf_bucket_alloc_t *allocator)
+{
+ mmap_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->mmap = file_mmap;
+ ctx->current = NULL;
+ ctx->offset = 0;
+ ctx->remaining = ctx->mmap->size;
+
+ return serf_bucket_create(&serf_bucket_type_mmap, allocator, ctx);
+}
+
+static apr_status_t serf_mmap_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ mmap_context_t *ctx = bucket->data;
+
+ if (requested == SERF_READ_ALL_AVAIL || requested > ctx->remaining) {
+ *len = ctx->remaining;
+ }
+ else {
+ *len = requested;
+ }
+
+ /* ### Would it be faster to call this once and do the offset ourselves? */
+ apr_mmap_offset((void**)data, ctx->mmap, ctx->offset);
+
+ /* For the next read... */
+ ctx->offset += *len;
+ ctx->remaining -= *len;
+
+ if (ctx->remaining == 0) {
+ return APR_EOF;
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t serf_mmap_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ mmap_context_t *ctx = bucket->data;
+ const char *end;
+
+ /* ### Would it be faster to call this once and do the offset ourselves? */
+ apr_mmap_offset((void**)data, ctx->mmap, ctx->offset);
+ end = *data;
+
+    /* XXX An overflow is generated if we pass &ctx->remaining to readline,
+     * most likely because ctx->remaining is an apr_off_t while
+     * serf_util_readline() expects an apr_size_t.
+     */
+ *len = ctx->remaining;
+
+ serf_util_readline(&end, len, acceptable, found);
+
+ *len = end - *data;
+
+ ctx->offset += *len;
+ ctx->remaining -= *len;
+
+ if (ctx->remaining == 0) {
+ return APR_EOF;
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t serf_mmap_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ /* Oh, bah. */
+ return APR_ENOTIMPL;
+}
+
+const serf_bucket_type_t serf_bucket_type_mmap = {
+ "MMAP",
+ serf_mmap_read,
+ serf_mmap_readline,
+ serf_default_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_mmap_peek,
+ serf_default_destroy_and_data,
+};
+
+#else /* !APR_HAS_MMAP */
+
+serf_bucket_t *serf_bucket_mmap_create(apr_mmap_t *file_mmap,
+ serf_bucket_alloc_t *allocator)
+{
+ return NULL;
+}
+
+const serf_bucket_type_t serf_bucket_type_mmap = {
+ "MMAP",
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+};
+
+#endif
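
When APR_HAS_MMAP is available, an mmap bucket serves reads straight out of a file mapping. A sketch of constructing one from an already-open file, assuming SIZE was obtained elsewhere (for example via apr_file_info_get); mmap_bucket_for() is an illustrative name:

#include <apr_mmap.h>
#include "serf.h"

/* Map an already opened FILE and wrap the mapping in an mmap bucket.
 * Returns NULL on failure or when APR has no mmap support. */
static serf_bucket_t *mmap_bucket_for(apr_file_t *file, apr_off_t size,
                                      serf_bucket_alloc_t *alloc,
                                      apr_pool_t *pool)
{
#if APR_HAS_MMAP
    apr_mmap_t *mapping;

    if (apr_mmap_create(&mapping, file, 0, (apr_size_t)size,
                        APR_MMAP_READ, pool) != APR_SUCCESS)
        return NULL;

    return serf_bucket_mmap_create(mapping, alloc);
#else
    return NULL;
#endif
}
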
diff --git a/contrib/serf/buckets/request_buckets.c b/contrib/serf/buckets/request_buckets.c
new file mode 100644
index 0000000..912da8a
--- /dev/null
+++ b/contrib/serf/buckets/request_buckets.c
@@ -0,0 +1,223 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+#include <apr_strings.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+
+typedef struct {
+ const char *method;
+ const char *uri;
+ serf_bucket_t *headers;
+ serf_bucket_t *body;
+ apr_int64_t len;
+} request_context_t;
+
+#define LENGTH_UNKNOWN ((apr_int64_t)-1)
+
+
+serf_bucket_t *serf_bucket_request_create(
+ const char *method,
+ const char *URI,
+ serf_bucket_t *body,
+ serf_bucket_alloc_t *allocator)
+{
+ request_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->method = method;
+ ctx->uri = URI;
+ ctx->headers = serf_bucket_headers_create(allocator);
+ ctx->body = body;
+ ctx->len = LENGTH_UNKNOWN;
+
+ return serf_bucket_create(&serf_bucket_type_request, allocator, ctx);
+}
+
+void serf_bucket_request_set_CL(
+ serf_bucket_t *bucket,
+ apr_int64_t len)
+{
+ request_context_t *ctx = (request_context_t *)bucket->data;
+
+ ctx->len = len;
+}
+
+serf_bucket_t *serf_bucket_request_get_headers(
+ serf_bucket_t *bucket)
+{
+ return ((request_context_t *)bucket->data)->headers;
+}
+
+void serf_bucket_request_set_root(
+ serf_bucket_t *bucket,
+ const char *root_url)
+{
+ request_context_t *ctx = (request_context_t *)bucket->data;
+
+ /* If uri is already absolute, don't change it. */
+ if (ctx->uri[0] != '/')
+ return;
+
+ /* If uri is '/' replace it with root_url. */
+ if (ctx->uri[1] == '\0')
+ ctx->uri = root_url;
+ else
+ ctx->uri =
+ apr_pstrcat(serf_bucket_allocator_get_pool(bucket->allocator),
+ root_url,
+ ctx->uri,
+ NULL);
+}
+
+static void serialize_data(serf_bucket_t *bucket)
+{
+ request_context_t *ctx = bucket->data;
+ serf_bucket_t *new_bucket;
+ const char *new_data;
+ struct iovec iov[4];
+ apr_size_t nbytes;
+
+ /* Serialize the request-line and headers into one mother string,
+ * and wrap a bucket around it.
+ */
+ iov[0].iov_base = (char*)ctx->method;
+ iov[0].iov_len = strlen(ctx->method);
+ iov[1].iov_base = " ";
+ iov[1].iov_len = sizeof(" ") - 1;
+ iov[2].iov_base = (char*)ctx->uri;
+ iov[2].iov_len = strlen(ctx->uri);
+ iov[3].iov_base = " HTTP/1.1\r\n";
+ iov[3].iov_len = sizeof(" HTTP/1.1\r\n") - 1;
+
+    /* Wrap the serialized string in a bucket that owns the flat copy. */
+ new_data = serf_bstrcatv(bucket->allocator, iov, 4, &nbytes);
+ new_bucket = serf_bucket_simple_own_create(new_data, nbytes,
+ bucket->allocator);
+
+ /* Build up the new bucket structure.
+ *
+ * Note that self needs to become an aggregate bucket so that a
+ * pointer to self still represents the "right" data.
+ */
+ serf_bucket_aggregate_become(bucket);
+
+ /* Insert the two buckets. */
+ serf_bucket_aggregate_append(bucket, new_bucket);
+ serf_bucket_aggregate_append(bucket, ctx->headers);
+
+ /* If we know the length, then use C-L and the raw body. Otherwise,
+ use chunked encoding for the request. */
+ if (ctx->len != LENGTH_UNKNOWN) {
+ char buf[30];
+ sprintf(buf, "%" APR_INT64_T_FMT, ctx->len);
+ serf_bucket_headers_set(ctx->headers, "Content-Length", buf);
+ if (ctx->body != NULL)
+ serf_bucket_aggregate_append(bucket, ctx->body);
+ }
+ else if (ctx->body != NULL) {
+ /* Morph the body bucket to a chunked encoding bucket for now. */
+ serf_bucket_headers_setn(ctx->headers, "Transfer-Encoding", "chunked");
+ ctx->body = serf_bucket_chunk_create(ctx->body, bucket->allocator);
+ serf_bucket_aggregate_append(bucket, ctx->body);
+ }
+
+ /* Our private context is no longer needed, and is not referred to by
+ * any existing bucket. Toss it.
+ */
+ serf_bucket_mem_free(bucket->allocator, ctx);
+}
+
+static apr_status_t serf_request_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+    /* Serialize our private data into a new aggregate bucket. */
+ serialize_data(bucket);
+
+ /* Delegate to the "new" aggregate bucket to do the read. */
+ return serf_bucket_read(bucket, requested, data, len);
+}
+
+static apr_status_t serf_request_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+    /* Serialize our private data into a new aggregate bucket. */
+ serialize_data(bucket);
+
+ /* Delegate to the "new" aggregate bucket to do the readline. */
+ return serf_bucket_readline(bucket, acceptable, found, data, len);
+}
+
+static apr_status_t serf_request_read_iovec(serf_bucket_t *bucket,
+ apr_size_t requested,
+ int vecs_size,
+ struct iovec *vecs,
+ int *vecs_used)
+{
+    /* Serialize our private data into a new aggregate bucket. */
+ serialize_data(bucket);
+
+ /* Delegate to the "new" aggregate bucket to do the read. */
+ return serf_bucket_read_iovec(bucket, requested,
+ vecs_size, vecs, vecs_used);
+}
+
+static apr_status_t serf_request_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+    /* Serialize our private data into a new aggregate bucket. */
+ serialize_data(bucket);
+
+ /* Delegate to the "new" aggregate bucket to do the peek. */
+ return serf_bucket_peek(bucket, data, len);
+}
+
+void serf_bucket_request_become(
+ serf_bucket_t *bucket,
+ const char *method,
+ const char *uri,
+ serf_bucket_t *body)
+{
+ request_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(bucket->allocator, sizeof(*ctx));
+ ctx->method = method;
+ ctx->uri = uri;
+ ctx->headers = serf_bucket_headers_create(bucket->allocator);
+    ctx->body = body;
+    /* Match serf_bucket_request_create(): length unknown until set_CL(). */
+    ctx->len = LENGTH_UNKNOWN;
+
+ bucket->type = &serf_bucket_type_request;
+ bucket->data = ctx;
+
+ /* The allocator remains the same. */
+}
+
+const serf_bucket_type_t serf_bucket_type_request = {
+ "REQUEST",
+ serf_request_read,
+ serf_request_readline,
+ serf_request_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_request_peek,
+ serf_default_destroy_and_data,
+};
+
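
Putting the pieces together, a request bucket serializes the request line, a headers bucket, and an optional body; serialize_data() above emits Content-Length when the length was set and otherwise wraps the body in a chunk bucket. A sketch of building a POST request, with the URI, header values, and payload being arbitrary examples:

#include "serf.h"

/* Build a POST request with an in-memory body. Because the length is set
 * via serf_bucket_request_set_CL(), the serialized request carries a
 * Content-Length header; without it the body would be chunk-encoded. */
static serf_bucket_t *make_post(serf_bucket_alloc_t *alloc)
{
    static const char payload[] = "name=value";
    serf_bucket_t *body = serf_bucket_simple_create(payload,
                                                    sizeof(payload) - 1,
                                                    NULL, NULL, alloc);
    serf_bucket_t *req = serf_bucket_request_create("POST", "/submit",
                                                    body, alloc);
    serf_bucket_t *hdrs = serf_bucket_request_get_headers(req);

    serf_bucket_headers_setn(hdrs, "Host", "example.com");
    serf_bucket_headers_setn(hdrs, "Content-Type",
                             "application/x-www-form-urlencoded");
    serf_bucket_request_set_CL(req, sizeof(payload) - 1);

    return req;   /* reading from REQ yields the serialized request */
}
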
diff --git a/contrib/serf/buckets/response_body_buckets.c b/contrib/serf/buckets/response_body_buckets.c
new file mode 100644
index 0000000..c9648a6
--- /dev/null
+++ b/contrib/serf/buckets/response_body_buckets.c
@@ -0,0 +1,135 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+/* Older versions of APR do not have this macro. */
+#ifdef APR_SIZE_MAX
+#define REQUESTED_MAX APR_SIZE_MAX
+#else
+#define REQUESTED_MAX (~((apr_size_t)0))
+#endif
+
+
+typedef struct {
+ serf_bucket_t *stream;
+ apr_uint64_t remaining;
+} body_context_t;
+
+serf_bucket_t *serf_bucket_response_body_create(
+ serf_bucket_t *stream, apr_uint64_t len, serf_bucket_alloc_t *allocator)
+{
+ body_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->stream = stream;
+ ctx->remaining = len;
+
+ return serf_bucket_create(&serf_bucket_type_response_body, allocator, ctx);
+}
+
+static apr_status_t serf_response_body_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data,
+ apr_size_t *len)
+{
+ body_context_t *ctx = bucket->data;
+ apr_status_t status;
+
+ if (!ctx->remaining) {
+ *len = 0;
+ return APR_EOF;
+ }
+
+ if (requested == SERF_READ_ALL_AVAIL || requested > ctx->remaining) {
+ if (ctx->remaining <= REQUESTED_MAX) {
+ requested = (apr_size_t) ctx->remaining;
+ } else {
+ requested = REQUESTED_MAX;
+ }
+ }
+
+ status = serf_bucket_read(ctx->stream, requested, data, len);
+
+ if (!SERF_BUCKET_READ_ERROR(status)) {
+ ctx->remaining -= *len;
+ }
+
+ if (APR_STATUS_IS_EOF(status) && ctx->remaining > 0) {
+ /* The server sent less data than expected. */
+ status = SERF_ERROR_TRUNCATED_HTTP_RESPONSE;
+ }
+
+ return status;
+}
+
+static apr_status_t serf_response_body_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data,
+ apr_size_t *len)
+{
+ body_context_t *ctx = bucket->data;
+ apr_status_t status;
+
+ if (!ctx->remaining) {
+ *len = 0;
+ return APR_EOF;
+ }
+
+ status = serf_bucket_readline(ctx->stream, acceptable, found, data, len);
+
+ if (!SERF_BUCKET_READ_ERROR(status)) {
+ ctx->remaining -= *len;
+ }
+
+ if (APR_STATUS_IS_EOF(status) && ctx->remaining > 0) {
+ /* The server sent less data than expected. */
+ status = SERF_ERROR_TRUNCATED_HTTP_RESPONSE;
+ }
+
+ return status;
+}
+
+static apr_status_t serf_response_body_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ body_context_t *ctx = bucket->data;
+
+ return serf_bucket_peek(ctx->stream, data, len);
+}
+
+static void serf_response_body_destroy(serf_bucket_t *bucket)
+{
+ body_context_t *ctx = bucket->data;
+
+ serf_bucket_destroy(ctx->stream);
+
+ serf_default_destroy_and_data(bucket);
+}
+
+const serf_bucket_type_t serf_bucket_type_response_body = {
+ "RESPONSE_BODY",
+ serf_response_body_read,
+ serf_response_body_readline,
+ serf_default_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_response_body_peek,
+ serf_response_body_destroy,
+};
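
A response_body bucket is essentially a limit bucket that additionally reports a short read as SERF_ERROR_TRUNCATED_HTTP_RESPONSE; the response bucket below creates one whenever a Content-Length header is present. A one-line sketch, assuming the caller hands over ownership of the stream:

#include "serf.h"

/* Bound STREAM at CONTENT_LENGTH bytes; reads past that return APR_EOF,
 * and an early EOF from STREAM is reported as a truncated response. */
static serf_bucket_t *body_with_length(serf_bucket_t *stream,
                                       apr_uint64_t content_length,
                                       serf_bucket_alloc_t *alloc)
{
    return serf_bucket_response_body_create(stream, content_length, alloc);
}
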
diff --git a/contrib/serf/buckets/response_buckets.c b/contrib/serf/buckets/response_buckets.c
new file mode 100644
index 0000000..d343a4c
--- /dev/null
+++ b/contrib/serf/buckets/response_buckets.c
@@ -0,0 +1,464 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_lib.h>
+#include <apr_strings.h>
+#include <apr_date.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+#include "serf_private.h"
+
+typedef struct {
+ serf_bucket_t *stream;
+ serf_bucket_t *body; /* Pointer to the stream wrapping the body. */
+ serf_bucket_t *headers; /* holds parsed headers */
+
+ enum {
+ STATE_STATUS_LINE, /* reading status line */
+ STATE_HEADERS, /* reading headers */
+ STATE_BODY, /* reading body */
+ STATE_TRAILERS, /* reading trailers */
+ STATE_DONE /* we've sent EOF */
+ } state;
+
+ /* Buffer for accumulating a line from the response. */
+ serf_linebuf_t linebuf;
+
+ serf_status_line sl;
+
+ int chunked; /* Do we need to read trailers? */
+ int head_req; /* Was this a HEAD request? */
+} response_context_t;
+
+
+serf_bucket_t *serf_bucket_response_create(
+ serf_bucket_t *stream,
+ serf_bucket_alloc_t *allocator)
+{
+ response_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->stream = stream;
+ ctx->body = NULL;
+ ctx->headers = serf_bucket_headers_create(allocator);
+ ctx->state = STATE_STATUS_LINE;
+ ctx->chunked = 0;
+ ctx->head_req = 0;
+
+ serf_linebuf_init(&ctx->linebuf);
+
+ return serf_bucket_create(&serf_bucket_type_response, allocator, ctx);
+}
+
+void serf_bucket_response_set_head(
+ serf_bucket_t *bucket)
+{
+ response_context_t *ctx = bucket->data;
+
+ ctx->head_req = 1;
+}
+
+serf_bucket_t *serf_bucket_response_get_headers(
+ serf_bucket_t *bucket)
+{
+ return ((response_context_t *)bucket->data)->headers;
+}
+
+
+static void serf_response_destroy_and_data(serf_bucket_t *bucket)
+{
+ response_context_t *ctx = bucket->data;
+
+ if (ctx->state != STATE_STATUS_LINE) {
+ serf_bucket_mem_free(bucket->allocator, (void*)ctx->sl.reason);
+ }
+
+ serf_bucket_destroy(ctx->stream);
+ if (ctx->body != NULL)
+ serf_bucket_destroy(ctx->body);
+ serf_bucket_destroy(ctx->headers);
+
+ serf_default_destroy_and_data(bucket);
+}
+
+static apr_status_t fetch_line(response_context_t *ctx, int acceptable)
+{
+ return serf_linebuf_fetch(&ctx->linebuf, ctx->stream, acceptable);
+}
+
+static apr_status_t parse_status_line(response_context_t *ctx,
+ serf_bucket_alloc_t *allocator)
+{
+ int res;
+ char *reason; /* ### stupid APR interface makes this non-const */
+
+ /* ctx->linebuf.line should be of form: HTTP/1.1 200 OK */
+ res = apr_date_checkmask(ctx->linebuf.line, "HTTP/#.# ###*");
+ if (!res) {
+ /* Not an HTTP response? Well, at least we won't understand it. */
+ return SERF_ERROR_BAD_HTTP_RESPONSE;
+ }
+
+ ctx->sl.version = SERF_HTTP_VERSION(ctx->linebuf.line[5] - '0',
+ ctx->linebuf.line[7] - '0');
+ ctx->sl.code = apr_strtoi64(ctx->linebuf.line + 8, &reason, 10);
+
+ /* Skip leading spaces for the reason string. */
+ if (apr_isspace(*reason)) {
+ reason++;
+ }
+
+ /* Copy the reason value out of the line buffer. */
+ ctx->sl.reason = serf_bstrmemdup(allocator, reason,
+ ctx->linebuf.used
+ - (reason - ctx->linebuf.line));
+
+ return APR_SUCCESS;
+}
+
+/* This code should be replaced with header buckets. */
+static apr_status_t fetch_headers(serf_bucket_t *bkt, response_context_t *ctx)
+{
+ apr_status_t status;
+
+ /* RFC 2616 says that CRLF is the only line ending, but we can easily
+ * accept any kind of line ending.
+ */
+ status = fetch_line(ctx, SERF_NEWLINE_ANY);
+ if (SERF_BUCKET_READ_ERROR(status)) {
+ return status;
+ }
+ /* Something was read. Process it. */
+
+ if (ctx->linebuf.state == SERF_LINEBUF_READY && ctx->linebuf.used) {
+ const char *end_key;
+ const char *c;
+
+ end_key = c = memchr(ctx->linebuf.line, ':', ctx->linebuf.used);
+ if (!c) {
+ /* Bad headers? */
+ return SERF_ERROR_BAD_HTTP_RESPONSE;
+ }
+
+ /* Skip over initial ':' */
+ c++;
+
+ /* And skip all whitespaces. */
+ for(; c < ctx->linebuf.line + ctx->linebuf.used; c++)
+ {
+ if (!apr_isspace(*c))
+ {
+ break;
+ }
+ }
+
+ /* Always copy the headers (from the linebuf into new mem). */
+ /* ### we should be able to optimize some mem copies */
+ serf_bucket_headers_setx(
+ ctx->headers,
+ ctx->linebuf.line, end_key - ctx->linebuf.line, 1,
+ c, ctx->linebuf.line + ctx->linebuf.used - c, 1);
+ }
+
+ return status;
+}
+
+/* Perform one iteration of the state machine.
+ *
+ * Will return when one of the following conditions occurs:
+ * 1) a state change
+ * 2) an error
+ * 3) the stream is not ready or at EOF
+ * 4) APR_SUCCESS, meaning the machine can be run again immediately
+ */
+static apr_status_t run_machine(serf_bucket_t *bkt, response_context_t *ctx)
+{
+ apr_status_t status = APR_SUCCESS; /* initialize to avoid gcc warnings */
+
+ switch (ctx->state) {
+ case STATE_STATUS_LINE:
+ /* RFC 2616 says that CRLF is the only line ending, but we can easily
+ * accept any kind of line ending.
+ */
+ status = fetch_line(ctx, SERF_NEWLINE_ANY);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ if (ctx->linebuf.state == SERF_LINEBUF_READY) {
+ /* The Status-Line is in the line buffer. Process it. */
+ status = parse_status_line(ctx, bkt->allocator);
+ if (status)
+ return status;
+
+ /* Good times ahead: we're switching protocols! */
+ if (ctx->sl.code == 101) {
+ ctx->body =
+ serf_bucket_barrier_create(ctx->stream, bkt->allocator);
+ ctx->state = STATE_DONE;
+ break;
+ }
+
+ /* Okay... move on to reading the headers. */
+ ctx->state = STATE_HEADERS;
+ }
+ else {
+ /* The connection closed before we could get the next
+ * response. Treat the request as lost so that our upper
+ * end knows the server never tried to give us a response.
+ */
+ if (APR_STATUS_IS_EOF(status)) {
+ return SERF_ERROR_REQUEST_LOST;
+ }
+ }
+ break;
+ case STATE_HEADERS:
+ status = fetch_headers(bkt, ctx);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ /* If an empty line was read, then we hit the end of the headers.
+ * Move on to the body.
+ */
+ if (ctx->linebuf.state == SERF_LINEBUF_READY && !ctx->linebuf.used) {
+ const void *v;
+
+ /* Advance the state. */
+ ctx->state = STATE_BODY;
+
+ ctx->body =
+ serf_bucket_barrier_create(ctx->stream, bkt->allocator);
+
+ /* Are we C-L, chunked, or conn close? */
+ v = serf_bucket_headers_get(ctx->headers, "Content-Length");
+ if (v) {
+ apr_uint64_t length;
+ length = apr_strtoi64(v, NULL, 10);
+ if (errno == ERANGE) {
+ return APR_FROM_OS_ERROR(ERANGE);
+ }
+ ctx->body = serf_bucket_response_body_create(
+ ctx->body, length, bkt->allocator);
+ }
+ else {
+ v = serf_bucket_headers_get(ctx->headers, "Transfer-Encoding");
+
+ /* Need to handle multiple transfer-encoding. */
+ if (v && strcasecmp("chunked", v) == 0) {
+ ctx->chunked = 1;
+ ctx->body = serf_bucket_dechunk_create(ctx->body,
+ bkt->allocator);
+ }
+
+ if (!v && (ctx->sl.code == 204 || ctx->sl.code == 304)) {
+ ctx->state = STATE_DONE;
+ }
+ }
+ v = serf_bucket_headers_get(ctx->headers, "Content-Encoding");
+ if (v) {
+ /* Need to handle multiple content-encoding. */
+ if (v && strcasecmp("gzip", v) == 0) {
+ ctx->body =
+ serf_bucket_deflate_create(ctx->body, bkt->allocator,
+ SERF_DEFLATE_GZIP);
+ }
+ else if (v && strcasecmp("deflate", v) == 0) {
+ ctx->body =
+ serf_bucket_deflate_create(ctx->body, bkt->allocator,
+ SERF_DEFLATE_DEFLATE);
+ }
+ }
+ /* If we're a HEAD request, we don't receive a body. */
+ if (ctx->head_req) {
+ ctx->state = STATE_DONE;
+ }
+ }
+ break;
+ case STATE_BODY:
+ /* Don't do anything. */
+ break;
+ case STATE_TRAILERS:
+ status = fetch_headers(bkt, ctx);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ /* If an empty line was read, then we're done. */
+ if (ctx->linebuf.state == SERF_LINEBUF_READY && !ctx->linebuf.used) {
+ ctx->state = STATE_DONE;
+ return APR_EOF;
+ }
+ break;
+ case STATE_DONE:
+ return APR_EOF;
+ default:
+ /* Not reachable */
+ return APR_EGENERAL;
+ }
+
+ return status;
+}
+
+static apr_status_t wait_for_body(serf_bucket_t *bkt, response_context_t *ctx)
+{
+ apr_status_t status;
+
+ /* Keep reading and moving through states if we aren't at the BODY */
+ while (ctx->state != STATE_BODY) {
+ status = run_machine(bkt, ctx);
+
+ /* Anything other than APR_SUCCESS means that we cannot immediately
+ * read again (for now).
+ */
+ if (status)
+ return status;
+ }
+ /* in STATE_BODY */
+
+ return APR_SUCCESS;
+}
+
+apr_status_t serf_bucket_response_wait_for_headers(
+ serf_bucket_t *bucket)
+{
+ response_context_t *ctx = bucket->data;
+
+ return wait_for_body(bucket, ctx);
+}
+
+apr_status_t serf_bucket_response_status(
+ serf_bucket_t *bkt,
+ serf_status_line *sline)
+{
+ response_context_t *ctx = bkt->data;
+ apr_status_t status;
+
+ if (ctx->state != STATE_STATUS_LINE) {
+ /* We already read it and moved on. Just return it. */
+ *sline = ctx->sl;
+ return APR_SUCCESS;
+ }
+
+    /* Running the state machine once will either advance it, or report
+     * that the stream doesn't have enough data ready. There is never a
+     * need to run the machine more than once to satisfy this call. We
+     * have to look at the state to tell whether it advanced, though, as
+     * it is quite possible to advance *and* to return APR_EAGAIN.
+     */
+ status = run_machine(bkt, ctx);
+ if (ctx->state == STATE_HEADERS) {
+ *sline = ctx->sl;
+ }
+ else {
+ /* Indicate that we don't have the information yet. */
+ sline->version = 0;
+ }
+
+ return status;
+}
+
+static apr_status_t serf_response_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ response_context_t *ctx = bucket->data;
+ apr_status_t rv;
+
+ rv = wait_for_body(bucket, ctx);
+ if (rv) {
+ /* It's not possible to have read anything yet! */
+ if (APR_STATUS_IS_EOF(rv) || APR_STATUS_IS_EAGAIN(rv)) {
+ *len = 0;
+ }
+ return rv;
+ }
+
+ rv = serf_bucket_read(ctx->body, requested, data, len);
+ if (SERF_BUCKET_READ_ERROR(rv))
+ return rv;
+
+ if (APR_STATUS_IS_EOF(rv)) {
+ if (ctx->chunked) {
+ ctx->state = STATE_TRAILERS;
+ /* Mask the result. */
+ rv = APR_SUCCESS;
+ } else {
+ ctx->state = STATE_DONE;
+ }
+ }
+ return rv;
+}
+
+static apr_status_t serf_response_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ response_context_t *ctx = bucket->data;
+ apr_status_t rv;
+
+ rv = wait_for_body(bucket, ctx);
+ if (rv) {
+ return rv;
+ }
+
+ /* Delegate to the stream bucket to do the readline. */
+ return serf_bucket_readline(ctx->body, acceptable, found, data, len);
+}
+
+apr_status_t serf_response_full_become_aggregate(serf_bucket_t *bucket)
+{
+ response_context_t *ctx = bucket->data;
+ serf_bucket_t *bkt;
+ char buf[256];
+ int size;
+
+ serf_bucket_aggregate_become(bucket);
+
+ /* Add reconstructed status line. */
+ size = apr_snprintf(buf, 256, "HTTP/%d.%d %d ",
+ SERF_HTTP_VERSION_MAJOR(ctx->sl.version),
+ SERF_HTTP_VERSION_MINOR(ctx->sl.version),
+ ctx->sl.code);
+ bkt = serf_bucket_simple_copy_create(buf, size,
+ bucket->allocator);
+ serf_bucket_aggregate_append(bucket, bkt);
+ bkt = serf_bucket_simple_copy_create(ctx->sl.reason, strlen(ctx->sl.reason),
+ bucket->allocator);
+ serf_bucket_aggregate_append(bucket, bkt);
+ bkt = SERF_BUCKET_SIMPLE_STRING_LEN("\r\n", 2,
+ bucket->allocator);
+ serf_bucket_aggregate_append(bucket, bkt);
+
+ /* Add headers and stream buckets in order. */
+ serf_bucket_aggregate_append(bucket, ctx->headers);
+ serf_bucket_aggregate_append(bucket, ctx->stream);
+
+ serf_bucket_mem_free(bucket->allocator, ctx);
+
+ return APR_SUCCESS;
+}
+
+/* ### need to implement */
+#define serf_response_peek NULL
+
+const serf_bucket_type_t serf_bucket_type_response = {
+ "RESPONSE",
+ serf_response_read,
+ serf_response_readline,
+ serf_default_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_response_peek,
+ serf_response_destroy_and_data,
+};
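+
+/* Illustrative sketch (not part of the original source): a typical caller
+ * drives the response bucket by waiting for the status line and headers,
+ * then reading the body until EOF.  The stream/allocator variables and the
+ * consume() callback are assumed to exist in the application, and
+ * APR_EAGAIN handling is condensed; the serf calls are the ones defined
+ * above.
+ *
+ *   serf_bucket_t *resp = serf_bucket_response_create(stream, allocator);
+ *   serf_status_line sline;
+ *   apr_status_t status;
+ *
+ *   status = serf_bucket_response_wait_for_headers(resp);
+ *   if (!SERF_BUCKET_READ_ERROR(status)) {
+ *       serf_bucket_response_status(resp, &sline);
+ *       do {
+ *           const char *data;
+ *           apr_size_t len;
+ *
+ *           status = serf_bucket_read(resp, SERF_READ_ALL_AVAIL, &data, &len);
+ *           if (SERF_BUCKET_READ_ERROR(status))
+ *               break;
+ *           consume(data, len);   (hypothetical application function)
+ *       } while (!APR_STATUS_IS_EOF(status));
+ *   }
+ */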
diff --git a/contrib/serf/buckets/simple_buckets.c b/contrib/serf/buckets/simple_buckets.c
new file mode 100644
index 0000000..7ce7aeb
--- /dev/null
+++ b/contrib/serf/buckets/simple_buckets.c
@@ -0,0 +1,159 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+
+typedef struct {
+ const char *original;
+ const char *current;
+ apr_size_t remaining;
+
+ serf_simple_freefunc_t freefunc;
+ void *baton;
+
+} simple_context_t;
+
+
+static void free_copied_data(void *baton, const char *data)
+{
+ serf_bucket_mem_free(baton, (char*)data);
+}
+
+serf_bucket_t *serf_bucket_simple_create(
+ const char *data,
+ apr_size_t len,
+ serf_simple_freefunc_t freefunc,
+ void *freefunc_baton,
+ serf_bucket_alloc_t *allocator)
+{
+ simple_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->original = ctx->current = data;
+ ctx->remaining = len;
+ ctx->freefunc = freefunc;
+ ctx->baton = freefunc_baton;
+
+ return serf_bucket_create(&serf_bucket_type_simple, allocator, ctx);
+}
+
+serf_bucket_t *serf_bucket_simple_copy_create(
+ const char *data, apr_size_t len,
+ serf_bucket_alloc_t *allocator)
+{
+ simple_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+
+ ctx->original = ctx->current = serf_bucket_mem_alloc(allocator, len);
+ memcpy((char*)ctx->original, data, len);
+
+ ctx->remaining = len;
+ ctx->freefunc = free_copied_data;
+ ctx->baton = allocator;
+
+ return serf_bucket_create(&serf_bucket_type_simple, allocator, ctx);
+}
+
+serf_bucket_t *serf_bucket_simple_own_create(
+ const char *data, apr_size_t len,
+ serf_bucket_alloc_t *allocator)
+{
+ simple_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+
+ ctx->original = ctx->current = data;
+
+ ctx->remaining = len;
+ ctx->freefunc = free_copied_data;
+ ctx->baton = allocator;
+
+ return serf_bucket_create(&serf_bucket_type_simple, allocator, ctx);
+}
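+
+/* Illustrative note (not part of the original source): the three
+ * constructors above differ only in who owns the data.  _create() borrows
+ * the caller's buffer (the optional freefunc is invoked on destroy),
+ * _copy_create() duplicates the buffer into the bucket allocator, and
+ * _own_create() takes ownership of a buffer already allocated from this
+ * allocator and frees it on destroy.  A minimal sketch, assuming the
+ * caller already has an allocator:
+ *
+ *   static const char body[] = "hello";
+ *   serf_bucket_t *b;
+ *
+ *   b = serf_bucket_simple_create(body, sizeof(body) - 1,
+ *                                 NULL, NULL, allocator);
+ */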
+
+static apr_status_t serf_simple_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ simple_context_t *ctx = bucket->data;
+
+ if (requested == SERF_READ_ALL_AVAIL || requested > ctx->remaining)
+ requested = ctx->remaining;
+
+ *data = ctx->current;
+ *len = requested;
+
+ ctx->current += requested;
+ ctx->remaining -= requested;
+
+ return ctx->remaining ? APR_SUCCESS : APR_EOF;
+}
+
+static apr_status_t serf_simple_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ simple_context_t *ctx = bucket->data;
+
+ /* Returned data will be from current position. */
+ *data = ctx->current;
+ serf_util_readline(&ctx->current, &ctx->remaining, acceptable, found);
+
+ /* See how much ctx->current moved forward. */
+ *len = ctx->current - *data;
+
+ return ctx->remaining ? APR_SUCCESS : APR_EOF;
+}
+
+static apr_status_t serf_simple_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ simple_context_t *ctx = bucket->data;
+
+ /* return whatever we have left */
+ *data = ctx->current;
+ *len = ctx->remaining;
+
+ /* we returned everything this bucket will ever hold */
+ return APR_EOF;
+}
+
+static void serf_simple_destroy(serf_bucket_t *bucket)
+{
+ simple_context_t *ctx = bucket->data;
+
+ if (ctx->freefunc)
+ (*ctx->freefunc)(ctx->baton, ctx->original);
+
+ serf_default_destroy_and_data(bucket);
+}
+
+
+const serf_bucket_type_t serf_bucket_type_simple = {
+ "SIMPLE",
+ serf_simple_read,
+ serf_simple_readline,
+ serf_default_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_simple_peek,
+ serf_simple_destroy,
+};
diff --git a/contrib/serf/buckets/socket_buckets.c b/contrib/serf/buckets/socket_buckets.c
new file mode 100644
index 0000000..ef718af
--- /dev/null
+++ b/contrib/serf/buckets/socket_buckets.c
@@ -0,0 +1,125 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+#include <apr_network_io.h>
+
+#include "serf.h"
+#include "serf_private.h"
+#include "serf_bucket_util.h"
+
+
+typedef struct {
+ apr_socket_t *skt;
+
+ serf_databuf_t databuf;
+
+ /* Progress callback */
+ serf_progress_t progress_func;
+ void *progress_baton;
+} socket_context_t;
+
+
+static apr_status_t socket_reader(void *baton, apr_size_t bufsize,
+ char *buf, apr_size_t *len)
+{
+ socket_context_t *ctx = baton;
+ apr_status_t status;
+
+ *len = bufsize;
+ status = apr_socket_recv(ctx->skt, buf, len);
+
+ if (status && !APR_STATUS_IS_EAGAIN(status))
+ serf__log_skt(SOCK_VERBOSE, __FILE__, ctx->skt,
+ "socket_recv error %d\n", status);
+
+ if (*len)
+ serf__log_skt(SOCK_MSG_VERBOSE, __FILE__, ctx->skt,
+ "--- socket_recv:\n%.*s\n-(%d)-\n",
+ *len, buf, *len);
+
+ if (ctx->progress_func)
+ ctx->progress_func(ctx->progress_baton, *len, 0);
+
+ return status;
+}
+
+serf_bucket_t *serf_bucket_socket_create(
+ apr_socket_t *skt,
+ serf_bucket_alloc_t *allocator)
+{
+ socket_context_t *ctx;
+
+ /* Oh, well. */
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->skt = skt;
+
+ serf_databuf_init(&ctx->databuf);
+ ctx->databuf.read = socket_reader;
+ ctx->databuf.read_baton = ctx;
+
+ ctx->progress_func = NULL;
+ ctx->progress_baton = NULL;
+ return serf_bucket_create(&serf_bucket_type_socket, allocator, ctx);
+}
+
+void serf_bucket_socket_set_read_progress_cb(
+ serf_bucket_t *bucket,
+ const serf_progress_t progress_func,
+ void *progress_baton)
+{
+ socket_context_t *ctx = bucket->data;
+
+ ctx->progress_func = progress_func;
+ ctx->progress_baton = progress_baton;
+}
+
+static apr_status_t serf_socket_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ socket_context_t *ctx = bucket->data;
+
+ return serf_databuf_read(&ctx->databuf, requested, data, len);
+}
+
+static apr_status_t serf_socket_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ socket_context_t *ctx = bucket->data;
+
+ return serf_databuf_readline(&ctx->databuf, acceptable, found, data, len);
+}
+
+static apr_status_t serf_socket_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ socket_context_t *ctx = bucket->data;
+
+ return serf_databuf_peek(&ctx->databuf, data, len);
+}
+
+const serf_bucket_type_t serf_bucket_type_socket = {
+ "SOCKET",
+ serf_socket_read,
+ serf_socket_readline,
+ serf_default_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_socket_peek,
+ serf_default_destroy_and_data,
+};
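+
+/* Illustrative sketch (not part of the original source): wrapping a
+ * connected apr_socket_t and attaching a read-progress callback.  The
+ * skt/allocator/app_baton variables and on_progress() function are assumed
+ * to be supplied by the application; see serf.h for the exact
+ * serf_progress_t typedef.
+ *
+ *   static void on_progress(void *baton, apr_off_t read, apr_off_t written)
+ *   {
+ *       ... update application statistics ...
+ *   }
+ *
+ *   serf_bucket_t *sock_bkt = serf_bucket_socket_create(skt, allocator);
+ *   serf_bucket_socket_set_read_progress_cb(sock_bkt, on_progress, app_baton);
+ */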
diff --git a/contrib/serf/buckets/ssl_buckets.c b/contrib/serf/buckets/ssl_buckets.c
new file mode 100644
index 0000000..d2ced94
--- /dev/null
+++ b/contrib/serf/buckets/ssl_buckets.c
@@ -0,0 +1,1775 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ----
+ *
+ * For the OpenSSL thread-safety locking code:
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Originally developed by Aaron Bannert and Justin Erenkrantz, eBuilt.
+ */
+
+#include <apr_pools.h>
+#include <apr_network_io.h>
+#include <apr_portable.h>
+#include <apr_strings.h>
+#include <apr_base64.h>
+#include <apr_version.h>
+#include <apr_atomic.h>
+
+#include "serf.h"
+#include "serf_private.h"
+#include "serf_bucket_util.h"
+
+#include <openssl/bio.h>
+#include <openssl/ssl.h>
+#include <openssl/err.h>
+#include <openssl/pkcs12.h>
+#include <openssl/x509v3.h>
+
+#ifndef APR_VERSION_AT_LEAST /* Introduced in APR 1.3.0 */
+#define APR_VERSION_AT_LEAST(major,minor,patch) \
+ (((major) < APR_MAJOR_VERSION) \
+ || ((major) == APR_MAJOR_VERSION && (minor) < APR_MINOR_VERSION) \
+ || ((major) == APR_MAJOR_VERSION && (minor) == APR_MINOR_VERSION && \
+ (patch) <= APR_PATCH_VERSION))
+#endif /* APR_VERSION_AT_LEAST */
+
+#ifndef APR_ARRAY_PUSH
+#define APR_ARRAY_PUSH(ary,type) (*((type *)apr_array_push(ary)))
+#endif
+
+
+/*
+ * Here's an overview of the SSL bucket's relationship to OpenSSL and serf.
+ *
+ * HTTP request: SSLENCRYPT(REQUEST)
+ * [context.c reads from SSLENCRYPT and writes out to the socket]
+ * HTTP response: RESPONSE(SSLDECRYPT(SOCKET))
+ * [handler function reads from RESPONSE which in turn reads from SSLDECRYPT]
+ *
+ * HTTP request read call path:
+ *
+ * write_to_connection
+ * |- serf_bucket_read on SSLENCRYPT
+ * |- serf_ssl_read
+ * |- serf_databuf_read
+ * |- common_databuf_prep
+ * |- ssl_encrypt
+ * |- 1. Try to read pending encrypted data; If available, return.
+ * |- 2. Try to read from ctx->stream [REQUEST bucket]
+ * |- 3. Call SSL_write with read data
+ * |- ...
+ * |- bio_bucket_read can be called
+ * |- bio_bucket_write with encrypted data
+ * |- store in sink
+ * |- 4. If successful, read pending encrypted data and return.
+ * |- 5. If fails, place read data back in ctx->stream
+ *
+ * HTTP response read call path:
+ *
+ * read_from_connection
+ * |- acceptor
+ * |- handler
+ * |- ...
+ * |- serf_bucket_read(SSLDECRYPT)
+ * |- serf_ssl_read
+ * |- serf_databuf_read
+ * |- ssl_decrypt
+ * |- 1. SSL_read() for pending decrypted data; if any, return.
+ * |- 2. Try to read from ctx->stream [SOCKET bucket]
+ * |- 3. Append data to ssl_ctx->source
+ * |- 4. Call SSL_read()
+ * |- ...
+ * |- bio_bucket_write can be called
+ * |- bio_bucket_read
+ * |- read data from ssl_ctx->source
+ * |- If data read, return it.
+ * |- If an error, set the STATUS value and return.
+ *
+ */
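+
+/* Illustrative sketch (not part of the original source): the usual wiring
+ * that produces the pipelines described above.  A decrypt bucket wraps the
+ * socket bucket, an encrypt bucket wraps the outgoing request bucket, and
+ * both share one serf_ssl_context_t (passing NULL the first time creates
+ * it).  The skt_bkt/req_bkt/allocator variables are assumed to exist.
+ *
+ *   serf_bucket_t *decrypt, *encrypt;
+ *   serf_ssl_context_t *ssl_ctx;
+ *
+ *   decrypt = serf_bucket_ssl_decrypt_create(skt_bkt, NULL, allocator);
+ *   ssl_ctx = serf_bucket_ssl_decrypt_context_get(decrypt);
+ *   encrypt = serf_bucket_ssl_encrypt_create(req_bkt, ssl_ctx, allocator);
+ */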
+
+typedef struct bucket_list {
+ serf_bucket_t *bucket;
+ struct bucket_list *next;
+} bucket_list_t;
+
+typedef struct {
+ /* Helper to read data. Wraps stream. */
+ serf_databuf_t databuf;
+
+ /* Our source for more data. */
+ serf_bucket_t *stream;
+
+ /* The next set of buckets */
+ bucket_list_t *stream_next;
+
+ /* The status of the last thing we read. */
+ apr_status_t status;
+ apr_status_t exhausted;
+ int exhausted_reset;
+
+ /* Data we've read but not processed. */
+ serf_bucket_t *pending;
+} serf_ssl_stream_t;
+
+struct serf_ssl_context_t {
+ /* How many open buckets refer to this context. */
+ int refcount;
+
+ /* The pool that this context uses. */
+ apr_pool_t *pool;
+
+ /* The allocator associated with the above pool. */
+ serf_bucket_alloc_t *allocator;
+
+ /* Internal OpenSSL parameters */
+ SSL_CTX *ctx;
+ SSL *ssl;
+ BIO *bio;
+
+ serf_ssl_stream_t encrypt;
+ serf_ssl_stream_t decrypt;
+
+ /* Client cert callbacks */
+ serf_ssl_need_client_cert_t cert_callback;
+ void *cert_userdata;
+ apr_pool_t *cert_cache_pool;
+ const char *cert_file_success;
+
+ /* Client cert PW callbacks */
+ serf_ssl_need_cert_password_t cert_pw_callback;
+ void *cert_pw_userdata;
+ apr_pool_t *cert_pw_cache_pool;
+ const char *cert_pw_success;
+
+ /* Server cert callbacks */
+ serf_ssl_need_server_cert_t server_cert_callback;
+ serf_ssl_server_cert_chain_cb_t server_cert_chain_callback;
+ void *server_cert_userdata;
+
+ const char *cert_path;
+
+ X509 *cached_cert;
+ EVP_PKEY *cached_cert_pw;
+
+ apr_status_t pending_err;
+
+ /* Status of a fatal error, returned on subsequent encrypt or decrypt
+ requests. */
+ apr_status_t fatal_err;
+};
+
+typedef struct {
+ /* The bucket-independent ssl context that this bucket is associated with */
+ serf_ssl_context_t *ssl_ctx;
+
+ /* Pointer to the 'right' databuf. */
+ serf_databuf_t *databuf;
+
+ /* Pointer to our stream, so we can find it later. */
+ serf_bucket_t **our_stream;
+} ssl_context_t;
+
+struct serf_ssl_certificate_t {
+ X509 *ssl_cert;
+ int depth;
+};
+
+static void disable_compression(serf_ssl_context_t *ssl_ctx);
+
+#if SSL_VERBOSE
+/* Log all ssl alerts that we receive from the server. */
+static void
+apps_ssl_info_callback(const SSL *s, int where, int ret)
+{
+ const char *str;
+ int w;
+ w = where & ~SSL_ST_MASK;
+
+ if (w & SSL_ST_CONNECT)
+ str = "SSL_connect";
+ else if (w & SSL_ST_ACCEPT)
+ str = "SSL_accept";
+ else
+ str = "undefined";
+
+ if (where & SSL_CB_LOOP) {
+ serf__log(SSL_VERBOSE, __FILE__, "%s:%s\n", str,
+ SSL_state_string_long(s));
+ }
+ else if (where & SSL_CB_ALERT) {
+ str = (where & SSL_CB_READ) ? "read" : "write";
+ serf__log(SSL_VERBOSE, __FILE__, "SSL3 alert %s:%s:%s\n",
+ str,
+ SSL_alert_type_string_long(ret),
+ SSL_alert_desc_string_long(ret));
+ }
+ else if (where & SSL_CB_EXIT) {
+ if (ret == 0)
+ serf__log(SSL_VERBOSE, __FILE__, "%s:failed in %s\n", str,
+ SSL_state_string_long(s));
+ else if (ret < 0) {
+ serf__log(SSL_VERBOSE, __FILE__, "%s:error in %s\n", str,
+ SSL_state_string_long(s));
+ }
+ }
+}
+#endif
+
+/* Returns the amount read. */
+static int bio_bucket_read(BIO *bio, char *in, int inlen)
+{
+ serf_ssl_context_t *ctx = bio->ptr;
+ const char *data;
+ apr_status_t status;
+ apr_size_t len;
+
+ serf__log(SSL_VERBOSE, __FILE__, "bio_bucket_read called for %d bytes\n",
+ inlen);
+
+ if (ctx->encrypt.status == SERF_ERROR_WAIT_CONN
+ && BIO_should_read(ctx->bio)) {
+ serf__log(SSL_VERBOSE, __FILE__,
+ "bio_bucket_read waiting: (%d %d %d)\n",
+ BIO_should_retry(ctx->bio), BIO_should_read(ctx->bio),
+ BIO_get_retry_flags(ctx->bio));
+ /* Falling back... */
+ ctx->encrypt.exhausted_reset = 1;
+ BIO_clear_retry_flags(bio);
+ }
+
+ status = serf_bucket_read(ctx->decrypt.pending, inlen, &data, &len);
+
+ ctx->decrypt.status = status;
+
+ serf__log(SSL_VERBOSE, __FILE__, "bio_bucket_read received %d bytes (%d)\n",
+ len, status);
+
+ if (!SERF_BUCKET_READ_ERROR(status)) {
+ /* Oh suck. */
+ if (len) {
+ memcpy(in, data, len);
+ return len;
+ }
+ if (APR_STATUS_IS_EOF(status)) {
+ BIO_set_retry_read(bio);
+ return -1;
+ }
+ }
+
+ return -1;
+}
+
+/* Returns the amount written. */
+static int bio_bucket_write(BIO *bio, const char *in, int inl)
+{
+ serf_ssl_context_t *ctx = bio->ptr;
+ serf_bucket_t *tmp;
+
+ serf__log(SSL_VERBOSE, __FILE__, "bio_bucket_write called for %d bytes\n",
+ inl);
+
+ if (ctx->encrypt.status == SERF_ERROR_WAIT_CONN
+ && !BIO_should_read(ctx->bio)) {
+ serf__log(SSL_VERBOSE, __FILE__,
+ "bio_bucket_write waiting: (%d %d %d)\n",
+ BIO_should_retry(ctx->bio), BIO_should_read(ctx->bio),
+ BIO_get_retry_flags(ctx->bio));
+ /* Falling back... */
+ ctx->encrypt.exhausted_reset = 1;
+ BIO_clear_retry_flags(bio);
+ }
+
+ tmp = serf_bucket_simple_copy_create(in, inl,
+ ctx->encrypt.pending->allocator);
+
+ serf_bucket_aggregate_append(ctx->encrypt.pending, tmp);
+
+ return inl;
+}
+
+/* Returns the amount read. */
+static int bio_file_read(BIO *bio, char *in, int inlen)
+{
+ apr_file_t *file = bio->ptr;
+ apr_status_t status;
+ apr_size_t len;
+
+ BIO_clear_retry_flags(bio);
+
+ len = inlen;
+ status = apr_file_read(file, in, &len);
+
+ if (!SERF_BUCKET_READ_ERROR(status)) {
+ /* Oh suck. */
+ if (APR_STATUS_IS_EOF(status)) {
+ BIO_set_retry_read(bio);
+ return -1;
+ } else {
+ return len;
+ }
+ }
+
+ return -1;
+}
+
+/* Returns the amount written. */
+static int bio_file_write(BIO *bio, const char *in, int inl)
+{
+ apr_file_t *file = bio->ptr;
+ apr_size_t nbytes;
+
+ BIO_clear_retry_flags(bio);
+
+ nbytes = inl;
+ apr_file_write(file, in, &nbytes);
+
+ return nbytes;
+}
+
+static int bio_file_gets(BIO *bio, char *in, int inlen)
+{
+ return bio_file_read(bio, in, inlen);
+}
+
+static int bio_bucket_create(BIO *bio)
+{
+ bio->shutdown = 1;
+ bio->init = 1;
+ bio->num = -1;
+ bio->ptr = NULL;
+
+ return 1;
+}
+
+static int bio_bucket_destroy(BIO *bio)
+{
+ /* Did we already free this? */
+ if (bio == NULL) {
+ return 0;
+ }
+
+ return 1;
+}
+
+static long bio_bucket_ctrl(BIO *bio, int cmd, long num, void *ptr)
+{
+ long ret = 1;
+
+ switch (cmd) {
+ default:
+ /* abort(); */
+ break;
+ case BIO_CTRL_FLUSH:
+ /* At this point we can't force a flush. */
+ break;
+ case BIO_CTRL_PUSH:
+ case BIO_CTRL_POP:
+ ret = 0;
+ break;
+ }
+ return ret;
+}
+
+static BIO_METHOD bio_bucket_method = {
+ BIO_TYPE_MEM,
+ "Serf SSL encryption and decryption buckets",
+ bio_bucket_write,
+ bio_bucket_read,
+ NULL, /* Is this called? */
+ NULL, /* Is this called? */
+ bio_bucket_ctrl,
+ bio_bucket_create,
+ bio_bucket_destroy,
+#ifdef OPENSSL_VERSION_NUMBER
+ NULL /* sslc does not have the callback_ctrl field */
+#endif
+};
+
+static BIO_METHOD bio_file_method = {
+ BIO_TYPE_FILE,
+ "Wrapper around APR file structures",
+ bio_file_write,
+ bio_file_read,
+ NULL, /* Is this called? */
+ bio_file_gets, /* Is this called? */
+ bio_bucket_ctrl,
+ bio_bucket_create,
+ bio_bucket_destroy,
+#ifdef OPENSSL_VERSION_NUMBER
+ NULL /* sslc does not have the callback_ctrl field */
+#endif
+};
+
+static int
+validate_server_certificate(int cert_valid, X509_STORE_CTX *store_ctx)
+{
+ SSL *ssl;
+ serf_ssl_context_t *ctx;
+ X509 *server_cert;
+ int err, depth;
+ int failures = 0;
+
+ ssl = X509_STORE_CTX_get_ex_data(store_ctx,
+ SSL_get_ex_data_X509_STORE_CTX_idx());
+ ctx = SSL_get_app_data(ssl);
+
+ server_cert = X509_STORE_CTX_get_current_cert(store_ctx);
+ depth = X509_STORE_CTX_get_error_depth(store_ctx);
+
+    /* If the certificate was found invalid, get the error and convert it to
+       something our caller will understand. */
+ if (! cert_valid) {
+ err = X509_STORE_CTX_get_error(store_ctx);
+
+ switch(err) {
+ case X509_V_ERR_CERT_NOT_YET_VALID:
+ failures |= SERF_SSL_CERT_NOTYETVALID;
+ break;
+ case X509_V_ERR_CERT_HAS_EXPIRED:
+ failures |= SERF_SSL_CERT_EXPIRED;
+ break;
+ case X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT:
+ case X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN:
+ failures |= SERF_SSL_CERT_SELF_SIGNED;
+ break;
+ case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY:
+ case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT:
+ case X509_V_ERR_CERT_UNTRUSTED:
+ case X509_V_ERR_INVALID_CA:
+ failures |= SERF_SSL_CERT_UNKNOWNCA;
+ break;
+ case X509_V_ERR_CERT_REVOKED:
+ failures |= SERF_SSL_CERT_REVOKED;
+ break;
+ default:
+ failures |= SERF_SSL_CERT_UNKNOWN_FAILURE;
+ break;
+ }
+ }
+
+ /* Check certificate expiry dates. */
+ if (X509_cmp_current_time(X509_get_notBefore(server_cert)) >= 0) {
+ failures |= SERF_SSL_CERT_NOTYETVALID;
+ }
+ else if (X509_cmp_current_time(X509_get_notAfter(server_cert)) <= 0) {
+ failures |= SERF_SSL_CERT_EXPIRED;
+ }
+
+ if (ctx->server_cert_callback &&
+ (depth == 0 || failures)) {
+ apr_status_t status;
+ serf_ssl_certificate_t *cert;
+ apr_pool_t *subpool;
+
+ apr_pool_create(&subpool, ctx->pool);
+
+ cert = apr_palloc(subpool, sizeof(serf_ssl_certificate_t));
+ cert->ssl_cert = server_cert;
+ cert->depth = depth;
+
+ /* Callback for further verification. */
+ status = ctx->server_cert_callback(ctx->server_cert_userdata,
+ failures, cert);
+ if (status == APR_SUCCESS)
+ cert_valid = 1;
+ else {
+ /* Even if openssl found the certificate valid, the application
+ told us to reject it. */
+ cert_valid = 0;
+ /* Pass the error back to the caller through the context-run. */
+ ctx->pending_err = status;
+ }
+ apr_pool_destroy(subpool);
+ }
+
+ if (ctx->server_cert_chain_callback
+ && (depth == 0 || failures)) {
+ apr_status_t status;
+ STACK_OF(X509) *chain;
+ const serf_ssl_certificate_t **certs;
+ int certs_len;
+ apr_pool_t *subpool;
+
+ apr_pool_create(&subpool, ctx->pool);
+
+ /* Borrow the chain to pass to the callback. */
+ chain = X509_STORE_CTX_get_chain(store_ctx);
+
+ /* If the chain can't be retrieved, just pass the current
+ certificate. */
+ /* ### can this actually happen with _get_chain() ? */
+ if (!chain) {
+ serf_ssl_certificate_t *cert = apr_palloc(subpool, sizeof(*cert));
+
+ cert->ssl_cert = server_cert;
+ cert->depth = depth;
+
+ /* Room for the server_cert and a trailing NULL. */
+ certs = apr_palloc(subpool, sizeof(*certs) * 2);
+ certs[0] = cert;
+
+ certs_len = 1;
+ } else {
+ int i;
+
+ certs_len = sk_X509_num(chain);
+
+ /* Room for all the certs and a trailing NULL. */
+ certs = apr_palloc(subpool, sizeof(*certs) * (certs_len + 1));
+ for (i = 0; i < certs_len; ++i) {
+ serf_ssl_certificate_t *cert;
+
+ cert = apr_palloc(subpool, sizeof(*cert));
+ cert->ssl_cert = sk_X509_value(chain, i);
+ cert->depth = i;
+
+ certs[i] = cert;
+ }
+ }
+ certs[certs_len] = NULL;
+
+ /* Callback for further verification. */
+ status = ctx->server_cert_chain_callback(ctx->server_cert_userdata,
+ failures, depth,
+ certs, certs_len);
+ if (status == APR_SUCCESS) {
+ cert_valid = 1;
+ } else {
+ /* Even if openssl found the certificate valid, the application
+ told us to reject it. */
+ cert_valid = 0;
+ /* Pass the error back to the caller through the context-run. */
+ ctx->pending_err = status;
+ }
+
+ apr_pool_destroy(subpool);
+ }
+
+ /* Return a specific error if the server certificate is not accepted by
+ OpenSSL and the application has not set callbacks to override this. */
+ if (!cert_valid &&
+ !ctx->server_cert_chain_callback &&
+ !ctx->server_cert_callback)
+ {
+ ctx->pending_err = SERF_ERROR_SSL_CERT_FAILED;
+ }
+
+ return cert_valid;
+}
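+
+/* Illustrative sketch (not part of the original source): an application
+ * callback of type serf_ssl_need_server_cert_t as invoked above.  Returning
+ * APR_SUCCESS overrides OpenSSL's verdict; anything else rejects the
+ * certificate.  The particular policy shown (tolerating only self-signed
+ * failures) is just an example.
+ *
+ *   static apr_status_t accept_self_signed(void *data, int failures,
+ *                                          const serf_ssl_certificate_t *cert)
+ *   {
+ *       if (failures & ~SERF_SSL_CERT_SELF_SIGNED)
+ *           return SERF_ERROR_SSL_CERT_FAILED;
+ *       return APR_SUCCESS;
+ *   }
+ *
+ *   serf_ssl_server_cert_callback_set(ssl_ctx, accept_self_signed, NULL);
+ */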
+
+/* This function reads an encrypted stream and returns the decrypted stream. */
+static apr_status_t ssl_decrypt(void *baton, apr_size_t bufsize,
+ char *buf, apr_size_t *len)
+{
+ serf_ssl_context_t *ctx = baton;
+ apr_size_t priv_len;
+ apr_status_t status;
+ const char *data;
+ int ssl_len;
+
+ if (ctx->fatal_err)
+ return ctx->fatal_err;
+
+ serf__log(SSL_VERBOSE, __FILE__, "ssl_decrypt: begin %d\n", bufsize);
+
+ /* Is there some data waiting to be read? */
+ ssl_len = SSL_read(ctx->ssl, buf, bufsize);
+ if (ssl_len > 0) {
+ serf__log(SSL_VERBOSE, __FILE__,
+ "ssl_decrypt: %d bytes (%d); status: %d; flags: %d\n",
+ ssl_len, bufsize, ctx->decrypt.status,
+ BIO_get_retry_flags(ctx->bio));
+ *len = ssl_len;
+ return APR_SUCCESS;
+ }
+
+ status = serf_bucket_read(ctx->decrypt.stream, bufsize, &data, &priv_len);
+
+ if (!SERF_BUCKET_READ_ERROR(status) && priv_len) {
+ serf_bucket_t *tmp;
+
+ serf__log(SSL_VERBOSE, __FILE__,
+ "ssl_decrypt: read %d bytes (%d); status: %d\n",
+ priv_len, bufsize, status);
+
+ tmp = serf_bucket_simple_copy_create(data, priv_len,
+ ctx->decrypt.pending->allocator);
+
+ serf_bucket_aggregate_append(ctx->decrypt.pending, tmp);
+
+ ssl_len = SSL_read(ctx->ssl, buf, bufsize);
+ if (ssl_len < 0) {
+ int ssl_err;
+
+ ssl_err = SSL_get_error(ctx->ssl, ssl_len);
+ switch (ssl_err) {
+ case SSL_ERROR_SYSCALL:
+ *len = 0;
+ /* Return the underlying network error that caused OpenSSL
+ to fail. ### This can be a crypt error! */
+ status = ctx->decrypt.status;
+ break;
+ case SSL_ERROR_WANT_READ:
+ case SSL_ERROR_WANT_WRITE:
+ *len = 0;
+ status = APR_EAGAIN;
+ break;
+ case SSL_ERROR_SSL:
+ *len = 0;
+ if (ctx->pending_err) {
+ status = ctx->pending_err;
+ ctx->pending_err = 0;
+ } else {
+ ctx->fatal_err = status = SERF_ERROR_SSL_COMM_FAILED;
+ }
+ break;
+ default:
+ *len = 0;
+ ctx->fatal_err = status = SERF_ERROR_SSL_COMM_FAILED;
+ break;
+ }
+ } else if (ssl_len == 0) {
+ /* The server shut down the connection. */
+ int ssl_err, shutdown;
+ *len = 0;
+
+ /* Check for SSL_RECEIVED_SHUTDOWN */
+ shutdown = SSL_get_shutdown(ctx->ssl);
+ /* Check for SSL_ERROR_ZERO_RETURN */
+ ssl_err = SSL_get_error(ctx->ssl, ssl_len);
+
+ if (shutdown == SSL_RECEIVED_SHUTDOWN &&
+ ssl_err == SSL_ERROR_ZERO_RETURN) {
+ /* The server closed the SSL session. While this doesn't
+               necessarily mean the connection is closed, let's close
+ it here anyway.
+ We can optimize this later. */
+ serf__log(SSL_VERBOSE, __FILE__,
+ "ssl_decrypt: SSL read error: server"
+ " shut down connection!\n");
+ status = APR_EOF;
+ } else {
+ /* A fatal error occurred. */
+ ctx->fatal_err = status = SERF_ERROR_SSL_COMM_FAILED;
+ }
+ } else {
+ *len = ssl_len;
+ serf__log(SSL_MSG_VERBOSE, __FILE__,
+ "---\n%.*s\n-(%d)-\n", *len, buf, *len);
+ }
+ }
+ else {
+ *len = 0;
+ }
+ serf__log(SSL_VERBOSE, __FILE__,
+ "ssl_decrypt: %d %d %d\n", status, *len,
+ BIO_get_retry_flags(ctx->bio));
+
+ return status;
+}
+
+/* This function reads a decrypted stream and returns an encrypted stream. */
+static apr_status_t ssl_encrypt(void *baton, apr_size_t bufsize,
+ char *buf, apr_size_t *len)
+{
+ const char *data;
+ apr_size_t interim_bufsize;
+ serf_ssl_context_t *ctx = baton;
+ apr_status_t status;
+
+ if (ctx->fatal_err)
+ return ctx->fatal_err;
+
+ serf__log(SSL_VERBOSE, __FILE__, "ssl_encrypt: begin %d\n", bufsize);
+
+ /* Try to read already encrypted but unread data first. */
+ status = serf_bucket_read(ctx->encrypt.pending, bufsize, &data, len);
+ if (SERF_BUCKET_READ_ERROR(status)) {
+ return status;
+ }
+
+ /* Aha, we read something. Return that now. */
+ if (*len) {
+ memcpy(buf, data, *len);
+ if (APR_STATUS_IS_EOF(status)) {
+ status = APR_SUCCESS;
+ }
+
+ serf__log(SSL_VERBOSE, __FILE__, "ssl_encrypt: %d %d %d (quick read)\n",
+ status, *len, BIO_get_retry_flags(ctx->bio));
+
+ return status;
+ }
+
+ if (BIO_should_retry(ctx->bio) && BIO_should_write(ctx->bio)) {
+ serf__log(SSL_VERBOSE, __FILE__,
+ "ssl_encrypt: %d %d %d (should write exit)\n",
+ status, *len, BIO_get_retry_flags(ctx->bio));
+
+ return APR_EAGAIN;
+ }
+
+ /* If we were previously blocked, unblock ourselves now. */
+ if (BIO_should_read(ctx->bio)) {
+ serf__log(SSL_VERBOSE, __FILE__, "ssl_encrypt: reset %d %d (%d %d %d)\n",
+ status, ctx->encrypt.status,
+ BIO_should_retry(ctx->bio), BIO_should_read(ctx->bio),
+ BIO_get_retry_flags(ctx->bio));
+
+ ctx->encrypt.status = APR_SUCCESS;
+ ctx->encrypt.exhausted_reset = 0;
+ }
+
+ /* Oh well, read from our stream now. */
+ interim_bufsize = bufsize;
+ do {
+ apr_size_t interim_len;
+
+ if (!ctx->encrypt.status) {
+ struct iovec vecs[64];
+ int vecs_read;
+
+ status = serf_bucket_read_iovec(ctx->encrypt.stream,
+ interim_bufsize, 64, vecs,
+ &vecs_read);
+
+ if (!SERF_BUCKET_READ_ERROR(status) && vecs_read) {
+ char *vecs_data;
+ int i, cur, vecs_data_len;
+ int ssl_len;
+
+ /* Combine the buffers of the iovec into one buffer, as
+                   that is what SSL_write requires. */
+ vecs_data_len = 0;
+ for (i = 0; i < vecs_read; i++) {
+ vecs_data_len += vecs[i].iov_len;
+ }
+
+ vecs_data = serf_bucket_mem_alloc(ctx->allocator,
+ vecs_data_len);
+
+ cur = 0;
+ for (i = 0; i < vecs_read; i++) {
+ memcpy(vecs_data + cur, vecs[i].iov_base, vecs[i].iov_len);
+ cur += vecs[i].iov_len;
+ }
+
+ interim_bufsize -= vecs_data_len;
+ interim_len = vecs_data_len;
+
+ serf__log(SSL_VERBOSE, __FILE__,
+ "ssl_encrypt: bucket read %d bytes; "\
+ "status %d\n", interim_len, status);
+ serf__log(SSL_MSG_VERBOSE, __FILE__, "---\n%.*s\n-(%d)-\n",
+ interim_len, vecs_data, interim_len);
+
+ /* Stash our status away. */
+ ctx->encrypt.status = status;
+
+ ssl_len = SSL_write(ctx->ssl, vecs_data, interim_len);
+
+ serf__log(SSL_VERBOSE, __FILE__,
+ "ssl_encrypt: SSL write: %d\n", ssl_len);
+
+ /* If we failed to write... */
+ if (ssl_len < 0) {
+ int ssl_err;
+
+ /* Ah, bugger. We need to put that data back.
+                       Note: use the copy here; we do not own the original iovec
+                       data buffer, so it will be freed on the next read. */
+ serf_bucket_t *vecs_copy =
+ serf_bucket_simple_own_create(vecs_data,
+ vecs_data_len,
+ ctx->allocator);
+ serf_bucket_aggregate_prepend(ctx->encrypt.stream,
+ vecs_copy);
+
+ ssl_err = SSL_get_error(ctx->ssl, ssl_len);
+
+ serf__log(SSL_VERBOSE, __FILE__,
+ "ssl_encrypt: SSL write error: %d\n", ssl_err);
+
+ if (ssl_err == SSL_ERROR_SYSCALL) {
+ /* Return the underlying network error that caused OpenSSL
+ to fail. ### This can be a decrypt error! */
+ status = ctx->encrypt.status;
+ if (SERF_BUCKET_READ_ERROR(status)) {
+ return status;
+ }
+ }
+ else {
+ /* Oh, no. */
+ if (ssl_err == SSL_ERROR_WANT_READ) {
+ status = SERF_ERROR_WAIT_CONN;
+ }
+ else {
+ ctx->fatal_err = status =
+ SERF_ERROR_SSL_COMM_FAILED;
+ }
+ }
+
+ serf__log(SSL_VERBOSE, __FILE__,
+ "ssl_encrypt: SSL write error: %d %d\n",
+ status, *len);
+ } else {
+ /* We're done with this data. */
+ serf_bucket_mem_free(ctx->allocator, vecs_data);
+ }
+ }
+ }
+ else {
+ interim_len = 0;
+ *len = 0;
+ status = ctx->encrypt.status;
+ }
+
+ } while (!status && interim_bufsize);
+
+ /* Okay, we exhausted our underlying stream. */
+ if (!SERF_BUCKET_READ_ERROR(status)) {
+ apr_status_t agg_status;
+ struct iovec vecs[64];
+ int vecs_read, i;
+
+ /* We read something! */
+ agg_status = serf_bucket_read_iovec(ctx->encrypt.pending, bufsize,
+ 64, vecs, &vecs_read);
+ *len = 0;
+ for (i = 0; i < vecs_read; i++) {
+ memcpy(buf + *len, vecs[i].iov_base, vecs[i].iov_len);
+ *len += vecs[i].iov_len;
+ }
+
+ serf__log(SSL_VERBOSE, __FILE__,
+ "ssl_encrypt read agg: %d %d %d %d\n", status, agg_status,
+ ctx->encrypt.status, *len);
+
+ if (!agg_status) {
+ status = agg_status;
+ }
+ }
+
+ if (status == SERF_ERROR_WAIT_CONN
+ && BIO_should_retry(ctx->bio) && BIO_should_read(ctx->bio)) {
+ ctx->encrypt.exhausted = ctx->encrypt.status;
+ ctx->encrypt.status = SERF_ERROR_WAIT_CONN;
+ }
+
+ serf__log(SSL_VERBOSE, __FILE__,
+ "ssl_encrypt finished: %d %d (%d %d %d)\n", status, *len,
+ BIO_should_retry(ctx->bio), BIO_should_read(ctx->bio),
+ BIO_get_retry_flags(ctx->bio));
+
+ return status;
+}
+
+#if APR_HAS_THREADS
+static apr_pool_t *ssl_pool;
+static apr_thread_mutex_t **ssl_locks;
+
+typedef struct CRYPTO_dynlock_value {
+ apr_thread_mutex_t *lock;
+} CRYPTO_dynlock_value;
+
+static CRYPTO_dynlock_value *ssl_dyn_create(const char* file, int line)
+{
+ CRYPTO_dynlock_value *l;
+ apr_status_t rv;
+
+ l = apr_palloc(ssl_pool, sizeof(CRYPTO_dynlock_value));
+ rv = apr_thread_mutex_create(&l->lock, APR_THREAD_MUTEX_DEFAULT, ssl_pool);
+ if (rv != APR_SUCCESS) {
+ /* FIXME: return error here */
+ }
+ return l;
+}
+
+static void ssl_dyn_lock(int mode, CRYPTO_dynlock_value *l, const char *file,
+ int line)
+{
+ if (mode & CRYPTO_LOCK) {
+ apr_thread_mutex_lock(l->lock);
+ }
+ else if (mode & CRYPTO_UNLOCK) {
+ apr_thread_mutex_unlock(l->lock);
+ }
+}
+
+static void ssl_dyn_destroy(CRYPTO_dynlock_value *l, const char *file,
+ int line)
+{
+ apr_thread_mutex_destroy(l->lock);
+}
+
+static void ssl_lock(int mode, int n, const char *file, int line)
+{
+ if (mode & CRYPTO_LOCK) {
+ apr_thread_mutex_lock(ssl_locks[n]);
+ }
+ else if (mode & CRYPTO_UNLOCK) {
+ apr_thread_mutex_unlock(ssl_locks[n]);
+ }
+}
+
+static unsigned long ssl_id(void)
+{
+ /* FIXME: This is lame and not portable. -aaron */
+ return (unsigned long) apr_os_thread_current();
+}
+
+static apr_status_t cleanup_ssl(void *data)
+{
+ CRYPTO_set_locking_callback(NULL);
+ CRYPTO_set_id_callback(NULL);
+ CRYPTO_set_dynlock_create_callback(NULL);
+ CRYPTO_set_dynlock_lock_callback(NULL);
+ CRYPTO_set_dynlock_destroy_callback(NULL);
+
+ return APR_SUCCESS;
+}
+
+#endif
+
+static apr_uint32_t have_init_ssl = 0;
+
+static void init_ssl_libraries(void)
+{
+ apr_uint32_t val;
+#if APR_VERSION_AT_LEAST(1,0,0)
+ val = apr_atomic_xchg32(&have_init_ssl, 1);
+#else
+ val = apr_atomic_cas(&have_init_ssl, 1, 0);
+#endif
+
+ if (!val) {
+#if APR_HAS_THREADS
+ int i, numlocks;
+#endif
+
+#ifdef SSL_VERBOSE
+ /* Warn when compile-time and run-time version of OpenSSL differ in
+ major/minor version number. */
+ long libver = SSLeay();
+
+ if ((libver ^ OPENSSL_VERSION_NUMBER) & 0xFFF00000) {
+ serf__log(SSL_VERBOSE, __FILE__,
+ "Warning: OpenSSL library version mismatch, compile-time "
+ "was %lx, runtime is %lx.\n",
+ OPENSSL_VERSION_NUMBER, libver);
+ }
+#endif
+
+ CRYPTO_malloc_init();
+ ERR_load_crypto_strings();
+ SSL_load_error_strings();
+ SSL_library_init();
+ OpenSSL_add_all_algorithms();
+
+#if APR_HAS_THREADS
+ numlocks = CRYPTO_num_locks();
+ apr_pool_create(&ssl_pool, NULL);
+ ssl_locks = apr_palloc(ssl_pool, sizeof(apr_thread_mutex_t*)*numlocks);
+ for (i = 0; i < numlocks; i++) {
+ apr_status_t rv;
+
+ /* Intraprocess locks don't /need/ a filename... */
+ rv = apr_thread_mutex_create(&ssl_locks[i],
+ APR_THREAD_MUTEX_DEFAULT, ssl_pool);
+ if (rv != APR_SUCCESS) {
+ /* FIXME: error out here */
+ }
+ }
+ CRYPTO_set_locking_callback(ssl_lock);
+ CRYPTO_set_id_callback(ssl_id);
+ CRYPTO_set_dynlock_create_callback(ssl_dyn_create);
+ CRYPTO_set_dynlock_lock_callback(ssl_dyn_lock);
+ CRYPTO_set_dynlock_destroy_callback(ssl_dyn_destroy);
+
+ apr_pool_cleanup_register(ssl_pool, NULL, cleanup_ssl, cleanup_ssl);
+#endif
+ }
+}
+
+static int ssl_need_client_cert(SSL *ssl, X509 **cert, EVP_PKEY **pkey)
+{
+ serf_ssl_context_t *ctx = SSL_get_app_data(ssl);
+ apr_status_t status;
+
+ if (ctx->cached_cert) {
+ *cert = ctx->cached_cert;
+ *pkey = ctx->cached_cert_pw;
+ return 1;
+ }
+
+ while (ctx->cert_callback) {
+ const char *cert_path;
+ apr_file_t *cert_file;
+ BIO *bio;
+ PKCS12 *p12;
+ int i;
+ int retrying_success = 0;
+
+ if (ctx->cert_file_success) {
+ status = APR_SUCCESS;
+ cert_path = ctx->cert_file_success;
+ ctx->cert_file_success = NULL;
+ retrying_success = 1;
+ } else {
+ status = ctx->cert_callback(ctx->cert_userdata, &cert_path);
+ }
+
+ if (status || !cert_path) {
+ break;
+ }
+
+        /* Load the X.509 cert file stored in PKCS#12 format. */
+ status = apr_file_open(&cert_file, cert_path, APR_READ, APR_OS_DEFAULT,
+ ctx->pool);
+
+ if (status) {
+ continue;
+ }
+
+ bio = BIO_new(&bio_file_method);
+ bio->ptr = cert_file;
+
+ ctx->cert_path = cert_path;
+ p12 = d2i_PKCS12_bio(bio, NULL);
+ apr_file_close(cert_file);
+
+ i = PKCS12_parse(p12, NULL, pkey, cert, NULL);
+
+ if (i == 1) {
+ PKCS12_free(p12);
+ ctx->cached_cert = *cert;
+ ctx->cached_cert_pw = *pkey;
+ if (!retrying_success && ctx->cert_cache_pool) {
+ const char *c;
+
+ c = apr_pstrdup(ctx->cert_cache_pool, ctx->cert_path);
+
+ apr_pool_userdata_setn(c, "serf:ssl:cert",
+ apr_pool_cleanup_null,
+ ctx->cert_cache_pool);
+ }
+ return 1;
+ }
+ else {
+ int err = ERR_get_error();
+ ERR_clear_error();
+ if (ERR_GET_LIB(err) == ERR_LIB_PKCS12 &&
+ ERR_GET_REASON(err) == PKCS12_R_MAC_VERIFY_FAILURE) {
+ if (ctx->cert_pw_callback) {
+ const char *password;
+
+ if (ctx->cert_pw_success) {
+ status = APR_SUCCESS;
+ password = ctx->cert_pw_success;
+ ctx->cert_pw_success = NULL;
+ } else {
+ status = ctx->cert_pw_callback(ctx->cert_pw_userdata,
+ ctx->cert_path,
+ &password);
+ }
+
+ if (!status && password) {
+ i = PKCS12_parse(p12, password, pkey, cert, NULL);
+ if (i == 1) {
+ PKCS12_free(p12);
+ ctx->cached_cert = *cert;
+ ctx->cached_cert_pw = *pkey;
+ if (!retrying_success && ctx->cert_cache_pool) {
+ const char *c;
+
+ c = apr_pstrdup(ctx->cert_cache_pool,
+ ctx->cert_path);
+
+ apr_pool_userdata_setn(c, "serf:ssl:cert",
+ apr_pool_cleanup_null,
+ ctx->cert_cache_pool);
+ }
+ if (!retrying_success && ctx->cert_pw_cache_pool) {
+ const char *c;
+
+ c = apr_pstrdup(ctx->cert_pw_cache_pool,
+ password);
+
+ apr_pool_userdata_setn(c, "serf:ssl:certpw",
+ apr_pool_cleanup_null,
+ ctx->cert_pw_cache_pool);
+ }
+ return 1;
+ }
+ }
+ }
+ PKCS12_free(p12);
+ return 0;
+ }
+ else {
+ printf("OpenSSL cert error: %d %d %d\n", ERR_GET_LIB(err),
+ ERR_GET_FUNC(err),
+ ERR_GET_REASON(err));
+ PKCS12_free(p12);
+ }
+ }
+ }
+
+ return 0;
+}
+
+
+void serf_ssl_client_cert_provider_set(
+ serf_ssl_context_t *context,
+ serf_ssl_need_client_cert_t callback,
+ void *data,
+ void *cache_pool)
+{
+ context->cert_callback = callback;
+ context->cert_userdata = data;
+ context->cert_cache_pool = cache_pool;
+ if (context->cert_cache_pool) {
+ apr_pool_userdata_get((void**)&context->cert_file_success,
+ "serf:ssl:cert", cache_pool);
+ }
+}
+
+
+void serf_ssl_client_cert_password_set(
+ serf_ssl_context_t *context,
+ serf_ssl_need_cert_password_t callback,
+ void *data,
+ void *cache_pool)
+{
+ context->cert_pw_callback = callback;
+ context->cert_pw_userdata = data;
+ context->cert_pw_cache_pool = cache_pool;
+ if (context->cert_pw_cache_pool) {
+ apr_pool_userdata_get((void**)&context->cert_pw_success,
+ "serf:ssl:certpw", cache_pool);
+ }
+}
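+
+/* Illustrative sketch (not part of the original source): registering the
+ * two client-certificate callbacks used by ssl_need_client_cert() above.
+ * The callback bodies, the conn_pool variable, the certificate path and the
+ * prompt_user() helper are all assumptions made for the example; passing a
+ * cache pool lets serf remember the last working path/password via pool
+ * userdata, as shown above.
+ *
+ *   static apr_status_t get_cert(void *data, const char **path)
+ *   {
+ *       *path = "/path/to/client-cert.p12";    (assumed location)
+ *       return APR_SUCCESS;
+ *   }
+ *
+ *   static apr_status_t get_cert_pw(void *data, const char *path,
+ *                                   const char **password)
+ *   {
+ *       *password = prompt_user(path);         (hypothetical helper)
+ *       return APR_SUCCESS;
+ *   }
+ *
+ *   serf_ssl_client_cert_provider_set(ssl_ctx, get_cert, NULL, conn_pool);
+ *   serf_ssl_client_cert_password_set(ssl_ctx, get_cert_pw, NULL, conn_pool);
+ */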
+
+
+void serf_ssl_server_cert_callback_set(
+ serf_ssl_context_t *context,
+ serf_ssl_need_server_cert_t callback,
+ void *data)
+{
+ context->server_cert_callback = callback;
+ context->server_cert_userdata = data;
+}
+
+void serf_ssl_server_cert_chain_callback_set(
+ serf_ssl_context_t *context,
+ serf_ssl_need_server_cert_t cert_callback,
+ serf_ssl_server_cert_chain_cb_t cert_chain_callback,
+ void *data)
+{
+ context->server_cert_callback = cert_callback;
+ context->server_cert_chain_callback = cert_chain_callback;
+ context->server_cert_userdata = data;
+}
+
+static serf_ssl_context_t *ssl_init_context(void)
+{
+ serf_ssl_context_t *ssl_ctx;
+ apr_pool_t *pool;
+ serf_bucket_alloc_t *allocator;
+
+ init_ssl_libraries();
+
+ apr_pool_create(&pool, NULL);
+ allocator = serf_bucket_allocator_create(pool, NULL, NULL);
+
+ ssl_ctx = serf_bucket_mem_alloc(allocator, sizeof(*ssl_ctx));
+
+ ssl_ctx->refcount = 0;
+ ssl_ctx->pool = pool;
+ ssl_ctx->allocator = allocator;
+
+ ssl_ctx->ctx = SSL_CTX_new(SSLv23_client_method());
+
+ SSL_CTX_set_client_cert_cb(ssl_ctx->ctx, ssl_need_client_cert);
+ ssl_ctx->cached_cert = 0;
+ ssl_ctx->cached_cert_pw = 0;
+ ssl_ctx->pending_err = APR_SUCCESS;
+ ssl_ctx->fatal_err = APR_SUCCESS;
+
+ ssl_ctx->cert_callback = NULL;
+ ssl_ctx->cert_pw_callback = NULL;
+ ssl_ctx->server_cert_callback = NULL;
+ ssl_ctx->server_cert_chain_callback = NULL;
+
+ SSL_CTX_set_verify(ssl_ctx->ctx, SSL_VERIFY_PEER,
+ validate_server_certificate);
+ SSL_CTX_set_options(ssl_ctx->ctx, SSL_OP_ALL);
+ /* Disable SSL compression by default. */
+ disable_compression(ssl_ctx);
+
+ ssl_ctx->ssl = SSL_new(ssl_ctx->ctx);
+ ssl_ctx->bio = BIO_new(&bio_bucket_method);
+ ssl_ctx->bio->ptr = ssl_ctx;
+
+ SSL_set_bio(ssl_ctx->ssl, ssl_ctx->bio, ssl_ctx->bio);
+
+ SSL_set_connect_state(ssl_ctx->ssl);
+
+ SSL_set_app_data(ssl_ctx->ssl, ssl_ctx);
+
+#if SSL_VERBOSE
+ SSL_CTX_set_info_callback(ssl_ctx->ctx, apps_ssl_info_callback);
+#endif
+
+ ssl_ctx->encrypt.stream = NULL;
+ ssl_ctx->encrypt.stream_next = NULL;
+ ssl_ctx->encrypt.pending = serf_bucket_aggregate_create(allocator);
+ ssl_ctx->encrypt.status = APR_SUCCESS;
+ serf_databuf_init(&ssl_ctx->encrypt.databuf);
+ ssl_ctx->encrypt.databuf.read = ssl_encrypt;
+ ssl_ctx->encrypt.databuf.read_baton = ssl_ctx;
+
+ ssl_ctx->decrypt.stream = NULL;
+ ssl_ctx->decrypt.pending = serf_bucket_aggregate_create(allocator);
+ ssl_ctx->decrypt.status = APR_SUCCESS;
+ serf_databuf_init(&ssl_ctx->decrypt.databuf);
+ ssl_ctx->decrypt.databuf.read = ssl_decrypt;
+ ssl_ctx->decrypt.databuf.read_baton = ssl_ctx;
+
+ return ssl_ctx;
+}
+
+static apr_status_t ssl_free_context(
+ serf_ssl_context_t *ssl_ctx)
+{
+ apr_pool_t *p;
+
+    /* If we never had the pending buckets, don't try to free them. */
+ if (ssl_ctx->decrypt.pending != NULL) {
+ serf_bucket_destroy(ssl_ctx->decrypt.pending);
+ }
+ if (ssl_ctx->encrypt.pending != NULL) {
+ serf_bucket_destroy(ssl_ctx->encrypt.pending);
+ }
+
+ /* SSL_free implicitly frees the underlying BIO. */
+ SSL_free(ssl_ctx->ssl);
+ SSL_CTX_free(ssl_ctx->ctx);
+
+ p = ssl_ctx->pool;
+
+ serf_bucket_mem_free(ssl_ctx->allocator, ssl_ctx);
+ apr_pool_destroy(p);
+
+ return APR_SUCCESS;
+}
+
+static serf_bucket_t * serf_bucket_ssl_create(
+ serf_ssl_context_t *ssl_ctx,
+ serf_bucket_alloc_t *allocator,
+ const serf_bucket_type_t *type)
+{
+ ssl_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ if (!ssl_ctx) {
+ ctx->ssl_ctx = ssl_init_context();
+ }
+ else {
+ ctx->ssl_ctx = ssl_ctx;
+ }
+ ctx->ssl_ctx->refcount++;
+
+ return serf_bucket_create(type, allocator, ctx);
+}
+
+apr_status_t serf_ssl_set_hostname(serf_ssl_context_t *context,
+ const char * hostname)
+{
+#ifdef SSL_set_tlsext_host_name
+ if (SSL_set_tlsext_host_name(context->ssl, hostname) != 1) {
+ ERR_clear_error();
+ }
+#endif
+ return APR_SUCCESS;
+}
+
+apr_status_t serf_ssl_use_default_certificates(serf_ssl_context_t *ssl_ctx)
+{
+ X509_STORE *store = SSL_CTX_get_cert_store(ssl_ctx->ctx);
+
+ int result = X509_STORE_set_default_paths(store);
+
+ return result ? APR_SUCCESS : SERF_ERROR_SSL_CERT_FAILED;
+}
+
+apr_status_t serf_ssl_load_cert_file(
+ serf_ssl_certificate_t **cert,
+ const char *file_path,
+ apr_pool_t *pool)
+{
+ FILE *fp = fopen(file_path, "r");
+
+ if (fp) {
+ X509 *ssl_cert = PEM_read_X509(fp, NULL, NULL, NULL);
+ fclose(fp);
+
+ if (ssl_cert) {
+ *cert = apr_palloc(pool, sizeof(serf_ssl_certificate_t));
+ (*cert)->ssl_cert = ssl_cert;
+
+ return APR_SUCCESS;
+ }
+ }
+
+ return SERF_ERROR_SSL_CERT_FAILED;
+}
+
+
+apr_status_t serf_ssl_trust_cert(
+ serf_ssl_context_t *ssl_ctx,
+ serf_ssl_certificate_t *cert)
+{
+ X509_STORE *store = SSL_CTX_get_cert_store(ssl_ctx->ctx);
+
+ int result = X509_STORE_add_cert(store, cert->ssl_cert);
+
+ return result ? APR_SUCCESS : SERF_ERROR_SSL_CERT_FAILED;
+}
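+
+/* Illustrative sketch (not part of the original source): trusting an extra
+ * CA certificate from disk in addition to the platform defaults, using the
+ * functions defined above.  The ca_path/pool variables are assumed to come
+ * from the application.
+ *
+ *   serf_ssl_certificate_t *ca_cert;
+ *
+ *   serf_ssl_use_default_certificates(ssl_ctx);
+ *   if (serf_ssl_load_cert_file(&ca_cert, ca_path, pool) == APR_SUCCESS)
+ *       serf_ssl_trust_cert(ssl_ctx, ca_cert);
+ */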
+
+
+serf_bucket_t *serf_bucket_ssl_decrypt_create(
+ serf_bucket_t *stream,
+ serf_ssl_context_t *ssl_ctx,
+ serf_bucket_alloc_t *allocator)
+{
+ serf_bucket_t *bkt;
+ ssl_context_t *ctx;
+
+ bkt = serf_bucket_ssl_create(ssl_ctx, allocator,
+ &serf_bucket_type_ssl_decrypt);
+
+ ctx = bkt->data;
+
+ ctx->databuf = &ctx->ssl_ctx->decrypt.databuf;
+ if (ctx->ssl_ctx->decrypt.stream != NULL) {
+ return NULL;
+ }
+ ctx->ssl_ctx->decrypt.stream = stream;
+ ctx->our_stream = &ctx->ssl_ctx->decrypt.stream;
+
+ return bkt;
+}
+
+
+serf_ssl_context_t *serf_bucket_ssl_decrypt_context_get(
+ serf_bucket_t *bucket)
+{
+ ssl_context_t *ctx = bucket->data;
+ return ctx->ssl_ctx;
+}
+
+
+serf_bucket_t *serf_bucket_ssl_encrypt_create(
+ serf_bucket_t *stream,
+ serf_ssl_context_t *ssl_ctx,
+ serf_bucket_alloc_t *allocator)
+{
+ serf_bucket_t *bkt;
+ ssl_context_t *ctx;
+
+ bkt = serf_bucket_ssl_create(ssl_ctx, allocator,
+ &serf_bucket_type_ssl_encrypt);
+
+ ctx = bkt->data;
+
+ ctx->databuf = &ctx->ssl_ctx->encrypt.databuf;
+ ctx->our_stream = &ctx->ssl_ctx->encrypt.stream;
+ if (ctx->ssl_ctx->encrypt.stream == NULL) {
+ serf_bucket_t *tmp = serf_bucket_aggregate_create(stream->allocator);
+ serf_bucket_aggregate_append(tmp, stream);
+ ctx->ssl_ctx->encrypt.stream = tmp;
+ }
+ else {
+ bucket_list_t *new_list;
+
+ new_list = serf_bucket_mem_alloc(ctx->ssl_ctx->allocator,
+ sizeof(*new_list));
+ new_list->bucket = stream;
+ new_list->next = NULL;
+ if (ctx->ssl_ctx->encrypt.stream_next == NULL) {
+ ctx->ssl_ctx->encrypt.stream_next = new_list;
+ }
+ else {
+ bucket_list_t *scan = ctx->ssl_ctx->encrypt.stream_next;
+
+ while (scan->next != NULL)
+ scan = scan->next;
+ scan->next = new_list;
+ }
+ }
+
+ return bkt;
+}
+
+
+serf_ssl_context_t *serf_bucket_ssl_encrypt_context_get(
+ serf_bucket_t *bucket)
+{
+ ssl_context_t *ctx = bucket->data;
+ return ctx->ssl_ctx;
+}
+
+/* Functions to read a serf_ssl_certificate structure. */
+
+/* Creates a hash_table with keys (E, CN, OU, O, L, ST and C). */
+static apr_hash_t *
+convert_X509_NAME_to_table(X509_NAME *org, apr_pool_t *pool)
+{
+ char buf[1024];
+ int ret;
+
+ apr_hash_t *tgt = apr_hash_make(pool);
+
+ ret = X509_NAME_get_text_by_NID(org,
+ NID_commonName,
+ buf, 1024);
+ if (ret != -1)
+ apr_hash_set(tgt, "CN", APR_HASH_KEY_STRING, apr_pstrdup(pool, buf));
+ ret = X509_NAME_get_text_by_NID(org,
+ NID_pkcs9_emailAddress,
+ buf, 1024);
+ if (ret != -1)
+ apr_hash_set(tgt, "E", APR_HASH_KEY_STRING, apr_pstrdup(pool, buf));
+ ret = X509_NAME_get_text_by_NID(org,
+ NID_organizationalUnitName,
+ buf, 1024);
+ if (ret != -1)
+ apr_hash_set(tgt, "OU", APR_HASH_KEY_STRING, apr_pstrdup(pool, buf));
+ ret = X509_NAME_get_text_by_NID(org,
+ NID_organizationName,
+ buf, 1024);
+ if (ret != -1)
+ apr_hash_set(tgt, "O", APR_HASH_KEY_STRING, apr_pstrdup(pool, buf));
+ ret = X509_NAME_get_text_by_NID(org,
+ NID_localityName,
+ buf, 1024);
+ if (ret != -1)
+ apr_hash_set(tgt, "L", APR_HASH_KEY_STRING, apr_pstrdup(pool, buf));
+ ret = X509_NAME_get_text_by_NID(org,
+ NID_stateOrProvinceName,
+ buf, 1024);
+ if (ret != -1)
+ apr_hash_set(tgt, "ST", APR_HASH_KEY_STRING, apr_pstrdup(pool, buf));
+ ret = X509_NAME_get_text_by_NID(org,
+ NID_countryName,
+ buf, 1024);
+ if (ret != -1)
+ apr_hash_set(tgt, "C", APR_HASH_KEY_STRING, apr_pstrdup(pool, buf));
+
+ return tgt;
+}
+
+
+int serf_ssl_cert_depth(const serf_ssl_certificate_t *cert)
+{
+ return cert->depth;
+}
+
+
+apr_hash_t *serf_ssl_cert_issuer(
+ const serf_ssl_certificate_t *cert,
+ apr_pool_t *pool)
+{
+ X509_NAME *issuer = X509_get_issuer_name(cert->ssl_cert);
+
+ if (!issuer)
+ return NULL;
+
+ return convert_X509_NAME_to_table(issuer, pool);
+}
+
+
+apr_hash_t *serf_ssl_cert_subject(
+ const serf_ssl_certificate_t *cert,
+ apr_pool_t *pool)
+{
+ X509_NAME *subject = X509_get_subject_name(cert->ssl_cert);
+
+ if (!subject)
+ return NULL;
+
+ return convert_X509_NAME_to_table(subject, pool);
+}
+
+
+apr_hash_t *serf_ssl_cert_certificate(
+ const serf_ssl_certificate_t *cert,
+ apr_pool_t *pool)
+{
+ apr_hash_t *tgt = apr_hash_make(pool);
+ unsigned int md_size, i;
+ unsigned char md[EVP_MAX_MD_SIZE];
+ BIO *bio;
+ STACK_OF(GENERAL_NAME) *names;
+
+ /* sha1 fingerprint */
+ if (X509_digest(cert->ssl_cert, EVP_sha1(), md, &md_size)) {
+ const char hex[] = "0123456789ABCDEF";
+ char fingerprint[EVP_MAX_MD_SIZE * 3];
+
+ for (i=0; i<md_size; i++) {
+ fingerprint[3*i] = hex[(md[i] & 0xf0) >> 4];
+ fingerprint[(3*i)+1] = hex[(md[i] & 0x0f)];
+ fingerprint[(3*i)+2] = ':';
+ }
+ if (md_size > 0)
+ fingerprint[(3*(md_size-1))+2] = '\0';
+ else
+ fingerprint[0] = '\0';
+
+ apr_hash_set(tgt, "sha1", APR_HASH_KEY_STRING,
+ apr_pstrdup(pool, fingerprint));
+ }
+
+ /* set expiry dates */
+ bio = BIO_new(BIO_s_mem());
+ if (bio) {
+ ASN1_TIME *notBefore, *notAfter;
+ char buf[256];
+
+ memset (buf, 0, sizeof (buf));
+ notBefore = X509_get_notBefore(cert->ssl_cert);
+ if (ASN1_TIME_print(bio, notBefore)) {
+ BIO_read(bio, buf, 255);
+ apr_hash_set(tgt, "notBefore", APR_HASH_KEY_STRING,
+ apr_pstrdup(pool, buf));
+ }
+ memset (buf, 0, sizeof (buf));
+ notAfter = X509_get_notAfter(cert->ssl_cert);
+ if (ASN1_TIME_print(bio, notAfter)) {
+ BIO_read(bio, buf, 255);
+ apr_hash_set(tgt, "notAfter", APR_HASH_KEY_STRING,
+ apr_pstrdup(pool, buf));
+ }
+ }
+ BIO_free(bio);
+
+ /* Get subjectAltNames */
+ names = X509_get_ext_d2i(cert->ssl_cert, NID_subject_alt_name, NULL, NULL);
+ if (names) {
+ int names_count = sk_GENERAL_NAME_num(names);
+
+ apr_array_header_t *san_arr = apr_array_make(pool, names_count,
+ sizeof(char*));
+ apr_hash_set(tgt, "subjectAltName", APR_HASH_KEY_STRING, san_arr);
+ for (i = 0; i < names_count; i++) {
+ char *p = NULL;
+ GENERAL_NAME *nm = sk_GENERAL_NAME_value(names, i);
+
+ switch (nm->type) {
+ case GEN_DNS:
+ p = apr_pstrmemdup(pool, (const char *)nm->d.ia5->data,
+ nm->d.ia5->length);
+ break;
+ default:
+ /* Don't know what to do - skip. */
+ break;
+ }
+ if (p) {
+ APR_ARRAY_PUSH(san_arr, char*) = p;
+ }
+ }
+ sk_GENERAL_NAME_pop_free(names, GENERAL_NAME_free);
+ }
+
+ return tgt;
+}
+
+
+const char *serf_ssl_cert_export(
+ const serf_ssl_certificate_t *cert,
+ apr_pool_t *pool)
+{
+ char *binary_cert;
+ char *encoded_cert;
+ int len;
+ unsigned char *unused;
+
+ /* find the length of the DER encoding. */
+ len = i2d_X509(cert->ssl_cert, NULL);
+ if (len < 0) {
+ return NULL;
+ }
+
+ binary_cert = apr_palloc(pool, len);
+ unused = (unsigned char *)binary_cert;
+ len = i2d_X509(cert->ssl_cert, &unused); /* unused is incremented */
+ if (len < 0) {
+ return NULL;
+ }
+
+ encoded_cert = apr_palloc(pool, apr_base64_encode_len(len));
+ apr_base64_encode(encoded_cert, binary_cert, len);
+
+ return encoded_cert;
+}
+
+/* Disables compression for all SSL sessions. */
+static void disable_compression(serf_ssl_context_t *ssl_ctx)
+{
+#ifdef SSL_OP_NO_COMPRESSION
+ SSL_CTX_set_options(ssl_ctx->ctx, SSL_OP_NO_COMPRESSION);
+#endif
+}
+
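+/* Toggle compression on the SSL session: clear SSL_OP_NO_COMPRESSION when
+ * ENABLED is non-zero, set it when ENABLED is zero.  Returns APR_EGENERAL
+ * when this build of OpenSSL does not define SSL_OP_NO_COMPRESSION.
+ */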
+apr_status_t serf_ssl_use_compression(serf_ssl_context_t *ssl_ctx, int enabled)
+{
+ if (enabled) {
+#ifdef SSL_OP_NO_COMPRESSION
+ SSL_clear_options(ssl_ctx->ssl, SSL_OP_NO_COMPRESSION);
+ return APR_SUCCESS;
+#endif
+ } else {
+#ifdef SSL_OP_NO_COMPRESSION
+ SSL_set_options(ssl_ctx->ssl, SSL_OP_NO_COMPRESSION);
+ return APR_SUCCESS;
+#endif
+ }
+
+ return APR_EGENERAL;
+}
+
+static void serf_ssl_destroy_and_data(serf_bucket_t *bucket)
+{
+ ssl_context_t *ctx = bucket->data;
+
+ if (!--ctx->ssl_ctx->refcount) {
+ ssl_free_context(ctx->ssl_ctx);
+ }
+
+ serf_default_destroy_and_data(bucket);
+}
+
+static void serf_ssl_decrypt_destroy_and_data(serf_bucket_t *bucket)
+{
+ ssl_context_t *ctx = bucket->data;
+
+ serf_bucket_destroy(*ctx->our_stream);
+
+ serf_ssl_destroy_and_data(bucket);
+}
+
+static void serf_ssl_encrypt_destroy_and_data(serf_bucket_t *bucket)
+{
+ ssl_context_t *ctx = bucket->data;
+ serf_ssl_context_t *ssl_ctx = ctx->ssl_ctx;
+
+ if (ssl_ctx->encrypt.stream == *ctx->our_stream) {
+ serf_bucket_destroy(*ctx->our_stream);
+ serf_bucket_destroy(ssl_ctx->encrypt.pending);
+
+ /* Reset our encrypted status and databuf. */
+ ssl_ctx->encrypt.status = APR_SUCCESS;
+ ssl_ctx->encrypt.databuf.status = APR_SUCCESS;
+
+ /* Advance to the next stream - if we have one. */
+ if (ssl_ctx->encrypt.stream_next == NULL) {
+ ssl_ctx->encrypt.stream = NULL;
+ ssl_ctx->encrypt.pending = NULL;
+ }
+ else {
+ bucket_list_t *cur;
+
+ cur = ssl_ctx->encrypt.stream_next;
+ ssl_ctx->encrypt.stream = cur->bucket;
+ ssl_ctx->encrypt.pending =
+ serf_bucket_aggregate_create(cur->bucket->allocator);
+ ssl_ctx->encrypt.stream_next = cur->next;
+ serf_bucket_mem_free(ssl_ctx->allocator, cur);
+ }
+ }
+ else {
+ /* Ah, darn. We haven't sent this one along yet. */
+ return;
+ }
+ serf_ssl_destroy_and_data(bucket);
+}
+
+static apr_status_t serf_ssl_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ ssl_context_t *ctx = bucket->data;
+
+ return serf_databuf_read(ctx->databuf, requested, data, len);
+}
+
+static apr_status_t serf_ssl_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data,
+ apr_size_t *len)
+{
+ ssl_context_t *ctx = bucket->data;
+
+ return serf_databuf_readline(ctx->databuf, acceptable, found, data, len);
+}
+
+static apr_status_t serf_ssl_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ ssl_context_t *ctx = bucket->data;
+
+ return serf_databuf_peek(ctx->databuf, data, len);
+}
+
+
+const serf_bucket_type_t serf_bucket_type_ssl_encrypt = {
+ "SSLENCRYPT",
+ serf_ssl_read,
+ serf_ssl_readline,
+ serf_default_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_ssl_peek,
+ serf_ssl_encrypt_destroy_and_data,
+};
+
+const serf_bucket_type_t serf_bucket_type_ssl_decrypt = {
+ "SSLDECRYPT",
+ serf_ssl_read,
+ serf_ssl_readline,
+ serf_default_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_ssl_peek,
+ serf_ssl_decrypt_destroy_and_data,
+};
diff --git a/contrib/serf/build/check.py b/contrib/serf/build/check.py
new file mode 100755
index 0000000..7dfcda9
--- /dev/null
+++ b/contrib/serf/build/check.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+#
+# check.py : Run all the test cases.
+#
+# ====================================================================
+# Copyright 2013 Justin Erenkrantz and Greg Stein
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ====================================================================
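+#
+# Typical usage (the test directory defaults to 'test' and may be given
+# as the first command-line argument):
+#
+#   python build/check.py [testdir]
+#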
+
+import sys
+import glob
+import subprocess
+import os
+
+
+if __name__ == '__main__':
+ # get the test directory from the commandline, if set.
+ if len(sys.argv) > 1:
+ testdir = sys.argv[1]
+ else:
+ testdir = 'test'
+
+ # define test executable paths
+ if sys.platform == 'win32':
+ SERF_RESPONSE_EXE = 'serf_response.exe'
+ TEST_ALL_EXE = 'test_all.exe'
+ else:
+ SERF_RESPONSE_EXE = 'serf_response'
+ TEST_ALL_EXE = 'test_all'
+ SERF_RESPONSE_EXE = os.path.join(testdir, SERF_RESPONSE_EXE)
+ TEST_ALL_EXE = os.path.join(testdir, TEST_ALL_EXE)
+
+ # Find test responses and run them one by one
+ for case in glob.glob(testdir + "/testcases/*.response"):
+ print "== Testing %s ==" % (case)
+ try:
+ subprocess.check_call([SERF_RESPONSE_EXE, case])
+ except subprocess.CalledProcessError:
+ print "ERROR: test case %s failed" % (case)
+
+ print "== Running the unit tests =="
+ try:
+ subprocess.check_call(TEST_ALL_EXE)
+ except subprocess.CalledProcessError:
+ print "ERROR: test(s) failed in test_all"
diff --git a/contrib/serf/build/gen_def.py b/contrib/serf/build/gen_def.py
new file mode 100755
index 0000000..d10d8c8
--- /dev/null
+++ b/contrib/serf/build/gen_def.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python
+#
+# gen_def.py : Generate the .DEF file for Windows builds
+#
+# ====================================================================
+# Copyright 2002-2010 Justin Erenkrantz and Greg Stein
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ====================================================================
+#
+#
+# Typically, this script is used like:
+#
+# C:\PATH> python build/gen_def.py serf.h serf_bucket_types.h serf_bucket_util.h > build/serf.def
+#
+
+import re
+import sys
+
+# This regex parses function declarations that look like:
+#
+# return_type serf_func1(...
+# return_type *serf_func2(...
+#
+# Where return_type is a combination of words and "*" each separated by a
+# SINGLE space. If the function returns a pointer type (like serf_func2),
+# then a space may exist between the "*" and the function name. Thus,
+# a more complicated example might be:
+# const type * const * serf_func3(...
+#
+_funcs = re.compile(r'^(?:(?:\w+|\*) )+\*?(serf_[a-z][a-z_0-9]*)\(',
+ re.MULTILINE)
+
+# This regex parses the bucket type definitions which look like:
+#
+# extern const serf_bucket_type_t serf_bucket_type_FOO;
+#
+_types = re.compile(r'^extern const serf_bucket_type_t (serf_[a-z_]*);',
+ re.MULTILINE)
+
+
+def extract_exports(fname):
+ content = open(fname).read()
+ exports = [ ]
+ for name in _funcs.findall(content):
+ exports.append(name)
+ for name in _types.findall(content):
+ exports.append(name)
+ return exports
+
+# Blacklist the serf v2 API for now
+blacklist = ['serf_connection_switch_protocol',
+ 'serf_http_protocol_create',
+ 'serf_http_request_create',
+ 'serf_https_protocol_create']
+
+if __name__ == '__main__':
+ # run the extraction over each file mentioned
+ import sys
+ print("EXPORTS")
+
+ for fname in sys.argv[1:]:
+ funclist = extract_exports(fname)
+ funclist = set(funclist) - set(blacklist)
+ for func in funclist:
+ print(func)
diff --git a/contrib/serf/build/serf.pc.in b/contrib/serf/build/serf.pc.in
new file mode 100644
index 0000000..8e49c8a
--- /dev/null
+++ b/contrib/serf/build/serf.pc.in
@@ -0,0 +1,13 @@
+SERF_MAJOR_VERSION=@MAJOR@
+prefix=@PREFIX@
+exec_prefix=${prefix}
+libdir=${exec_prefix}/lib
+includedir=${prefix}/include/@INCLUDE_SUBDIR@
+
+Name: serf
+Description: HTTP client library
+Version: @VERSION@
+Requires.private: libssl libcrypto
+Libs: -L${libdir} -lserf-${SERF_MAJOR_VERSION}
+Libs.private: @LIBS@
+Cflags: -I${includedir}
diff --git a/contrib/serf/context.c b/contrib/serf/context.c
new file mode 100644
index 0000000..c219264
--- /dev/null
+++ b/contrib/serf/context.c
@@ -0,0 +1,379 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+#include <apr_poll.h>
+#include <apr_version.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+#include "serf_private.h"
+
+/**
+ * Callback function (implements serf_progress_t). Takes a number of bytes
+ * read @a read and bytes written @a written, adds those to the total for this
+ * context and notifies an interested party (if any).
+ */
+void serf__context_progress_delta(
+ void *progress_baton,
+ apr_off_t read,
+ apr_off_t written)
+{
+ serf_context_t *ctx = progress_baton;
+
+ ctx->progress_read += read;
+ ctx->progress_written += written;
+
+ if (ctx->progress_func)
+ ctx->progress_func(ctx->progress_baton,
+ ctx->progress_read,
+ ctx->progress_written);
+}
+
+
+/* Check for dirty connections and update their pollsets accordingly. */
+static apr_status_t check_dirty_pollsets(serf_context_t *ctx)
+{
+ int i;
+
+ /* if we're not dirty, return now. */
+ if (!ctx->dirty_pollset) {
+ return APR_SUCCESS;
+ }
+
+ for (i = ctx->conns->nelts; i--; ) {
+ serf_connection_t *conn = GET_CONN(ctx, i);
+ apr_status_t status;
+
+ /* if this connection isn't dirty, skip it. */
+ if (!conn->dirty_conn) {
+ continue;
+ }
+
+ /* reset this connection's flag before we update. */
+ conn->dirty_conn = 0;
+
+ if ((status = serf__conn_update_pollset(conn)) != APR_SUCCESS)
+ return status;
+ }
+
+ /* reset our context flag now */
+ ctx->dirty_pollset = 0;
+
+ return APR_SUCCESS;
+}
+
+
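+/* Default serf_socket_add_t / serf_socket_remove_t implementations, used
+ * when the application does not supply its own pollset callbacks: stash the
+ * serf baton on the pollfd and add it to (or remove it from) our pollset.
+ */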
+static apr_status_t pollset_add(void *user_baton,
+ apr_pollfd_t *pfd,
+ void *serf_baton)
+{
+ serf_pollset_t *s = (serf_pollset_t*)user_baton;
+ pfd->client_data = serf_baton;
+ return apr_pollset_add(s->pollset, pfd);
+}
+
+static apr_status_t pollset_rm(void *user_baton,
+ apr_pollfd_t *pfd,
+ void *serf_baton)
+{
+ serf_pollset_t *s = (serf_pollset_t*)user_baton;
+ pfd->client_data = serf_baton;
+ return apr_pollset_remove(s->pollset, pfd);
+}
+
+
+void serf_config_proxy(serf_context_t *ctx,
+ apr_sockaddr_t *address)
+{
+ ctx->proxy_address = address;
+}
+
+
+void serf_config_credentials_callback(serf_context_t *ctx,
+ serf_credentials_callback_t cred_cb)
+{
+ ctx->cred_cb = cred_cb;
+}
+
+
+void serf_config_authn_types(serf_context_t *ctx,
+ int authn_types)
+{
+ ctx->authn_types = authn_types;
+}
+
+
+serf_context_t *serf_context_create_ex(
+ void *user_baton,
+ serf_socket_add_t addf,
+ serf_socket_remove_t rmf,
+ apr_pool_t *pool)
+{
+ serf_context_t *ctx = apr_pcalloc(pool, sizeof(*ctx));
+
+ ctx->pool = pool;
+
+ if (user_baton != NULL) {
+ ctx->pollset_baton = user_baton;
+ ctx->pollset_add = addf;
+ ctx->pollset_rm = rmf;
+ }
+ else {
+ /* build the pollset with a (default) number of connections */
+ serf_pollset_t *ps = apr_pcalloc(pool, sizeof(*ps));
+
+ /* ### TODO: As of APR 1.4.x apr_pollset_create_ex can return a status
+ ### other than APR_SUCCESS, so we should handle it.
+ ### Probably move creation of the pollset to later when we have
+ ### the possibility of returning status to the caller.
+ */
+#ifdef BROKEN_WSAPOLL
+ /* APR 1.4.x switched to using WSAPoll() on Win32, but it does not
+ * properly handle errors on non-blocking sockets (such as
+ * connecting to a server where no listener is active).
+ *
+ * So, sadly, we must force using select() on Win32.
+ *
+ * http://mail-archives.apache.org/mod_mbox/apr-dev/201105.mbox/%3CBANLkTin3rBCecCBRvzUA5B-14u-NWxR_Kg@mail.gmail.com%3E
+ */
+ (void) apr_pollset_create_ex(&ps->pollset, MAX_CONN, pool, 0,
+ APR_POLLSET_SELECT);
+#else
+ (void) apr_pollset_create(&ps->pollset, MAX_CONN, pool, 0);
+#endif
+ ctx->pollset_baton = ps;
+ ctx->pollset_add = pollset_add;
+ ctx->pollset_rm = pollset_rm;
+ }
+
+ /* default to a single connection since that is the typical case */
+ ctx->conns = apr_array_make(pool, 1, sizeof(serf_connection_t *));
+
+ /* Initialize progress status */
+ ctx->progress_read = 0;
+ ctx->progress_written = 0;
+
+ ctx->authn_types = SERF_AUTHN_ALL;
+ ctx->server_authn_info = apr_hash_make(pool);
+
+ return ctx;
+}
+
+
+serf_context_t *serf_context_create(apr_pool_t *pool)
+{
+ return serf_context_create_ex(NULL, NULL, NULL, pool);
+}
+
+apr_status_t serf_context_prerun(serf_context_t *ctx)
+{
+ apr_status_t status = APR_SUCCESS;
+ if ((status = serf__open_connections(ctx)) != APR_SUCCESS)
+ return status;
+
+ if ((status = check_dirty_pollsets(ctx)) != APR_SUCCESS)
+ return status;
+ return status;
+}
+
+
+apr_status_t serf_event_trigger(
+ serf_context_t *s,
+ void *serf_baton,
+ const apr_pollfd_t *desc)
+{
+ apr_pollfd_t tdesc = { 0 };
+ apr_status_t status = APR_SUCCESS;
+ serf_io_baton_t *io = serf_baton;
+
+ if (io->type == SERF_IO_CONN) {
+ serf_connection_t *conn = io->u.conn;
+ serf_context_t *ctx = conn->ctx;
+
+ /* If this connection has already failed, return the error again, and try
+ * again to remove it from the pollset.
+ */
+ if (conn->status) {
+ tdesc.desc_type = APR_POLL_SOCKET;
+ tdesc.desc.s = conn->skt;
+ tdesc.reqevents = conn->reqevents;
+ ctx->pollset_rm(ctx->pollset_baton,
+ &tdesc, conn);
+ return conn->status;
+ }
+ /* apr_pollset_poll() can return a conn multiple times... */
+ if ((conn->seen_in_pollset & desc->rtnevents) != 0 ||
+ (conn->seen_in_pollset & APR_POLLHUP) != 0) {
+ return APR_SUCCESS;
+ }
+
+ conn->seen_in_pollset |= desc->rtnevents;
+
+ if ((conn->status = serf__process_connection(conn,
+ desc->rtnevents)) != APR_SUCCESS) {
+
+ /* it's possible that the connection was already reset and thus the
+ socket cleaned up. */
+ if (conn->skt) {
+ tdesc.desc_type = APR_POLL_SOCKET;
+ tdesc.desc.s = conn->skt;
+ tdesc.reqevents = conn->reqevents;
+ ctx->pollset_rm(ctx->pollset_baton,
+ &tdesc, conn);
+ }
+ return conn->status;
+ }
+ }
+ else if (io->type == SERF_IO_LISTENER) {
+ serf_listener_t *l = io->u.listener;
+
+ status = serf__process_listener(l);
+
+ if (status) {
+ return status;
+ }
+ }
+ else if (io->type == SERF_IO_CLIENT) {
+ serf_incoming_t *c = io->u.client;
+
+ status = serf__process_client(c, desc->rtnevents);
+
+ if (status) {
+ return status;
+ }
+ }
+ return status;
+}
+
+
+apr_status_t serf_context_run(
+ serf_context_t *ctx,
+ apr_short_interval_time_t duration,
+ apr_pool_t *pool)
+{
+ apr_status_t status;
+ apr_int32_t num;
+ const apr_pollfd_t *desc;
+ serf_pollset_t *ps = (serf_pollset_t*)ctx->pollset_baton;
+
+ if ((status = serf_context_prerun(ctx)) != APR_SUCCESS) {
+ return status;
+ }
+
+ if ((status = apr_pollset_poll(ps->pollset, duration, &num,
+ &desc)) != APR_SUCCESS) {
+ /* EINTR indicates a handled signal happened during the poll call;
+ ignore it, the application can safely retry. */
+ if (APR_STATUS_IS_EINTR(status))
+ return APR_SUCCESS;
+
+ /* ### do we still need to dispatch stuff here?
+ ### look at the potential return codes. map to our defined
+ ### return values? ...
+ */
+ return status;
+ }
+
+ while (num--) {
+ serf_connection_t *conn = desc->client_data;
+
+ status = serf_event_trigger(ctx, conn, desc);
+ if (status) {
+ return status;
+ }
+
+ desc++;
+ }
+
+ return APR_SUCCESS;
+}
+
+
+void serf_context_set_progress_cb(
+ serf_context_t *ctx,
+ const serf_progress_t progress_func,
+ void *progress_baton)
+{
+ ctx->progress_func = progress_func;
+ ctx->progress_baton = progress_baton;
+}
+
+
+serf_bucket_t *serf_context_bucket_socket_create(
+ serf_context_t *ctx,
+ apr_socket_t *skt,
+ serf_bucket_alloc_t *allocator)
+{
+ serf_bucket_t *bucket = serf_bucket_socket_create(skt, allocator);
+
+ /* Use serf's default bytes read/written callback */
+ serf_bucket_socket_set_read_progress_cb(bucket,
+ serf__context_progress_delta,
+ ctx);
+
+ return bucket;
+}
+
+
+/* ### this really ought to go somewhere else, but... meh. */
+void serf_lib_version(int *major, int *minor, int *patch)
+{
+ *major = SERF_MAJOR_VERSION;
+ *minor = SERF_MINOR_VERSION;
+ *patch = SERF_PATCH_VERSION;
+}
+
+
+const char *serf_error_string(apr_status_t errcode)
+{
+ switch (errcode)
+ {
+ case SERF_ERROR_CLOSING:
+ return "The connection is closing";
+ case SERF_ERROR_REQUEST_LOST:
+ return "A request has been lost";
+ case SERF_ERROR_WAIT_CONN:
+ return "The connection is blocked, pending further action";
+ case SERF_ERROR_DECOMPRESSION_FAILED:
+ return "An error occurred during decompression";
+ case SERF_ERROR_BAD_HTTP_RESPONSE:
+ return "The server sent an improper HTTP response";
+ case SERF_ERROR_TRUNCATED_HTTP_RESPONSE:
+ return "The server sent a truncated HTTP response body.";
+ case SERF_ERROR_ABORTED_CONNECTION:
+ return "The server unexpectedly closed the connection.";
+ case SERF_ERROR_SSL_COMM_FAILED:
+ return "An error occurred during SSL communication";
+ case SERF_ERROR_SSL_CERT_FAILED:
+ return "An SSL certificate related error occurred ";
+ case SERF_ERROR_AUTHN_FAILED:
+ return "An error occurred during authentication";
+ case SERF_ERROR_AUTHN_NOT_SUPPORTED:
+ return "The requested authentication type(s) are not supported";
+ case SERF_ERROR_AUTHN_MISSING_ATTRIBUTE:
+ return "An authentication attribute is missing";
+ case SERF_ERROR_AUTHN_INITALIZATION_FAILED:
+ return "Initialization of an authentication type failed";
+ case SERF_ERROR_SSLTUNNEL_SETUP_FAILED:
+ return "The proxy server returned an error while setting up the "
+ "SSL tunnel.";
+ default:
+ return NULL;
+ }
+
+ /* NOTREACHED */
+}
diff --git a/contrib/serf/design-guide.txt b/contrib/serf/design-guide.txt
new file mode 100644
index 0000000..9e931d1
--- /dev/null
+++ b/contrib/serf/design-guide.txt
@@ -0,0 +1,152 @@
+APACHE COMMONS: serf -*-indented-text-*-
+
+
+TOPICS
+
+ 1. Introduction
+ 2. Thread Safety
+ 3. Pool Usage
+ 4. Bucket Read Functions
+ 5. Versioning
+ 6. Bucket lifetimes
+
+
+-----------------------------------------------------------------------------
+
+1. INTRODUCTION
+
+This document details various design choices for the serf library. It
+is intended to be a guide for serf developers. Of course, these design
+principles, choices made, etc are a good source of information for
+users of the serf library, too.
+
+
+-----------------------------------------------------------------------------
+
+2. THREAD SAFETY
+
+The serf library should contain no mutable globals, making it safe
+to use in a multi-threaded environment.
+
+Each "object" within the system does not need to be used from multiple
+threads at a time. Thus, they require no internal mutexes, and can
+disable mutexes within APR objects where applicable (e.g. pools that
+are created).
+
+The objects should not have any thread affinity (i.e. don't use
+thread-local storage). This enables an application to use external
+mutexes to guard entry to the serf objects, which then allows the
+objects to be used from multiple threads.
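+
+As an illustration only (this snippet is not part of serf), such an
+application could serialize access to a shared context with an ordinary
+APR mutex around the context loop:
+
+    /* 'mutex', 'ctx', 'pool' and 'status' are assumed to exist in the
+       application. */
+    apr_thread_mutex_lock(mutex);
+    status = serf_context_run(ctx, SERF_DURATION_FOREVER, pool);
+    apr_thread_mutex_unlock(mutex);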
+
+
+-----------------------------------------------------------------------------
+
+3. POOL USAGE
+
+For general information on the proper use of pools, please see:
+
+ http://cvs.apache.org/viewcvs/*checkout*/apr/docs/pool-design.html
+
+Within serf itself, the buckets introduce a significant issue related
+to pools. Since it is very possible to end up creating *many* buckets
+within a transaction, and that creation could be proportional to an
+incoming or outgoing data stream, a lot of care must be taken to avoid
+tying bucket allocations to pools. If a bucket allocated any internal
+memory against a pool, and if that bucket is created an unbounded
+number of times, then the pool memory could be exhausted.
+
+Thus, buckets are allocated using a custom allocator which allows the
+memory to be freed when that bucket is no longer needed. This
+contrasts with pools where the "free" operation occurs over a large
+set of objects, which is problematic if some are still in use.
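+
+As a small illustration (not taken from the serf sources), a bucket is
+created from a serf allocator and destroyed explicitly as soon as it is
+no longer needed, instead of waiting for a pool cleanup:
+
+    serf_bucket_alloc_t *alloc = serf_bucket_allocator_create(pool,
+                                                              NULL, NULL);
+    serf_bucket_t *bkt = SERF_BUCKET_SIMPLE_STRING("some data", alloc);
+
+    /* ... read from bkt ... */
+
+    serf_bucket_destroy(bkt);   /* memory returns to the allocator now */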
+
+### need more explanation of strategy/solution ...
+
+
+-----------------------------------------------------------------------------
+
+4. BUCKET READ FUNCTIONS
+
+The bucket reading and peek functions must not block. Each read
+function should return (up to) the specified amount of data. If
+SERF_READ_ALL_AVAIL is passed, then the function should provide
+whatever is immediately available, without blocking.
+
+The peek function does not take a requested length because it is
+non-destructive. It is not possible to "read past" any barrier with a
+peek function. Thus, peek should operate like SERF_READ_ALL_AVAIL.
+
+The return values from the read functions should follow this general
+pattern:
+
+  APR_SUCCESS    Some data was returned, and the caller can
+                 immediately call the read function again to read
+                 more data.
+
+                 NOTE: when bucket behavior tracking is enabled,
+                 then you must read more data from this bucket
+                 before returning to the serf context loop. If a
+                 bucket is not completely drained first, then it is
+                 possible to deadlock (the server might not read
+                 anything until you read everything it has already
+                 given to you).
+
+  APR_EAGAIN     Some data was returned, but no more is available
+                 for now. The caller must "wait for a bit" or wait
+                 for some event before attempting to read again
+                 (basically, this simply means re-run the serf
+                 context loop). Though it shouldn't be done, reading
+                 again will, in all likelihood, return zero length
+                 data and APR_EAGAIN again.
+
+                 NOTE: when bucket behavior tracking is enabled,
+                 then it is illegal to immediately read a bucket
+                 again after it has returned APR_EAGAIN. You must
+                 run the serf context loop again to (potentially)
+                 fetch more data for the bucket.
+
+  APR_EOF        Some data was returned, and this bucket has no more
+                 data available and should not be read again. If you
+                 happen to read it again, then it will return zero
+                 length data and APR_EOF.
+
+                 NOTE: when bucket behavior tracking is enabled,
+                 then it is illegal to read this bucket ever again.
+
+  other          An error has occurred. No data was returned. The
+                 returned length is undefined.
+
+In the above paragraphs, when it says "some data was returned", note
+that this could be data of length zero.
+
+If a length of zero is returned, then the caller should not attempt to
+dereference the data pointer. It may be invalid. Note that there is no
+reason to dereference that pointer, since it doesn't point to any
+valid data.
+
+Any data returned by the bucket should live as long as the bucket, or
+until the next read or peek occurs.
+
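+A caller-side read loop that follows this pattern might look roughly
+like the sketch below (illustration only; 'bkt' is any bucket and
+'process' is a hypothetical consumer of the data):
+
+    while (1) {
+        const char *data;
+        apr_size_t len;
+        apr_status_t status = serf_bucket_read(bkt, SERF_READ_ALL_AVAIL,
+                                               &data, &len);
+
+        if (SERF_BUCKET_READ_ERROR(status))
+            return status;          /* a real error; no data was returned */
+        if (len)
+            process(data, len);     /* valid until the next read or peek */
+        if (APR_STATUS_IS_EOF(status))
+            break;                  /* drained; do not read again */
+        if (APR_STATUS_IS_EAGAIN(status))
+            return status;          /* re-run the serf context loop first */
+        /* APR_SUCCESS: more data is immediately available; loop again. */
+    }
+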
+The read_bucket function falls into a very different pattern. See its
+doc string for more information.
+
+
+-----------------------------------------------------------------------------
+
+5. VERSIONING
+
+The serf project uses the APR versioning guidelines described here:
+
+ http://apr.apache.org/versioning.html
+
+
+-----------------------------------------------------------------------------
+
+6. BUCKET LIFETIMES
+
+### flesh out. basically: if you hold a bucket pointer, then you own
+### it. passing a bucket into another transfers ownership. use barrier
+### buckets to limit destruction of a tree of buckets.
+
+
+-----------------------------------------------------------------------------
diff --git a/contrib/serf/incoming.c b/contrib/serf/incoming.c
new file mode 100644
index 0000000..2757428
--- /dev/null
+++ b/contrib/serf/incoming.c
@@ -0,0 +1,176 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+#include <apr_poll.h>
+#include <apr_version.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+#include "serf_private.h"
+
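+/* Server-side (incoming) request processing is not implemented yet;
+ * read_from_client() and write_to_client() simply report APR_ENOTIMPL.
+ */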
+static apr_status_t read_from_client(serf_incoming_t *client)
+{
+ return APR_ENOTIMPL;
+}
+
+static apr_status_t write_to_client(serf_incoming_t *client)
+{
+ return APR_ENOTIMPL;
+}
+
+apr_status_t serf__process_client(serf_incoming_t *client, apr_int16_t events)
+{
+ apr_status_t rv;
+ if ((events & APR_POLLIN) != 0) {
+ rv = read_from_client(client);
+ if (rv) {
+ return rv;
+ }
+ }
+
+ if ((events & APR_POLLHUP) != 0) {
+ return APR_ECONNRESET;
+ }
+
+ if ((events & APR_POLLERR) != 0) {
+ return APR_EGENERAL;
+ }
+
+ if ((events & APR_POLLOUT) != 0) {
+ rv = write_to_client(client);
+ if (rv) {
+ return rv;
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
+apr_status_t serf__process_listener(serf_listener_t *l)
+{
+ apr_status_t rv;
+ apr_socket_t *in;
+ apr_pool_t *p;
+ /* THIS IS NOT OPTIMAL */
+ apr_pool_create(&p, l->pool);
+
+ rv = apr_socket_accept(&in, l->skt, p);
+
+ if (rv) {
+ apr_pool_destroy(p);
+ return rv;
+ }
+
+ rv = l->accept_func(l->ctx, l, l->accept_baton, in, p);
+
+ if (rv) {
+ apr_pool_destroy(p);
+ return rv;
+ }
+
+ return rv;
+}
+
+
+apr_status_t serf_incoming_create(
+ serf_incoming_t **client,
+ serf_context_t *ctx,
+ apr_socket_t *insock,
+ void *request_baton,
+ serf_incoming_request_cb_t request,
+ apr_pool_t *pool)
+{
+ apr_status_t rv;
+ serf_incoming_t *ic = apr_palloc(pool, sizeof(*ic));
+
+ ic->ctx = ctx;
+ ic->baton.type = SERF_IO_CLIENT;
+ ic->baton.u.client = ic;
+ ic->request_baton = request_baton;
+ ic->request = request;
+ ic->skt = insock;
+ ic->desc.desc_type = APR_POLL_SOCKET;
+ ic->desc.desc.s = ic->skt;
+ ic->desc.reqevents = APR_POLLIN;
+
+ rv = ctx->pollset_add(ctx->pollset_baton,
+ &ic->desc, &ic->baton);
+ *client = ic;
+
+ return rv;
+}
+
+
+apr_status_t serf_listener_create(
+ serf_listener_t **listener,
+ serf_context_t *ctx,
+ const char *host,
+ apr_uint16_t port,
+ void *accept_baton,
+ serf_accept_client_t accept,
+ apr_pool_t *pool)
+{
+ apr_sockaddr_t *sa;
+ apr_status_t rv;
+ serf_listener_t *l = apr_palloc(pool, sizeof(*l));
+
+ l->ctx = ctx;
+ l->baton.type = SERF_IO_LISTENER;
+ l->baton.u.listener = l;
+ l->accept_func = accept;
+ l->accept_baton = accept_baton;
+
+ apr_pool_create(&l->pool, pool);
+
+ rv = apr_sockaddr_info_get(&sa, host, APR_UNSPEC, port, 0, l->pool);
+ if (rv)
+ return rv;
+
+ rv = apr_socket_create(&l->skt, sa->family,
+ SOCK_STREAM,
+#if APR_MAJOR_VERSION > 0
+ APR_PROTO_TCP,
+#endif
+ l->pool);
+ if (rv)
+ return rv;
+
+ rv = apr_socket_opt_set(l->skt, APR_SO_REUSEADDR, 1);
+ if (rv)
+ return rv;
+
+ rv = apr_socket_bind(l->skt, sa);
+ if (rv)
+ return rv;
+
+ rv = apr_socket_listen(l->skt, 5);
+ if (rv)
+ return rv;
+
+ l->desc.desc_type = APR_POLL_SOCKET;
+ l->desc.desc.s = l->skt;
+ l->desc.reqevents = APR_POLLIN;
+
+ rv = ctx->pollset_add(ctx->pollset_baton,
+ &l->desc, &l->baton);
+ if (rv)
+ return rv;
+
+ *listener = l;
+
+ return APR_SUCCESS;
+}
diff --git a/contrib/serf/outgoing.c b/contrib/serf/outgoing.c
new file mode 100644
index 0000000..a12746c
--- /dev/null
+++ b/contrib/serf/outgoing.c
@@ -0,0 +1,1683 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+#include <apr_poll.h>
+#include <apr_version.h>
+#include <apr_portable.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+#include "serf_private.h"
+
+/* cleanup for sockets */
+static apr_status_t clean_skt(void *data)
+{
+ serf_connection_t *conn = data;
+ apr_status_t status = APR_SUCCESS;
+
+ if (conn->skt) {
+ serf__log_skt(SOCK_VERBOSE, __FILE__, conn->skt, "cleanup - ");
+ status = apr_socket_close(conn->skt);
+ conn->skt = NULL;
+ serf__log_nopref(SOCK_VERBOSE, "closed socket, status %d\n", status);
+ }
+
+ return status;
+}
+
+static apr_status_t clean_resp(void *data)
+{
+ serf_request_t *request = data;
+
+ /* The request's RESPOOL is being cleared. */
+
+ /* If the response has allocated some buckets, then destroy them (since
+ the bucket may hold resources other than memory in RESPOOL). Also
+ make sure to set their fields to NULL so connection closure does
+ not attempt to free them again. */
+ if (request->resp_bkt) {
+ serf_bucket_destroy(request->resp_bkt);
+ request->resp_bkt = NULL;
+ }
+ if (request->req_bkt) {
+ serf_bucket_destroy(request->req_bkt);
+ request->req_bkt = NULL;
+ }
+
+ /* ### should we worry about debug stuff, like that performed in
+ ### destroy_request()? should we worry about calling req->handler
+ ### to notify this "cancellation" due to pool clearing? */
+
+ /* This pool just got cleared/destroyed. Don't try to destroy the pool
+ (again) when the request is canceled. */
+ request->respool = NULL;
+
+ return APR_SUCCESS;
+}
+
+/* cleanup for conns */
+static apr_status_t clean_conn(void *data)
+{
+ serf_connection_t *conn = data;
+
+ serf__log(CONN_VERBOSE, __FILE__, "cleaning up connection 0x%x\n",
+ conn);
+ serf_connection_close(conn);
+
+ return APR_SUCCESS;
+}
+
+/* Update the pollset for this connection. We tweak the pollset based on
+ * whether we want to read and/or write, given conditions within the
+ * connection. If the connection is not (yet) in the pollset, then it
+ * will be added.
+ */
+apr_status_t serf__conn_update_pollset(serf_connection_t *conn)
+{
+ serf_context_t *ctx = conn->ctx;
+ apr_status_t status;
+ apr_pollfd_t desc = { 0 };
+
+ if (!conn->skt) {
+ return APR_SUCCESS;
+ }
+
+ /* Remove the socket from the poll set. */
+ desc.desc_type = APR_POLL_SOCKET;
+ desc.desc.s = conn->skt;
+ desc.reqevents = conn->reqevents;
+
+ status = ctx->pollset_rm(ctx->pollset_baton,
+ &desc, conn);
+ if (status && !APR_STATUS_IS_NOTFOUND(status))
+ return status;
+
+ /* Now put it back in with the correct read/write values. */
+ desc.reqevents = APR_POLLHUP | APR_POLLERR;
+ if (conn->requests &&
+ conn->state != SERF_CONN_INIT) {
+ /* If there are any outstanding events, then we want to read. */
+ /* ### not true. we only want to read IF we have sent some data */
+ desc.reqevents |= APR_POLLIN;
+
+ /* Don't write if OpenSSL told us that it needs to read data first. */
+ if (conn->stop_writing != 1) {
+
+ /* If the connection is not closing down and
+ * has unwritten data or
+ * there are any requests that still have buckets to write out,
+ * then we want to write.
+ */
+ if (conn->vec_len &&
+ conn->state != SERF_CONN_CLOSING)
+ desc.reqevents |= APR_POLLOUT;
+ else {
+ serf_request_t *request = conn->requests;
+
+ if ((conn->probable_keepalive_limit &&
+ conn->completed_requests > conn->probable_keepalive_limit) ||
+ (conn->max_outstanding_requests &&
+ conn->completed_requests - conn->completed_responses >=
+ conn->max_outstanding_requests)) {
+ /* we wouldn't try to write anyway right now. */
+ }
+ else {
+ while (request != NULL && request->req_bkt == NULL &&
+ request->written)
+ request = request->next;
+ if (request != NULL)
+ desc.reqevents |= APR_POLLOUT;
+ }
+ }
+ }
+ }
+
+ /* If we can have async responses, always look for something to read. */
+ if (conn->async_responses) {
+ desc.reqevents |= APR_POLLIN;
+ }
+
+ /* save our reqevents, so we can pass it in to remove later. */
+ conn->reqevents = desc.reqevents;
+
+ /* Note: even if we don't want to read/write this socket, we still
+ * want to poll it for hangups and errors.
+ */
+ return ctx->pollset_add(ctx->pollset_baton,
+ &desc, &conn->baton);
+}
+
+#ifdef SERF_DEBUG_BUCKET_USE
+
+/* Make sure all response buckets were drained. */
+static void check_buckets_drained(serf_connection_t *conn)
+{
+ serf_request_t *request = conn->requests;
+
+ for ( ; request ; request = request->next ) {
+ if (request->resp_bkt != NULL) {
+ /* ### crap. can't do this. this allocator may have un-drained
+ * ### REQUEST buckets.
+ */
+ /* serf_debug__entered_loop(request->resp_bkt->allocator); */
+ /* ### for now, pretend we closed the conn (resets the tracking) */
+ serf_debug__closed_conn(request->resp_bkt->allocator);
+ }
+ }
+}
+
+#endif
+
+static void destroy_ostream(serf_connection_t *conn)
+{
+ if (conn->ostream_head != NULL) {
+ serf_bucket_destroy(conn->ostream_head);
+ conn->ostream_head = NULL;
+ conn->ostream_tail = NULL;
+ }
+}
+
+static apr_status_t detect_eof(void *baton, serf_bucket_t *aggregate_bucket)
+{
+ serf_connection_t *conn = baton;
+ conn->hit_eof = 1;
+ return APR_EAGAIN;
+}
+
+static apr_status_t do_conn_setup(serf_connection_t *conn)
+{
+ apr_status_t status;
+ serf_bucket_t *ostream;
+
+ if (conn->ostream_head == NULL) {
+ conn->ostream_head = serf_bucket_aggregate_create(conn->allocator);
+ }
+
+ if (conn->ostream_tail == NULL) {
+ conn->ostream_tail = serf__bucket_stream_create(conn->allocator,
+ detect_eof,
+ conn);
+ }
+
+ ostream = conn->ostream_tail;
+
+ status = (*conn->setup)(conn->skt,
+ &conn->stream,
+ &ostream,
+ conn->setup_baton,
+ conn->pool);
+ if (status) {
+ /* extra destroy here since it wasn't added to the head bucket yet. */
+ serf_bucket_destroy(conn->ostream_tail);
+ destroy_ostream(conn);
+ return status;
+ }
+
+ serf_bucket_aggregate_append(conn->ostream_head,
+ ostream);
+
+ return status;
+}
+
+/* Set up the input and output stream buckets.
+ When a tunnel over an http proxy is needed, create a socket bucket and
+ empty aggregate bucket for sending and receiving unencrypted requests
+ over the socket.
+
+ Once the tunnel is established, or if no tunnel was needed, ask the application
+ to create the input and output buckets, which should take care of the
+ [en/de]cryption.
+ */
+
+static apr_status_t prepare_conn_streams(serf_connection_t *conn,
+ serf_bucket_t **istream,
+ serf_bucket_t **ostreamt,
+ serf_bucket_t **ostreamh)
+{
+ apr_status_t status;
+
+ if (conn->stream == NULL) {
+ conn->latency = apr_time_now() - conn->connect_time;
+ }
+
+ /* Do we need a SSL tunnel first? */
+ if (conn->state == SERF_CONN_CONNECTED) {
+ /* If the connection does not have an associated bucket, then
+ * call the setup callback to get one.
+ */
+ if (conn->stream == NULL) {
+ status = do_conn_setup(conn);
+ if (status) {
+ return status;
+ }
+ }
+ *ostreamt = conn->ostream_tail;
+ *ostreamh = conn->ostream_head;
+ *istream = conn->stream;
+ } else {
+ /* SSL tunnel needed and not set up yet, get a direct unencrypted
+ stream for this socket */
+ if (conn->stream == NULL) {
+ *istream = serf_bucket_socket_create(conn->skt,
+ conn->allocator);
+ }
+ /* Don't create the ostream bucket chain including the ssl_encrypt
+ bucket yet. This ensures the CONNECT request is sent unencrypted
+ to the proxy. */
+ *ostreamt = *ostreamh = conn->ssltunnel_ostream;
+ }
+
+ return APR_SUCCESS;
+}
+
+/* Create and connect sockets for any connections which don't have them
+ * yet. This is the core of our lazy-connect behavior.
+ */
+apr_status_t serf__open_connections(serf_context_t *ctx)
+{
+ int i;
+
+ for (i = ctx->conns->nelts; i--; ) {
+ serf_connection_t *conn = GET_CONN(ctx, i);
+ serf__authn_info_t *authn_info;
+ apr_status_t status;
+ apr_socket_t *skt;
+
+ conn->seen_in_pollset = 0;
+
+ if (conn->skt != NULL) {
+#ifdef SERF_DEBUG_BUCKET_USE
+ check_buckets_drained(conn);
+#endif
+ continue;
+ }
+
+ /* Delay opening until we have something to deliver! */
+ if (conn->requests == NULL) {
+ continue;
+ }
+
+ apr_pool_clear(conn->skt_pool);
+ apr_pool_cleanup_register(conn->skt_pool, conn, clean_skt, clean_skt);
+
+ status = apr_socket_create(&skt, conn->address->family,
+ SOCK_STREAM,
+#if APR_MAJOR_VERSION > 0
+ APR_PROTO_TCP,
+#endif
+ conn->skt_pool);
+ serf__log(SOCK_VERBOSE, __FILE__,
+ "created socket for conn 0x%x, status %d\n", conn, status);
+ if (status != APR_SUCCESS)
+ return status;
+
+ /* Set the socket to be non-blocking */
+ if ((status = apr_socket_timeout_set(skt, 0)) != APR_SUCCESS)
+ return status;
+
+ /* Disable Nagle's algorithm */
+ if ((status = apr_socket_opt_set(skt,
+ APR_TCP_NODELAY, 1)) != APR_SUCCESS)
+ return status;
+
+ /* Configured. Store it into the connection now. */
+ conn->skt = skt;
+
+ /* Remember time when we started connecting to server to calculate
+ network latency. */
+ conn->connect_time = apr_time_now();
+
+ /* Now that the socket is set up, let's connect it. This should
+ * return immediately.
+ */
+ status = apr_socket_connect(skt, conn->address);
+ serf__log_skt(SOCK_VERBOSE, __FILE__, skt,
+ "connected socket for conn 0x%x, status %d\n",
+ conn, status);
+ if (status != APR_SUCCESS) {
+ if (!APR_STATUS_IS_EINPROGRESS(status))
+ return status;
+ }
+
+ /* Flag our pollset as dirty now that we have a new socket. */
+ conn->dirty_conn = 1;
+ ctx->dirty_pollset = 1;
+
+ /* If the authentication was already started on another connection,
+ prepare this connection (it might be possible to skip some
+ part of the handshaking). */
+ if (ctx->proxy_address) {
+ authn_info = &ctx->proxy_authn_info;
+ if (authn_info->scheme) {
+ authn_info->scheme->init_conn_func(authn_info->scheme, 407,
+ conn, conn->pool);
+ }
+ }
+
+ authn_info = serf__get_authn_info_for_server(conn);
+ if (authn_info->scheme) {
+ authn_info->scheme->init_conn_func(authn_info->scheme, 401,
+ conn, conn->pool);
+ }
+
+ /* Does this connection require a SSL tunnel over the proxy? */
+ if (ctx->proxy_address && strcmp(conn->host_info.scheme, "https") == 0)
+ serf__ssltunnel_connect(conn);
+ else {
+ serf_bucket_t *dummy1, *dummy2;
+
+ conn->state = SERF_CONN_CONNECTED;
+
+ status = prepare_conn_streams(conn, &conn->stream,
+ &dummy1, &dummy2);
+ if (status) {
+ return status;
+ }
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t no_more_writes(serf_connection_t *conn,
+ serf_request_t *request)
+{
+ /* Note that we should hold new requests until we open our new socket. */
+ conn->state = SERF_CONN_CLOSING;
+ serf__log(CONN_VERBOSE, __FILE__, "stop writing on conn 0x%x\n",
+ conn);
+
+ /* Clear our iovec. */
+ conn->vec_len = 0;
+
+ /* Update the pollset to know we don't want to write on this socket any
+ * more.
+ */
+ conn->dirty_conn = 1;
+ conn->ctx->dirty_pollset = 1;
+ return APR_SUCCESS;
+}
+
+/* Read the 'Connection' header from the response. Return SERF_ERROR_CLOSING if
+ * the header contains value 'close' indicating the server is closing the
+ * connection right after this response.
+ * Otherwise returns APR_SUCCESS.
+ */
+static apr_status_t is_conn_closing(serf_bucket_t *response)
+{
+ serf_bucket_t *hdrs;
+ const char *val;
+
+ hdrs = serf_bucket_response_get_headers(response);
+ val = serf_bucket_headers_get(hdrs, "Connection");
+ if (val && strcasecmp("close", val) == 0)
+ {
+ return SERF_ERROR_CLOSING;
+ }
+
+ return APR_SUCCESS;
+}
+
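+/* Append REQUEST to the queue headed by *LIST, keeping *TAIL pointing at
+ * the last element. */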
+static void link_requests(serf_request_t **list, serf_request_t **tail,
+ serf_request_t *request)
+{
+ if (*list == NULL) {
+ *list = request;
+ *tail = request;
+ }
+ else {
+ (*tail)->next = request;
+ *tail = request;
+ }
+}
+
+static apr_status_t destroy_request(serf_request_t *request)
+{
+ serf_connection_t *conn = request->conn;
+
+ /* The request and response buckets are no longer needed,
+ nor is the request's pool. */
+ if (request->resp_bkt) {
+ serf_debug__closed_conn(request->resp_bkt->allocator);
+ serf_bucket_destroy(request->resp_bkt);
+ request->resp_bkt = NULL;
+ }
+ if (request->req_bkt) {
+ serf_debug__closed_conn(request->req_bkt->allocator);
+ serf_bucket_destroy(request->req_bkt);
+ request->req_bkt = NULL;
+ }
+
+ serf_debug__bucket_alloc_check(request->allocator);
+ if (request->respool) {
+ /* ### unregister the pool cleanup for self? */
+ apr_pool_destroy(request->respool);
+ }
+
+ serf_bucket_mem_free(conn->allocator, request);
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t cancel_request(serf_request_t *request,
+ serf_request_t **list,
+ int notify_request)
+{
+ /* If we haven't run setup, then we won't have a handler to call. */
+ if (request->handler && notify_request) {
+ /* We actually don't care what the handler returns.
+ * We have bigger matters at hand.
+ */
+ (*request->handler)(request, NULL, request->handler_baton,
+ request->respool);
+ }
+
+ if (*list == request) {
+ *list = request->next;
+ }
+ else {
+ serf_request_t *scan = *list;
+
+ while (scan->next && scan->next != request)
+ scan = scan->next;
+
+ if (scan->next) {
+ scan->next = scan->next->next;
+ }
+ }
+
+ return destroy_request(request);
+}
+
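+/* Remove CONN's socket from the context's pollset. */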
+static apr_status_t remove_connection(serf_context_t *ctx,
+ serf_connection_t *conn)
+{
+ apr_pollfd_t desc = { 0 };
+
+ desc.desc_type = APR_POLL_SOCKET;
+ desc.desc.s = conn->skt;
+ desc.reqevents = conn->reqevents;
+
+ return ctx->pollset_rm(ctx->pollset_baton,
+ &desc, conn);
+}
+
+/* A socket was closed, inform the application. */
+static void handle_conn_closed(serf_connection_t *conn, apr_status_t status)
+{
+ (*conn->closed)(conn, conn->closed_baton, status,
+ conn->pool);
+}
+
+static apr_status_t reset_connection(serf_connection_t *conn,
+ int requeue_requests)
+{
+ serf_context_t *ctx = conn->ctx;
+ apr_status_t status;
+ serf_request_t *old_reqs;
+
+ conn->probable_keepalive_limit = conn->completed_responses;
+ conn->completed_requests = 0;
+ conn->completed_responses = 0;
+
+ old_reqs = conn->requests;
+
+ conn->requests = NULL;
+ conn->requests_tail = NULL;
+
+ /* Handle all outstanding requests. These have either not been written yet,
+ or have been written but the expected reply wasn't received yet. */
+ while (old_reqs) {
+ /* If we haven't started to write the request, bring it over
+ * unchanged to our new socket.
+ */
+ if (requeue_requests && !old_reqs->written) {
+ serf_request_t *req = old_reqs;
+ old_reqs = old_reqs->next;
+ req->next = NULL;
+ link_requests(&conn->requests, &conn->requests_tail, req);
+ }
+ else {
+ /* Request has been consumed, or we don't want to requeue the
+ request. Either way, inform the application that the request
+ is cancelled. */
+ cancel_request(old_reqs, &old_reqs, requeue_requests);
+ }
+ }
+
+ /* Requests queue has been prepared for a new socket, close the old one. */
+ if (conn->skt != NULL) {
+ remove_connection(ctx, conn);
+ status = apr_socket_close(conn->skt);
+ serf__log_skt(SOCK_VERBOSE, __FILE__, conn->skt,
+ "closed socket, status %d\n", status);
+ if (conn->closed != NULL) {
+ handle_conn_closed(conn, status);
+ }
+ conn->skt = NULL;
+ }
+
+ if (conn->stream != NULL) {
+ serf_bucket_destroy(conn->stream);
+ conn->stream = NULL;
+ }
+
+ destroy_ostream(conn);
+
+ /* Don't try to resume any writes */
+ conn->vec_len = 0;
+
+ conn->dirty_conn = 1;
+ conn->ctx->dirty_pollset = 1;
+ conn->state = SERF_CONN_INIT;
+
+ serf__log(CONN_VERBOSE, __FILE__, "reset connection 0x%x\n", conn);
+
+ conn->status = APR_SUCCESS;
+
+ /* Let our context know that we've 'reset' the socket already. */
+ conn->seen_in_pollset |= APR_POLLHUP;
+
+ /* Found the connection. Closed it. All done. */
+ return APR_SUCCESS;
+}
+
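+/* Write as much of conn->vec to the socket as it will accept, advance the
+ * iovec past the bytes that were sent, and report write progress. */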
+static apr_status_t socket_writev(serf_connection_t *conn)
+{
+ apr_size_t written;
+ apr_status_t status;
+
+ status = apr_socket_sendv(conn->skt, conn->vec,
+ conn->vec_len, &written);
+ if (status && !APR_STATUS_IS_EAGAIN(status))
+ serf__log_skt(SOCK_VERBOSE, __FILE__, conn->skt,
+ "socket_sendv error %d\n", status);
+
+ /* did we write everything? */
+ if (written) {
+ apr_size_t len = 0;
+ int i;
+
+ serf__log_skt(SOCK_MSG_VERBOSE, __FILE__, conn->skt,
+ "--- socket_sendv:\n");
+
+ for (i = 0; i < conn->vec_len; i++) {
+ len += conn->vec[i].iov_len;
+ if (written < len) {
+ serf__log_nopref(SOCK_MSG_VERBOSE, "%.*s",
+ conn->vec[i].iov_len - (len - written),
+ conn->vec[i].iov_base);
+ if (i) {
+ memmove(conn->vec, &conn->vec[i],
+ sizeof(struct iovec) * (conn->vec_len - i));
+ conn->vec_len -= i;
+ }
+ conn->vec[0].iov_base = (char *)conn->vec[0].iov_base + (conn->vec[0].iov_len - (len - written));
+ conn->vec[0].iov_len = len - written;
+ break;
+ } else {
+ serf__log_nopref(SOCK_MSG_VERBOSE, "%.*s",
+ conn->vec[i].iov_len, conn->vec[i].iov_base);
+ }
+ }
+ if (len == written) {
+ conn->vec_len = 0;
+ }
+ serf__log_nopref(SOCK_MSG_VERBOSE, "-(%d)-\n", written);
+
+ /* Log progress information */
+ serf__context_progress_delta(conn->ctx, 0, written);
+ }
+
+ return status;
+}
+
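+/* Create the request's pool and allocator, then let the application's setup
+ * callback fill in the request bucket, acceptor and handler. */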
+static apr_status_t setup_request(serf_request_t *request)
+{
+ serf_connection_t *conn = request->conn;
+ apr_status_t status;
+
+ /* Now that we are about to serve the request, allocate a pool. */
+ apr_pool_create(&request->respool, conn->pool);
+ request->allocator = serf_bucket_allocator_create(request->respool,
+ NULL, NULL);
+ apr_pool_cleanup_register(request->respool, request,
+ clean_resp, clean_resp);
+
+ /* Fill in the rest of the values for the request. */
+ status = request->setup(request, request->setup_baton,
+ &request->req_bkt,
+ &request->acceptor,
+ &request->acceptor_baton,
+ &request->handler,
+ &request->handler_baton,
+ request->respool);
+ return status;
+}
+
+/* write data out to the connection */
+static apr_status_t write_to_connection(serf_connection_t *conn)
+{
+ serf_request_t *request = conn->requests;
+
+ if (conn->probable_keepalive_limit &&
+ conn->completed_requests > conn->probable_keepalive_limit) {
+
+ conn->dirty_conn = 1;
+ conn->ctx->dirty_pollset = 1;
+
+ /* backoff for now. */
+ return APR_SUCCESS;
+ }
+
+ /* Find a request that has data which needs to be delivered. */
+ while (request != NULL &&
+ request->req_bkt == NULL && request->written)
+ request = request->next;
+
+ /* assert: request != NULL || conn->vec_len */
+
+ /* Keep reading and sending until we run out of stuff to read, or
+ * writing would block.
+ */
+ while (1) {
+ int stop_reading = 0;
+ apr_status_t status;
+ apr_status_t read_status;
+ serf_bucket_t *ostreamt, *ostreamh;
+ int max_outstanding_requests = conn->max_outstanding_requests;
+
+ /* If we're setting up an ssl tunnel, we can't send real requests
+ just yet, as they need to be encrypted and our encrypt buckets
+ aren't created yet as we still need to read the unencrypted
+ response of the CONNECT request. */
+ if (conn->state != SERF_CONN_CONNECTED)
+ max_outstanding_requests = 1;
+
+ if (max_outstanding_requests &&
+ conn->completed_requests -
+ conn->completed_responses >= max_outstanding_requests) {
+ /* backoff for now. */
+ return APR_SUCCESS;
+ }
+
+ /* If we have unwritten data, then write what we can. */
+ while (conn->vec_len) {
+ status = socket_writev(conn);
+
+ /* If the write would have blocked, then we're done. Don't try
+ * to write anything else to the socket.
+ */
+ if (APR_STATUS_IS_EAGAIN(status))
+ return APR_SUCCESS;
+ if (APR_STATUS_IS_EPIPE(status) ||
+ APR_STATUS_IS_ECONNRESET(status) ||
+ APR_STATUS_IS_ECONNABORTED(status))
+ return no_more_writes(conn, request);
+ if (status)
+ return status;
+ }
+ /* ### can we have a short write, yet no EAGAIN? a short write
+ ### would imply unwritten_len > 0 ... */
+ /* assert: unwritten_len == 0. */
+
+ /* We may need to move forward to a request which has something
+ * to write.
+ */
+ while (request != NULL &&
+ request->req_bkt == NULL && request->written)
+ request = request->next;
+
+ if (request == NULL) {
+ /* No more requests (with data) are registered with the
+ * connection. Let's update the pollset so that we don't
+ * try to write to this socket again.
+ */
+ conn->dirty_conn = 1;
+ conn->ctx->dirty_pollset = 1;
+ return APR_SUCCESS;
+ }
+
+ status = prepare_conn_streams(conn, &conn->stream, &ostreamt, &ostreamh);
+ if (status) {
+ return status;
+ }
+
+ if (request->req_bkt == NULL) {
+ read_status = setup_request(request);
+ if (read_status) {
+ /* Something bad happened. Propagate any errors. */
+ return read_status;
+ }
+ }
+
+ if (!request->written) {
+ request->written = 1;
+ serf_bucket_aggregate_append(ostreamt, request->req_bkt);
+ }
+
+ /* ### optimize at some point by using read_for_sendfile */
+ /* TODO: now that read_iovec will effectively try to return as much
+ data as is available, we probably don't want to read ALL_AVAIL, but
+ a lower number, like the size of one or a few TCP packets, the
+ available TCP buffer size ... */
+ read_status = serf_bucket_read_iovec(ostreamh,
+ SERF_READ_ALL_AVAIL,
+ IOV_MAX,
+ conn->vec,
+ &conn->vec_len);
+
+ if (!conn->hit_eof) {
+ if (APR_STATUS_IS_EAGAIN(read_status)) {
+ /* We read some stuff, but should not try to read again. */
+ stop_reading = 1;
+ }
+ else if (read_status == SERF_ERROR_WAIT_CONN) {
+ /* The bucket told us that it can't provide more data until
+ more data is read from the socket. This normally happens
+ during a SSL handshake.
+
+ We should avoid looking for writability for a while so
+ that (hopefully) something will appear in the bucket so
+ we can actually write something. Otherwise, we could
+ end up in a CPU spin: socket wants something, but we
+ don't have anything (and keep returning EAGAIN)
+ */
+ conn->stop_writing = 1;
+ conn->dirty_conn = 1;
+ conn->ctx->dirty_pollset = 1;
+ }
+ else if (read_status && !APR_STATUS_IS_EOF(read_status)) {
+ /* Something bad happened. Propagate any errors. */
+ return read_status;
+ }
+ }
+
+ /* If we got some data, then deliver it. */
+ /* ### what to do if we got no data?? is that a problem? */
+ if (conn->vec_len > 0) {
+ status = socket_writev(conn);
+
+ /* If we can't write any more, or an error occurred, then
+ * we're done here.
+ */
+ if (APR_STATUS_IS_EAGAIN(status))
+ return APR_SUCCESS;
+ if (APR_STATUS_IS_EPIPE(status))
+ return no_more_writes(conn, request);
+ if (APR_STATUS_IS_ECONNRESET(status) ||
+ APR_STATUS_IS_ECONNABORTED(status)) {
+ return no_more_writes(conn, request);
+ }
+ if (status)
+ return status;
+ }
+
+ if (read_status == SERF_ERROR_WAIT_CONN) {
+ stop_reading = 1;
+ conn->stop_writing = 1;
+ conn->dirty_conn = 1;
+ conn->ctx->dirty_pollset = 1;
+ }
+ else if (read_status && conn->hit_eof && conn->vec_len == 0) {
+ /* If we hit the end of the request bucket and all of its data has
+ * been written, then clear it out to signify that we're done
+ * sending the request. On the next iteration through this loop:
+ * - if there are remaining bytes they will be written, and as the
+ * request bucket will be completely read it will be destroyed then.
+ * - we'll see if there are other requests that need to be sent
+ * ("pipelining").
+ */
+ conn->hit_eof = 0;
+ serf_bucket_destroy(request->req_bkt);
+ request->req_bkt = NULL;
+
+ /* If our connection has async responses enabled, we're not
+ * going to get a reply back, so kill the request.
+ */
+ if (conn->async_responses) {
+ conn->requests = request->next;
+ destroy_request(request);
+ }
+
+ conn->completed_requests++;
+
+ if (conn->probable_keepalive_limit &&
+ conn->completed_requests > conn->probable_keepalive_limit) {
+ /* backoff for now. */
+ stop_reading = 1;
+ }
+ }
+
+ if (stop_reading) {
+ return APR_SUCCESS;
+ }
+ }
+ /* NOTREACHED */
+}
+
+/* A response message was received from the server, so call
+ the handler as specified on the original request. */
+static apr_status_t handle_response(serf_request_t *request,
+ apr_pool_t *pool)
+{
+ apr_status_t status = APR_SUCCESS;
+ int consumed_response = 0;
+
+ /* Only enable the new authentication framework if the program has
+ * registered an authentication credential callback.
+ *
+ * This permits older Serf apps to still handle authentication
+ * themselves by not registering credential callbacks.
+ */
+ if (request->conn->ctx->cred_cb) {
+ status = serf__handle_auth_response(&consumed_response,
+ request,
+ request->resp_bkt,
+ request->handler_baton,
+ pool);
+
+ /* If there was an error reading the response (maybe there wasn't
+ enough data available), don't bother passing the response to the
+ application.
+
+ If the authentication was tried, but failed, pass the response
+ to the application; maybe it can do better. */
+ if (APR_STATUS_IS_EOF(status) ||
+ APR_STATUS_IS_EAGAIN(status)) {
+ return status;
+ }
+ }
+
+ if (!consumed_response) {
+ return (*request->handler)(request,
+ request->resp_bkt,
+ request->handler_baton,
+ pool);
+ }
+
+ return status;
+}
+
+/* An async response message was received from the server. */
+static apr_status_t handle_async_response(serf_connection_t *conn,
+ apr_pool_t *pool)
+{
+ apr_status_t status;
+
+ if (conn->current_async_response == NULL) {
+ conn->current_async_response =
+ (*conn->async_acceptor)(NULL, conn->stream,
+ conn->async_acceptor_baton, pool);
+ }
+
+ status = (*conn->async_handler)(NULL, conn->current_async_response,
+ conn->async_handler_baton, pool);
+
+ if (APR_STATUS_IS_EOF(status)) {
+ serf_bucket_destroy(conn->current_async_response);
+ conn->current_async_response = NULL;
+ status = APR_SUCCESS;
+ }
+
+ return status;
+}
+
+
+apr_status_t
+serf__provide_credentials(serf_context_t *ctx,
+ char **username,
+ char **password,
+ serf_request_t *request, void *baton,
+ int code, const char *authn_type,
+ const char *realm,
+ apr_pool_t *pool)
+{
+ serf_connection_t *conn = request->conn;
+ serf_request_t *authn_req = request;
+ apr_status_t status;
+
+ if (request->ssltunnel == 1 &&
+ conn->state == SERF_CONN_SETUP_SSLTUNNEL) {
+ /* This is a CONNECT request to set up an SSL tunnel over a proxy.
+ This request is created by serf, so if the proxy requires
+ authentication, we can't ask the application for credentials with
+ this request.
+
+ Solution: setup the first request created by the application on
+ this connection, and use that request and its handler_baton to
+ call back to the application. */
+
+ authn_req = request->next;
+ /* assert: app_request != NULL */
+ if (!authn_req)
+ return APR_EGENERAL;
+
+ if (!authn_req->req_bkt) {
+ apr_status_t status;
+
+ status = setup_request(authn_req);
+ /* If we can't set up a request, don't bother setting up the
+ ssl tunnel. */
+ if (status)
+ return status;
+ }
+ }
+
+ /* Ask the application. */
+ status = (*ctx->cred_cb)(username, password,
+ authn_req, authn_req->handler_baton,
+ code, authn_type, realm, pool);
+ if (status)
+ return status;
+
+ return APR_SUCCESS;
+}
+
+/* read data from the connection */
+static apr_status_t read_from_connection(serf_connection_t *conn)
+{
+ apr_status_t status;
+ apr_pool_t *tmppool;
+ int close_connection = FALSE;
+
+ /* Whatever is coming in on the socket corresponds to the first request
+ * on our chain.
+ */
+ serf_request_t *request = conn->requests;
+
+ /* If the stop_writing flag was set on the connection, reset it now because
+ there is some data to read. */
+ if (conn->stop_writing) {
+ conn->stop_writing = 0;
+ conn->dirty_conn = 1;
+ conn->ctx->dirty_pollset = 1;
+ }
+
+ /* assert: request != NULL */
+
+ if ((status = apr_pool_create(&tmppool, conn->pool)) != APR_SUCCESS)
+ goto error;
+
+ /* Invoke response handlers until we have no more work. */
+ while (1) {
+ serf_bucket_t *dummy1, *dummy2;
+
+ apr_pool_clear(tmppool);
+
+ /* Only interested in the input stream here. */
+ status = prepare_conn_streams(conn, &conn->stream, &dummy1, &dummy2);
+ if (status) {
+ goto error;
+ }
+
+ /* We have a different codepath when we can have async responses. */
+ if (conn->async_responses) {
+ /* TODO What about socket errors? */
+ status = handle_async_response(conn, tmppool);
+ if (APR_STATUS_IS_EAGAIN(status)) {
+ status = APR_SUCCESS;
+ goto error;
+ }
+ if (status) {
+ goto error;
+ }
+ continue;
+ }
+
+ /* We are reading a response for a request we haven't
+ * written yet!
+ *
+ * This shouldn't normally happen EXCEPT:
+ *
+ * 1) when the other end has closed the socket and we're
+ * pending an EOF return.
+ * 2) Doing the initial SSL handshake - we'll get EAGAIN
+ * as the SSL buckets will hide the handshake from us
+ * but not return any data.
+ * 3) When the server sends us an SSL alert.
+ *
+ * In these cases, we should not receive any actual user data.
+ *
+ * 4) When the server sends an error response, like 408 Request timeout.
+ * This response should be passed to the application.
+ *
+ * If we see an EOF (due to either an expired timeout or the server
+ * sending the SSL 'close notify' shutdown alert), we'll reset the
+ * connection and open a new one.
+ */
+ if (request->req_bkt || !request->written) {
+ const char *data;
+ apr_size_t len;
+
+ status = serf_bucket_peek(conn->stream, &data, &len);
+
+ if (APR_STATUS_IS_EOF(status)) {
+ reset_connection(conn, 1);
+ status = APR_SUCCESS;
+ goto error;
+ }
+ else if (APR_STATUS_IS_EAGAIN(status) && !len) {
+ status = APR_SUCCESS;
+ goto error;
+ } else if (status && !APR_STATUS_IS_EAGAIN(status)) {
+ /* Read error */
+ goto error;
+ }
+
+ /* Unexpected response from the server */
+
+ }
+
+ /* If the request doesn't have a response bucket, then call the
+ * acceptor to get one created.
+ */
+ if (request->resp_bkt == NULL) {
+ request->resp_bkt = (*request->acceptor)(request, conn->stream,
+ request->acceptor_baton,
+ tmppool);
+ apr_pool_clear(tmppool);
+ }
+
+ status = handle_response(request, tmppool);
+
+        /* Some systems will not generate a HUP poll event, so we have to
+         * handle ECONNRESET and ECONNABORTED here.
+ */
+ if (APR_STATUS_IS_ECONNRESET(status) ||
+ APR_STATUS_IS_ECONNABORTED(status) ||
+ status == SERF_ERROR_REQUEST_LOST) {
+            /* If the connection had ever been good, be optimistic and try again.
+             * If it has never been good (even after a retry), fail.
+ */
+ if (conn->completed_responses) {
+ reset_connection(conn, 1);
+ status = APR_SUCCESS;
+ }
+ else if (status == SERF_ERROR_REQUEST_LOST) {
+ status = SERF_ERROR_ABORTED_CONNECTION;
+ }
+ goto error;
+ }
+
+ /* If our response handler says it can't do anything more, we now
+ * treat that as a success.
+ */
+ if (APR_STATUS_IS_EAGAIN(status)) {
+ status = APR_SUCCESS;
+ goto error;
+ }
+
+ /* If we received APR_SUCCESS, run this loop again. */
+ if (!status) {
+ continue;
+ }
+
+ close_connection = is_conn_closing(request->resp_bkt);
+
+ if (!APR_STATUS_IS_EOF(status) &&
+ close_connection != SERF_ERROR_CLOSING) {
+ /* Whether success, or an error, there is no more to do unless
+ * this request has been completed.
+ */
+ goto error;
+ }
+
+ /* The response has been fully-read, so that means the request has
+ * either been fully-delivered (most likely), or that we don't need to
+ * write the rest of it anymore, e.g. when a 408 Request timeout was
+         * received.
+ * Remove it from our queue and loop to read another response.
+ */
+ conn->requests = request->next;
+
+ destroy_request(request);
+
+ request = conn->requests;
+
+ /* If we're truly empty, update our tail. */
+ if (request == NULL) {
+ conn->requests_tail = NULL;
+ }
+
+ conn->completed_responses++;
+
+        /* We have to rebuild the pollset since completed_responses changed. */
+ conn->dirty_conn = 1;
+ conn->ctx->dirty_pollset = 1;
+
+ /* This means that we're being advised that the connection is done. */
+ if (close_connection == SERF_ERROR_CLOSING) {
+ reset_connection(conn, 1);
+ if (APR_STATUS_IS_EOF(status))
+ status = APR_SUCCESS;
+ goto error;
+ }
+
+ /* The server is suddenly deciding to serve more responses than we've
+ * seen before.
+ *
+ * Let our requests go.
+ */
+ if (conn->probable_keepalive_limit &&
+ conn->completed_responses > conn->probable_keepalive_limit) {
+ conn->probable_keepalive_limit = 0;
+ }
+
+ /* If we just ran out of requests or have unwritten requests, then
+ * update the pollset. We don't want to read from this socket any
+ * more. We are definitely done with this loop, too.
+ */
+ if (request == NULL || !request->written) {
+ conn->dirty_conn = 1;
+ conn->ctx->dirty_pollset = 1;
+ status = APR_SUCCESS;
+ goto error;
+ }
+ }
+
+error:
+ apr_pool_destroy(tmppool);
+ return status;
+}
+
+/* process all events on the connection */
+apr_status_t serf__process_connection(serf_connection_t *conn,
+ apr_int16_t events)
+{
+ apr_status_t status;
+
+ /* POLLHUP/ERR should come after POLLIN so if there's an error message or
+ * the like sitting on the connection, we give the app a chance to read
+ * it before we trigger a reset condition.
+ */
+ if ((events & APR_POLLIN) != 0) {
+ if ((status = read_from_connection(conn)) != APR_SUCCESS)
+ return status;
+
+ /* If we decided to reset our connection, return now as we don't
+ * want to write.
+ */
+ if ((conn->seen_in_pollset & APR_POLLHUP) != 0) {
+ return APR_SUCCESS;
+ }
+ }
+ if ((events & APR_POLLHUP) != 0) {
+ /* The connection got reset by the server. On Windows this can happen
+           when all data is read, so just clean up the connection and open
+ a new one.
+ If we haven't had any successful responses on this connection,
+ then error out as it is likely a server issue. */
+ if (conn->completed_responses) {
+ return reset_connection(conn, 1);
+ }
+ return SERF_ERROR_ABORTED_CONNECTION;
+ }
+ if ((events & APR_POLLERR) != 0) {
+ /* We might be talking to a buggy HTTP server that doesn't
+ * do lingering-close. (httpd < 2.1.8 does this.)
+ *
+ * See:
+ *
+ * http://issues.apache.org/bugzilla/show_bug.cgi?id=35292
+ */
+ if (conn->completed_requests && !conn->probable_keepalive_limit) {
+ return reset_connection(conn, 1);
+ }
+#ifdef SO_ERROR
+ /* If possible, get the error from the platform's socket layer and
+ convert it to an APR status code. */
+ {
+ apr_os_sock_t osskt;
+ if (!apr_os_sock_get(&osskt, conn->skt)) {
+ int error;
+ apr_socklen_t l = sizeof(error);
+
+ if (!getsockopt(osskt, SOL_SOCKET, SO_ERROR, (char*)&error, &l))
+ return APR_FROM_OS_ERROR(error);
+ }
+ }
+#endif
+ return APR_EGENERAL;
+ }
+ if ((events & APR_POLLOUT) != 0) {
+ if ((status = write_to_connection(conn)) != APR_SUCCESS)
+ return status;
+ }
+ return APR_SUCCESS;
+}
+
+serf_connection_t *serf_connection_create(
+ serf_context_t *ctx,
+ apr_sockaddr_t *address,
+ serf_connection_setup_t setup,
+ void *setup_baton,
+ serf_connection_closed_t closed,
+ void *closed_baton,
+ apr_pool_t *pool)
+{
+ serf_connection_t *conn = apr_pcalloc(pool, sizeof(*conn));
+
+ conn->ctx = ctx;
+ conn->status = APR_SUCCESS;
+ /* Ignore server address if proxy was specified. */
+ conn->address = ctx->proxy_address ? ctx->proxy_address : address;
+ conn->setup = setup;
+ conn->setup_baton = setup_baton;
+ conn->closed = closed;
+ conn->closed_baton = closed_baton;
+ conn->pool = pool;
+ conn->allocator = serf_bucket_allocator_create(pool, NULL, NULL);
+ conn->stream = NULL;
+ conn->ostream_head = NULL;
+ conn->ostream_tail = NULL;
+ conn->baton.type = SERF_IO_CONN;
+ conn->baton.u.conn = conn;
+ conn->hit_eof = 0;
+ conn->state = SERF_CONN_INIT;
+ conn->latency = -1; /* unknown */
+
+ /* Create a subpool for our connection. */
+ apr_pool_create(&conn->skt_pool, conn->pool);
+
+ /* register a cleanup */
+ apr_pool_cleanup_register(conn->pool, conn, clean_conn,
+ apr_pool_cleanup_null);
+
+ /* Add the connection to the context. */
+ *(serf_connection_t **)apr_array_push(ctx->conns) = conn;
+
+ serf__log(CONN_VERBOSE, __FILE__, "created connection 0x%x\n",
+ conn);
+
+ return conn;
+}
+
+apr_status_t serf_connection_create2(
+ serf_connection_t **conn,
+ serf_context_t *ctx,
+ apr_uri_t host_info,
+ serf_connection_setup_t setup,
+ void *setup_baton,
+ serf_connection_closed_t closed,
+ void *closed_baton,
+ apr_pool_t *pool)
+{
+ apr_status_t status = APR_SUCCESS;
+ serf_connection_t *c;
+ apr_sockaddr_t *host_address = NULL;
+
+ /* Set the port number explicitly, needed to create the socket later. */
+ if (!host_info.port) {
+ host_info.port = apr_uri_port_of_scheme(host_info.scheme);
+ }
+
+ /* Only lookup the address of the server if no proxy server was
+ configured. */
+ if (!ctx->proxy_address) {
+ status = apr_sockaddr_info_get(&host_address,
+ host_info.hostname,
+ APR_UNSPEC, host_info.port, 0, pool);
+ if (status)
+ return status;
+ }
+
+ c = serf_connection_create(ctx, host_address, setup, setup_baton,
+ closed, closed_baton, pool);
+
+ /* We're not interested in the path following the hostname. */
+ c->host_url = apr_uri_unparse(c->pool,
+ &host_info,
+ APR_URI_UNP_OMITPATHINFO);
+
+ /* Store the host info without the path on the connection. */
+ (void)apr_uri_parse(c->pool, c->host_url, &(c->host_info));
+ if (!c->host_info.port) {
+ c->host_info.port = apr_uri_port_of_scheme(c->host_info.scheme);
+ }
+
+ *conn = c;
+
+ return status;
+}
+
+apr_status_t serf_connection_reset(
+ serf_connection_t *conn)
+{
+ return reset_connection(conn, 0);
+}
+
+
+apr_status_t serf_connection_close(
+ serf_connection_t *conn)
+{
+ int i;
+ serf_context_t *ctx = conn->ctx;
+ apr_status_t status;
+
+ for (i = ctx->conns->nelts; i--; ) {
+ serf_connection_t *conn_seq = GET_CONN(ctx, i);
+
+ if (conn_seq == conn) {
+ while (conn->requests) {
+ serf_request_cancel(conn->requests);
+ }
+ if (conn->skt != NULL) {
+ remove_connection(ctx, conn);
+ status = apr_socket_close(conn->skt);
+ serf__log_skt(SOCK_VERBOSE, __FILE__, conn->skt,
+ "closed socket, status %d\n",
+ status);
+ if (conn->closed != NULL) {
+ handle_conn_closed(conn, status);
+ }
+ conn->skt = NULL;
+ }
+ if (conn->stream != NULL) {
+ serf_bucket_destroy(conn->stream);
+ conn->stream = NULL;
+ }
+
+ destroy_ostream(conn);
+
+ /* Remove the connection from the context. We don't want to
+ * deal with it any more.
+ */
+ if (i < ctx->conns->nelts - 1) {
+ /* move later connections over this one. */
+ memmove(
+ &GET_CONN(ctx, i),
+ &GET_CONN(ctx, i + 1),
+ (ctx->conns->nelts - i - 1) * sizeof(serf_connection_t *));
+ }
+ --ctx->conns->nelts;
+
+ serf__log(CONN_VERBOSE, __FILE__, "closed connection 0x%x\n",
+ conn);
+
+ /* Found the connection. Closed it. All done. */
+ return APR_SUCCESS;
+ }
+ }
+
+ /* We didn't find the specified connection. */
+ /* ### doc talks about this w.r.t poll structures. use something else? */
+ return APR_NOTFOUND;
+}
+
+
+void serf_connection_set_max_outstanding_requests(
+ serf_connection_t *conn,
+ unsigned int max_requests)
+{
+ if (max_requests == 0)
+ serf__log_skt(CONN_VERBOSE, __FILE__, conn->skt,
+ "Set max. nr. of outstanding requests for this "
+ "connection to unlimited.\n");
+ else
+ serf__log_skt(CONN_VERBOSE, __FILE__, conn->skt,
+ "Limit max. nr. of outstanding requests for this "
+ "connection to %u.\n", max_requests);
+
+ conn->max_outstanding_requests = max_requests;
+}
+
+
+void serf_connection_set_async_responses(
+ serf_connection_t *conn,
+ serf_response_acceptor_t acceptor,
+ void *acceptor_baton,
+ serf_response_handler_t handler,
+ void *handler_baton)
+{
+ conn->async_responses = 1;
+ conn->async_acceptor = acceptor;
+ conn->async_acceptor_baton = acceptor_baton;
+ conn->async_handler = handler;
+ conn->async_handler_baton = handler_baton;
+}
+
+static serf_request_t *
+create_request(serf_connection_t *conn,
+ serf_request_setup_t setup,
+ void *setup_baton,
+ int priority,
+ int ssltunnel)
+{
+ serf_request_t *request;
+
+ request = serf_bucket_mem_alloc(conn->allocator, sizeof(*request));
+ request->conn = conn;
+ request->setup = setup;
+ request->setup_baton = setup_baton;
+ request->handler = NULL;
+ request->respool = NULL;
+ request->req_bkt = NULL;
+ request->resp_bkt = NULL;
+ request->priority = priority;
+ request->written = 0;
+ request->ssltunnel = ssltunnel;
+ request->next = NULL;
+
+ return request;
+}
+
+serf_request_t *serf_connection_request_create(
+ serf_connection_t *conn,
+ serf_request_setup_t setup,
+ void *setup_baton)
+{
+ serf_request_t *request;
+
+ request = create_request(conn, setup, setup_baton,
+ 0, /* priority */
+ 0 /* ssl tunnel */);
+
+ /* Link the request to the end of the request chain. */
+ link_requests(&conn->requests, &conn->requests_tail, request);
+
+ /* Ensure our pollset becomes writable in context run */
+ conn->ctx->dirty_pollset = 1;
+ conn->dirty_conn = 1;
+
+ return request;
+}
+
+static serf_request_t *
+priority_request_create(serf_connection_t *conn,
+ int ssltunnelreq,
+ serf_request_setup_t setup,
+ void *setup_baton)
+{
+ serf_request_t *request;
+ serf_request_t *iter, *prev;
+
+ request = create_request(conn, setup, setup_baton,
+ 1, /* priority */
+ ssltunnelreq);
+
+ /* Link the new request after the last written request. */
+ iter = conn->requests;
+ prev = NULL;
+
+ /* Find a request that has data which needs to be delivered. */
+ while (iter != NULL && iter->req_bkt == NULL && iter->written) {
+ prev = iter;
+ iter = iter->next;
+ }
+
+    /* A CONNECT request to set up an ssltunnel has absolute priority over all
+ other requests on the connection, so:
+ a. add it first to the queue
+ b. ensure that other priority requests are added after the CONNECT
+ request */
+ if (!request->ssltunnel) {
+ /* Advance to next non priority request */
+ while (iter != NULL && iter->priority) {
+ prev = iter;
+ iter = iter->next;
+ }
+ }
+
+ if (prev) {
+ request->next = iter;
+ prev->next = request;
+ } else {
+ request->next = iter;
+ conn->requests = request;
+ }
+
+ /* Ensure our pollset becomes writable in context run */
+ conn->ctx->dirty_pollset = 1;
+ conn->dirty_conn = 1;
+
+ return request;
+}
+
+serf_request_t *serf_connection_priority_request_create(
+ serf_connection_t *conn,
+ serf_request_setup_t setup,
+ void *setup_baton)
+{
+ return priority_request_create(conn,
+ 0, /* not a ssltunnel CONNECT request */
+ setup, setup_baton);
+}
+
+serf_request_t *serf__ssltunnel_request_create(serf_connection_t *conn,
+ serf_request_setup_t setup,
+ void *setup_baton)
+{
+ return priority_request_create(conn,
+ 1, /* This is a ssltunnel CONNECT request */
+ setup, setup_baton);
+}
+
+apr_status_t serf_request_cancel(serf_request_t *request)
+{
+ return cancel_request(request, &request->conn->requests, 0);
+}
+
+apr_status_t serf_request_is_written(serf_request_t *request)
+{
+ if (request->written && !request->req_bkt)
+ return APR_SUCCESS;
+
+ return APR_EBUSY;
+}
+
+apr_pool_t *serf_request_get_pool(const serf_request_t *request)
+{
+ return request->respool;
+}
+
+
+serf_bucket_alloc_t *serf_request_get_alloc(
+ const serf_request_t *request)
+{
+ return request->allocator;
+}
+
+
+serf_connection_t *serf_request_get_conn(
+ const serf_request_t *request)
+{
+ return request->conn;
+}
+
+
+void serf_request_set_handler(
+ serf_request_t *request,
+ const serf_response_handler_t handler,
+ const void **handler_baton)
+{
+ request->handler = handler;
+ request->handler_baton = handler_baton;
+}
+
+
+serf_bucket_t *serf_request_bucket_request_create(
+ serf_request_t *request,
+ const char *method,
+ const char *uri,
+ serf_bucket_t *body,
+ serf_bucket_alloc_t *allocator)
+{
+ serf_bucket_t *req_bkt, *hdrs_bkt;
+ serf_connection_t *conn = request->conn;
+ serf_context_t *ctx = conn->ctx;
+ int ssltunnel;
+
+ ssltunnel = ctx->proxy_address &&
+ (strcmp(conn->host_info.scheme, "https") == 0);
+
+ req_bkt = serf_bucket_request_create(method, uri, body, allocator);
+ hdrs_bkt = serf_bucket_request_get_headers(req_bkt);
+
+    /* Use absolute URIs in requests to a proxy. Use relative URIs in
+       requests sent directly to a server or through an SSL tunnel. */
+ if (ctx->proxy_address && conn->host_url &&
+ !(ssltunnel && !request->ssltunnel)) {
+
+ serf_bucket_request_set_root(req_bkt, conn->host_url);
+ }
+
+ if (conn->host_info.hostinfo)
+ serf_bucket_headers_setn(hdrs_bkt, "Host",
+ conn->host_info.hostinfo);
+
+ /* Setup server authorization headers, unless this is a CONNECT request. */
+ if (!request->ssltunnel) {
+ serf__authn_info_t *authn_info;
+ authn_info = serf__get_authn_info_for_server(conn);
+ if (authn_info->scheme)
+ authn_info->scheme->setup_request_func(HOST, 0, conn, request,
+ method, uri,
+ hdrs_bkt);
+ }
+
+    /* Set up proxy authorization headers.
+       Don't set these headers on the requests to the server if we're using
+       an SSL tunnel, only on the CONNECT request to set up the tunnel. */
+ if (ctx->proxy_authn_info.scheme) {
+ if (strcmp(conn->host_info.scheme, "https") == 0) {
+ if (request->ssltunnel)
+ ctx->proxy_authn_info.scheme->setup_request_func(PROXY, 0, conn,
+ request,
+ method, uri,
+ hdrs_bkt);
+ } else {
+ ctx->proxy_authn_info.scheme->setup_request_func(PROXY, 0, conn,
+ request,
+ method, uri,
+ hdrs_bkt);
+ }
+ }
+
+ return req_bkt;
+}
+
+apr_interval_time_t serf_connection_get_latency(serf_connection_t *conn)
+{
+ if (conn->ctx->proxy_address) {
+        /* Detecting network latency for a proxied connection is not implemented
+ yet. */
+ return -1;
+ }
+
+ return conn->latency;
+}
diff --git a/contrib/serf/serf.h b/contrib/serf/serf.h
new file mode 100644
index 0000000..b4fce07
--- /dev/null
+++ b/contrib/serf/serf.h
@@ -0,0 +1,1117 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SERF_H
+#define SERF_H
+
+/**
+ * @file serf.h
+ * @brief Main serf header file
+ */
+
+#include <apr.h>
+#include <apr_errno.h>
+#include <apr_allocator.h>
+#include <apr_pools.h>
+#include <apr_network_io.h>
+#include <apr_time.h>
+#include <apr_poll.h>
+#include <apr_uri.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Forward declare some structures */
+typedef struct serf_context_t serf_context_t;
+
+typedef struct serf_bucket_t serf_bucket_t;
+typedef struct serf_bucket_type_t serf_bucket_type_t;
+typedef struct serf_bucket_alloc_t serf_bucket_alloc_t;
+
+typedef struct serf_connection_t serf_connection_t;
+typedef struct serf_listener_t serf_listener_t;
+typedef struct serf_incoming_t serf_incoming_t;
+typedef struct serf_incoming_request_t serf_incoming_request_t;
+
+typedef struct serf_request_t serf_request_t;
+
+
+/**
+ * @defgroup serf high-level constructs
+ * @ingroup serf
+ * @{
+ */
+
+/**
+ * Serf-specific error codes
+ */
+#define SERF_ERROR_RANGE 100
+#define SERF_ERROR_START (APR_OS_START_USERERR + SERF_ERROR_RANGE)
+
+/* This code is for when this is the last response on this connection:
+ * i.e. do not send any more requests on this connection or expect
+ * any more responses.
+ */
+#define SERF_ERROR_CLOSING (SERF_ERROR_START + 1)
+/* This code is for when the connection terminated before the request
+ * could be processed on the other side.
+ */
+#define SERF_ERROR_REQUEST_LOST (SERF_ERROR_START + 2)
+/* This code is for when the connection is blocked - we cannot proceed
+ * until something happens - generally due to SSL negotiation-like behavior
+ * where a write() is blocked until a read() is processed.
+ */
+#define SERF_ERROR_WAIT_CONN (SERF_ERROR_START + 3)
+/* This code is for when something went wrong during deflating compressed
+ * data e.g. a CRC error. */
+#define SERF_ERROR_DECOMPRESSION_FAILED (SERF_ERROR_START + 4)
+/* This code is for when a response received from an HTTP server is not in
+ * HTTP-compliant syntax. */
+#define SERF_ERROR_BAD_HTTP_RESPONSE (SERF_ERROR_START + 5)
+/* The server sent less data than what was announced. */
+#define SERF_ERROR_TRUNCATED_HTTP_RESPONSE (SERF_ERROR_START + 6)
+/* The proxy server returned an error while setting up the SSL tunnel. */
+#define SERF_ERROR_SSLTUNNEL_SETUP_FAILED (SERF_ERROR_START + 7)
+/* The server unexpectedly closed the connection prematurely. */
+#define SERF_ERROR_ABORTED_CONNECTION (SERF_ERROR_START + 8)
+
+/* SSL certificates related errors */
+#define SERF_ERROR_SSL_CERT_FAILED (SERF_ERROR_START + 70)
+
+/* SSL communications related errors */
+#define SERF_ERROR_SSL_COMM_FAILED (SERF_ERROR_START + 71)
+
+/* General authentication related errors */
+#define SERF_ERROR_AUTHN_FAILED (SERF_ERROR_START + 90)
+
+/* None of the available authn mechanisms for the request are supported */
+#define SERF_ERROR_AUTHN_NOT_SUPPORTED (SERF_ERROR_START + 91)
+
+/* Authn was requested by the server but the header lacked some attribute */
+#define SERF_ERROR_AUTHN_MISSING_ATTRIBUTE (SERF_ERROR_START + 92)
+
+/* Authentication handler initialization related errors */
+#define SERF_ERROR_AUTHN_INITALIZATION_FAILED (SERF_ERROR_START + 93)
+
+/* Error code reserved for use in the test suite. */
+#define SERF_ERROR_ISSUE_IN_TESTSUITE (SERF_ERROR_START + 99)
+
+/* This macro groups errors potentially raised when reading an HTTP response. */
+#define SERF_BAD_RESPONSE_ERROR(status) ((status) \
+ && ((SERF_ERROR_DECOMPRESSION_FAILED == (status)) \
+ ||(SERF_ERROR_BAD_HTTP_RESPONSE == (status)) \
+ ||(SERF_ERROR_TRUNCATED_HTTP_RESPONSE == (status))))
+
+/**
+ * Return a string that describes the specified error code.
+ *
+ * If the error code is not one of the above Serf error codes, then
+ * NULL will be returned.
+ *
+ * Note regarding lifetime: the string is a statically-allocated constant
+ */
+const char *serf_error_string(apr_status_t errcode);
+
+
+/**
+ * Create a new context for serf operations.
+ *
+ * A serf context defines a control loop which processes multiple
+ * connections simultaneously.
+ *
+ * The context will be allocated within @a pool.
+ */
+serf_context_t *serf_context_create(
+ apr_pool_t *pool);
+
+/**
+ * Callback function. Add a socket to the externally managed poll set.
+ *
+ * Both @a pfd and @a serf_baton should be used when calling serf_event_trigger
+ * later.
+ */
+typedef apr_status_t (*serf_socket_add_t)(
+ void *user_baton,
+ apr_pollfd_t *pfd,
+ void *serf_baton);
+
+/**
+ * Callback function. Remove the socket, identified by both @a pfd and
+ * @a serf_baton from the externally managed poll set.
+ */
+typedef apr_status_t (*serf_socket_remove_t)(
+ void *user_baton,
+ apr_pollfd_t *pfd,
+ void *serf_baton);
+
+/* Create a new context for serf operations.
+ *
+ * Use this function to make serf not use its internal control loop, but
+ * instead rely on an external event loop. Serf will use the @a addf and @a rmf
+ * callbacks to notify of any event on a connection. The @a user_baton will be
+ * passed through the addf and rmf callbacks.
+ *
+ * The context will be allocated within @a pool.
+ */
+serf_context_t *serf_context_create_ex(
+ void *user_baton,
+ serf_socket_add_t addf,
+ serf_socket_remove_t rmf,
+ apr_pool_t *pool);
+
+/**
+ * Make serf process events on a connection, identified by both @a pfd and
+ * @a serf_baton.
+ *
+ * Any outbound data is delivered, and incoming data is made available to
+ * the associated response handlers and their buckets.
+ *
+ * If any data is processed (incoming or outgoing), then this function will
+ * return with APR_SUCCESS.
+ */
+apr_status_t serf_event_trigger(
+ serf_context_t *s,
+ void *serf_baton,
+ const apr_pollfd_t *pfd);
+
+/** @see serf_context_run should not block at all. */
+#define SERF_DURATION_NOBLOCK 0
+/** @see serf_context_run should run for (nearly) "forever". */
+#define SERF_DURATION_FOREVER 2000000000        /* approx 2^31 */
+
+/**
+ * Run the main networking control loop.
+ *
+ * The set of connections defined by the serf context @a ctx are processed.
+ * Any outbound data is delivered, and incoming data is made available to
+ * the associated response handlers and their buckets. This function will
+ * block on the network for no longer than @a duration microseconds.
+ *
+ * If any data is processed (incoming or outgoing), then this function will
+ * return with APR_SUCCESS. Typically, the caller will just want to call it
+ * again to continue processing data.
+ *
+ * If no activity occurs within the specified timeout duration, then
+ * APR_TIMEUP is returned.
+ *
+ * All temporary allocations will be made in @a pool.
+ */
+apr_status_t serf_context_run(
+ serf_context_t *ctx,
+ apr_short_interval_time_t duration,
+ apr_pool_t *pool);
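The usual pattern is to drive this function from a loop until the application decides its work is finished. A minimal sketch, assuming an application-side `done` flag (not part of serf):

    apr_status_t run_until_done(serf_context_t *ctx, int *done, apr_pool_t *pool)
    {
        while (!*done) {
            apr_status_t status = serf_context_run(ctx, SERF_DURATION_FOREVER,
                                                   pool);
            if (APR_STATUS_IS_TIMEUP(status))
                continue;       /* no activity within the timeout; keep waiting */
            if (status)
                return status;  /* a real error; stop the loop */
        }
        return APR_SUCCESS;
    }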
+
+
+apr_status_t serf_context_prerun(
+ serf_context_t *ctx);
+
+/**
+ * Callback function for progress information. @a read and @a write indicate the
+ * cumulative number of bytes read and written for the whole context.
+ */
+typedef void (*serf_progress_t)(
+ void *progress_baton,
+ apr_off_t read,
+ apr_off_t write);
+
+/**
+ * Sets the progress callback function. @a progress_func will be called every
+ * time bytes are read from or written to a socket.
+ */
+void serf_context_set_progress_cb(
+ serf_context_t *ctx,
+ const serf_progress_t progress_func,
+ void *progress_baton);
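A progress callback is a plain function matching serf_progress_t; the sketch below simply prints the cumulative totals (printf and <stdio.h> are assumptions of the example, not required by serf):

    static void report_progress(void *progress_baton,
                                apr_off_t bytes_read, apr_off_t bytes_written)
    {
        printf("read %" APR_OFF_T_FMT ", written %" APR_OFF_T_FMT " bytes\n",
               bytes_read, bytes_written);
    }

    /* ... */
    serf_context_set_progress_cb(ctx, report_progress, NULL);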
+
+/** @} */
+
+/**
+ * @defgroup serf connections and requests
+ * @ingroup serf
+ * @{
+ */
+
+/**
+ * When a connection is established, the application needs to wrap some
+ * buckets around @a skt to enable serf to process incoming responses. This
+ * is the control point for assembling connection-level processing logic
+ * around the given socket.
+ *
+ * The @a setup_baton is the baton established at connection creation time.
+ *
+ * This callback corresponds to reading from the server. Since this is an
+ * on-demand activity, we use a callback. The corresponding write operation
+ * is based on the @see serf_request_deliver function, where the application
+ * can assemble the appropriate bucket(s) before delivery.
+ *
+ * The returned bucket should live at least as long as the connection itself.
+ * It is assumed that an appropriate allocator is passed in @a setup_baton.
+ * ### we may want to create a connection-level allocator and pass that
+ * ### along. however, that allocator would *only* be used for this
+ * ### callback. it may be wasteful to create a per-conn allocator, so this
+ * ### baton-based, app-responsible form might be best.
+ *
+ * Responsibility for the buckets is passed to the serf library. They will be
+ * destroyed when the connection is closed.
+ *
+ * All temporary allocations should be made in @a pool.
+ */
+typedef apr_status_t (*serf_connection_setup_t)(
+ apr_socket_t *skt,
+ serf_bucket_t **read_bkt,
+ serf_bucket_t **write_bkt,
+ void *setup_baton,
+ apr_pool_t *pool);
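For a plain (non-SSL) connection, the callback typically just wraps the socket in a socket bucket and leaves the write bucket alone so serf keeps its default outgoing stream. A sketch, assuming the setup_baton carries the bucket allocator (an application convention) and using serf_bucket_socket_create from serf_bucket_types.h:

    static apr_status_t conn_setup(apr_socket_t *skt,
                                   serf_bucket_t **read_bkt,
                                   serf_bucket_t **write_bkt,
                                   void *setup_baton,
                                   apr_pool_t *pool)
    {
        serf_bucket_alloc_t *bkt_alloc = setup_baton;

        /* Leave *write_bkt untouched; serf then uses its default stream. */
        *read_bkt = serf_bucket_socket_create(skt, bkt_alloc);
        return APR_SUCCESS;
    }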
+
+/**
+ * ### need to update docco w.r.t socket. became "stream" recently.
+ * ### the stream does not have a barrier, this callback should generally
+ * ### add a barrier around the stream before incorporating it into a
+ * ### response bucket stack.
+ * ### should serf add the barrier automatically to protect its data
+ * ### structure? i.e. the passed bucket becomes owned rather than
+ * ### borrowed. that might suit overall semantics better.
+ * Accept an incoming response for @a request, and its @a socket. A bucket
+ * for the response should be constructed and returned. This is the control
+ * point for assembling the appropriate wrapper buckets around the socket to
+ * enable processing of the incoming response.
+ *
+ * The @a acceptor_baton is the baton provided when the specified request
+ * was created.
+ *
+ * The request's pool and bucket allocator should be used for any allocations
+ * that need to live for the duration of the response. Care should be taken
+ * to bound the amount of memory stored in this pool -- to ensure that
+ * allocations are not proportional to the amount of data in the response.
+ *
+ * Responsibility for the bucket is passed to the serf library. It will be
+ * destroyed when the response has been fully read (the bucket returns an
+ * APR_EOF status from its read functions).
+ *
+ * All temporary allocations should be made in @a pool.
+ */
+/* ### do we need to return an error? */
+typedef serf_bucket_t * (*serf_response_acceptor_t)(
+ serf_request_t *request,
+ serf_bucket_t *stream,
+ void *acceptor_baton,
+ apr_pool_t *pool);
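A common acceptor wraps the connection stream in a barrier bucket (so the response bucket cannot destroy the shared stream, as the notes above suggest) and builds a response bucket on top. A sketch using the barrier and response constructors from serf_bucket_types.h:

    static serf_bucket_t *accept_response(serf_request_t *request,
                                          serf_bucket_t *stream,
                                          void *acceptor_baton,
                                          apr_pool_t *pool)
    {
        serf_bucket_alloc_t *bkt_alloc = serf_request_get_alloc(request);
        serf_bucket_t *barrier = serf_bucket_barrier_create(stream, bkt_alloc);

        return serf_bucket_response_create(barrier, bkt_alloc);
    }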
+
+/**
+ * Notification callback for when a connection closes.
+ *
+ * This callback is used to inform an application that the @a conn
+ * connection has been (abnormally) closed. The @a closed_baton is the
+ * baton provided when the connection was first opened. The reason for
+ * closure is given in @a why, and will be APR_SUCCESS if the application
+ * requested closure (by clearing the pool used to allocate this
+ * connection or calling serf_connection_close).
+ *
+ * All temporary allocations should be made in @a pool.
+ */
+typedef void (*serf_connection_closed_t)(
+ serf_connection_t *conn,
+ void *closed_baton,
+ apr_status_t why,
+ apr_pool_t *pool);
+
+/**
+ * Response data has arrived and should be processed.
+ *
+ * Whenever response data for @a request arrives (initially, or continued data
+ * arrival), this handler is invoked. The response data is available in the
+ * @a response bucket. The @a handler_baton is passed along from the baton
+ * provided by the request setup callback (@see serf_request_setup_t).
+ *
+ * The handler MUST process data from the @a response bucket until the
+ * bucket's read function states it would block (see APR_STATUS_IS_EAGAIN).
+ * The handler is invoked only when new data arrives. If no further data
+ * arrives, and the handler does not process all available data, then the
+ * system can deadlock around the unprocessed, but already-read, data.
+ *
+ * The handler should return APR_EOF when the response has been fully read.
+ * If calling the handler again would block, APR_EAGAIN should be returned.
+ * If the handler should be invoked again, simply return APR_SUCCESS.
+ *
+ * Note: if the connection closed (at the request of the application, or
+ * because of an (abnormal) termination) while a request is being delivered,
+ * or before a response arrives, then @a response will be NULL. This is the
+ * signal that the request was not delivered properly, and no further
+ * response should be expected (this callback will not be invoked again).
+ * If a request is injected into the connection (during this callback's
+ * execution, or otherwise), then the connection will be reopened.
+ *
+ * All temporary allocations should be made in @a pool.
+ */
+typedef apr_status_t (*serf_response_handler_t)(
+ serf_request_t *request,
+ serf_bucket_t *response,
+ void *handler_baton,
+ apr_pool_t *pool);
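A handler that follows these rules drains whatever is available and maps the bucket status onto its own return value. A sketch, assuming the handler_baton points at an application-side `done` flag:

    static apr_status_t handle_response(serf_request_t *request,
                                        serf_bucket_t *response,
                                        void *handler_baton,
                                        apr_pool_t *pool)
    {
        int *done = handler_baton;

        if (!response)
            return APR_EGENERAL;        /* the request was never delivered */

        while (1) {
            const char *data;
            apr_size_t len;
            apr_status_t status = serf_bucket_read(response,
                                                   SERF_READ_ALL_AVAIL,
                                                   &data, &len);

            if (SERF_BUCKET_READ_ERROR(status))
                return status;

            /* ... consume `len` bytes at `data` ... */

            if (APR_STATUS_IS_EOF(status)) {
                *done = 1;
                return APR_EOF;         /* response fully read */
            }
            if (APR_STATUS_IS_EAGAIN(status))
                return status;          /* come back when more data arrives */
        }
    }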
+
+/**
+ * Callback function to be implemented by the application, so that serf
+ * can handle server and proxy authentication.
+ * code = 401 (server) or 407 (proxy).
+ * baton = the baton passed to serf_context_run.
+ * authn_type = one of "Basic", "Digest".
+ */
+typedef apr_status_t (*serf_credentials_callback_t)(
+ char **username,
+ char **password,
+ serf_request_t *request, void *baton,
+ int code, const char *authn_type,
+ const char *realm,
+ apr_pool_t *pool);
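A sketch of such a callback; the literal credentials are placeholders (a real application would prompt the user or consult a credential store), and apr_pstrdup() needs <apr_strings.h>:

    static apr_status_t provide_creds(char **username, char **password,
                                      serf_request_t *request, void *baton,
                                      int code, const char *authn_type,
                                      const char *realm, apr_pool_t *pool)
    {
        *username = apr_pstrdup(pool, "someuser");   /* placeholder */
        *password = apr_pstrdup(pool, "somepass");   /* placeholder */
        return APR_SUCCESS;
    }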
+
+/**
+ * Create a new connection associated with the @a ctx serf context.
+ *
+ * If no proxy server is configured, a connection will be created to
+ * (eventually) connect to the address specified by @a address. The address must
+ * live at least as long as @a pool (thus, as long as the connection object).
+ * If a proxy server is configured, @a address will be ignored.
+ *
+ * The connection object will be allocated within @a pool. Clearing or
+ * destroying this pool will close the connection, and terminate any
+ * outstanding requests or responses.
+ *
+ * When the connection is closed (upon request or because of an error),
+ * then the @a closed callback is invoked, and @a closed_baton is passed.
+ *
+ * ### doc on setup(_baton). tweak below comment re: acceptor.
+ * NULL may be passed for @a acceptor and @a closed; default implementations
+ * will be used.
+ *
+ * Note: the connection is not made immediately. It will be opened on
+ * the next call to @see serf_context_run.
+ */
+serf_connection_t *serf_connection_create(
+ serf_context_t *ctx,
+ apr_sockaddr_t *address,
+ serf_connection_setup_t setup,
+ void *setup_baton,
+ serf_connection_closed_t closed,
+ void *closed_baton,
+ apr_pool_t *pool);
+
+/**
+ * Create a new connection associated with the @a ctx serf context.
+ *
+ * A connection will be created to (eventually) connect to the host
+ * specified by @a host_info.
+ *
+ * The host address will be looked up based on the hostname in @a host_info.
+ *
+ * The connection object will be allocated within @a pool. Clearing or
+ * destroying this pool will close the connection, and terminate any
+ * outstanding requests or responses.
+ *
+ * When the connection is closed (upon request or because of an error),
+ * then the @a closed callback is invoked, and @a closed_baton is passed.
+ *
+ * ### doc on setup(_baton). tweak below comment re: acceptor.
+ * NULL may be passed for @a acceptor and @a closed; default implementations
+ * will be used.
+ *
+ * Note: the connection is not made immediately. It will be opened on
+ * the next call to @see serf_context_run.
+ */
+apr_status_t serf_connection_create2(
+ serf_connection_t **conn,
+ serf_context_t *ctx,
+ apr_uri_t host_info,
+ serf_connection_setup_t setup,
+ void *setup_baton,
+ serf_connection_closed_t closed,
+ void *closed_baton,
+ apr_pool_t *pool);
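A sketch of typical usage; `ctx`, the setup/closed callbacks and their batons are assumed to exist already (for instance the callbacks sketched earlier), and the URL is a placeholder:

    apr_uri_t url;
    serf_connection_t *conn;
    apr_status_t status;

    status = apr_uri_parse(pool, "http://example.com/", &url);
    if (!status)
        status = serf_connection_create2(&conn, ctx, url,
                                         conn_setup, conn_setup_baton,
                                         closed_cb, closed_baton,
                                         pool);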
+
+
+typedef apr_status_t (*serf_accept_client_t)(
+ serf_context_t *ctx,
+ serf_listener_t *l,
+ void *accept_baton,
+ apr_socket_t *insock,
+ apr_pool_t *pool);
+
+apr_status_t serf_listener_create(
+ serf_listener_t **listener,
+ serf_context_t *ctx,
+ const char *host,
+ apr_uint16_t port,
+ void *accept_baton,
+ serf_accept_client_t accept_func,
+ apr_pool_t *pool);
+
+typedef apr_status_t (*serf_incoming_request_cb_t)(
+ serf_context_t *ctx,
+ serf_incoming_request_t *req,
+ void *request_baton,
+ apr_pool_t *pool);
+
+apr_status_t serf_incoming_create(
+ serf_incoming_t **client,
+ serf_context_t *ctx,
+ apr_socket_t *insock,
+ void *request_baton,
+ serf_incoming_request_cb_t request,
+ apr_pool_t *pool);
+
+
+
+
+/**
+ * Reset the connection, but re-open the socket again.
+ */
+apr_status_t serf_connection_reset(
+ serf_connection_t *conn);
+
+/**
+ * Close the connection associated with @a conn and cancel all pending requests.
+ *
+ * The closed callback passed to serf_connection_create() will be invoked
+ * with APR_SUCCESS.
+ */
+apr_status_t serf_connection_close(
+ serf_connection_t *conn);
+
+/**
+ * Sets the maximum number of outstanding requests @a max_requests on the
+ * connection @a conn. Setting max_requests to 0 means unlimited (the default).
+ * Ex.: setting max_requests to 1 means the next request is sent only after the
+ * response to the previous request has been received and handled.
+ *
+ * In general, serf tends to take around 16KB per outstanding request.
+ */
+void serf_connection_set_max_outstanding_requests(
+ serf_connection_t *conn,
+ unsigned int max_requests);
+
+void serf_connection_set_async_responses(
+ serf_connection_t *conn,
+ serf_response_acceptor_t acceptor,
+ void *acceptor_baton,
+ serf_response_handler_t handler,
+ void *handler_baton);
+
+/**
+ * Set up the @a request for delivery on its connection.
+ *
+ * Right before this is invoked, @a pool will be built within the
+ * connection's pool for the request to use. The associated response will
+ * be allocated within that subpool. An associated bucket allocator will
+ * be built. These items may be fetched from the request object through
+ * @see serf_request_get_pool or @see serf_request_get_alloc.
+ *
+ * The content of the request is specified by the @a req_bkt bucket. When
+ * a response arrives, the @a acceptor callback will be invoked (along with
+ * the @a acceptor_baton) to produce a response bucket. That bucket will then
+ * be passed to @a handler, along with the @a handler_baton.
+ *
+ * The responsibility for the request bucket is passed to the request
+ * object. When the request is done with the bucket, it will be destroyed.
+ */
+typedef apr_status_t (*serf_request_setup_t)(
+ serf_request_t *request,
+ void *setup_baton,
+ serf_bucket_t **req_bkt,
+ serf_response_acceptor_t *acceptor,
+ void **acceptor_baton,
+ serf_response_handler_t *handler,
+ void **handler_baton,
+ apr_pool_t *pool);
+
+/**
+ * Construct a request object for the @a conn connection.
+ *
+ * When it is time to deliver the request, the @a setup callback will
+ * be invoked with the @a setup_baton passed into it to complete the
+ * construction of the request object.
+ *
+ * If the request has not (yet) been delivered, then it may be canceled
+ * with @see serf_request_cancel.
+ *
+ * Invoking any calls other than @see serf_request_cancel before the setup
+ * callback executes is not supported.
+ */
+serf_request_t *serf_connection_request_create(
+ serf_connection_t *conn,
+ serf_request_setup_t setup,
+ void *setup_baton);
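A sketch of a setup callback that issues a GET for "/" and wires in the acceptor and handler sketched earlier, followed by the call that schedules it (`conn` and `done` are application-side assumptions):

    static apr_status_t setup_get(serf_request_t *request,
                                  void *setup_baton,
                                  serf_bucket_t **req_bkt,
                                  serf_response_acceptor_t *acceptor,
                                  void **acceptor_baton,
                                  serf_response_handler_t *handler,
                                  void **handler_baton,
                                  apr_pool_t *pool)
    {
        *req_bkt = serf_request_bucket_request_create(
                       request, "GET", "/", NULL /* no body */,
                       serf_request_get_alloc(request));
        *acceptor = accept_response;
        *acceptor_baton = NULL;
        *handler = handle_response;
        *handler_baton = setup_baton;
        return APR_SUCCESS;
    }

    /* ... later: schedule the request on an open connection. */
    serf_connection_request_create(conn, setup_get, &done);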
+
+/**
+ * Construct a request object for the @a conn connection and add it to the
+ * list as the next request to be written, ahead of all unwritten requests.
+ *
+ * When it is time to deliver the request, the @a setup callback will
+ * be invoked with the @a setup_baton passed into it to complete the
+ * construction of the request object.
+ *
+ * If the request has not (yet) been delivered, then it may be canceled
+ * with @see serf_request_cancel.
+ *
+ * Invoking any calls other than @see serf_request_cancel before the setup
+ * callback executes is not supported.
+ */
+serf_request_t *serf_connection_priority_request_create(
+ serf_connection_t *conn,
+ serf_request_setup_t setup,
+ void *setup_baton);
+
+
+/** Returns the detected network latency for the @a conn connection. A negative
+ * value means that the latency is unknown.
+ */
+apr_interval_time_t serf_connection_get_latency(serf_connection_t *conn);
+
+/** Check if a @a request has been completely written.
+ *
+ * Returns APR_SUCCESS if the request was written completely on the connection.
+ * Returns APR_EBUSY if the request has not yet been written, or only partially.
+ */
+apr_status_t serf_request_is_written(
+ serf_request_t *request);
+
+/**
+ * Cancel the request specified by the @a request object.
+ *
+ * If the request has been scheduled for delivery, then its response
+ * handler will be run, passing NULL for the response bucket.
+ *
+ * If the request has already been (partially or fully) delivered, then
+ * APR_EBUSY is returned and the request is *NOT* canceled. To properly
+ * cancel the request, the connection must be closed (by clearing or
+ * destroying its associated pool).
+ */
+apr_status_t serf_request_cancel(
+ serf_request_t *request);
+
+/**
+ * Return the pool associated with @a request.
+ *
+ * WARNING: be very careful about the kinds of things placed into this
+ * pool. In particular, all allocation should be bounded in size, rather
+ * than proportional to any data stream.
+ */
+apr_pool_t *serf_request_get_pool(
+ const serf_request_t *request);
+
+/**
+ * Return the bucket allocator associated with @a request.
+ */
+serf_bucket_alloc_t *serf_request_get_alloc(
+ const serf_request_t *request);
+
+/**
+ * Return the connection associated with @a request.
+ */
+serf_connection_t *serf_request_get_conn(
+ const serf_request_t *request);
+
+/**
+ * Update the @a handler and @a handler_baton for this @a request.
+ *
+ * This can be called after the request has started processing -
+ * subsequent data will be delivered to this new handler.
+ */
+void serf_request_set_handler(
+ serf_request_t *request,
+ const serf_response_handler_t handler,
+ const void **handler_baton);
+
+/**
+ * Configure proxy server settings, to be used by all connections associated
+ * with the @a ctx serf context.
+ *
+ * The next connection will be created to connect to the proxy server
+ * specified by @a address. The address must live at least as long as the
+ * serf context.
+ */
+void serf_config_proxy(
+ serf_context_t *ctx,
+ apr_sockaddr_t *address);
+
+/* Supported authentication types. */
+#define SERF_AUTHN_NONE 0x00
+#define SERF_AUTHN_BASIC 0x01
+#define SERF_AUTHN_DIGEST 0x02
+#define SERF_AUTHN_NTLM 0x04
+#define SERF_AUTHN_NEGOTIATE 0x08
+#define SERF_AUTHN_ALL 0xFF
+
+/**
+ * Define the authentication handlers that serf will try on incoming requests.
+ */
+void serf_config_authn_types(
+ serf_context_t *ctx,
+ int authn_types);
+
+/**
+ * Set the credentials callback handler.
+ */
+void serf_config_credentials_callback(
+ serf_context_t *ctx,
+ serf_credentials_callback_t cred_cb);
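Putting the three configuration calls together; the proxy host and port are placeholders, and provide_creds is the callback sketched earlier:

    apr_sockaddr_t *proxy_addr;
    apr_status_t status;

    status = apr_sockaddr_info_get(&proxy_addr, "proxy.example.com",
                                   APR_UNSPEC, 8080, 0, pool);
    if (!status) {
        serf_config_proxy(ctx, proxy_addr);
        serf_config_authn_types(ctx, SERF_AUTHN_BASIC | SERF_AUTHN_DIGEST);
        serf_config_credentials_callback(ctx, provide_creds);
    }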
+
+/* ### maybe some connection control functions for flood? */
+
+/*** Special bucket creation functions ***/
+
+/**
+ * Create a bucket of type 'socket bucket'.
+ * This is basically a wrapper around @a serf_bucket_socket_create, which
+ * initializes the bucket using connection and/or context specific settings.
+ */
+serf_bucket_t *serf_context_bucket_socket_create(
+ serf_context_t *ctx,
+ apr_socket_t *skt,
+ serf_bucket_alloc_t *allocator);
+
+/**
+ * Create a bucket of type 'request bucket'.
+ * This is basically a wrapper around @a serf_bucket_request_create, which
+ * initializes the bucket using request, connection and/or context specific
+ * settings.
+ *
+ * This function will set the following header(s):
+ * - Host: if the connection was created with @a serf_connection_create2.
+ */
+serf_bucket_t *serf_request_bucket_request_create(
+ serf_request_t *request,
+ const char *method,
+ const char *uri,
+ serf_bucket_t *body,
+ serf_bucket_alloc_t *allocator);
+
+/** @} */
+
+
+/**
+ * @defgroup serf buckets
+ * @ingroup serf
+ * @{
+ */
+
+/** Pass as REQUESTED to the read function of a bucket to read, consume,
+ * and return all available data.
+ */
+#define SERF_READ_ALL_AVAIL ((apr_size_t)-1)
+
+/** Acceptable newline types for bucket->readline(). */
+#define SERF_NEWLINE_CR 0x0001
+#define SERF_NEWLINE_CRLF 0x0002
+#define SERF_NEWLINE_LF 0x0004
+#define SERF_NEWLINE_ANY 0x0007
+
+/** Used to indicate that a newline is not present in the data buffer. */
+/* ### should we make this zero? */
+#define SERF_NEWLINE_NONE 0x0008
+
+/** Used to indicate that a CR was found at the end of a buffer, and CRLF
+ * was acceptable. It may be that the LF is present, but it needs to be
+ * read first.
+ *
+ * Note: an alternative to using this symbol would be for callers to see
+ * the SERF_NEWLINE_CR return value, and know that some "end of buffer" was
+ * reached. While this works well for @see serf_util_readline, it does not
+ * necessarily work as well for buckets (there is no obvious "end of buffer",
+ * although there is an "end of bucket"). The other problem with that
+ * alternative is that developers might miss the condition. This symbol
+ * calls out the possibility and ensures that callers will watch for it.
+ */
+#define SERF_NEWLINE_CRLF_SPLIT 0x0010
+
+
+struct serf_bucket_type_t {
+
+ /** name of this bucket type */
+ const char *name;
+
+ /**
+ * Read (and consume) up to @a requested bytes from @a bucket.
+ *
+ * A pointer to the data will be returned in @a data, and its length
+ * is specified by @a len.
+ *
+ * The data will exist until one of two conditions occur:
+ *
+ * 1) this bucket is destroyed
+ * 2) another call to any read function or to peek()
+ *
+ * If an application needs the data to exist for a longer duration,
+ * then it must make a copy.
+ */
+ apr_status_t (*read)(serf_bucket_t *bucket, apr_size_t requested,
+ const char **data, apr_size_t *len);
+
+ /**
+ * Read (and consume) a line of data from @a bucket.
+ *
+ * The acceptable forms of a newline are given by @a acceptable, and
+ * the type found is returned in @a found. If a newline is not present
+ * in the returned data, then SERF_NEWLINE_NONE is stored into @a found.
+ *
+ * A pointer to the data is returned in @a data, and its length is
+ * specified by @a len. The data will include the newline, if present.
+ *
+ * Note that there is no way to limit the amount of data returned
+ * by this function.
+ *
+ * The lifetime of the data is the same as that of the @see read
+ * function above.
+ */
+ apr_status_t (*readline)(serf_bucket_t *bucket, int acceptable,
+ int *found,
+ const char **data, apr_size_t *len);
+
+ /**
+ * Read a set of pointer/length pairs from the bucket.
+ *
+ * The size of the @a vecs array is specified by @a vecs_size. The
+ * bucket should fill in elements of the array, and return the number
+ * used in @a vecs_used.
+ *
+ * Each element of @a vecs should specify a pointer to a block of
+ * data and a length of that data.
+ *
+ * The total length of all data elements should not exceed the
+ * amount specified in @a requested.
+ *
+ * The lifetime of the data is the same as that of the @see read
+ * function above.
+ */
+ apr_status_t (*read_iovec)(serf_bucket_t *bucket, apr_size_t requested,
+ int vecs_size, struct iovec *vecs,
+ int *vecs_used);
+
+ /**
+ * Read data from the bucket in a form suitable for apr_socket_sendfile()
+ *
+ * On input, hdtr->numheaders and hdtr->numtrailers specify the size
+ * of the hdtr->headers and hdtr->trailers arrays, respectively. The
+ * bucket should fill in the headers and trailers, up to the specified
+ * limits, and set numheaders and numtrailers to the number of iovecs
+ * filled in for each item.
+ *
+ * @a file should be filled in with a file that can be read. If a file
+ * is not available or appropriate, then NULL should be stored. The
+ * file offset for the data should be stored in @a offset, and the
+ * length of that data should be stored in @a len. If a file is not
+ * returned, then @a offset and @a len should be ignored.
+ *
+ * The file position is not required to correspond to @a offset, and
+ * the caller may manipulate it at will.
+ *
+ * The total length of all data elements, and the portion of the
+ * file should not exceed the amount specified in @a requested.
+ *
+ * The lifetime of the data is the same as that of the @see read
+ * function above.
+ */
+ apr_status_t (*read_for_sendfile)(serf_bucket_t *bucket,
+ apr_size_t requested, apr_hdtr_t *hdtr,
+ apr_file_t **file, apr_off_t *offset,
+ apr_size_t *len);
+
+ /**
+ * Look within @a bucket for a bucket of the given @a type. The bucket
+ * must be the "initial" data because it will be consumed by this
+ * function. If the given bucket type is available, then read and consume
+ * it, and return it to the caller.
+ *
+ * This function is usually used by readers that have custom handling
+ * for specific bucket types (e.g. looking for a file bucket to pass
+ * to apr_socket_sendfile).
+ *
+ * If a bucket of the given type is not found, then NULL is returned.
+ *
+ * The returned bucket becomes the responsibility of the caller. When
+ * the caller is done with the bucket, it should be destroyed.
+ */
+ serf_bucket_t * (*read_bucket)(serf_bucket_t *bucket,
+ const serf_bucket_type_t *type);
+
+ /**
+ * Peek, but don't consume, the data in @a bucket.
+ *
+ * Since this function is non-destructive, the implicit read size is
+ * SERF_READ_ALL_AVAIL. The caller can then use whatever amount is
+ * appropriate.
+ *
+ * The @a data parameter will point to the data, and @a len will
+ * specify how much data is available. The lifetime of the data follows
+ * the same rules as the @see read function above.
+ *
+ * Note: if the peek does not return enough data for your particular
+ * use, then you must read/consume some first, then peek again.
+ *
+ * If the returned data represents all available data, then APR_EOF
+ * will be returned. Since this function does not consume data, it
+ * can return the same data repeatedly rather than blocking; thus,
+ * APR_EAGAIN will never be returned.
+ */
+ apr_status_t (*peek)(serf_bucket_t *bucket,
+ const char **data, apr_size_t *len);
+
+ /**
+ * Destroy @a bucket, along with any associated resources.
+ */
+ void (*destroy)(serf_bucket_t *bucket);
+
+ /* ### apr buckets have 'copy', 'split', and 'setaside' functions.
+ ### not sure whether those will be needed in this bucket model.
+ */
+};
+
+/**
+ * Should the use and lifecycle of buckets be tracked?
+ *
+ * When tracking, the system will ensure several semantic requirements
+ * of bucket use:
+ *
+ * - if a bucket returns APR_EAGAIN, one of its read functions should
+ * not be called immediately. the context's run loop should be called.
+ * ### and for APR_EOF, too?
+ * - all buckets must be drained of input before returning to the
+ * context's run loop.
+ * - buckets should not be destroyed before they return APR_EOF unless
+ * the connection is closed for some reason.
+ *
+ * Undefine this symbol to avoid the tracking (and a performance gain).
+ *
+ * ### we may want to examine when/how we provide this. should it always
+ * ### be compiled in? and apps select it before including this header?
+ */
+/* #define SERF_DEBUG_BUCKET_USE */
+
+
+/* Internal macros for tracking bucket use. */
+#ifdef SERF_DEBUG_BUCKET_USE
+#define SERF__RECREAD(b,s) serf_debug__record_read(b,s)
+#else
+#define SERF__RECREAD(b,s) (s)
+#endif
+
+#define serf_bucket_read(b,r,d,l) SERF__RECREAD(b, (b)->type->read(b,r,d,l))
+#define serf_bucket_readline(b,a,f,d,l) \
+ SERF__RECREAD(b, (b)->type->readline(b,a,f,d,l))
+#define serf_bucket_read_iovec(b,r,s,v,u) \
+ SERF__RECREAD(b, (b)->type->read_iovec(b,r,s,v,u))
+#define serf_bucket_read_for_sendfile(b,r,h,f,o,l) \
+ SERF__RECREAD(b, (b)->type->read_for_sendfile(b,r,h,f,o,l))
+#define serf_bucket_read_bucket(b,t) ((b)->type->read_bucket(b,t))
+#define serf_bucket_peek(b,d,l) ((b)->type->peek(b,d,l))
+#define serf_bucket_destroy(b) ((b)->type->destroy(b))
+
+/**
+ * Check whether a real error occurred. Note that bucket read functions
+ * can return EOF and EAGAIN as part of their "normal" operation, so they
+ * should not be considered an error.
+ */
+#define SERF_BUCKET_READ_ERROR(status) ((status) \
+ && !APR_STATUS_IS_EOF(status) \
+ && !APR_STATUS_IS_EAGAIN(status) \
+ && (SERF_ERROR_WAIT_CONN != status))
+
+
+struct serf_bucket_t {
+
+ /** the type of this bucket */
+ const serf_bucket_type_t *type;
+
+ /** bucket-private data */
+ void *data;
+
+ /** the allocator used for this bucket (needed at destroy time) */
+ serf_bucket_alloc_t *allocator;
+};
+
+
+/**
+ * Generic macro to construct "is TYPE" macros.
+ */
+#define SERF_BUCKET_CHECK(b, btype) ((b)->type == &serf_bucket_type_ ## btype)
+
+
+/**
+ * Notification callback for a block that was not returned to the bucket
+ * allocator when its pool was destroyed.
+ *
+ * The block of memory is given by @a block. The baton provided when the
+ * allocator was constructed is passed as @a unfreed_baton.
+ */
+typedef void (*serf_unfreed_func_t)(
+ void *unfreed_baton,
+ void *block);
+
+/**
+ * Create a new allocator for buckets.
+ *
+ * All buckets are associated with a serf bucket allocator. This allocator
+ * will be created within @a pool and will be destroyed when that pool is
+ * cleared or destroyed.
+ *
+ * When the allocator is destroyed, if any allocations were not explicitly
+ * returned (by calling serf_bucket_mem_free), then the @a unfreed callback
+ * will be invoked for each block. @a unfreed_baton will be passed to the
+ * callback.
+ *
+ * If @a unfreed is NULL, then the library will invoke the abort() stdlib
+ * call. Any failure to return memory is a bug in the application, and an
+ * abort can assist with determining what kinds of memory were not freed.
+ */
+serf_bucket_alloc_t *serf_bucket_allocator_create(
+ apr_pool_t *pool,
+ serf_unfreed_func_t unfreed,
+ void *unfreed_baton);
+
+/**
+ * Return the pool that was used for this @a allocator.
+ *
+ * WARNING: the use of this pool for allocations requires a very
+ * detailed understanding of pool behaviors, the bucket system,
+ * and knowledge of the bucket's use within the overall pattern
+ * of request/response behavior.
+ *
+ * See design-guide.txt for more information about pool usage.
+ */
+apr_pool_t *serf_bucket_allocator_get_pool(
+ const serf_bucket_alloc_t *allocator);
+
+
+/**
+ * Utility structure for reading a complete line of input from a bucket.
+ *
+ * Since it is entirely possible for a line to be broken by APR_EAGAIN,
+ * this structure can be used to accumulate the data until a complete line
+ * has been read from a bucket.
+ */
+
+/* This limit applies to the line buffer functions. If an application needs
+ * longer lines, then they will need to manually handle line buffering.
+ */
+#define SERF_LINEBUF_LIMIT 8000
+
+typedef struct {
+
+ /* Current state of the buffer. */
+ enum {
+ SERF_LINEBUF_EMPTY,
+ SERF_LINEBUF_READY,
+ SERF_LINEBUF_PARTIAL,
+ SERF_LINEBUF_CRLF_SPLIT
+ } state;
+
+ /* How much of the buffer have we used? */
+ apr_size_t used;
+
+ /* The line is read into this buffer, minus CR/LF */
+ char line[SERF_LINEBUF_LIMIT];
+
+} serf_linebuf_t;
+
+/**
+ * Initialize the @a linebuf structure.
+ */
+void serf_linebuf_init(serf_linebuf_t *linebuf);
+
+/**
+ * Fetch a line of text from @a bucket, accumulating the line into
+ * @a linebuf. @a acceptable specifies the types of newlines which are
+ * acceptable for this fetch.
+ *
+ * ### we should return a data/len pair so that we can avoid a copy,
+ * ### rather than having callers look into our state and line buffer.
+ */
+apr_status_t serf_linebuf_fetch(
+ serf_linebuf_t *linebuf,
+ serf_bucket_t *bucket,
+ int acceptable);
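A sketch of accumulating one line from a bucket named `bucket` (an assumption); on APR_EAGAIN the caller would return to the event loop and retry later with the same linebuf:

    serf_linebuf_t lb;
    apr_status_t status;

    serf_linebuf_init(&lb);
    do {
        status = serf_linebuf_fetch(&lb, bucket, SERF_NEWLINE_ANY);
    } while (status == APR_SUCCESS && lb.state != SERF_LINEBUF_READY);

    if (lb.state == SERF_LINEBUF_READY) {
        /* lb.line now holds lb.used bytes, with the CR/LF stripped. */
    }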
+
+/** @} */
+
+
+/* Internal functions for bucket use and lifecycle tracking */
+apr_status_t serf_debug__record_read(
+ const serf_bucket_t *bucket,
+ apr_status_t status);
+void serf_debug__entered_loop(
+ serf_bucket_alloc_t *allocator);
+void serf_debug__closed_conn(
+ serf_bucket_alloc_t *allocator);
+void serf_debug__bucket_destroy(
+ const serf_bucket_t *bucket);
+void serf_debug__bucket_alloc_check(
+ serf_bucket_alloc_t *allocator);
+
+/* Version info */
+#define SERF_MAJOR_VERSION 1
+#define SERF_MINOR_VERSION 3
+#define SERF_PATCH_VERSION 0
+
+/* Version number string */
+#define SERF_VERSION_STRING APR_STRINGIFY(SERF_MAJOR_VERSION) "." \
+ APR_STRINGIFY(SERF_MINOR_VERSION) "." \
+ APR_STRINGIFY(SERF_PATCH_VERSION)
+
+/**
+ * Check at compile time if the Serf version is at least a certain
+ * level.
+ * @param major The major version component of the version checked
+ * for (e.g., the "1" of "1.3.0").
+ * @param minor The minor version component of the version checked
+ * for (e.g., the "3" of "1.3.0").
+ * @param patch The patch level component of the version checked
+ * for (e.g., the "0" of "1.3.0").
+ */
+#define SERF_VERSION_AT_LEAST(major,minor,patch) \
+(((major) < SERF_MAJOR_VERSION) \
+ || ((major) == SERF_MAJOR_VERSION && (minor) < SERF_MINOR_VERSION) \
+ || ((major) == SERF_MAJOR_VERSION && (minor) == SERF_MINOR_VERSION && \
+ (patch) <= SERF_PATCH_VERSION))
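For example, an application that relies on 1.3.x features might guard its build like this:

    #if ! SERF_VERSION_AT_LEAST(1, 3, 0)
    #error "this application requires serf 1.3.0 or newer"
    #endif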
+
+
+/**
+ * Returns the version of the library the application has linked/loaded.
+ * Values are returned in @a major, @a minor, and @a patch.
+ *
+ * Applications will want to use this function to verify compatibility,
+ * especially while serf has not reached a 1.0 milestone. APIs and
+ * semantics may change drastically until the library hits 1.0.
+ */
+void serf_lib_version(
+ int *major,
+ int *minor,
+ int *patch);
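A minimal runtime check, comparing the loaded library against the headers used at compile time (what to do on a mismatch is up to the application):

    int major, minor, patch;

    serf_lib_version(&major, &minor, &patch);
    if (major != SERF_MAJOR_VERSION) {
        /* The loaded library does not match the headers we built against. */
    }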
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+/*
+ * Every user of serf will want to deal with our various bucket types.
+ * Go ahead and include that header right now.
+ *
+ * Note: make sure this occurs outside of the C++ namespace block
+ */
+#include "serf_bucket_types.h"
+
+
+#endif /* !SERF_H */
diff --git a/contrib/serf/serf_bucket_types.h b/contrib/serf/serf_bucket_types.h
new file mode 100644
index 0000000..900f67d
--- /dev/null
+++ b/contrib/serf/serf_bucket_types.h
@@ -0,0 +1,688 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SERF_BUCKET_TYPES_H
+#define SERF_BUCKET_TYPES_H
+
+#include <apr_mmap.h>
+#include <apr_hash.h>
+
+/* this header and serf.h refer to each other, so take a little extra care */
+#ifndef SERF_H
+#include "serf.h"
+#endif
+
+
+/**
+ * @file serf_bucket_types.h
+ * @brief serf-supported bucket types
+ */
+/* ### this whole file needs docco ... */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_request;
+#define SERF_BUCKET_IS_REQUEST(b) SERF_BUCKET_CHECK((b), request)
+
+serf_bucket_t *serf_bucket_request_create(
+ const char *method,
+ const char *URI,
+ serf_bucket_t *body,
+ serf_bucket_alloc_t *allocator);
+
+/* Send a Content-Length header with @a len. The @a body bucket should
+ contain precisely that much data. */
+void serf_bucket_request_set_CL(
+ serf_bucket_t *bucket,
+ apr_int64_t len);
+
+serf_bucket_t *serf_bucket_request_get_headers(
+ serf_bucket_t *request);
+
+void serf_bucket_request_become(
+ serf_bucket_t *bucket,
+ const char *method,
+ const char *uri,
+ serf_bucket_t *body);
+
+/**
+ * Sets the root url of the remote host. If this request contains a relative
+ * url, it will be prefixed with the root url to form an absolute url.
+ * @a bucket is the request bucket. @a root_url is the absolute url of the
+ * root of the remote host, without the closing '/'.
+ */
+void serf_bucket_request_set_root(
+ serf_bucket_t *bucket,
+ const char *root_url);
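+
+/* Example (editorial sketch, not part of the original header): building a
+ * GET request bucket and adding a header. The name "allocator" is assumed to
+ * exist in the caller; in a request setup callback it would typically come
+ * from serf_request_get_alloc(). Error handling is omitted.
+ *
+ *   serf_bucket_t *req, *hdrs;
+ *
+ *   req = serf_bucket_request_create("GET", "/index.html", NULL, allocator);
+ *   hdrs = serf_bucket_request_get_headers(req);
+ *   serf_bucket_headers_setn(hdrs, "Accept-Encoding", "gzip");
+ */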
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_response;
+#define SERF_BUCKET_IS_RESPONSE(b) SERF_BUCKET_CHECK((b), response)
+
+serf_bucket_t *serf_bucket_response_create(
+ serf_bucket_t *stream,
+ serf_bucket_alloc_t *allocator);
+
+#define SERF_HTTP_VERSION(major, minor) ((major) * 1000 + (minor))
+#define SERF_HTTP_11 SERF_HTTP_VERSION(1, 1)
+#define SERF_HTTP_10 SERF_HTTP_VERSION(1, 0)
+#define SERF_HTTP_VERSION_MAJOR(shv) ((int)(shv) / 1000)
+#define SERF_HTTP_VERSION_MINOR(shv) ((int)(shv) % 1000)
+
+typedef struct {
+ int version;
+ int code;
+ const char *reason;
+} serf_status_line;
+
+/**
+ * Return the Status-Line information, if available. This function
+ * works like other bucket read functions: it may return APR_EAGAIN or
+ * APR_EOF to signal the state of the bucket for reading. A return
+ * value of APR_SUCCESS will always indicate that status line
+ * information was returned; for other return values the caller must
+ * check the version field in @a sline. A value of 0 means that the
+ * data is not (yet) present.
+ */
+apr_status_t serf_bucket_response_status(
+ serf_bucket_t *bkt,
+ serf_status_line *sline);
+
+/**
+ * Wait for the HTTP headers to be processed for a @a response.
+ *
+ * If the headers are available, APR_SUCCESS is returned.
+ * If the headers aren't available, APR_EAGAIN is returned.
+ */
+apr_status_t serf_bucket_response_wait_for_headers(
+ serf_bucket_t *response);
+
+/**
+ * Get the headers bucket for @a response.
+ */
+serf_bucket_t *serf_bucket_response_get_headers(
+ serf_bucket_t *response);
+
+/**
+ * Advise the response @a bucket that this was from a HEAD request and
+ * that it should not expect to see a response body.
+ */
+void serf_bucket_response_set_head(
+ serf_bucket_t *bucket);
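+
+/* Example (editorial sketch, not part of the original header): typical use
+ * of a response bucket inside a response handler. "response" is the bucket
+ * passed to the handler; non-success statuses (e.g. APR_EAGAIN) are simply
+ * propagated so the handler is invoked again later.
+ *
+ *   serf_status_line sl;
+ *   apr_status_t status;
+ *   serf_bucket_t *hdrs;
+ *
+ *   status = serf_bucket_response_status(response, &sl);
+ *   if (status || !sl.version)
+ *       return status;
+ *
+ *   status = serf_bucket_response_wait_for_headers(response);
+ *   if (status)
+ *       return status;
+ *
+ *   hdrs = serf_bucket_response_get_headers(response);
+ *   ... inspect the headers, then read the body from the response bucket ...
+ */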
+
+/* ==================================================================== */
+
+extern const serf_bucket_type_t serf_bucket_type_response_body;
+#define SERF_BUCKET_IS_RESPONSE_BODY(b) SERF_BUCKET_CHECK((b), response_body)
+
+serf_bucket_t *serf_bucket_response_body_create(
+ serf_bucket_t *stream,
+ apr_uint64_t limit,
+ serf_bucket_alloc_t *allocator);
+
+/* ==================================================================== */
+
+extern const serf_bucket_type_t serf_bucket_type_bwtp_frame;
+#define SERF_BUCKET_IS_BWTP_FRAME(b) SERF_BUCKET_CHECK((b), bwtp_frame)
+
+extern const serf_bucket_type_t serf_bucket_type_bwtp_incoming_frame;
+#define SERF_BUCKET_IS_BWTP_INCOMING_FRAME(b) SERF_BUCKET_CHECK((b), bwtp_incoming_frame)
+
+int serf_bucket_bwtp_frame_get_channel(
+ serf_bucket_t *hdr);
+
+int serf_bucket_bwtp_frame_get_type(
+ serf_bucket_t *hdr);
+
+const char *serf_bucket_bwtp_frame_get_phrase(
+ serf_bucket_t *hdr);
+
+serf_bucket_t *serf_bucket_bwtp_frame_get_headers(
+ serf_bucket_t *hdr);
+
+serf_bucket_t *serf_bucket_bwtp_channel_open(
+ int channel,
+ const char *URI,
+ serf_bucket_alloc_t *allocator);
+
+serf_bucket_t *serf_bucket_bwtp_channel_close(
+ int channel,
+ serf_bucket_alloc_t *allocator);
+
+serf_bucket_t *serf_bucket_bwtp_header_create(
+ int channel,
+ const char *phrase,
+ serf_bucket_alloc_t *allocator);
+
+serf_bucket_t *serf_bucket_bwtp_message_create(
+ int channel,
+ serf_bucket_t *body,
+ serf_bucket_alloc_t *allocator);
+
+serf_bucket_t *serf_bucket_bwtp_incoming_frame_create(
+ serf_bucket_t *bkt,
+ serf_bucket_alloc_t *allocator);
+
+apr_status_t serf_bucket_bwtp_incoming_frame_wait_for_headers(
+ serf_bucket_t *bkt);
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_aggregate;
+#define SERF_BUCKET_IS_AGGREGATE(b) SERF_BUCKET_CHECK((b), aggregate)
+
+typedef apr_status_t (*serf_bucket_aggregate_eof_t)(
+ void *baton,
+ serf_bucket_t *aggregate_bucket);
+
+/** serf_bucket_aggregate_cleanup will instantly destroy all buckets in
+    the aggregate bucket that have already been read completely. Normally,
+    such buckets are only destroyed during a later read operation. */
+void serf_bucket_aggregate_cleanup(
+ serf_bucket_t *bucket,
+ serf_bucket_alloc_t *allocator);
+
+serf_bucket_t *serf_bucket_aggregate_create(
+ serf_bucket_alloc_t *allocator);
+
+/* Creates a stream bucket.
+ A stream bucket is like an aggregate bucket, but:
+ - it doesn't destroy its child buckets on cleanup
+   - one can always keep adding child buckets; the handler @a fn should
+     return APR_EOF when no more buckets will be added.
+
+ Note: keep this factory function internal for now. If it turns out this
+ bucket type is useful outside serf, we should make it an actual separate
+ type.
+ */
+serf_bucket_t *serf__bucket_stream_create(
+ serf_bucket_alloc_t *allocator,
+ serf_bucket_aggregate_eof_t fn,
+ void *baton);
+
+/** Transform @a bucket in-place into an aggregate bucket. */
+void serf_bucket_aggregate_become(
+ serf_bucket_t *bucket);
+
+void serf_bucket_aggregate_prepend(
+ serf_bucket_t *aggregate_bucket,
+ serf_bucket_t *prepend_bucket);
+
+void serf_bucket_aggregate_append(
+ serf_bucket_t *aggregate_bucket,
+ serf_bucket_t *append_bucket);
+
+void serf_bucket_aggregate_hold_open(
+ serf_bucket_t *aggregate_bucket,
+ serf_bucket_aggregate_eof_t fn,
+ void *baton);
+
+void serf_bucket_aggregate_prepend_iovec(
+ serf_bucket_t *aggregate_bucket,
+ struct iovec *vecs,
+ int vecs_count);
+
+void serf_bucket_aggregate_append_iovec(
+ serf_bucket_t *aggregate_bucket,
+ struct iovec *vecs,
+ int vecs_count);
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_file;
+#define SERF_BUCKET_IS_FILE(b) SERF_BUCKET_CHECK((b), file)
+
+serf_bucket_t *serf_bucket_file_create(
+ apr_file_t *file,
+ serf_bucket_alloc_t *allocator);
+
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_socket;
+#define SERF_BUCKET_IS_SOCKET(b) SERF_BUCKET_CHECK((b), socket)
+
+serf_bucket_t *serf_bucket_socket_create(
+ apr_socket_t *skt,
+ serf_bucket_alloc_t *allocator);
+
+/**
+ * Call @a progress_func every time bytes are read from the socket, passing
+ * the number of bytes read.
+ *
+ * When using serf's bytes read & written progress indicator, pass
+ * @a serf_context_progress_delta for progress_func and the serf_context for
+ * progress_baton.
+ */
+void serf_bucket_socket_set_read_progress_cb(
+ serf_bucket_t *bucket,
+ const serf_progress_t progress_func,
+ void *progress_baton);
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_simple;
+#define SERF_BUCKET_IS_SIMPLE(b) SERF_BUCKET_CHECK((b), simple)
+
+typedef void (*serf_simple_freefunc_t)(
+ void *baton,
+ const char *data);
+
+serf_bucket_t *serf_bucket_simple_create(
+ const char *data,
+ apr_size_t len,
+ serf_simple_freefunc_t freefunc,
+ void *freefunc_baton,
+ serf_bucket_alloc_t *allocator);
+
+/**
+ * Equivalent to serf_bucket_simple_create, except that the bucket takes
+ * ownership of a private copy of the data.
+ */
+serf_bucket_t *serf_bucket_simple_copy_create(
+ const char *data,
+ apr_size_t len,
+ serf_bucket_alloc_t *allocator);
+
+/**
+ * Equivalent to serf_bucket_simple_create, except that the bucket assumes
+ * responsibility for freeing the data on this allocator without making
+ * a copy. It is assumed that @a data was allocated from @a allocator.
+ */
+serf_bucket_t *serf_bucket_simple_own_create(
+ const char *data,
+ apr_size_t len,
+ serf_bucket_alloc_t *allocator);
+
+#define SERF_BUCKET_SIMPLE_STRING(s,a) \
+ serf_bucket_simple_create(s, strlen(s), NULL, NULL, a);
+
+#define SERF_BUCKET_SIMPLE_STRING_LEN(s,l,a) \
+ serf_bucket_simple_create(s, l, NULL, NULL, a);
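+
+/* Example (editorial sketch, not part of the original header): wrapping a
+ * string literal in a simple bucket. The data is not copied, so it must
+ * outlive the bucket; use serf_bucket_simple_copy_create() for transient
+ * data. "allocator" is assumed to be an existing serf_bucket_alloc_t.
+ *
+ *   serf_bucket_t *body;
+ *
+ *   body = SERF_BUCKET_SIMPLE_STRING("hello, world\n", allocator);
+ */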
+
+/* ==================================================================== */
+
+
+/* Note: apr_mmap_t is always defined, but if APR doesn't have mmaps, then
+ the caller can never create an apr_mmap_t to pass to this function. */
+
+extern const serf_bucket_type_t serf_bucket_type_mmap;
+#define SERF_BUCKET_IS_MMAP(b) SERF_BUCKET_CHECK((b), mmap)
+
+serf_bucket_t *serf_bucket_mmap_create(
+ apr_mmap_t *mmap,
+ serf_bucket_alloc_t *allocator);
+
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_headers;
+#define SERF_BUCKET_IS_HEADERS(b) SERF_BUCKET_CHECK((b), headers)
+
+serf_bucket_t *serf_bucket_headers_create(
+ serf_bucket_alloc_t *allocator);
+
+/**
+ * Set, default: value copied.
+ *
+ * Set the specified @a header within the bucket, copying the @a value
+ * into space from this bucket's allocator. The header is NOT copied,
+ * so it should remain in scope at least as long as the bucket.
+ */
+void serf_bucket_headers_set(
+ serf_bucket_t *headers_bucket,
+ const char *header,
+ const char *value);
+
+/**
+ * Set, copies: header and value copied.
+ *
+ * Copy the specified @a header and @a value into the bucket, using space
+ * from this bucket's allocator.
+ */
+void serf_bucket_headers_setc(
+ serf_bucket_t *headers_bucket,
+ const char *header,
+ const char *value);
+
+/**
+ * Set, no copies.
+ *
+ * Set the specified @a header and @a value into the bucket, without
+ * copying either attribute. Both attributes should remain in scope at
+ * least as long as the bucket.
+ *
+ * @note In the case where a header already exists this will result
+ * in a reallocation and copy, @see serf_bucket_headers_setn.
+ */
+void serf_bucket_headers_setn(
+ serf_bucket_t *headers_bucket,
+ const char *header,
+ const char *value);
+
+/**
+ * Set, extended: fine grained copy control of header and value.
+ *
+ * Set the specified @a header, with length @a header_size with the
+ * @a value, and length @a value_size, into the bucket. The header will
+ * be copied if @a header_copy is set, and the value is copied if
+ * @a value_copy is set. If the values are not copied, then they should
+ * remain in scope at least as long as the bucket.
+ *
+ * If @a headers_bucket already contains a header with the same name
+ * as @a header, then append @a value to the existing value,
+ * separating with a comma (as per RFC 2616, section 4.2). In this
+ * case, the new value must be allocated and the header re-used, so
+ * behave as if @a value_copy were true and @a header_copy false.
+ */
+void serf_bucket_headers_setx(
+ serf_bucket_t *headers_bucket,
+ const char *header,
+ apr_size_t header_size,
+ int header_copy,
+ const char *value,
+ apr_size_t value_size,
+ int value_copy);
+
+const char *serf_bucket_headers_get(
+ serf_bucket_t *headers_bucket,
+ const char *header);
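+
+/* Example (editorial sketch, not part of the original header): the set
+ * variants above differ only in what is copied into the bucket's allocator.
+ * "hdrs" and "generated_id" are assumed names.
+ *
+ *   serf_bucket_headers_setn(hdrs, "Host", "example.com");
+ *       neither string is copied: both are literals with static lifetime
+ *
+ *   serf_bucket_headers_setc(hdrs, "X-Request-Id", generated_id);
+ *       both are copied: generated_id may be freed right after the call
+ *
+ *   const char *value = serf_bucket_headers_get(hdrs, "Host");
+ */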
+
+/**
+ * @param baton opaque baton as passed to @see serf_bucket_headers_do
+ * @param key The header key from this iteration through the table
+ * @param value The header value from this iteration through the table
+ */
+typedef int (serf_bucket_headers_do_callback_fn_t)(
+ void *baton,
+ const char *key,
+ const char *value);
+
+/**
+ * Iterates over all headers of the message and invokes the callback
+ * function with the header key and value. Iteration stops when no more
+ * headers are available or when the callback function returns a
+ * non-0 value.
+ *
+ * @param headers_bucket headers to iterate over
+ * @param func callback routine to invoke for every header in the bucket
+ * @param baton baton to pass on each invocation to func
+ */
+void serf_bucket_headers_do(
+ serf_bucket_t *headers_bucket,
+ serf_bucket_headers_do_callback_fn_t func,
+ void *baton);
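+
+/* Example (editorial sketch, not part of the original header): dumping all
+ * headers with serf_bucket_headers_do(). Returning non-zero from the
+ * callback stops the iteration early. Assumes <stdio.h>; "hdrs" is an
+ * existing headers bucket.
+ *
+ *   static int print_header(void *baton, const char *key, const char *value)
+ *   {
+ *       fprintf(baton, "%s: %s\n", key, value);
+ *       return 0;
+ *   }
+ *
+ *   serf_bucket_headers_do(hdrs, print_header, stderr);
+ */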
+
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_chunk;
+#define SERF_BUCKET_IS_CHUNK(b) SERF_BUCKET_CHECK((b), chunk)
+
+serf_bucket_t *serf_bucket_chunk_create(
+ serf_bucket_t *stream,
+ serf_bucket_alloc_t *allocator);
+
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_dechunk;
+#define SERF_BUCKET_IS_DECHUNK(b) SERF_BUCKET_CHECK((b), dechunk)
+
+serf_bucket_t *serf_bucket_dechunk_create(
+ serf_bucket_t *stream,
+ serf_bucket_alloc_t *allocator);
+
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_deflate;
+#define SERF_BUCKET_IS_DEFLATE(b) SERF_BUCKET_CHECK((b), deflate)
+
+#define SERF_DEFLATE_GZIP 0
+#define SERF_DEFLATE_DEFLATE 1
+
+serf_bucket_t *serf_bucket_deflate_create(
+ serf_bucket_t *stream,
+ serf_bucket_alloc_t *allocator,
+ int format);
+
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_limit;
+#define SERF_BUCKET_IS_LIMIT(b) SERF_BUCKET_CHECK((b), limit)
+
+serf_bucket_t *serf_bucket_limit_create(
+ serf_bucket_t *stream,
+ apr_uint64_t limit,
+ serf_bucket_alloc_t *allocator);
+
+
+/* ==================================================================== */
+#define SERF_SSL_CERT_NOTYETVALID 1
+#define SERF_SSL_CERT_EXPIRED 2
+#define SERF_SSL_CERT_UNKNOWNCA 4
+#define SERF_SSL_CERT_SELF_SIGNED 8
+#define SERF_SSL_CERT_UNKNOWN_FAILURE 16
+#define SERF_SSL_CERT_REVOKED 32
+
+extern const serf_bucket_type_t serf_bucket_type_ssl_encrypt;
+#define SERF_BUCKET_IS_SSL_ENCRYPT(b) SERF_BUCKET_CHECK((b), ssl_encrypt)
+
+typedef struct serf_ssl_context_t serf_ssl_context_t;
+typedef struct serf_ssl_certificate_t serf_ssl_certificate_t;
+
+typedef apr_status_t (*serf_ssl_need_client_cert_t)(
+ void *data,
+ const char **cert_path);
+
+typedef apr_status_t (*serf_ssl_need_cert_password_t)(
+ void *data,
+ const char *cert_path,
+ const char **password);
+
+typedef apr_status_t (*serf_ssl_need_server_cert_t)(
+ void *data,
+ int failures,
+ const serf_ssl_certificate_t *cert);
+
+typedef apr_status_t (*serf_ssl_server_cert_chain_cb_t)(
+ void *data,
+ int failures,
+ int error_depth,
+ const serf_ssl_certificate_t * const * certs,
+ apr_size_t certs_len);
+
+void serf_ssl_client_cert_provider_set(
+ serf_ssl_context_t *context,
+ serf_ssl_need_client_cert_t callback,
+ void *data,
+ void *cache_pool);
+
+void serf_ssl_client_cert_password_set(
+ serf_ssl_context_t *context,
+ serf_ssl_need_cert_password_t callback,
+ void *data,
+ void *cache_pool);
+
+/**
+ * Set a callback to override the default SSL server certificate validation
+ * algorithm.
+ */
+void serf_ssl_server_cert_callback_set(
+ serf_ssl_context_t *context,
+ serf_ssl_need_server_cert_t callback,
+ void *data);
+
+/**
+ * Set callbacks to override the default SSL server certificate validation
+ * algorithm for the current certificate or the entire certificate chain.
+ */
+void serf_ssl_server_cert_chain_callback_set(
+ serf_ssl_context_t *context,
+ serf_ssl_need_server_cert_t cert_callback,
+ serf_ssl_server_cert_chain_cb_t cert_chain_callback,
+ void *data);
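+
+/* Example (editorial sketch, not part of the original header): a minimal
+ * serf_ssl_need_server_cert_t callback that tolerates self-signed
+ * certificates and rejects everything else. Returning any error status makes
+ * validation fail; APR_EGENERAL is used here purely for illustration.
+ * "ssl_ctx" is an existing serf_ssl_context_t.
+ *
+ *   static apr_status_t validate_cert(void *data, int failures,
+ *                                     const serf_ssl_certificate_t *cert)
+ *   {
+ *       if (failures == 0 || failures == SERF_SSL_CERT_SELF_SIGNED)
+ *           return APR_SUCCESS;
+ *       return APR_EGENERAL;
+ *   }
+ *
+ *   serf_ssl_server_cert_callback_set(ssl_ctx, validate_cert, NULL);
+ */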
+
+/**
+ * Use the default root CA certificates as included with the OpenSSL library.
+ */
+apr_status_t serf_ssl_use_default_certificates(
+ serf_ssl_context_t *context);
+
+/**
+ * Allow SNI indicators to be sent to the server.
+ */
+apr_status_t serf_ssl_set_hostname(
+ serf_ssl_context_t *context, const char *hostname);
+
+/**
+ * Return the depth of the certificate.
+ */
+int serf_ssl_cert_depth(
+ const serf_ssl_certificate_t *cert);
+
+/**
+ * Extract the fields of the issuer in a table with keys (E, CN, OU, O, L,
+ * ST and C). The returned table will be allocated in @a pool.
+ */
+apr_hash_t *serf_ssl_cert_issuer(
+ const serf_ssl_certificate_t *cert,
+ apr_pool_t *pool);
+
+/**
+ * Extract the fields of the subject in a table with keys (E, CN, OU, O, L,
+ * ST and C). The returned table will be allocated in @a pool.
+ */
+apr_hash_t *serf_ssl_cert_subject(
+ const serf_ssl_certificate_t *cert,
+ apr_pool_t *pool);
+
+/**
+ * Extract the fields of the certificate in a table with keys (sha1, notBefore,
+ * notAfter, subjectAltName). The returned table will be allocated in @a pool.
+ */
+apr_hash_t *serf_ssl_cert_certificate(
+ const serf_ssl_certificate_t *cert,
+ apr_pool_t *pool);
+
+/**
+ * Export a certificate to a base64-encoded, zero-terminated string.
+ * The returned string is allocated in @a pool. Returns NULL on failure.
+ */
+const char *serf_ssl_cert_export(
+ const serf_ssl_certificate_t *cert,
+ apr_pool_t *pool);
+
+/**
+ * Load a CA certificate file from a path @a file_path. If the file was loaded
+ * and parsed correctly, a certificate @a cert will be created and returned.
+ * This certificate object will be allocated in @a pool.
+ */
+apr_status_t serf_ssl_load_cert_file(
+ serf_ssl_certificate_t **cert,
+ const char *file_path,
+ apr_pool_t *pool);
+
+/**
+ * Adds the certificate @a cert to the list of trusted certificates in
+ * @a ssl_ctx that will be used for verification.
+ * See also @a serf_ssl_load_cert_file.
+ */
+apr_status_t serf_ssl_trust_cert(
+ serf_ssl_context_t *ssl_ctx,
+ serf_ssl_certificate_t *cert);
+
+/**
+ * Enable or disable SSL compression on a SSL session.
+ * @a enabled = 1 to enable compression, 0 to disable compression.
+ * Default = disabled.
+ */
+apr_status_t serf_ssl_use_compression(
+ serf_ssl_context_t *ssl_ctx,
+ int enabled);
+
+serf_bucket_t *serf_bucket_ssl_encrypt_create(
+ serf_bucket_t *stream,
+ serf_ssl_context_t *ssl_context,
+ serf_bucket_alloc_t *allocator);
+
+serf_ssl_context_t *serf_bucket_ssl_encrypt_context_get(
+ serf_bucket_t *bucket);
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_ssl_decrypt;
+#define SERF_BUCKET_IS_SSL_DECRYPT(b) SERF_BUCKET_CHECK((b), ssl_decrypt)
+
+serf_bucket_t *serf_bucket_ssl_decrypt_create(
+ serf_bucket_t *stream,
+ serf_ssl_context_t *ssl_context,
+ serf_bucket_alloc_t *allocator);
+
+serf_ssl_context_t *serf_bucket_ssl_decrypt_context_get(
+ serf_bucket_t *bucket);
+
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_barrier;
+#define SERF_BUCKET_IS_BARRIER(b) SERF_BUCKET_CHECK((b), barrier)
+
+serf_bucket_t *serf_bucket_barrier_create(
+ serf_bucket_t *stream,
+ serf_bucket_alloc_t *allocator);
+
+
+/* ==================================================================== */
+
+extern const serf_bucket_type_t serf_bucket_type_iovec;
+#define SERF_BUCKET_IS_IOVEC(b) SERF_BUCKET_CHECK((b), iovec)
+
+serf_bucket_t *serf_bucket_iovec_create(
+ struct iovec vecs[],
+ int len,
+ serf_bucket_alloc_t *allocator);
+
+
+/* ==================================================================== */
+
+/* ### do we need a PIPE bucket type? they are simple apr_file_t objects */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !SERF_BUCKET_TYPES_H */
diff --git a/contrib/serf/serf_bucket_util.h b/contrib/serf/serf_bucket_util.h
new file mode 100644
index 0000000..b146c22
--- /dev/null
+++ b/contrib/serf/serf_bucket_util.h
@@ -0,0 +1,294 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SERF_BUCKET_UTIL_H
+#define SERF_BUCKET_UTIL_H
+
+/**
+ * @file serf_bucket_util.h
+ * @brief This header defines a set of functions and other utilities
+ * for implementing buckets. It is not needed by users of the bucket
+ * system.
+ */
+
+#include "serf.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * Basic bucket creation function.
+ *
+ * This function will create a bucket of @a type, allocating the necessary
+ * memory from @a allocator. The @a data bucket-private information will
+ * be stored into the bucket.
+ */
+serf_bucket_t *serf_bucket_create(
+ const serf_bucket_type_t *type,
+ serf_bucket_alloc_t *allocator,
+ void *data);
+
+/**
+ * Default implementation of the @see read_iovec functionality.
+ *
+ * This function will use the @see read function to get a block of memory,
+ * then return it in the iovec.
+ */
+apr_status_t serf_default_read_iovec(
+ serf_bucket_t *bucket,
+ apr_size_t requested,
+ int vecs_size,
+ struct iovec *vecs,
+ int *vecs_used);
+
+/**
+ * Default implementation of the @see read_for_sendfile functionality.
+ *
+ * This function will use the @see read function to get a block of memory,
+ * then return it as a header. No file will be returned.
+ */
+apr_status_t serf_default_read_for_sendfile(
+ serf_bucket_t *bucket,
+ apr_size_t requested,
+ apr_hdtr_t *hdtr,
+ apr_file_t **file,
+ apr_off_t *offset,
+ apr_size_t *len);
+
+/**
+ * Default implementation of the @see read_bucket functionality.
+ *
+ * This function will always return NULL, indicating that the @a type
+ * of bucket cannot be found within @a bucket.
+ */
+serf_bucket_t *serf_default_read_bucket(
+ serf_bucket_t *bucket,
+ const serf_bucket_type_t *type);
+
+/**
+ * Default implementation of the @see destroy functionality.
+ *
+ * This function will return the @a bucket to its allocator.
+ */
+void serf_default_destroy(
+ serf_bucket_t *bucket);
+
+
+/**
+ * Default implementation of the @see destroy functionality.
+ *
+ * This function will return both the @a bucket and its data member to the
+ * bucket's allocator.
+ */
+void serf_default_destroy_and_data(
+ serf_bucket_t *bucket);
+
+
+/**
+ * Allocate @a size bytes of memory using @a allocator.
+ *
+ * Returns NULL if the requested memory size could not be allocated.
+ */
+void *serf_bucket_mem_alloc(
+ serf_bucket_alloc_t *allocator,
+ apr_size_t size);
+
+/**
+ * Allocate @a size bytes of memory using @a allocator and set all of the
+ * memory to 0.
+ *
+ * Returns NULL if the requested memory size could not be allocated.
+ */
+void *serf_bucket_mem_calloc(
+ serf_bucket_alloc_t *allocator,
+ apr_size_t size);
+
+/**
+ * Free the memory at @a block, returning it to @a allocator.
+ */
+void serf_bucket_mem_free(
+ serf_bucket_alloc_t *allocator,
+ void *block);
+
+
+/**
+ * Analogous to apr_pstrmemdup, using a bucket allocator instead.
+ */
+char *serf_bstrmemdup(
+ serf_bucket_alloc_t *allocator,
+ const char *str,
+ apr_size_t size);
+
+/**
+ * Analogous to apr_pmemdup, using a bucket allocator instead.
+ */
+void * serf_bmemdup(
+ serf_bucket_alloc_t *allocator,
+ const void *mem,
+ apr_size_t size);
+
+/**
+ * Analogous to apr_pstrdup, using a bucket allocator instead.
+ */
+char * serf_bstrdup(
+ serf_bucket_alloc_t *allocator,
+ const char *str);
+
+/**
+ * Analogous to apr_pstrcatv, using a bucket allocator instead.
+ */
+char * serf_bstrcatv(
+ serf_bucket_alloc_t *allocator,
+ struct iovec *vec,
+ int vecs,
+ apr_size_t *bytes_written);
+
+/**
+ * Read data up to a newline.
+ *
+ * @a acceptable contains the allowed forms of a newline, and @a found
+ * will return the particular newline type that was found. If a newline
+ * is not found, then SERF_NEWLINE_NONE will be placed in @a found.
+ *
+ * @a data should contain a pointer to the data to be scanned. @a len
+ * should specify the length of that data buffer. On exit, @a data will
+ * be advanced past the newline, and @a len will specify the remaining
+ * amount of data in the buffer.
+ *
+ * Given this pattern of behavior, the caller should store the initial
+ * value of @a data as the line start. The difference between the
+ * returned value of @a data and the saved start is the length of the
+ * line.
+ *
+ * Note that the newline character(s) will remain within the buffer.
+ * This function scans at a byte level for the newline characters. Thus,
+ * the data buffer may contain NUL characters. As a corollary, this
+ * function only works on 8-bit character encodings.
+ *
+ * If the data is fully consumed (@a len gets set to zero) and a CR
+ * character is found at the end and the CRLF sequence is allowed, then
+ * this function may store SERF_NEWLINE_CRLF_SPLIT into @a found. The
+ * caller should take particular consideration for the CRLF sequence
+ * that may be split across data buffer boundaries.
+ */
+void serf_util_readline(
+ const char **data,
+ apr_size_t *len,
+ int acceptable,
+ int *found);
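+
+/* Example (editorial sketch, not part of the original header): extracting a
+ * single line from a buffer. SERF_NEWLINE_ANY and SERF_NEWLINE_NONE are the
+ * newline constants declared in serf.h; "data" and "len" describe the
+ * unscanned buffer.
+ *
+ *   const char *line_start = data;
+ *   int found;
+ *
+ *   serf_util_readline(&data, &len, SERF_NEWLINE_ANY, &found);
+ *   if (found != SERF_NEWLINE_NONE) {
+ *       ... the line, including its newline, spans line_start up to data;
+ *           len now holds the number of unscanned bytes ...
+ *   }
+ */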
+
+
+/** The buffer size used within @see serf_databuf_t. */
+#define SERF_DATABUF_BUFSIZE 8000
+
+/** Callback function which is used to refill the data buffer.
+ *
+ * The function takes @a baton, which is the @see read_baton value
+ * from the serf_databuf_t structure. Data should be placed into
+ * a buffer specified by @a buf, which is @a bufsize bytes long.
+ * The amount of data read should be returned in @a len.
+ *
+ * APR_EOF should be returned if no more data is available. If reading
+ * would block, APR_EAGAIN should be returned instead of blocking. In both
+ * cases, @a buf should be filled in and @a len set, as appropriate.
+ */
+typedef apr_status_t (*serf_databuf_reader_t)(
+ void *baton,
+ apr_size_t bufsize,
+ char *buf,
+ apr_size_t *len);
+
+/**
+ * This structure is used as an intermediate data buffer for some "external"
+ * source of data. It works as a scratch pad area for incoming data to be
+ * stored, and then returned as a ptr/len pair by the bucket read functions.
+ *
+ * This structure should be initialized by calling @see serf_databuf_init.
+ * Users should not bother to zero the structure beforehand.
+ */
+typedef struct {
+ /** The current data position within the buffer. */
+ const char *current;
+
+ /** Amount of data remaining in the buffer. */
+ apr_size_t remaining;
+
+ /** Callback function. */
+ serf_databuf_reader_t read;
+
+ /** A baton to hold context-specific data. */
+ void *read_baton;
+
+ /** Records the status from the last @see read operation. */
+ apr_status_t status;
+
+ /** Holds the data until it can be returned. */
+ char buf[SERF_DATABUF_BUFSIZE];
+
+} serf_databuf_t;
+
+/**
+ * Initialize the @see serf_databuf_t structure specified by @a databuf.
+ */
+void serf_databuf_init(
+ serf_databuf_t *databuf);
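+
+/* Example (editorial sketch, not part of the original header): a reader
+ * callback that refills the databuf from an apr_file_t, wired up after
+ * serf_databuf_init(). apr_file_read() is standard APR; "ctx" and "file"
+ * are assumed names from a hypothetical bucket implementation.
+ *
+ *   static apr_status_t file_reader(void *baton, apr_size_t bufsize,
+ *                                   char *buf, apr_size_t *len)
+ *   {
+ *       *len = bufsize;
+ *       return apr_file_read(baton, buf, len);
+ *   }
+ *
+ *   serf_databuf_init(&ctx->databuf);
+ *   ctx->databuf.read = file_reader;
+ *   ctx->databuf.read_baton = file;
+ */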
+
+/**
+ * Implement a bucket-style read function from the @see serf_databuf_t
+ * structure given by @a databuf.
+ *
+ * The @a requested, @a data, and @a len fields are interpreted and used
+ * as in the read function of @see serf_bucket_t.
+ */
+apr_status_t serf_databuf_read(
+ serf_databuf_t *databuf,
+ apr_size_t requested,
+ const char **data,
+ apr_size_t *len);
+
+/**
+ * Implement a bucket-style readline function from the @see serf_databuf_t
+ * structure given by @a databuf.
+ *
+ * The @a acceptable, @a found, @a data, and @a len fields are interpreted
+ * and used as in the read function of @see serf_bucket_t.
+ */
+apr_status_t serf_databuf_readline(
+ serf_databuf_t *databuf,
+ int acceptable,
+ int *found,
+ const char **data,
+ apr_size_t *len);
+
+/**
+ * Implement a bucket-style peek function from the @see serf_databuf_t
+ * structure given by @a databuf.
+ *
+ * The @a data and @a len fields are interpreted and used as in the
+ * peek function of @see serf_bucket_t.
+ */
+apr_status_t serf_databuf_peek(
+ serf_databuf_t *databuf,
+ const char **data,
+ apr_size_t *len);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !SERF_BUCKET_UTIL_H */
diff --git a/contrib/serf/serf_private.h b/contrib/serf/serf_private.h
new file mode 100644
index 0000000..982d977
--- /dev/null
+++ b/contrib/serf/serf_private.h
@@ -0,0 +1,455 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _SERF_PRIVATE_H_
+#define _SERF_PRIVATE_H_
+
+/* ### what the hell? why does the APR interface have a "size" ??
+ ### the implication is that, if we bust this limit, we'd need to
+ ### stop, rebuild a pollset, and repopulate it. what suckage. */
+#define MAX_CONN 16
+
+/* Windows does not define IOV_MAX, so we need to ensure it is defined. */
+#ifndef IOV_MAX
+#define IOV_MAX 16
+#endif
+
+/* Older versions of APR do not have this macro. */
+#ifdef APR_SIZE_MAX
+#define REQUESTED_MAX APR_SIZE_MAX
+#else
+#define REQUESTED_MAX (~((apr_size_t)0))
+#endif
+
+#define SERF_IO_CLIENT (1)
+#define SERF_IO_CONN (2)
+#define SERF_IO_LISTENER (3)
+
+/* Internal logging facilities, set flag to 1 to enable console logging for
+ the selected component. */
+#define SSL_VERBOSE 0
+#define SSL_MSG_VERBOSE 0 /* logs decrypted requests and responses. */
+#define SOCK_VERBOSE 0
+#define SOCK_MSG_VERBOSE 0 /* logs bytes received from or written to a socket. */
+#define CONN_VERBOSE 0
+#define AUTH_VERBOSE 0
+
+/* Older versions of APR do not have the APR_VERSION_AT_LEAST macro. Those
+   versions are not affected by the WSAPoll() problem described below.
+
+ If the macro *is* defined, and we're on WIN32, and APR is version 1.4.0+,
+ then we have a broken WSAPoll() implementation.
+
+ See serf_context_create_ex() below. */
+#if defined(APR_VERSION_AT_LEAST) && defined(WIN32)
+#if APR_VERSION_AT_LEAST(1,4,0)
+#define BROKEN_WSAPOLL
+#endif
+#endif
+
+typedef struct serf__authn_scheme_t serf__authn_scheme_t;
+
+typedef struct serf_io_baton_t {
+ int type;
+ union {
+ serf_incoming_t *client;
+ serf_connection_t *conn;
+ serf_listener_t *listener;
+ } u;
+} serf_io_baton_t;
+
+/* Holds all the information corresponding to a request/response pair. */
+struct serf_request_t {
+ serf_connection_t *conn;
+
+ apr_pool_t *respool;
+ serf_bucket_alloc_t *allocator;
+
+ /* The bucket corresponding to the request. Will be NULL once the
+ * bucket has been emptied (for delivery into the socket).
+ */
+ serf_bucket_t *req_bkt;
+
+ serf_request_setup_t setup;
+ void *setup_baton;
+
+ serf_response_acceptor_t acceptor;
+ void *acceptor_baton;
+
+ serf_response_handler_t handler;
+ void *handler_baton;
+
+ serf_bucket_t *resp_bkt;
+
+ int written;
+ int priority;
+ /* 1 if this is a request to setup a SSL tunnel, 0 for normal requests. */
+ int ssltunnel;
+
+ /* This baton is currently only used for digest authentication, which
+ needs access to the uri of the request in the response handler.
+ If serf_request_t is replaced by a serf_http_request_t in the future,
+ which knows about uri and method and such, this baton won't be needed
+ anymore. */
+ void *auth_baton;
+
+ struct serf_request_t *next;
+};
+
+typedef struct serf_pollset_t {
+ /* the set of connections to poll */
+ apr_pollset_t *pollset;
+} serf_pollset_t;
+
+typedef struct serf__authn_info_t {
+ const serf__authn_scheme_t *scheme;
+
+ void *baton;
+} serf__authn_info_t;
+
+struct serf_context_t {
+ /* the pool used for self and for other allocations */
+ apr_pool_t *pool;
+
+ void *pollset_baton;
+ serf_socket_add_t pollset_add;
+ serf_socket_remove_t pollset_rm;
+
+ /* one of our connections has a dirty pollset state. */
+ int dirty_pollset;
+
+ /* the list of active connections */
+ apr_array_header_t *conns;
+#define GET_CONN(ctx, i) (((serf_connection_t **)(ctx)->conns->elts)[i])
+
+ /* Proxy server address */
+ apr_sockaddr_t *proxy_address;
+
+ /* Progress callback */
+ serf_progress_t progress_func;
+ void *progress_baton;
+ apr_off_t progress_read;
+ apr_off_t progress_written;
+
+ /* authentication info for the servers used in this context. Shared by all
+ connections to the same server.
+ Structure of the hashtable: key: host url, e.g. https://localhost:80
+ value: serf__authn_info_t *
+ */
+ apr_hash_t *server_authn_info;
+
+ /* authentication info for the proxy configured in this context, shared by
+ all connections. */
+ serf__authn_info_t proxy_authn_info;
+
+ /* List of authn types supported by the client.*/
+ int authn_types;
+ /* Callback function used to get credentials for a realm. */
+ serf_credentials_callback_t cred_cb;
+};
+
+struct serf_listener_t {
+ serf_context_t *ctx;
+ serf_io_baton_t baton;
+ apr_socket_t *skt;
+ apr_pool_t *pool;
+ apr_pollfd_t desc;
+ void *accept_baton;
+ serf_accept_client_t accept_func;
+};
+
+struct serf_incoming_t {
+ serf_context_t *ctx;
+ serf_io_baton_t baton;
+ void *request_baton;
+ serf_incoming_request_cb_t request;
+ apr_socket_t *skt;
+ apr_pollfd_t desc;
+};
+
+/* States for the different stages in the lifecycle of a connection. */
+typedef enum {
+ SERF_CONN_INIT, /* no socket created yet */
+ SERF_CONN_SETUP_SSLTUNNEL, /* ssl tunnel being setup, no requests sent */
+ SERF_CONN_CONNECTED, /* conn is ready to send requests */
+ SERF_CONN_CLOSING /* conn is closing, no more requests,
+ start a new socket */
+} serf__connection_state_t;
+
+struct serf_connection_t {
+ serf_context_t *ctx;
+
+ apr_status_t status;
+ serf_io_baton_t baton;
+
+ apr_pool_t *pool;
+ serf_bucket_alloc_t *allocator;
+
+ apr_sockaddr_t *address;
+
+ apr_socket_t *skt;
+ apr_pool_t *skt_pool;
+
+ /* the last reqevents we gave to pollset_add */
+ apr_int16_t reqevents;
+
+ /* the events we've seen for this connection in our returned pollset */
+ apr_int16_t seen_in_pollset;
+
+ /* are we a dirty connection that needs its poll status updated? */
+ int dirty_conn;
+
+ /* number of completed requests we've sent */
+ unsigned int completed_requests;
+
+ /* number of completed responses we've got */
+ unsigned int completed_responses;
+
+ /* keepalive */
+ unsigned int probable_keepalive_limit;
+
+ /* Current state of the connection (whether or not it is connected). */
+ serf__connection_state_t state;
+
+ /* This connection may have responses without a request! */
+ int async_responses;
+ serf_bucket_t *current_async_response;
+ serf_response_acceptor_t async_acceptor;
+ void *async_acceptor_baton;
+ serf_response_handler_t async_handler;
+ void *async_handler_baton;
+
+ /* A bucket wrapped around our socket (for reading responses). */
+ serf_bucket_t *stream;
+ /* A reference to the aggregate bucket that provides the boundary between
+ * request level buckets and connection level buckets.
+ */
+ serf_bucket_t *ostream_head;
+ serf_bucket_t *ostream_tail;
+
+ /* Aggregate bucket used to send the CONNECT request. */
+ serf_bucket_t *ssltunnel_ostream;
+
+ /* The list of active requests. */
+ serf_request_t *requests;
+ serf_request_t *requests_tail;
+
+ struct iovec vec[IOV_MAX];
+ int vec_len;
+
+ serf_connection_setup_t setup;
+ void *setup_baton;
+ serf_connection_closed_t closed;
+ void *closed_baton;
+
+ /* Max. number of outstanding requests. */
+ unsigned int max_outstanding_requests;
+
+ int hit_eof;
+
+    /* Host url, path omitted, syntax: https://svn.apache.org . */
+ const char *host_url;
+
+    /* Exploded host url, path omitted. Only the scheme, hostinfo, hostname
+       and port fields are filled in. */
+ apr_uri_t host_info;
+
+ /* connection and authentication scheme specific information */
+ void *authn_baton;
+ void *proxy_authn_baton;
+
+ /* Time marker when connection begins. */
+ apr_time_t connect_time;
+
+ /* Calculated connection latency. Negative value if latency is unknown. */
+ apr_interval_time_t latency;
+
+ /* Needs to read first before we can write again. */
+ int stop_writing;
+};
+
+/*** Internal bucket functions ***/
+
+/** Transform a response_bucket in-place into an aggregate bucket. Restore the
+ status line and all headers, not just the body.
+
+ This can only be used when we haven't started reading the body of the
+ response yet.
+
+ Keep internal for now, probably only useful within serf.
+ */
+apr_status_t serf_response_full_become_aggregate(serf_bucket_t *bucket);
+
+/*** Authentication handler declarations ***/
+
+typedef enum { PROXY, HOST } peer_t;
+
+/**
+ * For each authentication scheme we need a handler function of type
+ * serf__auth_handler_func_t. This function will be called when an
+ * authentication challenge is received in a session.
+ */
+typedef apr_status_t
+(*serf__auth_handler_func_t)(int code,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ const char *auth_hdr,
+ const char *auth_attr,
+ void *baton,
+ apr_pool_t *pool);
+
+/**
+ * For each authentication scheme we need an initialization function of type
+ * serf__init_context_func_t. This function will be called the first time
+ * serf tries a specific authentication scheme handler.
+ */
+typedef apr_status_t
+(*serf__init_context_func_t)(int code,
+ serf_context_t *conn,
+ apr_pool_t *pool);
+
+/**
+ * For each authentication scheme we need an initialization function of type
+ * serf__init_conn_func_t. This function will be called when a new
+ * connection is opened.
+ */
+typedef apr_status_t
+(*serf__init_conn_func_t)(const serf__authn_scheme_t *scheme,
+ int code,
+ serf_connection_t *conn,
+ apr_pool_t *pool);
+
+/**
+ * For each authentication scheme we need a setup_request function of type
+ * serf__setup_request_func_t. This function will be called when a
+ * new serf_request_t object is created and should fill in the correct
+ * authentication headers (if needed).
+ */
+typedef apr_status_t
+(*serf__setup_request_func_t)(peer_t peer,
+ int code,
+ serf_connection_t *conn,
+ serf_request_t *request,
+ const char *method,
+ const char *uri,
+ serf_bucket_t *hdrs_bkt);
+
+/**
+ * This function will be called when a response is received, so that the
+ * scheme handler can validate the Authentication related response headers
+ * (if needed).
+ */
+typedef apr_status_t
+(*serf__validate_response_func_t)(peer_t peer,
+ int code,
+ serf_connection_t *conn,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ apr_pool_t *pool);
+
+/**
+ * serf__authn_scheme_t: vtable for an authn scheme provider.
+ */
+struct serf__authn_scheme_t {
+ /* The name of this authentication scheme. Used in headers of requests and
+ for logging. */
+ const char *name;
+
+ /* Key is the name of the authentication scheme in lower case, to
+ facilitate case insensitive matching of the response headers. */
+ const char *key;
+
+ /* Internal code used for this authn type. */
+ int type;
+
+ /* The context initialization function if any; otherwise, NULL */
+ serf__init_context_func_t init_ctx_func;
+
+ /* The connection initialization function if any; otherwise, NULL */
+ serf__init_conn_func_t init_conn_func;
+
+ /* The authentication handler function */
+ serf__auth_handler_func_t handle_func;
+
+ /* Function to set up the authentication header of a request */
+ serf__setup_request_func_t setup_request_func;
+
+ /* Function to validate the authentication header of a response */
+ serf__validate_response_func_t validate_response_func;
+};
+
+/**
+ * Handles a 401 or 407 response by trying the different available
+ * authentication handlers.
+ */
+apr_status_t serf__handle_auth_response(int *consumed_response,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ void *baton,
+ apr_pool_t *pool);
+
+/* Get the cached serf__authn_info_t object for the target server, or create one
+ when this is the first connection to the server.
+ TODO: The serf__authn_info_t objects are allocated in the context pool, so
+ a context that's used to connect to many different servers using Basic or
+   Digest authentication will hold on to many objects indefinitely. We should
+   be able to clean up stale objects from time to time. */
+serf__authn_info_t *serf__get_authn_info_for_server(serf_connection_t *conn);
+
+/* from context.c */
+void serf__context_progress_delta(void *progress_baton, apr_off_t read,
+ apr_off_t written);
+
+/* from incoming.c */
+apr_status_t serf__process_client(serf_incoming_t *l, apr_int16_t events);
+apr_status_t serf__process_listener(serf_listener_t *l);
+
+/* from outgoing.c */
+apr_status_t serf__open_connections(serf_context_t *ctx);
+apr_status_t serf__process_connection(serf_connection_t *conn,
+ apr_int16_t events);
+apr_status_t serf__conn_update_pollset(serf_connection_t *conn);
+serf_request_t *serf__ssltunnel_request_create(serf_connection_t *conn,
+ serf_request_setup_t setup,
+ void *setup_baton);
+apr_status_t serf__provide_credentials(serf_context_t *ctx,
+ char **username,
+ char **password,
+ serf_request_t *request,
+ void *baton,
+ int code, const char *authn_type,
+ const char *realm,
+ apr_pool_t *pool);
+
+/* from ssltunnel.c */
+apr_status_t serf__ssltunnel_connect(serf_connection_t *conn);
+
+
+/** Logging functions. Use one of the [COMP]_VERBOSE flags to enable specific
+ logging.
+ **/
+
+/* Logs a standard event, with filename & timestamp header */
+void serf__log(int verbose_flag, const char *filename, const char *fmt, ...);
+
+/* Logs a standard event, but without prefix. This is useful to build up
+ log lines in parts. */
+void serf__log_nopref(int verbose_flag, const char *fmt, ...);
+
+/* Logs a socket event, add local and remote ip address:port */
+void serf__log_skt(int verbose_flag, const char *filename, apr_socket_t *skt,
+ const char *fmt, ...);
+
+#endif
diff --git a/contrib/serf/ssltunnel.c b/contrib/serf/ssltunnel.c
new file mode 100644
index 0000000..480c799
--- /dev/null
+++ b/contrib/serf/ssltunnel.c
@@ -0,0 +1,178 @@
+/* Copyright 2011 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*** Set up a SSL tunnel over a HTTP proxy, according to RFC 2817. ***/
+
+#include <apr_pools.h>
+#include <apr_strings.h>
+
+#include "serf.h"
+#include "serf_private.h"
+
+
+/* Structure passed around as baton for the CONNECT request and response. */
+typedef struct {
+ apr_pool_t *pool;
+ const char *uri;
+} req_ctx_t;
+
+/* forward declaration. */
+static apr_status_t setup_request(serf_request_t *request,
+ void *setup_baton,
+ serf_bucket_t **req_bkt,
+ serf_response_acceptor_t *acceptor,
+ void **acceptor_baton,
+ serf_response_handler_t *handler,
+ void **handler_baton,
+ apr_pool_t *pool);
+
+static serf_bucket_t* accept_response(serf_request_t *request,
+ serf_bucket_t *stream,
+ void *acceptor_baton,
+ apr_pool_t *pool)
+{
+ serf_bucket_t *c;
+ serf_bucket_alloc_t *bkt_alloc;
+#if 0
+ req_ctx_t *ctx = acceptor_baton;
+#endif
+
+ /* get the per-request bucket allocator */
+ bkt_alloc = serf_request_get_alloc(request);
+
+ /* Create a barrier so the response doesn't eat us! */
+ c = serf_bucket_barrier_create(stream, bkt_alloc);
+
+ return serf_bucket_response_create(c, bkt_alloc);
+}
+
+/* If a 200 OK was received for the CONNECT request, consider the connection
+ as ready for use. */
+static apr_status_t handle_response(serf_request_t *request,
+ serf_bucket_t *response,
+ void *handler_baton,
+ apr_pool_t *pool)
+{
+ apr_status_t status;
+ serf_status_line sl;
+ req_ctx_t *ctx = handler_baton;
+
+ if (! response) {
+ serf_connection_request_create(request->conn,
+ setup_request,
+ ctx);
+ return APR_SUCCESS;
+ }
+
+ status = serf_bucket_response_status(response, &sl);
+ if (SERF_BUCKET_READ_ERROR(status)) {
+ return status;
+ }
+ if (!sl.version && (APR_STATUS_IS_EOF(status) ||
+ APR_STATUS_IS_EAGAIN(status)))
+ {
+ return status;
+ }
+
+ status = serf_bucket_response_wait_for_headers(response);
+ if (status && !APR_STATUS_IS_EOF(status)) {
+ return status;
+ }
+
+ /* RFC 2817: Any successful (2xx) response to a CONNECT request indicates
+ that the proxy has established a connection to the requested host and
+ port, and has switched to tunneling the current connection to that server
+ connection.
+ */
+ if (sl.code >= 200 && sl.code < 300) {
+ request->conn->state = SERF_CONN_CONNECTED;
+
+ /* Body is supposed to be empty. */
+ apr_pool_destroy(ctx->pool);
+ serf_bucket_destroy(request->conn->ssltunnel_ostream);
+ request->conn->stream = NULL;
+ ctx = NULL;
+
+ serf__log(CONN_VERBOSE, __FILE__,
+ "successfully set up ssl tunnel on connection 0x%x\n",
+ request->conn);
+
+ return APR_EOF;
+ }
+
+    /* Authentication failures and 2xx responses have been handled at this
+       point; anything else is an error. */
+ return SERF_ERROR_SSLTUNNEL_SETUP_FAILED;
+}
+
+/* Prepare the CONNECT request. */
+static apr_status_t setup_request(serf_request_t *request,
+ void *setup_baton,
+ serf_bucket_t **req_bkt,
+ serf_response_acceptor_t *acceptor,
+ void **acceptor_baton,
+ serf_response_handler_t *handler,
+ void **handler_baton,
+ apr_pool_t *pool)
+{
+ req_ctx_t *ctx = setup_baton;
+
+ *req_bkt =
+ serf_request_bucket_request_create(request,
+ "CONNECT", ctx->uri,
+ NULL,
+ serf_request_get_alloc(request));
+ *acceptor = accept_response;
+ *acceptor_baton = ctx;
+ *handler = handle_response;
+ *handler_baton = ctx;
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t detect_eof(void *baton, serf_bucket_t *aggregate_bucket)
+{
+ serf_connection_t *conn = baton;
+ conn->hit_eof = 1;
+ return APR_EAGAIN;
+}
+
+/* SSL tunnel is needed, push a CONNECT request on the connection. */
+apr_status_t serf__ssltunnel_connect(serf_connection_t *conn)
+{
+ req_ctx_t *ctx;
+ apr_pool_t *ssltunnel_pool;
+
+ apr_pool_create(&ssltunnel_pool, conn->pool);
+
+ ctx = apr_palloc(ssltunnel_pool, sizeof(*ctx));
+ ctx->pool = ssltunnel_pool;
+ ctx->uri = apr_psprintf(ctx->pool, "%s:%d", conn->host_info.hostname,
+ conn->host_info.port);
+
+ conn->ssltunnel_ostream = serf__bucket_stream_create(conn->allocator,
+ detect_eof,
+ conn);
+
+ serf__ssltunnel_request_create(conn,
+ setup_request,
+ ctx);
+
+ conn->state = SERF_CONN_SETUP_SSLTUNNEL;
+ serf__log(CONN_VERBOSE, __FILE__,
+ "setting up ssl tunnel on connection 0x%x\n", conn);
+
+ return APR_SUCCESS;
+}