diff -u -r -N squid-3.1.0.13/acinclude.m4 squid-3.1.0.14/acinclude.m4
--- squid-3.1.0.13/acinclude.m4 2009-08-05 01:32:06.000000000 +1200
+++ squid-3.1.0.14/acinclude.m4 2009-09-27 15:28:23.000000000 +1200
@@ -72,8 +72,8 @@
AC_DEFUN([AC_TEST_CHECKFORHUGEOBJECTS],[
AC_MSG_CHECKING([whether compiler accepts -fhuge-objects])
AC_CACHE_VAL([ac_cv_test_checkforhugeobjects],[
- ac_cv_test_checkforhugeobjects=`echo "int foo;" > conftest.cc
-${CXX} -Werror -fhuge-objects -c conftest.cc 2>/dev/null
+ ac_cv_test_checkforhugeobjects=`echo "int main(int argc, char **argv) { int foo; }" > conftest.cc
+${CXX} -Werror -fhuge-objects -o conftest.bin conftest.cc 2>/dev/null
res=$?
rm -f conftest.*
echo yes
diff -u -r -N squid-3.1.0.13/aclocal.m4 squid-3.1.0.14/aclocal.m4
--- squid-3.1.0.13/aclocal.m4 2009-08-05 01:32:24.000000000 +1200
+++ squid-3.1.0.14/aclocal.m4 2009-09-27 15:28:42.000000000 +1200
@@ -7428,18 +7428,6 @@
[AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"])
])
-# Copyright (C) 1996, 1997, 2000, 2001, 2003, 2005
-# Free Software Foundation, Inc.
-#
-# This file is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# serial 8
-
-# AM_CONFIG_HEADER is obsolete. It has been replaced by AC_CONFIG_HEADERS.
-AU_DEFUN([AM_CONFIG_HEADER], [AC_CONFIG_HEADERS($@)])
-
# Do all the work for Automake. -*- Autoconf -*-
# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
diff -u -r -N squid-3.1.0.13/bootstrap.sh squid-3.1.0.14/bootstrap.sh
--- squid-3.1.0.13/bootstrap.sh 2009-08-05 01:32:06.000000000 +1200
+++ squid-3.1.0.14/bootstrap.sh 2009-09-27 15:28:23.000000000 +1200
@@ -16,7 +16,30 @@
eval $2 --version 2>/dev/null | grep -i "$1.*$3" >/dev/null
}
-find_version()
+show_version()
+{
+ tool=$1
+ found="NOT_FOUND"
+ shift
+ versions="$*"
+ for version in $versions; do
+ for variant in "" "-${version}" "`echo $version | sed -e 's/\.//g'`"; do
+ if check_version $tool ${tool}${variant} $version; then
+ found="${version}"
+ break
+ fi
+ done
+ if [ "x$found" != "xNOT_FOUND" ]; then
+ break
+ fi
+ done
+ if [ "x$found" = "xNOT_FOUND" ]; then
+ found="??"
+ fi
+ echo $found
+}
+
+find_variant()
{
tool=$1
found="NOT_FOUND"
@@ -77,7 +100,7 @@
src=libltdl
# do not bundle with the huge standard license text
- rm -fv $src/COPYING.LIB
+ rm -f $src/COPYING.LIB
makefile=$src/Makefile.in
sed 's/COPYING.LIB/ /g' $makefile > $makefile.new;
chmod u+w $makefile
@@ -100,16 +123,21 @@
}
# Adjust paths of required autool packages
-amver=`find_version automake ${amversions}`
-acver=`find_version autoconf ${acversions}`
-ltver=`find_version libtool ${ltversions}`
+amver=`find_variant automake ${amversions}`
+acver=`find_variant autoconf ${acversions}`
+ltver=`find_variant libtool ${ltversions}`
+
+# Produce debug output about which version was actually found.
+amversion=`show_version automake ${amversions}`
+acversion=`show_version autoconf ${acversions}`
+ltversion=`show_version libtool ${ltversions}`
# Set environment variable to tell automake which autoconf to use.
AUTOCONF="autoconf${acver}" ; export AUTOCONF
-echo "automake : $amver"
-echo "autoconfg: $acver"
-echo "libtool : $ltver"
+echo "automake ($amversion) : automake$amver"
+echo "autoconf ($acversion) : autoconf$acver"
+echo "libtool ($ltversion) : libtool$ltver"
for dir in \
"" \
diff -u -r -N squid-3.1.0.13/ChangeLog squid-3.1.0.14/ChangeLog
--- squid-3.1.0.13/ChangeLog 2009-08-05 01:32:06.000000000 +1200
+++ squid-3.1.0.14/ChangeLog 2009-09-27 15:28:23.000000000 +1200
@@ -1,3 +1,28 @@
+Changes to squid-3.1.0.14 (27 Sep 2009):
+
+ - Bug 2777: Various build issues on OpenSolaris
+ - Bug 2773: Segfault in RFC2069 Digest authentication
+ - Bug 2747: Compile errors on Solaris 10
+ - Bug 2735: Incomplete -fhuge-objects detection
+ - Bug 2722: Fix http_port accel combined with CONNECT
+ - Bug 2718: FTP sends EPSV2 on IPv4 connection
+ - Bug 2648: stateful helpers stuck in reserved
+ - Bug 2570: wccp2 "Here I Am" announcements not sent in memory-only mode
+ - Bug 2510: digest_ldap_auth uses incorrect logic with TLS
+ - Bug 2483: bind() called before connect()
+ - Bug 2215: config file line length limit (extended to 2 KB)
+ - Support Accept-Language: * wildcard
+ - Support autoconf 2.64
+ - Support TPROXY for IPv6 traffic (requires kernel support)
+ - Support TPROXY cache cluster behind WCCPv2
+ - Correct ESI support to work in multi-mode Squid
+ - Add 0.0.0.0 as a to_localhost address
+ - DiskIO detection fixes and use optimal IO in default build.
+ - Correct peer connect-fail-limit default of 10
+ - Prevent squidclient sending two Accept: headers
+ - ... all bug fixes from 3.0.STABLE19
+ - ... and many more documentation fixes
+
Changes to squid-3.1.0.13 (04 Aug 2009):
- Bug 2723 regression: enable PURGE requests if PURGE method ACL is present.
@@ -217,6 +242,19 @@
- Bug #2223: Follow XFF extensions added
- ... and many code and documentation cleanups
+Changes to squid-3.0.STABLE19 (06 Sep 2009):
+
+ - Bug 2745: Invalid Response error on small reads
+ - Bug 2739: DNS resolver option ndots can't be parsed from resolv.conf
+ - Bug 2734: some compile errors on Solaris
+ - Bug 2648: stateful helpers stuck in reserved if client disconnects while helper busy
+ - Bug 2541: Hang in 100% CPU loop while extracting header details using a delimiter other than comma
+ - Bug 2362: Remove support for deferred state in stateful helpers
+ - Add 0.0.0.0 as a to_localhost address
+ - Docs: Improve chroot directive documentation slightly
+ - Fixup libxml2 include magics, was failing when a configure cache was used
+ - ... and some minor testing improvements.
+
Changes to squid-3.0.STABLE18 (04 Aug 2009):
- Bug 2728: regression: assertion failed: !eof
diff -u -r -N squid-3.1.0.13/compat/assert.h squid-3.1.0.14/compat/assert.h
--- squid-3.1.0.13/compat/assert.h 2009-08-05 01:32:07.000000000 +1200
+++ squid-3.1.0.14/compat/assert.h 2009-09-27 15:28:23.000000000 +1200
@@ -51,6 +51,6 @@
#else
extern void
#endif
- xassert(const char *, const char *, int);
+xassert(const char *, const char *, int);
#endif /* SQUID_ASSERT_H */
diff -u -r -N squid-3.1.0.13/compat/GnuRegex.h squid-3.1.0.14/compat/GnuRegex.h
--- squid-3.1.0.13/compat/GnuRegex.h 2009-08-05 01:32:07.000000000 +1200
+++ squid-3.1.0.14/compat/GnuRegex.h 2009-09-27 15:28:23.000000000 +1200
@@ -23,8 +23,7 @@
#else /* USE_GNUREGEX */
#ifdef __cplusplus
-extern "C"
-{
+extern "C" {
#endif
/* Definitions for data structures and routines for the regular
@@ -419,11 +418,11 @@
/* POSIX compatibility. */
extern int regcomp _RE_ARGS((regex_t * preg, const char *pattern, int cflags));
extern int regexec
- _RE_ARGS((const regex_t * preg, const char *string, size_t nmatch,
- regmatch_t pmatch[], int eflags));
+ _RE_ARGS((const regex_t * preg, const char *string, size_t nmatch,
+ regmatch_t pmatch[], int eflags));
extern size_t regerror
- _RE_ARGS((int errcode, const regex_t * preg, char *errbuf,
- size_t errbuf_size));
+ _RE_ARGS((int errcode, const regex_t * preg, char *errbuf,
+ size_t errbuf_size));
extern void regfree _RE_ARGS((regex_t * preg));
#ifdef __cplusplus
diff -u -r -N squid-3.1.0.13/compat/Makefile.in squid-3.1.0.14/compat/Makefile.in
--- squid-3.1.0.13/compat/Makefile.in 2009-08-05 01:32:30.000000000 +1200
+++ squid-3.1.0.14/compat/Makefile.in 2009-09-27 15:28:48.000000000 +1200
@@ -80,6 +80,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/compat/os/linux.h squid-3.1.0.14/compat/os/linux.h
--- squid-3.1.0.13/compat/os/linux.h 2009-08-05 01:32:07.000000000 +1200
+++ squid-3.1.0.14/compat/os/linux.h 2009-09-27 15:28:23.000000000 +1200
@@ -27,5 +27,23 @@
#endif
+/*
+ * sys/capability.h is only needed in Linux apparently.
+ *
+ * HACK: LIBCAP_BROKEN Ugly glue to get around linux header madness colliding with glibc
+ */
+#if HAVE_SYS_CAPABILITY_H
+
+#if LIBCAP_BROKEN
+#undef _POSIX_SOURCE
+#define _LINUX_TYPES_H
+#define _LINUX_FS_H
+typedef uint32_t __u32;
+#endif
+
+#include <sys/capability.h>
+#endif /* HAVE_SYS_CAPABILITY_H */
+
+
#endif /* _SQUID_LINUX_ */
#endif /* SQUID_OS_LINUX_H */
diff -u -r -N squid-3.1.0.13/compat/os/mswin.h squid-3.1.0.14/compat/os/mswin.h
--- squid-3.1.0.13/compat/os/mswin.h 2009-08-05 01:32:07.000000000 +1200
+++ squid-3.1.0.14/compat/os/mswin.h 2009-09-27 15:28:23.000000000 +1200
@@ -661,7 +661,8 @@
#undef WSASocket
inline
-int WSASocket(int a, int t, int p, LPWSAPROTOCOL_INFO i, GROUP g, DWORD f) {
+int WSASocket(int a, int t, int p, LPWSAPROTOCOL_INFO i, GROUP g, DWORD f)
+{
SOCKET result;
#ifdef UNICODE
if ((result = ::WSASocketW(a, t, p, i, g, f)) == INVALID_SOCKET) {
diff -u -r -N squid-3.1.0.13/compat/os/solaris.h squid-3.1.0.14/compat/os/solaris.h
--- squid-3.1.0.13/compat/os/solaris.h 2009-08-05 01:32:07.000000000 +1200
+++ squid-3.1.0.14/compat/os/solaris.h 2009-09-27 15:28:23.000000000 +1200
@@ -30,6 +30,7 @@
/**
* prototypes for system function missing from system includes
*/
+#include <sys/resource.h>
SQUIDCEXTERN int getrusage(int, struct rusage *);
diff -u -r -N squid-3.1.0.13/compat/osdetect.h squid-3.1.0.14/compat/osdetect.h
--- squid-3.1.0.13/compat/osdetect.h 2009-08-05 01:32:07.000000000 +1200
+++ squid-3.1.0.14/compat/osdetect.h 2009-09-27 15:28:23.000000000 +1200
@@ -42,6 +42,9 @@
#elif defined(__FreeBSD__) /* FreeBSD */
#define _SQUID_FREEBSD_
+#elif defined(__FreeBSD_kernel__) /* GNU/kFreeBSD */
+#define _SQUID_KFREEBSD_
+
#elif defined(__sgi__) || defined(sgi) || defined(__sgi) /* SGI */
#define _SQUID_SGI_
diff -u -r -N squid-3.1.0.13/configure squid-3.1.0.14/configure
--- squid-3.1.0.13/configure 2009-08-05 01:32:53.000000000 +1200
+++ squid-3.1.0.14/configure 2009-09-27 15:29:12.000000000 +1200
@@ -1,7 +1,7 @@
#! /bin/sh
# From configure.in Revision.
# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.62 for Squid Web Proxy 3.1.0.13.
+# Generated by GNU Autoconf 2.62 for Squid Web Proxy 3.1.0.14.
#
# Report bugs to <http://www.squid-cache.org/bugs/>.
#
@@ -751,8 +751,8 @@
# Identity of this package.
PACKAGE_NAME='Squid Web Proxy'
PACKAGE_TARNAME='squid'
-PACKAGE_VERSION='3.1.0.13'
-PACKAGE_STRING='Squid Web Proxy 3.1.0.13'
+PACKAGE_VERSION='3.1.0.14'
+PACKAGE_STRING='Squid Web Proxy 3.1.0.14'
PACKAGE_BUGREPORT='http://www.squid-cache.org/bugs/'
ac_unique_file="src/main.cc"
@@ -921,17 +921,18 @@
DEFAULT_PIDFILE
SQUID_CFLAGS
SQUID_CXXFLAGS
-STORE_LIBS_TO_BUILD
-STORE_LIBS_TO_ADD
-STORE_TESTS
DISK_MODULES
DISK_LIBS
DISK_PROGRAMS
DISK_LINKOBJS
+AIOLIB
USE_AIOPS_WIN32_TRUE
USE_AIOPS_WIN32_FALSE
USE_AIO_WIN32_TRUE
USE_AIO_WIN32_FALSE
+STORE_LIBS_TO_BUILD
+STORE_LIBS_TO_ADD
+STORE_TESTS
REPL_POLICIES
REPL_OBJS
REPL_LIBS
@@ -1041,11 +1042,11 @@
enable_xmalloc_statistics
enable_async_io
with_aufs_threads
+with_dl
+enable_disk_io
with_pthreads
with_aio
-with_dl
enable_storeio
-enable_disk_io
enable_removal_policies
enable_icmp
enable_delay_pools
@@ -1101,6 +1102,7 @@
with_maxfd
with_filedescriptors
with_cppunit_basedir
+enable_caps
enable_ipv6
with_ipv6_split_stack
enable_gnuregex
@@ -1678,7 +1680,7 @@
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
-\`configure' configures Squid Web Proxy 3.1.0.13 to adapt to many kinds of systems.
+\`configure' configures Squid Web Proxy 3.1.0.14 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1748,7 +1750,7 @@
if test -n "$ac_init_help"; then
case $ac_init_help in
- short | recursive ) echo "Configuration of Squid Web Proxy 3.1.0.13:";;
+ short | recursive ) echo "Configuration of Squid Web Proxy 3.1.0.14:";;
esac
cat <<\_ACEOF
@@ -1788,19 +1790,19 @@
--enable-async-io=N_THREADS
Shorthand for "--with-aufs-threads=N_THREADS
--with-pthreads --enable-storeio=ufs,aufs"
+ --enable-disk-io="list of modules"
+ Build support for the list of disk I/O modules. Set
+ without a value or omitted, all available modules
+ will be built. See src/DiskIO for a list of
+ available modules, or Programmers Guide section on
+ DiskIO for details on how to build your custom disk
+ module
--enable-storeio="list of modules"
Build support for the list of store I/O modules. The
default is only to build the "ufs" module. See
src/fs for a list of available modules, or
Programmers Guide section for
details on how to build your custom store module
- --enable-disk-io="list of modules"
- Build support for the list of disk I/O modules. If
- unset only the "Blocking" module will be built. Set
- without a value all available modules will be built.
- See src/DiskIO for a list of available modules, or
- Programmers Guide section on DiskIO for details on
- how to build your custom disk module
--enable-removal-policies="list of policies"
Build support for the list of removal policies. The
default is only to build the "lru" module. See
@@ -1930,6 +1932,8 @@
other code that adds custom HTTP headers to the
requests.
--enable-zph-qos Enable ZPH QOS support
+ --disable-caps disable usage of Linux capabilities library to
+ control privileges
--disable-ipv6 Disable IPv6 support
--enable-gnuregex Compile GNUregex. Unless you have reason to use this
option, you should not enable it. This library file
@@ -1960,9 +1964,9 @@
--with-aufs-threads=N_THREADS
Tune the number of worker threads for the aufs
object store.
- --with-pthreads Use POSIX Threads
- --with-aio Use POSIX AIO
--with-dl Use dynamic linking
+ --without-pthreads Disable POSIX Threads
+ --without-aio Do not use POSIX AIO. Default: auto-detect
--with-openssl{=PATH} Compile with the OpenSSL libraries. The path to the
OpenSSL development libraries and headers
installation can be specified if outside of the
@@ -2075,7 +2079,7 @@
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
-Squid Web Proxy configure 3.1.0.13
+Squid Web Proxy configure 3.1.0.14
generated by GNU Autoconf 2.62
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
@@ -2089,7 +2093,7 @@
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
-It was created by Squid Web Proxy $as_me 3.1.0.13, which was
+It was created by Squid Web Proxy $as_me 3.1.0.14, which was
generated by GNU Autoconf 2.62. Invocation command line was
$ $0 $@
@@ -2807,7 +2811,7 @@
# Define the identity of the package.
PACKAGE='squid'
- VERSION='3.1.0.13'
+ VERSION='3.1.0.14'
cat >>confdefs.h <<_ACEOF
@@ -5660,7 +5664,7 @@
;;
*-*-irix6*)
# Find out which ABI we are using.
- echo '#line 5663 "configure"' > conftest.$ac_ext
+ echo '#line 5667 "configure"' > conftest.$ac_ext
if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
(eval $ac_compile) 2>&5
ac_status=$?
@@ -8282,11 +8286,11 @@
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:8285: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:8289: $lt_compile\"" >&5)
(eval "$lt_compile" 2>conftest.err)
ac_status=$?
cat conftest.err >&5
- echo "$as_me:8289: \$? = $ac_status" >&5
+ echo "$as_me:8293: \$? = $ac_status" >&5
if (exit $ac_status) && test -s "$ac_outfile"; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings other than the usual output.
@@ -8572,11 +8576,11 @@
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:8575: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:8579: $lt_compile\"" >&5)
(eval "$lt_compile" 2>conftest.err)
ac_status=$?
cat conftest.err >&5
- echo "$as_me:8579: \$? = $ac_status" >&5
+ echo "$as_me:8583: \$? = $ac_status" >&5
if (exit $ac_status) && test -s "$ac_outfile"; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings other than the usual output.
@@ -8676,11 +8680,11 @@
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:8679: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:8683: $lt_compile\"" >&5)
(eval "$lt_compile" 2>out/conftest.err)
ac_status=$?
cat out/conftest.err >&5
- echo "$as_me:8683: \$? = $ac_status" >&5
+ echo "$as_me:8687: \$? = $ac_status" >&5
if (exit $ac_status) && test -s out/conftest2.$ac_objext
then
# The compiler can only warn and ignore the option if not recognized
@@ -11076,7 +11080,7 @@
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext < conftest.$ac_ext <&5)
+ (eval echo "\"\$as_me:13592: $lt_compile\"" >&5)
(eval "$lt_compile" 2>conftest.err)
ac_status=$?
cat conftest.err >&5
- echo "$as_me:13592: \$? = $ac_status" >&5
+ echo "$as_me:13596: \$? = $ac_status" >&5
if (exit $ac_status) && test -s "$ac_outfile"; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings other than the usual output.
@@ -13689,11 +13693,11 @@
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:13692: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:13696: $lt_compile\"" >&5)
(eval "$lt_compile" 2>out/conftest.err)
ac_status=$?
cat out/conftest.err >&5
- echo "$as_me:13696: \$? = $ac_status" >&5
+ echo "$as_me:13700: \$? = $ac_status" >&5
if (exit $ac_status) && test -s out/conftest2.$ac_objext
then
# The compiler can only warn and ignore the option if not recognized
@@ -15272,11 +15276,11 @@
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:15275: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:15279: $lt_compile\"" >&5)
(eval "$lt_compile" 2>conftest.err)
ac_status=$?
cat conftest.err >&5
- echo "$as_me:15279: \$? = $ac_status" >&5
+ echo "$as_me:15283: \$? = $ac_status" >&5
if (exit $ac_status) && test -s "$ac_outfile"; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings other than the usual output.
@@ -15376,11 +15380,11 @@
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:15379: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:15383: $lt_compile\"" >&5)
(eval "$lt_compile" 2>out/conftest.err)
ac_status=$?
cat out/conftest.err >&5
- echo "$as_me:15383: \$? = $ac_status" >&5
+ echo "$as_me:15387: \$? = $ac_status" >&5
if (exit $ac_status) && test -s out/conftest2.$ac_objext
then
# The compiler can only warn and ignore the option if not recognized
@@ -17591,11 +17595,11 @@
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:17594: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:17598: $lt_compile\"" >&5)
(eval "$lt_compile" 2>conftest.err)
ac_status=$?
cat conftest.err >&5
- echo "$as_me:17598: \$? = $ac_status" >&5
+ echo "$as_me:17602: \$? = $ac_status" >&5
if (exit $ac_status) && test -s "$ac_outfile"; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings other than the usual output.
@@ -17881,11 +17885,11 @@
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:17884: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:17888: $lt_compile\"" >&5)
(eval "$lt_compile" 2>conftest.err)
ac_status=$?
cat conftest.err >&5
- echo "$as_me:17888: \$? = $ac_status" >&5
+ echo "$as_me:17892: \$? = $ac_status" >&5
if (exit $ac_status) && test -s "$ac_outfile"; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings other than the usual output.
@@ -17985,11 +17989,11 @@
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:17988: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:17992: $lt_compile\"" >&5)
(eval "$lt_compile" 2>out/conftest.err)
ac_status=$?
cat out/conftest.err >&5
- echo "$as_me:17992: \$? = $ac_status" >&5
+ echo "$as_me:17996: \$? = $ac_status" >&5
if (exit $ac_status) && test -s out/conftest2.$ac_objext
then
# The compiler can only warn and ignore the option if not recognized
@@ -21738,8 +21742,8 @@
$as_echo_n "(cached) " >&6
else
- ac_cv_test_checkforhugeobjects=`echo "int foo;" > conftest.cc
-${CXX} -Werror -fhuge-objects -c conftest.cc 2>/dev/null
+ ac_cv_test_checkforhugeobjects=`echo "int main(int argc, char **argv) { int foo; }" > conftest.cc
+${CXX} -Werror -fhuge-objects -o conftest.bin conftest.cc 2>/dev/null
res=$?
rm -f conftest.*
echo yes
@@ -21844,6 +21848,56 @@
fi
+if test "$cross_compiling" = yes; then
+ { { $as_echo "$as_me:$LINENO: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&5
+$as_echo "$as_me: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+ int main(int argc, char **argv) { return 0; }
+_ACEOF
+rm -f conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ :
+else
+ $as_echo "$as_me: program exited with status $ac_status" >&5
+$as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -rf conftest.dSYM
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+
+
+
# Check whether --enable-xmalloc-statistics was given.
@@ -21907,29 +21961,6 @@
fi
-# Check whether --with-pthreads was given.
-if test "${with_pthreads+set}" = set; then
- withval=$with_pthreads;
-fi
-
-if test "$with_pthreads" = "yes"; then
- { $as_echo "$as_me:$LINENO: With pthreads" >&5
-$as_echo "$as_me: With pthreads" >&6;}
-fi
-
-
-# Check whether --with-aio was given.
-if test "${with_aio+set}" = set; then
- withval=$with_aio;
-fi
-
-if test "$with_aio" = "yes"; then
- { $as_echo "$as_me:$LINENO: With aio" >&5
-$as_echo "$as_me: With aio" >&6;}
-fi
-
-
-
# Check whether --with-dl was given.
if test "${with_dl+set}" = set; then
withval=$with_dl;
@@ -21940,271 +21971,624 @@
$as_echo "$as_me: With dl" >&6;}
fi
-# Check whether --enable-storeio was given.
-if test "${enable_storeio+set}" = set; then
- enableval=$enable_storeio; case $enableval in
+# Check whether --enable-disk-io was given.
+if test "${enable_disk_io+set}" = set; then
+ enableval=$enable_disk_io; case $enableval in
yes)
- for dir in $srcdir/src/fs/*; do
+ for dir in $srcdir/src/DiskIO/*; do
module="`basename $dir`"
- if test -d "$dir" && test "$module" != CVS && test "$module" != coss; then
- STORE_MODULES="$STORE_MODULES $module"
+ if test -d "$dir" && test "$module" != CVS; then
+ { $as_echo "$as_me:$LINENO: Autodetected $module DiskIO module" >&5
+$as_echo "$as_me: Autodetected $module DiskIO module" >&6;}
+ MAYBE_DISK_MODULES="$MAYBE_DISK_MODULES $module"
fi
done
+
+cat >>confdefs.h <<\_ACEOF
+#define USE_DISKIO 1
+_ACEOF
+
;;
no)
+
+cat >>confdefs.h <<\_ACEOF
+#define USE_DISKIO 0
+_ACEOF
+
;;
*)
- STORE_MODULES="`echo $enableval| sed -e 's/,/ /g;s/ */ /g'`"
+ MAYBE_DISK_MODULES=" `echo $enableval| sed -e 's/,/ /g;s/ */ /g'` "
+
+cat >>confdefs.h <<\_ACEOF
+#define USE_DISKIO 1
+_ACEOF
+
;;
esac
else
- if test -z "$STORE_MODULES"; then
- STORE_MODULES="ufs"
- fi
+ if test -z "$MAYBE_DISK_MODULES"; then
+ { $as_echo "$as_me:$LINENO: Enabling all available DiskIO modules (default)..." >&5
+$as_echo "$as_me: Enabling all available DiskIO modules (default)..." >&6;}
+ for dir in $srcdir/src/DiskIO/*; do
+ module="`basename $dir`"
+ if test -d "$dir" && test "$module" != CVS; then
+ { $as_echo "$as_me:$LINENO: Autodetected $module DiskIO module" >&5
+$as_echo "$as_me: Autodetected $module DiskIO module" >&6;}
+ MAYBE_DISK_MODULES="$MAYBE_DISK_MODULES $module"
+ fi
+ done
+
+cat >>confdefs.h <<\_ACEOF
+#define USE_DISKIO 1
+_ACEOF
fi
+fi
-if test -n "$STORE_MODULES"; then
- STORE_MODULES_FULL=$STORE_MODULES
- STORE_MODULES=
- for module in $STORE_MODULES_FULL; do
- have_mod=`echo "$STORE_MODULES" | grep "$module"`
- if test "$have_mod" != ""; then
- { $as_echo "$as_me:$LINENO: Removing duplicate $module from storeio" >&5
-$as_echo "$as_me: Removing duplicate $module from storeio" >&6;}
- elif test -d $srcdir/src/fs/$module; then
- STORE_MODULES="$STORE_MODULES $module"
- else
- { { $as_echo "$as_me:$LINENO: error: storeio $module does not exist" >&5
-$as_echo "$as_me: error: storeio $module does not exist" >&2;}
+
+
+cat >>confdefs.h <<\_ACEOF
+#define USE_DISKIO_AIO 0
+_ACEOF
+
+
+cat >>confdefs.h <<\_ACEOF
+#define USE_DISKIO_DISKTHREADS 0
+_ACEOF
+
+USE_AIOPS_WIN32=0
+use_aio=
+use_diskthreads=
+AIOLIB=
+
+FOUND_DISKIO_AIO=
+FOUND_DISKIO_BLOCKING=
+FOUND_DISKIO_DISKDAEMON=
+FOUND_DISKIO_DISKTHREADS=
+DISK_LIBS=
+DISK_MODULES=
+DISK_LINKOBJS=
+for module in $MAYBE_DISK_MODULES none; do
+ if test "$module" = "none"; then
+ continue
+ fi
+ if ! test -d $srcdir/src/DiskIO/$module; then
+ { { $as_echo "$as_me:$LINENO: error: disk-io $module does not exist" >&5
+$as_echo "$as_me: error: disk-io $module does not exist" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+ case "$module" in
+ DiskDaemon)
+ if test "$FOUND_DISKIO_DISKDAEMON" = "yes" ; then
+ { { $as_echo "$as_me:$LINENO: error: DiskIO DiskDaemon module listed twice." >&5
+$as_echo "$as_me: error: DiskIO DiskDaemon module listed twice." >&2;}
{ (exit 1); exit 1; }; }
fi
- done
- { $as_echo "$as_me:$LINENO: Store modules built: $STORE_MODULES" >&5
-$as_echo "$as_me: Store modules built: $STORE_MODULES" >&6;}
-fi
-UFS_FOUND=
-NEED_UFS=
-NEED_BLOCKING=
-NEED_DISKDAEMON=
-NEED_DISKTHREADS=
-NEED_AIO=
-STORE_TESTS=
-for fs in $STORE_MODULES none; do
- case "$fs" in
- diskd)
- NEED_UFS="true"
- NEED_BLOCKING="true"
- NEED_DISKDAEMON="true"
- ;;
- aufs)
- NEED_UFS="true"
- NEED_BLOCKING="true"
- NEED_DISKTHREADS="true"
- ;;
- coss)
- NEED_AIO="true"
- STORE_TESTS="$STORE_TESTS tests/testCoss$EXEEXT"
+ FOUND_DISKIO_DISKDAEMON="yes"
+ { $as_echo "$as_me:$LINENO: Enabling DiskDaemon DiskIO module" >&5
+$as_echo "$as_me: Enabling DiskDaemon DiskIO module" >&6;}
+ DISK_LIBS="$DISK_LIBS libDiskDaemon.a"
+ DISK_MODULES="$DISK_MODULES DiskDaemon"
+ DISK_PROGRAMS="$DISK_PROGRAMS DiskIO/DiskDaemon/diskd"
+ DISK_LINKOBJS="$DISK_LINKOBJS DiskIO/DiskDaemon/DiskDaemonDiskIOModule.o"
;;
- ufs)
- UFS_FOUND="true"
- STORE_TESTS="$STORE_TESTS tests/testUfs$EXEEXT"
- esac
-done
+ DiskThreads)
+ if test "$FOUND_DISKIO_DISKTHREADS" = "yes" ; then
+ { { $as_echo "$as_me:$LINENO: error: DiskIO DiskThreads module listed twice." >&5
+$as_echo "$as_me: error: DiskIO DiskThreads module listed twice." >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+ FOUND_DISKIO_DISKTHREADS="yes"
+ use_diskthreads="yes"
+ LIBPTHREADS=
+ SAVE_SQUID_CFLAGS="$SQUID_CFLAGS"
+ SAVE_SQUID_CXXFLAGS="$SQUID_CXXFLAGS"
-if test -z "$UFS_FOUND" && test -n "$NEED_UFS"; then
- { $as_echo "$as_me:$LINENO: Adding UFS, as it contains core logic for diskd and aufs" >&5
-$as_echo "$as_me: Adding UFS, as it contains core logic for diskd and aufs" >&6;}
- STORE_MODULES="$STORE_MODULES ufs"
- STORE_TESTS="$STORE_TESTS tests/testUfs$EXEEXT"
+# Check whether --with-pthreads was given.
+if test "${with_pthreads+set}" = set; then
+ withval=$with_pthreads;
fi
+ if test "$with_pthreads" != "no"; then
+ case "$host" in
+ mingw|mingw32)
+ USE_AIOPS_WIN32=1
+ { $as_echo "$as_me:$LINENO: Windows threads support automatically enabled" >&5
+$as_echo "$as_me: Windows threads support automatically enabled" >&6;}
+ ;;
+ i386-unknown-freebsd*)
+ SQUID_CFLAGS="$SQUID_CFLAGS -D_REENTRANT"
+ SQUID_CXXFLAGS="$SQUID_CXXFLAGS -D_REENTRANT"
+ if test "$GCC" = "yes" ; then
+ if test -z "$PRESET_LDFLAGS"; then
+ LDFLAGS="$LDFLAGS -pthread"
+ fi
+ fi
+ ;;
+ *-solaris2.*)
+ if test "$GCC" = "yes" ; then
+ SQUID_CFLAGS="$SQUID_CFLAGS -D_REENTRANT -pthreads"
+ SQUID_CXXFLAGS="$SQUID_CXXFLAGS -D_REENTRANT -pthreads"
+ { $as_echo "$as_me:$LINENO: checking for main in -lpthread" >&5
+$as_echo_n "checking for main in -lpthread... " >&6; }
+if test "${ac_cv_lib_pthread_main+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lpthread $LIBS"
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+int
+main ()
+{
+return main ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ ac_cv_lib_pthread_main=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+ ac_cv_lib_pthread_main=no
+fi
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_pthread_main" >&5
+$as_echo "$ac_cv_lib_pthread_main" >&6; }
+if test $ac_cv_lib_pthread_main = yes; then
+ LIBPTHREADS="-lpthread"
+else
+ { $as_echo "$as_me:$LINENO: pthread library required but cannot be found." >&5
+$as_echo "$as_me: pthread library required but cannot be found." >&6;}
+ use_diskthreads="no"
+fi
+ else
+ SQUID_CFLAGS="$SQUID_CFLAGS -D_REENTRANT -lpthread"
+ SQUID_CXXFLAGS="$SQUID_CXXFLAGS -D_REENTRANT -lpthread"
+ { $as_echo "$as_me:$LINENO: checking for main in -lpthread" >&5
+$as_echo_n "checking for main in -lpthread... " >&6; }
+if test "${ac_cv_lib_pthread_main+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lpthread $LIBS"
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+int
+main ()
+{
+return main ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ ac_cv_lib_pthread_main=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+ ac_cv_lib_pthread_main=no
+fi
-STORE_LIBS_TO_BUILD=
-STORE_LIBS_TO_ADD=
-for fs in $STORE_MODULES; do
- STORE_LIBS_TO_BUILD="$STORE_LIBS_TO_BUILD lib${fs}.la"
- STORE_LIBS_TO_ADD="$STORE_LIBS_TO_ADD fs/lib${fs}.la"
- HAVE_FS_TYPE=HAVE_FS_`echo $fs | sed 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/'`
- cat >>confdefs.h <<_ACEOF
-#define $HAVE_FS_TYPE 1
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_pthread_main" >&5
+$as_echo "$ac_cv_lib_pthread_main" >&6; }
+if test $ac_cv_lib_pthread_main = yes; then
+ LIBPTHREADS="-lpthread"
+else
+ SQUID_CFLAGS="$SAVE_SQUID_CFLAGS -D_REENTRANT -lpthread -mt"
+ SQUID_CXXFLAGS="$SAVE_SQUID_CXXFLAGS -D_REENTRANT -lpthread -mt"
+ { $as_echo "$as_me:$LINENO: checking for main in -lpthread" >&5
+$as_echo_n "checking for main in -lpthread... " >&6; }
+if test "${ac_cv_lib_pthread_main+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lpthread $LIBS"
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
-done
+int
+main ()
+{
+return main ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ ac_cv_lib_pthread_main=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+ ac_cv_lib_pthread_main=no
+fi
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_pthread_main" >&5
+$as_echo "$ac_cv_lib_pthread_main" >&6; }
+if test $ac_cv_lib_pthread_main = yes; then
+ LIBPTHREADS="-lpthread"
+else
+ { $as_echo "$as_me:$LINENO: pthread library required but cannot be found." >&5
+$as_echo "$as_me: pthread library required but cannot be found." >&6;}
+ use_diskthreads="no"
+fi
-# Check whether --enable-disk-io was given.
-if test "${enable_disk_io+set}" = set; then
- enableval=$enable_disk_io; case $enableval in
- yes)
- for dir in $srcdir/src/DiskIO/*; do
- module="`basename $dir`"
- if test -d "$dir" && test "$module" != CVS; then
- { $as_echo "$as_me:$LINENO: Autodetected $module DiskIO module" >&5
-$as_echo "$as_me: Autodetected $module DiskIO module" >&6;}
- DISK_MODULES="$DISK_MODULES $module"
- fi
- done
-cat >>confdefs.h <<\_ACEOF
-#define USE_DISKIO 1
+fi
+
+ fi
+ ;;
+ *)
+ SQUID_CFLAGS="$SQUID_CFLAGS -D_REENTRANT"
+ SQUID_CXXFLAGS="$SQUID_CXXFLAGS -D_REENTRANT"
+ { $as_echo "$as_me:$LINENO: checking for main in -lpthread" >&5
+$as_echo_n "checking for main in -lpthread... " >&6; }
+if test "${ac_cv_lib_pthread_main+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lpthread $LIBS"
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
- ;;
- no)
+
+int
+main ()
+{
+return main ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ ac_cv_lib_pthread_main=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_lib_pthread_main=no
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_pthread_main" >&5
+$as_echo "$ac_cv_lib_pthread_main" >&6; }
+if test $ac_cv_lib_pthread_main = yes; then
+ DISK_LIBS="$DISK_LIBS -lpthread"
+else
+ { $as_echo "$as_me:$LINENO: pthread library required but cannot be found." >&5
+$as_echo "$as_me: pthread library required but cannot be found." >&6;}
+ use_diskthreads="no"
+
+fi
+
+ ;;
+ esac
+ else
+ { $as_echo "$as_me:$LINENO: Native pthreads support manually disabled." >&5
+$as_echo "$as_me: Native pthreads support manually disabled." >&6;}
+ use_diskthreads="no"
+ fi
+ if test "$use_diskthreads" = "yes" ; then
cat >>confdefs.h <<\_ACEOF
-#define USE_DISKIO 0
+#define USE_DISKIO_DISKTHREADS 1
_ACEOF
- ;;
- *)
- DISK_MODULES="`echo $enableval| sed -e 's/,/ /g;s/ */ /g'`"
+ { $as_echo "$as_me:$LINENO: Enabling DiskThreads DiskIO module" >&5
+$as_echo "$as_me: Enabling DiskThreads DiskIO module" >&6;}
+ DISK_LIBS="$DISK_LIBS $LIBPTHREADS libDiskThreads.a"
+ DISK_MODULES="$DISK_MODULES DiskThreads"
+ DISK_LINKOBJS="$DISK_LINKOBJS DiskIO/DiskThreads/DiskThreadsDiskIOModule.o"
+ else
cat >>confdefs.h <<\_ACEOF
-#define USE_DISKIO 1
+#define USE_DISKIO_DISKTHREADS 0
_ACEOF
+ { $as_echo "$as_me:$LINENO: Native pthreads support disabled. DiskThreads module automatically disabled." >&5
+$as_echo "$as_me: Native pthreads support disabled. DiskThreads module automatically disabled." >&6;}
+ SQUID_CFLAGS="$SAVE_SQUID_CFLAGS"
+ SQUID_CXXFLAGS="$SAVE_SQUID_CXXFLAGS"
+ fi
;;
- esac
-else
- if test -z "$DISK_MODULES"; then
- DISK_MODULES="Blocking"
- { $as_echo "$as_me:$LINENO: Enabling Blocking DiskIO module (required default)" >&5
-$as_echo "$as_me: Enabling Blocking DiskIO module (required default)" >&6;}
+ AIO)
+ if test "$FOUND_DISKIO_AIO" = "yes" ; then
+ { { $as_echo "$as_me:$LINENO: error: DiskIO AIO module listed twice." >&5
+$as_echo "$as_me: error: DiskIO AIO module listed twice." >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+ FOUND_DISKIO_AIO="yes"
+ use_aio="yes"
+ AIOLIB=
-cat >>confdefs.h <<\_ACEOF
-#define USE_DISKIO 1
-_ACEOF
+# Check whether --with-aio was given.
+if test "${with_aio+set}" = set; then
+ withval=$with_aio;
+fi
+
+ if test "$with_aio" != "no"; then
+ have_aio_header=no
- fi
+for ac_header in aio.h
+do
+as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5
+$as_echo_n "checking for $ac_header... " >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
fi
+ac_res=`eval 'as_val=${'$as_ac_Header'}
+ $as_echo "$as_val"'`
+ { $as_echo "$as_me:$LINENO: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+else
+ # Is the header compilable?
+{ $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5
+$as_echo_n "checking $ac_header usability... " >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+#include <$ac_header>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_header_compiler=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
-if test -n "$DISK_MODULES"; then
- for module in $DISK_MODULES; do
- if test -d $srcdir/src/DiskIO/$module; then
- :
- else
- { { $as_echo "$as_me:$LINENO: error: disk-io $module does not exist" >&5
-$as_echo "$as_me: error: disk-io $module does not exist" >&2;}
- { (exit 1); exit 1; }; }
- fi
- done
- DISK_LIBS="lib`echo $DISK_MODULES|sed -e 's% %.a lib%g'`.a"
- DISK_LINKOBJS=
- for module in $DISK_MODULES; do
- { $as_echo "$as_me:$LINENO: Enabling $module DiskIO module" >&5
-$as_echo "$as_me: Enabling $module DiskIO module" >&6;}
- DISK_LINKOBJS="$DISK_LINKOBJS DiskIO/${module}/${module}DiskIOModule.o"
- done
+ ac_header_compiler=no
fi
-for fs in $DISK_MODULES none; do
- case "$fs" in
- DiskDaemon)
- DISK_PROGRAMS="$DISK_PROGRAMS DiskIO/DiskDaemon/diskd"
- FOUND_DISKDAEMON="true"
- ;;
- DiskThreads)
- FOUND_DISKTHREADS="true"
- ;;
- AIO)
- FOUND_AIO="true"
- ;;
- Blocking)
- FOUND_BLOCKING="true"
- esac
-done
-if test -z "$FOUND_BLOCKING" && test -n "$NEED_BLOCKING"; then
- { $as_echo "$as_me:$LINENO: adding Blocking, as it is used by an active, legacy Store Module" >&5
-$as_echo "$as_me: adding Blocking, as it is used by an active, legacy Store Module" >&6;}
- DISK_LIBS="$DISK_LIBS libBlocking.a"
- DISK_MODULES="$DISK_MODULES Blocking"
- DISK_LINKOBJS="$DISK_LINKOBJS DiskIO/Blocking/BlockingDiskIOModule.o"
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+{ $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5
+$as_echo "$ac_header_compiler" >&6; }
+
+# Is the header present?
+{ $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5
+$as_echo_n "checking $ac_header presence... " >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <$ac_header>
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null && {
+ test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ }; then
+ ac_header_preproc=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_header_preproc=no
fi
-if test -z "$FOUND_DISKDAEMON" && test -n "$NEED_DISKDAEMON"; then
- { $as_echo "$as_me:$LINENO: \"adding DiskDaemon, as it is used by an active, legacy Store Module" >&5
-$as_echo "$as_me: \"adding DiskDaemon, as it is used by an active, legacy Store Module" >&6;}
- DISK_LIBS="$DISK_LIBS libDiskDaemon.a"
- DISK_MODULES="$DISK_MODULES DiskDaemon"
- DISK_PROGRAMS="$DISK_PROGRAMS DiskIO/DiskDaemon/diskd"
- DISK_LINKOBJS="$DISK_LINKOBJS DiskIO/DiskDaemon/DiskDaemonDiskIOModule.o"
-fi
+rm -f conftest.err conftest.$ac_ext
+{ $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5
+$as_echo "$ac_header_preproc" >&6; }
-if test -z "$FOUND_DISKTHREADS" && test -n "$NEED_DISKTHREADS"; then
- { $as_echo "$as_me:$LINENO: adding DiskThreads, as it is used by an active, legacy Store Module" >&5
-$as_echo "$as_me: adding DiskThreads, as it is used by an active, legacy Store Module" >&6;}
- DISK_LIBS="$DISK_LIBS libDiskThreads.a"
- DISK_MODULES="$DISK_MODULES DiskThreads"
- DISK_LINKOBJS="$DISK_LINKOBJS DiskIO/DiskThreads/DiskThreadsDiskIOModule.o"
+# So? What about this header?
+case $ac_header_compiler:$ac_header_preproc:$ac_cxx_preproc_warn_flag in
+ yes:no: )
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5
+$as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5
+$as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;}
+ ac_header_preproc=yes
+ ;;
+ no:yes:* )
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5
+$as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5
+$as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5
+$as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5
+$as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5
+$as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5
+$as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;}
+ ( cat <<\_ASBOX
+## ----------------------------------------------- ##
+## Report this to http://www.squid-cache.org/bugs/ ##
+## ----------------------------------------------- ##
+_ASBOX
+ ) | sed "s/^/$as_me: WARNING: /" >&2
+ ;;
+esac
+{ $as_echo "$as_me:$LINENO: checking for $ac_header" >&5
+$as_echo_n "checking for $ac_header... " >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
+else
+ eval "$as_ac_Header=\$ac_header_preproc"
fi
+ac_res=`eval 'as_val=${'$as_ac_Header'}
+ $as_echo "$as_val"'`
+ { $as_echo "$as_me:$LINENO: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
-if test -z "$FOUND_AIO" && test -n "$NEED_AIO"; then
- { $as_echo "$as_me:$LINENO: adding AIO, as it is used by an active, legacy Store Module" >&5
-$as_echo "$as_me: adding AIO, as it is used by an active, legacy Store Module" >&6;}
- DISK_LIBS="$DISK_LIBS libAIO.a"
- DISK_MODULES="$DISK_MODULES AIO"
- DISK_LINKOBJS="$DISK_LINKOBJS DiskIO/AIO/AIODiskIOModule.o"
fi
-{ $as_echo "$as_me:$LINENO: IO Modules built: $DISK_MODULES" >&5
-$as_echo "$as_me: IO Modules built: $DISK_MODULES" >&6;}
-USE_AIOPS_WIN32=0
-USE_AIO_WIN32=0
-for fs in $DISK_MODULES none; do
- case "$fs" in
- DiskThreads)
- if test -z "$with_pthreads"; then
- case "$host_os" in
- mingw|mingw32)
- USE_AIOPS_WIN32=1
- { $as_echo "$as_me:$LINENO: DiskThreads IO Module used, Windows threads support automatically enabled" >&5
-$as_echo "$as_me: DiskThreads IO Module used, Windows threads support automatically enabled" >&6;}
- ;;
- *)
- { $as_echo "$as_me:$LINENO: DiskThreads IO Module used, pthreads support automatically enabled" >&5
-$as_echo "$as_me: DiskThreads IO Module used, pthreads support automatically enabled" >&6;}
- with_pthreads=yes
- ;;
- esac
- fi
- ;;
- AIO)
- if test -z "$with_aio"; then
- case "$host_os" in
- mingw|mingw32)
- USE_AIO_WIN32=1
- { $as_echo "$as_me:$LINENO: Aio IO Module used, Windows overlapped I/O support automatically enabled" >&5
-$as_echo "$as_me: Aio IO Module used, Windows overlapped I/O support automatically enabled" >&6;}
- ;;
- *)
- { $as_echo "$as_me:$LINENO: Aio IO Module used, aio support automatically enabled" >&5
-$as_echo "$as_me: Aio IO Module used, aio support automatically enabled" >&6;}
- with_aio=yes
- ;;
- esac
- fi
- ;;
- esac
+if test `eval 'as_val=${'$as_ac_Header'}
+ $as_echo "$as_val"'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+ have_aio_header=yes
+fi
+
done
-if test "$with_aio" = "yes"; then
- { $as_echo "$as_me:$LINENO: checking for aio_read in -lrt" >&5
+ { $as_echo "$as_me:$LINENO: checking for aio_read in -lrt" >&5
$as_echo_n "checking for aio_read in -lrt... " >&6; }
if test "${ac_cv_lib_rt_aio_read+set}" = set; then
$as_echo_n "(cached) " >&6
@@ -22270,7 +22654,7 @@
{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_rt_aio_read" >&5
$as_echo "$ac_cv_lib_rt_aio_read" >&6; }
if test $ac_cv_lib_rt_aio_read = yes; then
- DISK_LIBS="$DISK_LIBS -lrt"
+ AIOLIB="-lrt"
else
{ $as_echo "$as_me:$LINENO: checking for aio_read in -laio" >&5
$as_echo_n "checking for aio_read in -laio... " >&6; }
@@ -22338,103 +22722,85 @@
{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_aio_aio_read" >&5
$as_echo "$ac_cv_lib_aio_aio_read" >&6; }
if test $ac_cv_lib_aio_aio_read = yes; then
- DISK_LIBS="$DISK_LIBS -laio"
-fi
-
-
+ AIOLIB="-laio"
fi
fi
-if test "$with_pthreads" = "yes"; then
- SQUID_CFLAGS="$SQUID_CFLAGS -D_REENTRANT"
- SQUID_CXXFLAGS="$SQUID_CXXFLAGS -D_REENTRANT"
- case "$host" in
- i386-unknown-freebsd*)
- if test "$GCC" = "yes" ; then
- if test -z "$PRESET_LDFLAGS"; then
- LDFLAGS="$LDFLAGS -pthread"
- fi
- fi
- ;;
- *-solaris2.*)
- if test "$GCC" = "yes" ; then
- SQUID_CFLAGS="$SQUID_CFLAGS -pthreads"
- SQUID_CXXFLAGS="$SQUID_CXXFLAGS -pthreads"
+ if test "$AIOLIB" != "" && test "$have_aio_header" = "yes"; then
+ { $as_echo "$as_me:$LINENO: Native POSIX AIO support detected." >&5
+$as_echo "$as_me: Native POSIX AIO support detected." >&6;}
+ use_aio="yes"
+ else
+ case "$host_os" in
+ mingw|mingw32)
+ use_aio="yes"
+ { $as_echo "$as_me:$LINENO: Windows being built. Maybe-enable POSIX AIO." >&5
+$as_echo "$as_me: Windows being built. Maybe-enable POSIX AIO." >&6;}
+ ;;
+ *)
+ { $as_echo "$as_me:$LINENO: Native POSIX AIO support not detected. AIO automatically disabled." >&5
+$as_echo "$as_me: Native POSIX AIO support not detected. AIO automatically disabled." >&6;}
+ use_aio="no"
+ ;;
+ esac
+ fi
else
- SQUID_CFLAGS="$SQUID_CFLAGS -mt"
- SQUID_CXXFLAGS="$SQUID_CXXFLAGS -mt"
- fi
- ;;
- esac
- { $as_echo "$as_me:$LINENO: checking for main in -lpthread" >&5
-$as_echo_n "checking for main in -lpthread... " >&6; }
-if test "${ac_cv_lib_pthread_main+set}" = set; then
- $as_echo_n "(cached) " >&6
-else
- ac_check_lib_save_LIBS=$LIBS
-LIBS="-lpthread $LIBS"
-cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h. */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h. */
-
+ { $as_echo "$as_me:$LINENO: POSIX AIO support manually disabled." >&5
+$as_echo "$as_me: POSIX AIO support manually disabled." >&6;}
+ use_aio="no"
+ fi
+ if test "$use_aio" = "yes" ; then
-int
-main ()
-{
-return main ();
- ;
- return 0;
-}
+cat >>confdefs.h <<\_ACEOF
+#define USE_DISKIO_AIO 1
_ACEOF
-rm -f conftest.$ac_objext conftest$ac_exeext
-if { (ac_try="$ac_link"
-case "(($ac_try" in
- *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
- *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
-$as_echo "$ac_try_echo") >&5
- (eval "$ac_link") 2>conftest.er1
- ac_status=$?
- grep -v '^ *+' conftest.er1 >conftest.err
- rm -f conftest.er1
- cat conftest.err >&5
- $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
- (exit $ac_status); } && {
- test -z "$ac_cxx_werror_flag" ||
- test ! -s conftest.err
- } && test -s conftest$ac_exeext && {
- test "$cross_compiling" = yes ||
- $as_test_x conftest$ac_exeext
- }; then
- ac_cv_lib_pthread_main=yes
-else
- $as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
- ac_cv_lib_pthread_main=no
-fi
+ DISK_MODULES="$DISK_MODULES AIO"
+ DISK_LIBS="$DISK_LIBS libAIO.a"
+ DISK_LINKOBJS="$DISK_LINKOBJS DiskIO/AIO/AIODiskIOModule.o"
+ case "$host_os" in
+ mingw|mingw32)
+ USE_AIO_WIN32=1
+ { $as_echo "$as_me:$LINENO: Replacing AIO DiskIO module with: Windows overlapped I/O support" >&5
+$as_echo "$as_me: Replacing AIO DiskIO module with: Windows overlapped I/O support" >&6;}
+ ;;
+ *)
+ { $as_echo "$as_me:$LINENO: Enabling AIO DiskIO module" >&5
+$as_echo "$as_me: Enabling AIO DiskIO module" >&6;}
+ ;;
+ esac
+ else
+ { $as_echo "$as_me:$LINENO: AIO DiskIO Module disabled. Missing POSIX AIO support." >&5
+$as_echo "$as_me: AIO DiskIO Module disabled. Missing POSIX AIO support." >&6;}
+ fi
+ ;;
-rm -rf conftest.dSYM
-rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
- conftest$ac_exeext conftest.$ac_ext
-LIBS=$ac_check_lib_save_LIBS
-fi
-{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_pthread_main" >&5
-$as_echo "$ac_cv_lib_pthread_main" >&6; }
-if test $ac_cv_lib_pthread_main = yes; then
- DISK_LIBS="$DISK_LIBS -lpthread"
-else
- { { $as_echo "$as_me:$LINENO: error: pthread library required but cannot be found." >&5
-$as_echo "$as_me: error: pthread library required but cannot be found." >&2;}
+ Blocking)
+ if test "$FOUND_DISKIO_BLOCKING" = "yes" ; then
+ { { $as_echo "$as_me:$LINENO: error: DiskIO Blocking module listed twice." >&5
+$as_echo "$as_me: error: DiskIO Blocking module listed twice." >&2;}
{ (exit 1); exit 1; }; }
-fi
+ fi
+ FOUND_DISKIO_BLOCKING="yes"
+ { $as_echo "$as_me:$LINENO: Enabling Blocking DiskIO module" >&5
+$as_echo "$as_me: Enabling Blocking DiskIO module" >&6;}
+ DISK_LIBS="$DISK_LIBS libBlocking.a"
+ DISK_MODULES="$DISK_MODULES Blocking"
+ DISK_LINKOBJS="$DISK_LINKOBJS DiskIO/Blocking/BlockingDiskIOModule.o"
+ ;;
-fi
+ *)
+ { $as_echo "$as_me:$LINENO: Enabling $module DiskIO module" >&5
+$as_echo "$as_me: Enabling $module DiskIO module" >&6;}
+ DISK_LIBS="$DISK_LIBS lib${module}.a"
+ DISK_MODULES="$DISK_MODULES ${module}"
+ DISK_LINKOBJS="$DISK_LINKOBJS DiskIO/${module}/${module}DiskIOModule.o"
+ ;;
+ esac
+done
+{ $as_echo "$as_me:$LINENO: IO Modules built: $DISK_MODULES" >&5
+$as_echo "$as_me: IO Modules built: $DISK_MODULES" >&6;}
@@ -22458,6 +22824,116 @@
+# Check whether --enable-storeio was given.
+if test "${enable_storeio+set}" = set; then
+ enableval=$enable_storeio; case $enableval in
+ yes)
+ for dir in $srcdir/src/fs/*; do
+ module="`basename $dir`"
+ if test -d "$dir" && test "$module" != CVS && test "$module" != coss; then
+ STORE_MODULES="$STORE_MODULES $module"
+ fi
+ done
+ ;;
+ no)
+ ;;
+ *)
+ STORE_MODULES="`echo $enableval| sed -e 's/,/ /g;s/ */ /g'`"
+ ;;
+ esac
+
+else
+ if test -z "$STORE_MODULES"; then
+ STORE_MODULES="ufs"
+ fi
+
+fi
+
+if test -n "$STORE_MODULES"; then
+ STORE_MODULES_FULL=$STORE_MODULES
+ STORE_MODULES=
+ for module in $STORE_MODULES_FULL; do
+ have_mod=`echo "$STORE_MODULES" | grep "$module"`
+ if test "$have_mod" != ""; then
+ { $as_echo "$as_me:$LINENO: Removing duplicate $module from storeio" >&5
+$as_echo "$as_me: Removing duplicate $module from storeio" >&6;}
+ elif test -d $srcdir/src/fs/$module; then
+ STORE_MODULES="$STORE_MODULES $module"
+ else
+ { { $as_echo "$as_me:$LINENO: error: storeio $module does not exist" >&5
+$as_echo "$as_me: error: storeio $module does not exist" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+ done
+ { $as_echo "$as_me:$LINENO: Store modules built: $STORE_MODULES" >&5
+$as_echo "$as_me: Store modules built: $STORE_MODULES" >&6;}
+fi
+for fs in $STORE_MODULES none; do
+ case "$fs" in
+ diskd)
+ if ! test "$FOUND_DISKIO_BLOCKING" = "yes" && ! test "$FOUND_DISKIO_DISKDAEMON" = "yes" ; then
+ { { $as_echo "$as_me:$LINENO: error: Storage diskd module requires DiskIO modules: Blocking or DiskDaemon" >&5
+$as_echo "$as_me: error: Storage diskd module requires DiskIO modules: Blocking or DiskDaemon" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+ NEED_UFS="true"
+ ;;
+ aufs)
+ if ! test "$FOUND_DISKIO_BLOCKING" = "yes" && ! test "$FOUND_DISKIO_DISKTHREADS" = "yes" ; then
+ { { $as_echo "$as_me:$LINENO: error: Storage diskd module requires DiskIO modules: Blocking or DiskThreads" >&5
+$as_echo "$as_me: error: Storage diskd module requires DiskIO modules: Blocking or DiskThreads" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+ NEED_UFS="true"
+ ;;
+ coss)
+ if ! test "$FOUND_DISKIO_AIO" = "yes"; then
+ { { $as_echo "$as_me:$LINENO: error: COSS requires POSIX AIO which is not available." >&5
+$as_echo "$as_me: error: COSS requires POSIX AIO which is not available." >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+ STORE_TESTS="$STORE_TESTS tests/testCoss$EXEEXT"
+ ;;
+ ufs)
+ UFS_FOUND="true"
+ STORE_TESTS="$STORE_TESTS tests/testUfs$EXEEXT"
+ esac
+done
+
+if test -z "$UFS_FOUND" && test -n "$NEED_UFS"; then
+ { $as_echo "$as_me:$LINENO: Adding UFS, as it contains core logic for diskd and aufs" >&5
+$as_echo "$as_me: Adding UFS, as it contains core logic for diskd and aufs" >&6;}
+ STORE_MODULES="$STORE_MODULES ufs"
+ STORE_TESTS="$STORE_TESTS tests/testUfs$EXEEXT"
+fi
+
+
+
+
+
+
+
+
+
+
+
+STORE_LIBS_TO_BUILD=
+STORE_LIBS_TO_ADD=
+for fs in $STORE_MODULES; do
+ STORE_LIBS_TO_BUILD="$STORE_LIBS_TO_BUILD lib${fs}.la"
+ STORE_LIBS_TO_ADD="$STORE_LIBS_TO_ADD fs/lib${fs}.la"
+ HAVE_FS_TYPE=HAVE_FS_`echo $fs | sed 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/'`
+ cat >>confdefs.h <<_ACEOF
+#define $HAVE_FS_TYPE 1
+_ACEOF
+
+done
+
+
+
+
+
+
REPL_POLICIES="lru"
# Check whether --enable-removal-policies was given.
if test "${enable_removal_policies+set}" = set; then
@@ -23350,7 +23826,6 @@
disable_kqueue=true
-
# Check whether --enable-kqueue was given.
if test "${enable_kqueue+set}" = set; then
enableval=$enable_kqueue;
@@ -24058,9 +24533,9 @@
int fd = epoll_create(256);
if (fd < 0) {
perror("epoll_create:");
- exit(1);
+ return 1;
}
- exit(0);
+ return 0;
}
_ACEOF
@@ -25427,7 +25902,6 @@
{ { $as_echo "$as_me:$LINENO: error: Cannot find cppunit at $withval" >&5
$as_echo "$as_me: error: Cannot find cppunit at $withval" >&2;}
{ (exit 1); exit 1; }; }
- exit 1
fi
fi
@@ -26784,9 +27258,7 @@
-
for ac_header in \
- aio.h \
arpa/inet.h \
arpa/nameser.h \
assert.h \
@@ -26814,6 +27286,7 @@
libc.h \
libgen.h \
limits.h \
+ linux/types.h \
machine/byte_swap.h \
malloc.h \
math.h \
@@ -26877,8 +27350,7 @@
inttypes.h \
grp.h \
db.h \
- db_185.h \
- sys/capability.h
+ db_185.h
do
as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
@@ -27568,20 +28040,20 @@
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
_ACEOF
-
-else
-
- CPPFLAGS="$SAVED_CPPFLAGS"
-
+ ac_cv_libxml2_include=yes
fi
done
+ CPPFLAGS="$SAVED_CPPFLAGS"
fi
done
+if test "x$ac_cv_libxml2_include" = "xyes"; then
+ SQUID_CXXFLAGS="-I/usr/include/libxml2 $SQUID_CXXFLAGS"
+fi
{ $as_echo "$as_me:$LINENO: checking for an ANSI C-conforming const" >&5
$as_echo_n "checking for an ANSI C-conforming const... " >&6; }
@@ -44263,6 +44735,243 @@
fi
+use_caps=yes
+# Check whether --enable-caps was given.
+if test "${enable_caps+set}" = set; then
+ enableval=$enable_caps; if test "x$enableval" = "xyes" ; then
+ { $as_echo "$as_me:$LINENO: result: forced yes" >&5
+$as_echo "forced yes" >&6; }
+ else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+ use_caps=no
+ fi
+
+else
+ { $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+fi
+
+if test "x$use_caps" = "xyes"; then
+ libcap_broken=1
+
+for ac_header in sys/capability.h
+do
+as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5
+$as_echo_n "checking for $ac_header... " >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
+fi
+ac_res=`eval 'as_val=${'$as_ac_Header'}
+ $as_echo "$as_val"'`
+ { $as_echo "$as_me:$LINENO: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+else
+ # Is the header compilable?
+{ $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5
+$as_echo_n "checking $ac_header usability... " >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+#include <$ac_header>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_header_compiler=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_header_compiler=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+{ $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5
+$as_echo "$ac_header_compiler" >&6; }
+
+# Is the header present?
+{ $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5
+$as_echo_n "checking $ac_header presence... " >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <$ac_header>
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null && {
+ test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ }; then
+ ac_header_preproc=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_header_preproc=no
+fi
+
+rm -f conftest.err conftest.$ac_ext
+{ $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5
+$as_echo "$ac_header_preproc" >&6; }
+
+# So? What about this header?
+case $ac_header_compiler:$ac_header_preproc:$ac_cxx_preproc_warn_flag in
+ yes:no: )
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5
+$as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5
+$as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;}
+ ac_header_preproc=yes
+ ;;
+ no:yes:* )
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5
+$as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5
+$as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5
+$as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5
+$as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5
+$as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5
+$as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;}
+ ( cat <<\_ASBOX
+## ----------------------------------------------- ##
+## Report this to http://www.squid-cache.org/bugs/ ##
+## ----------------------------------------------- ##
+_ASBOX
+ ) | sed "s/^/$as_me: WARNING: /" >&2
+ ;;
+esac
+{ $as_echo "$as_me:$LINENO: checking for $ac_header" >&5
+$as_echo_n "checking for $ac_header... " >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
+else
+ eval "$as_ac_Header=\$ac_header_preproc"
+fi
+ac_res=`eval 'as_val=${'$as_ac_Header'}
+ $as_echo "$as_val"'`
+ { $as_echo "$as_me:$LINENO: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+
+fi
+if test `eval 'as_val=${'$as_ac_Header'}
+ $as_echo "$as_val"'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+ { $as_echo "$as_me:$LINENO: checking for operational libcap2" >&5
+$as_echo_n "checking for operational libcap2... " >&6; }
+if { as_var=$libcap_broken; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <sys/capability.h>
+int
+main ()
+{
+
+ capget(NULL, NULL);
+ capset(NULL, NULL);
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ libcap_broken=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+
+fi
+ac_res=`eval 'as_val=${'$libcap_broken'}
+ $as_echo "$as_val"'`
+ { $as_echo "$as_me:$LINENO: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+
+cat >>confdefs.h <<_ACEOF
+#define LIBCAP_BROKEN $libcap_broken
+_ACEOF
+
+fi
+
{ $as_echo "$as_me:$LINENO: checking for mtyp_t" >&5
$as_echo_n "checking for mtyp_t... " >&6; }
if test "${ac_cv_type_mtyp_t+set}" = set; then
@@ -45528,7 +46237,7 @@
/* PF_INET6 available check */
 # include <sys/types.h>
 # include <sys/socket.h>
- int main() {
+ int main(int argc, char **argv) {
if (socket(PF_INET6, SOCK_STREAM, 0) < 0)
return 1;
else
@@ -45631,7 +46340,7 @@
#if HAVE_NETINET_IN6_H
 # include <netinet/in6.h>
#endif
- int main() {
+ int main(int argc, char **argv) {
int s = socket(PF_INET6, SOCK_STREAM, 0);
int tos = 0;
if (setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, (char *) &tos, sizeof(int)) < 0)
@@ -47132,12 +47841,12 @@
/* end confdefs.h. */
#include
- int main() {
+ int main(int argc, char **argv) {
if(setresuid(-1,-1,-1)) {
perror("setresuid:");
- exit(1);
+ return 1;
}
- exit(0);
+ return 0;
}
_ACEOF
@@ -47298,18 +48007,16 @@
 #include <stdarg.h>
 #include <stdlib.h>
- void f (int i, ...) {
+ int f (int i, ...) {
va_list args1, args2;
va_start (args1, i);
va_copy (args2, args1);
if (va_arg (args2, int) != 42 || va_arg (args1, int) != 42)
- exit (1);
+ return 1;
va_end (args1); va_end (args2);
- }
- int main() {
- f (0, 42);
return 0;
}
+ int main(int argc, char **argv) { return f (0, 42); }
_ACEOF
rm -f conftest$ac_exeext
@@ -47381,18 +48088,16 @@
 #include <stdarg.h>
 #include <stdlib.h>
- void f (int i, ...) {
+ int f (int i, ...) {
va_list args1, args2;
va_start (args1, i);
__va_copy (args2, args1);
if (va_arg (args2, int) != 42 || va_arg (args1, int) != 42)
- exit (1);
+ return 1;
va_end (args1); va_end (args2);
- }
- int main() {
- f (0, 42);
return 0;
}
+ int main(int argc, char **argv) { return f (0, 42); }
_ACEOF
rm -f conftest$ac_exeext
@@ -47552,8 +48257,18 @@
$as_echo "$as_me: WARNING: Linux Transparent and Intercepting Proxy support WILL NOT be enabled" >&2;}
sleep 10
fi
+if test "$LINUX_NETFILTER" = "yes" && test "$use_caps" != "yes" ; then
+ { $as_echo "$as_me:$LINENO: WARNING: Missing needed capabilities (libcap or libcap2) for TPROXY" >&5
+$as_echo "$as_me: WARNING: Missing needed capabilities (libcap or libcap2) for TPROXY" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: Linux Transparent Proxy support WILL NOT be enabled" >&5
+$as_echo "$as_me: WARNING: Linux Transparent Proxy support WILL NOT be enabled" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: Reduced support to Interception Proxy" >&5
+$as_echo "$as_me: WARNING: Reduced support to Interception Proxy" >&2;}
+ sleep 10
+fi
-if test "$LINUX_TPROXY2" ; then
+if test "$LINUX_TPROXY2"; then
+ if test "$use_caps" = "yes"; then
{ $as_echo "$as_me:$LINENO: checking if TPROXYv2 header files are installed" >&5
$as_echo_n "checking if TPROXYv2 header files are installed... " >&6; }
# hold on to your hats...
@@ -47585,6 +48300,13 @@
$as_echo "$as_me: WARNING: Or select the '--enable-linux-netfilter' option instead for Netfilter support." >&2;}
sleep 10
fi
+ else
+ { $as_echo "$as_me:$LINENO: WARNING: Missing needed capabilities (libcap or libcap2) for TPROXY v2" >&5
+$as_echo "$as_me: WARNING: Missing needed capabilities (libcap or libcap2) for TPROXY v2" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: Linux Transparent Proxy support WILL NOT be enabled" >&5
+$as_echo "$as_me: WARNING: Linux Transparent Proxy support WILL NOT be enabled" >&2;}
+ sleep 10
+ fi
fi
# Check whether --enable-gnuregex was given.
@@ -47703,10 +48425,10 @@
#if HAVE_WINSOCK2_H
 #include <winsock2.h>
#endif
-main() {
+int main(int argc, char **argv) {
FILE *fp = fopen("conftestval", "w");
fprintf (fp, "%d\n", FD_SETSIZE);
- exit(0);
+ return 0;
}
_ACEOF
@@ -47785,7 +48507,7 @@
 #include <sys/time.h> /* needed on FreeBSD */
 #include <sys/param.h>
 #include <sys/resource.h>
-main() {
+int main(int argc, char **argv) {
FILE *fp;
int i,j;
#if defined(__CYGWIN32__) || defined (__CYGWIN__)
@@ -47838,7 +48560,7 @@
#endif /* IF !DEF CYGWIN */
fp = fopen("conftestval", "w");
fprintf (fp, "%d\n", i & ~0x3F);
- exit(0);
+ return 0;
}
_ACEOF
@@ -47937,7 +48659,7 @@
#if HAVE_WINSOCK2_H
 #include <winsock2.h>
#endif
-main ()
+int main(int argc, char **argv)
{
FILE *fp;
int fd,val=0;
@@ -47948,17 +48670,17 @@
#else
socklen_t len=sizeof(socklen_t);
#endif
- if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) < 0) exit(1);
+ if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) < 0) return 1;
#if (defined(WIN32) || defined(__WIN32__) || defined(__WIN32)) && !(defined(__CYGWIN32__) || defined(__CYGWIN__))
- if (getsockopt(fd, SOL_SOCKET, SO_SNDBUF, (char *)&val, &len) < 0) exit(1);
+ if (getsockopt(fd, SOL_SOCKET, SO_SNDBUF, (char *)&val, &len) < 0) return 1;
WSACleanup();
#else
- if (getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, &len) < 0) exit(1);
+ if (getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, &len) < 0) return 1;
#endif
- if (val<=0) exit(1);
+ if (val<=0) return 1;
fp = fopen("conftestval", "w");
fprintf (fp, "%d\n", val);
- exit(0);
+ return 0;
}
_ACEOF
@@ -48033,7 +48755,7 @@
#if HAVE_WINSOCK2_H
 #include <winsock2.h>
#endif
-main ()
+int main(int argc, char **argv)
{
FILE *fp;
int fd,val=0;
@@ -48044,17 +48766,17 @@
#else
socklen_t len=sizeof(socklen_t);
#endif
- if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) < 0) exit(1);
+ if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) < 0) return 1;
#if (defined(WIN32) || defined(__WIN32__) || defined(__WIN32)) && !(defined(__CYGWIN32__) || defined(__CYGWIN__))
- if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, (char *)&val, &len) < 0) exit(1);
+ if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, (char *)&val, &len) < 0) return 1;
WSACleanup();
#else
- if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len) < 0) exit(1);
+ if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len) < 0) return 1;
#endif
- if (val <= 0) exit(1);
+ if (val <= 0) return 1;
fp = fopen("conftestval", "w");
fprintf (fp, "%d\n", val);
- exit(0);
+ return 0;
}
_ACEOF
@@ -48129,7 +48851,7 @@
#if HAVE_WINSOCK2_H
 #include <winsock2.h>
#endif
-main ()
+int main(int argc, char **argv)
{
FILE *fp;
int fd,val=0;
@@ -48140,17 +48862,17 @@
#else
socklen_t len=sizeof(socklen_t);
#endif
- if ((fd = socket(AF_INET, SOCK_STREAM, 0)) < 0) exit(1);
+ if ((fd = socket(AF_INET, SOCK_STREAM, 0)) < 0) return 1;
#if (defined(WIN32) || defined(__WIN32__) || defined(__WIN32)) && !(defined(__CYGWIN32__) || defined(__CYGWIN__))
- if (getsockopt(fd, SOL_SOCKET, SO_SNDBUF, (char *)&val, &len) < 0) exit(1);
+ if (getsockopt(fd, SOL_SOCKET, SO_SNDBUF, (char *)&val, &len) < 0) return 1;
WSACleanup();
#else
- if (getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, &len) < 0) exit(1);
+ if (getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, &len) < 0) return 1;
#endif
- if (val <= 0) exit(1);
+ if (val <= 0) return 1;
fp = fopen("conftestval", "w");
fprintf (fp, "%d\n", val);
- exit(0);
+ return 0;
}
_ACEOF
@@ -48230,7 +48952,7 @@
#if HAVE_WINSOCK2_H
 #include <winsock2.h>
#endif
-main ()
+int main(int argc, char **argv)
{
FILE *fp;
int fd,val=0;
@@ -48241,17 +48963,17 @@
#else
socklen_t len=sizeof(socklen_t);
#endif
- if ((fd = socket(AF_INET, SOCK_STREAM, 0)) < 0) exit(1);
+ if ((fd = socket(AF_INET, SOCK_STREAM, 0)) < 0) return 1;
#if (defined(WIN32) || defined(__WIN32__) || defined(__WIN32)) && !(defined(__CYGWIN32__) || defined(__CYGWIN__))
- if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, (char *)&val, &len) < 0) exit(1);
+ if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, (char *)&val, &len) < 0) return 1;
WSACleanup();
#else
- if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len) < 0) exit(1);
+ if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len) < 0) return 1;
#endif
- if (val <= 0) exit(1);
+ if (val <= 0) return 1;
fp = fopen("conftestval", "w");
fprintf (fp, "%d\n", val);
- exit(0);
+ return 0;
}
_ACEOF
@@ -49692,7 +50414,7 @@
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
-This file was extended by Squid Web Proxy $as_me 3.1.0.13, which was
+This file was extended by Squid Web Proxy $as_me 3.1.0.14, which was
generated by GNU Autoconf 2.62. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -49745,7 +50467,7 @@
_ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_version="\\
-Squid Web Proxy config.status 3.1.0.13
+Squid Web Proxy config.status 3.1.0.14
configured by $0, generated by GNU Autoconf 2.62,
with options \\"`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`\\"
diff -u -r -N squid-3.1.0.13/configure.in squid-3.1.0.14/configure.in
--- squid-3.1.0.13/configure.in 2009-08-05 01:32:53.000000000 +1200
+++ squid-3.1.0.14/configure.in 2009-09-27 15:29:12.000000000 +1200
@@ -2,9 +2,9 @@
dnl
dnl $Id$
dnl
-AC_INIT(Squid Web Proxy, 3.1.0.13, http://www.squid-cache.org/bugs/, squid)
-AC_PREREQ(2.52)
-AM_CONFIG_HEADER(include/autoconf.h)
+AC_INIT([Squid Web Proxy],[3.1.0.14],[http://www.squid-cache.org/bugs/],[squid])
+AC_PREREQ(2.61)
+AC_CONFIG_HEADERS([include/autoconf.h])
AC_CONFIG_AUX_DIR(cfgaux)
AC_CONFIG_SRCDIR([src/main.cc])
AM_INIT_AUTOMAKE([tar-ustar nostdinc])
@@ -24,7 +24,7 @@
AC_PROG_CC
AM_PROG_CC_C_O
AC_PROG_CXX
-AC_LANG_CPLUSPLUS
+AC_LANG([C++])
AC_CANONICAL_HOST
dnl Make the squid top srcdir available to sub-packages as --with-squid=PATH
@@ -35,7 +35,7 @@
use_loadable_modules=1
AC_MSG_CHECKING(whether to use loadable modules)
AC_ARG_ENABLE(loadable-modules,
- AC_HELP_STRING( [--disable-loadable-modules], [do not support loadable modules]) ,
+ AS_HELP_STRING([--disable-loadable-modules],[do not support loadable modules]) ,
[
case "${enableval}" in
yes) use_loadable_modules=yes ;;
@@ -151,16 +151,14 @@
CACHE_EFFECTIVE_USER="nobody"
AC_ARG_WITH(default-user,
- AC_HELP_STRING([--with-default-user=USER],
- [System user account for squid permissions. Default: nobody]),
+ AS_HELP_STRING([--with-default-user=USER],[System user account for squid permissions. Default: nobody]),
[ CACHE_EFFECTIVE_USER="$withval" ]
)
AC_SUBST(CACHE_EFFECTIVE_USER)
DEFAULT_LOG_DIR="$localstatedir/logs"
AC_ARG_WITH(logdir,
- AC_HELP_STRING([--with-logdir=PATH],
- Default location for squid logs. default: $DEFAULT_LOG_DIR),
+ AS_HELP_STRING([--with-logdir=PATH],[Default location for squid logs. default: $DEFAULT_LOG_DIR]),
[ case $withval in
yes|no)
AC_MSG_ERROR( --with-logdir requires a directory PATH. --with-logdir=PATH )
@@ -175,8 +173,7 @@
DEFAULT_PIDFILE="$localstatedir/squid.pid"
AC_ARG_WITH(pidfile,
- AC_HELP_STRING([--with-pidfile=PATH],
- Default location for squid PID file. default: $DEFAULT_PIDFILE),
+ AS_HELP_STRING([--with-pidfile=PATH],[Default location for squid PID file. default: $DEFAULT_PIDFILE]),
[ case $withval in
yes|no)
AC_MSG_ERROR( --with-pidfile requires a file PATH. --with-pidfile=PATH )
@@ -300,8 +297,7 @@
SquidInline="yes"
AC_ARG_ENABLE(optimizations,
- AC_HELP_STRING([--disable-optimizations],
- [Don't compile Squid with compiler optimizations enabled.
+ AS_HELP_STRING([--disable-optimizations],[Don't compile Squid with compiler optimizations enabled.
Optimization is good for production builds, but not
good for debugging. During development, use
--disable-optimizations to reduce compilation times
@@ -316,8 +312,7 @@
])
AC_ARG_ENABLE(inline,
- AC_HELP_STRING([--disable-inline],
- [Don't compile trivial methods as inline. Squid
+ AS_HELP_STRING([--disable-inline],[Don't compile trivial methods as inline. Squid
is coded with much of the code able to be inlined.
Inlining is good for production builds, but not
good for development. During development, use
@@ -340,14 +335,17 @@
fi
AC_ARG_ENABLE(debug-cbdata,
- AC_HELP_STRING([--enable-debug-cbdata],
- [Provide some debug information in cbdata]),
+ AS_HELP_STRING([--enable-debug-cbdata],[Provide some debug information in cbdata]),
[ if test "$enableval" = "yes" ; then
AC_MSG_NOTICE([cbdata debugging enabled])
AC_DEFINE(CBDATA_DEBUG,1,[Enable for cbdata debug information])
fi
])
+dnl Nasty hack to get autoconf 2.64 on Linux to run.
+dnl all other uses of RUN_IFELSE are wrapped inside CACHE_CHECK which breaks on 2.64
+AC_RUN_IFELSE([AC_LANG_SOURCE([[ int main(int argc, char **argv) { return 0; } ]])],[],[],[])
+
dnl This is a developer only option.. developers know how to set defines
dnl
dnl AC_ARG_ENABLE(xmalloc-debug,
@@ -371,8 +369,7 @@
dnl ])
AC_ARG_ENABLE(xmalloc-statistics,
- AC_HELP_STRING([--enable-xmalloc-statistics],
- [Show malloc statistics in status page]),
+ AS_HELP_STRING([--enable-xmalloc-statistics],[Show malloc statistics in status page]),
[ if test "$enableval" = "yes" ; then
AC_MSG_NOTICE([malloc statistics enabled])
AC_DEFINE(XMALLOC_STATISTICS,1,[Define to have malloc statistics])
@@ -380,8 +377,7 @@
])
AC_ARG_ENABLE(async-io,
- AC_HELP_STRING([--enable-async-io[=N_THREADS]],
- [Shorthand for "--with-aufs-threads=N_THREADS --with-pthreads
+ AS_HELP_STRING([--enable-async-io[=N_THREADS]],[Shorthand for "--with-aufs-threads=N_THREADS --with-pthreads
--enable-storeio=ufs,aufs"]),
[ case $enableval in
yes)
@@ -399,8 +395,7 @@
])
AC_ARG_WITH(aufs-threads,
- AC_HELP_STRING([--with-aufs-threads=N_THREADS],
- [Tune the number of worker threads for the aufs object store.]),
+ AS_HELP_STRING([--with-aufs-threads=N_THREADS],[Tune the number of worker threads for the aufs object store.]),
[ case $withval in
[[0-9]]*)
aufs_io_threads=$withval
@@ -416,28 +411,252 @@
[Defines how many threads aufs uses for I/O])
fi
-AC_ARG_WITH(pthreads,
- AC_HELP_STRING([--with-pthreads],[Use POSIX Threads]))
-if test "$with_pthreads" = "yes"; then
- AC_MSG_NOTICE([With pthreads])
-fi
-
-AC_ARG_WITH(aio,
- AC_HELP_STRING([--with-aio],[Use POSIX AIO]))
-if test "$with_aio" = "yes"; then
- AC_MSG_NOTICE([With aio])
-fi
-
-
AC_ARG_WITH(dl,
- AC_HELP_STRING([--with-dl],[Use dynamic linking]))
+ AS_HELP_STRING([--with-dl],[Use dynamic linking]))
if test "$with_dl" = "yes"; then
AC_MSG_NOTICE([With dl])
fi
+AC_ARG_ENABLE(disk-io,
+ AS_HELP_STRING([--enable-disk-io="list of modules"],[Build support for the list of disk I/O modules.
+ Set without a value or omitted, all available modules will be built.
+ See src/DiskIO for a list of available modules, or
+ Programmers Guide section on DiskIO
+ for details on how to build your custom disk module]),
+[ case $enableval in
+ yes)
+ for dir in $srcdir/src/DiskIO/*; do
+ module="`basename $dir`"
+ if test -d "$dir" && test "$module" != CVS; then
+ AC_MSG_NOTICE([Autodetected $module DiskIO module])
+ MAYBE_DISK_MODULES="$MAYBE_DISK_MODULES $module"
+ fi
+ done
+ AC_DEFINE(USE_DISKIO,1,[DiskIO modules are expected to be available.])
+ ;;
+ no)
+ AC_DEFINE(USE_DISKIO,0,[DiskIO modules are expected to be available.])
+ ;;
+ *)
+ MAYBE_DISK_MODULES=" `echo $enableval| sed -e 's/,/ /g;s/ */ /g'` "
+ AC_DEFINE(USE_DISKIO,1,[DiskIO modules are expected to be available.])
+ ;;
+ esac
+],
+[ if test -z "$MAYBE_DISK_MODULES"; then
+ AC_MSG_NOTICE([Enabling all available DiskIO modules (default)...])
+ for dir in $srcdir/src/DiskIO/*; do
+ module="`basename $dir`"
+ if test -d "$dir" && test "$module" != CVS; then
+ AC_MSG_NOTICE([Autodetected $module DiskIO module])
+ MAYBE_DISK_MODULES="$MAYBE_DISK_MODULES $module"
+ fi
+ done
+ AC_DEFINE(USE_DISKIO,1,[DiskIO modules are expected to be available.])
+fi ])
+
+dnl Some autoconf.h defines we might enable later...
+AC_DEFINE(USE_DISKIO_AIO, 0, [Whether POSIX AIO support is needed. Automatic])
+AC_DEFINE(USE_DISKIO_DISKTHREADS, 0, [Whether pthreads support is needed. Automatic])
+USE_AIOPS_WIN32=0
+use_aio=
+use_diskthreads=
+AIOLIB=
+
+dnl Setup the module paths etc.
+FOUND_DISKIO_AIO=
+FOUND_DISKIO_BLOCKING=
+FOUND_DISKIO_DISKDAEMON=
+FOUND_DISKIO_DISKTHREADS=
+DISK_LIBS=
+DISK_MODULES=
+DISK_LINKOBJS=
+for module in $MAYBE_DISK_MODULES none; do
+ if test "$module" = "none"; then
+ continue
+ fi
+ if ! test -d $srcdir/src/DiskIO/$module; then
+ AC_MSG_ERROR(disk-io $module does not exist)
+ fi
+ case "$module" in
+ DiskDaemon)
+ if test "$FOUND_DISKIO_DISKDAEMON" = "yes" ; then
+ AC_MSG_ERROR([DiskIO DiskDaemon module listed twice.])
+ fi
+ FOUND_DISKIO_DISKDAEMON="yes"
+ AC_MSG_NOTICE([Enabling DiskDaemon DiskIO module])
+ DISK_LIBS="$DISK_LIBS libDiskDaemon.a"
+ DISK_MODULES="$DISK_MODULES DiskDaemon"
+ DISK_PROGRAMS="$DISK_PROGRAMS DiskIO/DiskDaemon/diskd"
+ DISK_LINKOBJS="$DISK_LINKOBJS DiskIO/DiskDaemon/DiskDaemonDiskIOModule.o"
+ ;;
+ DiskThreads)
+ if test "$FOUND_DISKIO_DISKTHREADS" = "yes" ; then
+ AC_MSG_ERROR([DiskIO DiskThreads module listed twice.])
+ fi
+ FOUND_DISKIO_DISKTHREADS="yes"
+ use_diskthreads="yes"
+ LIBPTHREADS=
+ SAVE_SQUID_CFLAGS="$SQUID_CFLAGS"
+ SAVE_SQUID_CXXFLAGS="$SQUID_CXXFLAGS"
+ AC_ARG_WITH(pthreads,AS_HELP_STRING([--without-pthreads],[Disable POSIX Threads]))
+ if test "$with_pthreads" != "no"; then
+ dnl TODO: this needs to be extended to handle more systems and better
+ dnl REF: http://www.openldap.org/lists/openldap-bugs/200006/msg00070.html
+ dnl REF: http://autoconf-archive.cryp.to/acx_pthread.html
+ case "$host" in
+ mingw|mingw32)
+ USE_AIOPS_WIN32=1
+ AC_MSG_NOTICE([Windows threads support automatically enabled])
+ ;;
+ i386-unknown-freebsd*)
+ SQUID_CFLAGS="$SQUID_CFLAGS -D_REENTRANT"
+ SQUID_CXXFLAGS="$SQUID_CXXFLAGS -D_REENTRANT"
+ if test "$GCC" = "yes" ; then
+ if test -z "$PRESET_LDFLAGS"; then
+ LDFLAGS="$LDFLAGS -pthread"
+ fi
+ fi
+ ;;
+ *-solaris2.*)
+ if test "$GCC" = "yes" ; then
+ SQUID_CFLAGS="$SQUID_CFLAGS -D_REENTRANT -pthreads"
+ SQUID_CXXFLAGS="$SQUID_CXXFLAGS -D_REENTRANT -pthreads"
+ AC_CHECK_LIB(pthread, main,[LIBPTHREADS="-lpthread"],
+ [ AC_MSG_NOTICE(pthread library required but cannot be found.)
+ use_diskthreads="no"
+ ])
+ else
+ dnl test for -lpthread first. libc version is a stub apparently on Solaris.
+ SQUID_CFLAGS="$SQUID_CFLAGS -D_REENTRANT -lpthread"
+ SQUID_CXXFLAGS="$SQUID_CXXFLAGS -D_REENTRANT -lpthread"
+ AC_CHECK_LIB(pthread, main,[LIBPTHREADS="-lpthread"],
+ [ SQUID_CFLAGS="$SAVE_SQUID_CFLAGS -D_REENTRANT -lpthread -mt"
+ SQUID_CXXFLAGS="$SAVE_SQUID_CXXFLAGS -D_REENTRANT -lpthread -mt"
+ AC_CHECK_LIB(pthread, main,[LIBPTHREADS="-lpthread"],
+ [ AC_MSG_NOTICE(pthread library required but cannot be found.)
+ use_diskthreads="no"
+ ])
+ ])
+ fi
+ ;;
+ *)
+ SQUID_CFLAGS="$SQUID_CFLAGS -D_REENTRANT"
+ SQUID_CXXFLAGS="$SQUID_CXXFLAGS -D_REENTRANT"
+ AC_CHECK_LIB(pthread, main,[DISK_LIBS="$DISK_LIBS -lpthread"],
+ [ AC_MSG_NOTICE(pthread library required but cannot be found.)
+ use_diskthreads="no"
+ ])
+ ;;
+ esac
+ else
+ AC_MSG_NOTICE([Native pthreads support manually disabled.])
+ use_diskthreads="no"
+ fi
+ if test "$use_diskthreads" = "yes" ; then
+ AC_DEFINE(USE_DISKIO_DISKTHREADS, 1, [Whether pthreads support is needed. Automatic])
+ AC_MSG_NOTICE([Enabling DiskThreads DiskIO module])
+ DISK_LIBS="$DISK_LIBS $LIBPTHREADS libDiskThreads.a"
+ DISK_MODULES="$DISK_MODULES DiskThreads"
+ DISK_LINKOBJS="$DISK_LINKOBJS DiskIO/DiskThreads/DiskThreadsDiskIOModule.o"
+ else
+ AC_DEFINE(USE_DISKIO_DISKTHREADS, 0, [Whether pthreads support is needed. Automatic])
+ AC_MSG_NOTICE([Native pthreads support disabled. DiskThreads module automaticaly disabled.])
+ SQUID_CFLAGS="$SAVE_SQUID_CFLAGS"
+ SQUID_CXXFLAGS="$SAVE_SQUID_CXXFLAGS"
+ fi
+ ;;
+
+ AIO)
+ if test "$FOUND_DISKIO_AIO" = "yes" ; then
+ AC_MSG_ERROR([DiskIO AIO module listed twice.])
+ fi
+ FOUND_DISKIO_AIO="yes"
+ dnl Check for POSIX AIO availability
+ use_aio="yes"
+ AIOLIB=
+ AC_ARG_WITH(aio, AS_HELP_STRING([--without-aio],[Do not use POSIX AIO. Default: auto-detect]))
+ if test "$with_aio" != "no"; then
+ have_aio_header=no
+ AC_CHECK_HEADERS(aio.h,[have_aio_header=yes])
+ dnl On some systems POSIX AIO functions are in librt
+ dnl On some systems POSIX AIO functions are in libaio
+ AC_CHECK_LIB(rt,aio_read,[AIOLIB="-lrt"],AC_CHECK_LIB(aio,aio_read,[AIOLIB="-laio"],[]))
+ dnl Enable AIO if the library and headers are found
+ if test "$AIOLIB" != "" && test "$have_aio_header" = "yes"; then
+ AC_MSG_NOTICE([Native POSIX AIO support detected.])
+ use_aio="yes"
+ else
+ dnl Windows does things differently. We provide wrappers.
+ dnl TODO: Windows really needs its own DiskIO module or its Overlaped IO
+ case "$host_os" in
+ mingw|mingw32)
+ use_aio="yes"
+ AC_MSG_NOTICE([Windows being built. Maybe-enable POSIX AIO.])
+ ;;
+ *)
+ AC_MSG_NOTICE([Native POSIX AIO support not detected. AIO automatically disabled.])
+ use_aio="no"
+ ;;
+ esac
+ fi
+ else
+ AC_MSG_NOTICE([POSIX AIO support manually disabled.])
+ use_aio="no"
+ fi
+ dnl Use the POSIX AIO pieces if we actually need them.
+ if test "$use_aio" = "yes" ; then
+ AC_DEFINE(USE_DISKIO_AIO, 1, [Whether POSIX AIO support is needed. Automatic])
+ DISK_MODULES="$DISK_MODULES AIO"
+ DISK_LIBS="$DISK_LIBS libAIO.a"
+ DISK_LINKOBJS="$DISK_LINKOBJS DiskIO/AIO/AIODiskIOModule.o"
+ case "$host_os" in
+ mingw|mingw32)
+ USE_AIO_WIN32=1
+ AC_MSG_NOTICE([Replacing AIO DiskIO module with: Windows overlapped I/O support])
+ ;;
+ *)
+ AC_MSG_NOTICE([Enabling AIO DiskIO module])
+ ;;
+ esac
+ else
+ AC_MSG_NOTICE([AIO DiskIO Module disabled. Missing POSIX AIO support.])
+ fi
+ ;;
+
+ Blocking)
+ if test "$FOUND_DISKIO_BLOCKING" = "yes" ; then
+ AC_MSG_ERROR([DiskIO Blocking module listed twice.])
+ fi
+ FOUND_DISKIO_BLOCKING="yes"
+ AC_MSG_NOTICE([Enabling Blocking DiskIO module])
+ DISK_LIBS="$DISK_LIBS libBlocking.a"
+ DISK_MODULES="$DISK_MODULES Blocking"
+ DISK_LINKOBJS="$DISK_LINKOBJS DiskIO/Blocking/BlockingDiskIOModule.o"
+ ;;
+
+ *)
+ AC_MSG_NOTICE([Enabling $module DiskIO module])
+ DISK_LIBS="$DISK_LIBS lib${module}.a"
+ DISK_MODULES="$DISK_MODULES ${module}"
+ DISK_LINKOBJS="$DISK_LINKOBJS DiskIO/${module}/${module}DiskIOModule.o"
+ ;;
+ esac
+done
+AC_MSG_NOTICE([IO Modules built: $DISK_MODULES])
+AC_SUBST(DISK_MODULES)
+AC_SUBST(DISK_LIBS)
+AC_SUBST(DISK_PROGRAMS)
+AC_SUBST(DISK_LINKOBJS)
+AC_SUBST(AIOLIB)
+AM_CONDITIONAL([USE_AIOPS_WIN32], [test "$USE_AIOPS_WIN32" = 1])
+AM_CONDITIONAL([USE_AIO_WIN32], [test "$USE_AIO_WIN32" = 1])
+
+
+dnl Check what Storage formats are wanted.
+dnl This version will error out with a message saying why if a required DiskIO is missing.
AC_ARG_ENABLE(storeio,
- AC_HELP_STRING([--enable-storeio="list of modules"],
- [Build support for the list of store I/O modules.
+ AS_HELP_STRING([--enable-storeio="list of modules"],[Build support for the list of store I/O modules.
The default is only to build the "ufs" module.
See src/fs for a list of available modules, or
Programmers Guide section
@@ -478,29 +697,24 @@
done
AC_MSG_NOTICE([Store modules built: $STORE_MODULES])
fi
-dnl remove all but diskd - its the only module that needs to recurse
-dnl into the sub directory
-UFS_FOUND=
-NEED_UFS=
-NEED_BLOCKING=
-NEED_DISKDAEMON=
-NEED_DISKTHREADS=
-NEED_AIO=
-STORE_TESTS=
for fs in $STORE_MODULES none; do
case "$fs" in
diskd)
+ if ! test "$FOUND_DISKIO_BLOCKING" = "yes" && ! test "$FOUND_DISKIO_DISKDAEMON" = "yes" ; then
+ AC_MSG_ERROR([Storage diskd module requires DiskIO modules: Blocking or DiskDaemon])
+ fi
NEED_UFS="true"
- NEED_BLOCKING="true"
- NEED_DISKDAEMON="true"
;;
aufs)
+ if ! test "$FOUND_DISKIO_BLOCKING" = "yes" && ! test "$FOUND_DISKIO_DISKTHREADS" = "yes" ; then
+ AC_MSG_ERROR([Storage diskd module requires DiskIO modules: Blocking or DiskThreads])
+ fi
NEED_UFS="true"
- NEED_BLOCKING="true"
- NEED_DISKTHREADS="true"
;;
coss)
- NEED_AIO="true"
+ if ! test "$FOUND_DISKIO_AIO" = "yes"; then
+ AC_MSG_ERROR([COSS requires POSIX AIO which is not available.])
+ fi
dnl
dnl Automake om MinGW needs explicit exe extension
dnl for STORE_TESTS substition
@@ -546,189 +760,13 @@
AC_SUBST(STORE_LIBS_TO_ADD)
AC_SUBST(STORE_TESTS)
-AC_ARG_ENABLE(disk-io,
- AC_HELP_STRING([--enable-disk-io="list of modules"],
- [Build support for the list of disk I/O modules.
- If unset only the "Blocking" module will be built.
- Set without a value all available modules will be built.
- See src/DiskIO for a list of available modules, or
- Programmers Guide section on DiskIO
- for details on how to build your custom disk module]),
-[ case $enableval in
- yes)
- for dir in $srcdir/src/DiskIO/*; do
- module="`basename $dir`"
- if test -d "$dir" && test "$module" != CVS; then
- AC_MSG_NOTICE([Autodetected $module DiskIO module])
- DISK_MODULES="$DISK_MODULES $module"
- fi
- done
- AC_DEFINE(USE_DISKIO,1,[DiskIO modules are expected to be available.])
- ;;
- no)
- AC_DEFINE(USE_DISKIO,0,[DiskIO modules are expected to be available.])
- ;;
- *)
- DISK_MODULES="`echo $enableval| sed -e 's/,/ /g;s/ */ /g'`"
- AC_DEFINE(USE_DISKIO,1,[DiskIO modules are expected to be available.])
- ;;
- esac
-],
-[ if test -z "$DISK_MODULES"; then
- DISK_MODULES="Blocking"
- AC_MSG_NOTICE([Enabling Blocking DiskIO module (required default)])
- AC_DEFINE(USE_DISKIO,1,[DiskIO modules are expected to be available.])
- fi
-])
-if test -n "$DISK_MODULES"; then
- for module in $DISK_MODULES; do
- if test -d $srcdir/src/DiskIO/$module; then
- :
- else
- AC_MSG_ERROR(disk-io $module does not exist)
- fi
- done
- DISK_LIBS="lib`echo $DISK_MODULES|sed -e 's% %.a lib%g'`.a"
- DISK_LINKOBJS=
- for module in $DISK_MODULES; do
- AC_MSG_NOTICE([Enabling $module DiskIO module])
- DISK_LINKOBJS="$DISK_LINKOBJS DiskIO/${module}/${module}DiskIOModule.o"
- done
-fi
-for fs in $DISK_MODULES none; do
- case "$fs" in
- DiskDaemon)
- DISK_PROGRAMS="$DISK_PROGRAMS DiskIO/DiskDaemon/diskd"
- FOUND_DISKDAEMON="true"
- ;;
- DiskThreads)
- FOUND_DISKTHREADS="true"
- ;;
- AIO)
- FOUND_AIO="true"
- ;;
- Blocking)
- FOUND_BLOCKING="true"
- esac
-done
-
-if test -z "$FOUND_BLOCKING" && test -n "$NEED_BLOCKING"; then
- AC_MSG_NOTICE([adding Blocking, as it is used by an active, legacy Store Module])
- DISK_LIBS="$DISK_LIBS libBlocking.a"
- DISK_MODULES="$DISK_MODULES Blocking"
- DISK_LINKOBJS="$DISK_LINKOBJS DiskIO/Blocking/BlockingDiskIOModule.o"
-fi
-
-if test -z "$FOUND_DISKDAEMON" && test -n "$NEED_DISKDAEMON"; then
- AC_MSG_NOTICE(["adding DiskDaemon, as it is used by an active, legacy Store Module])
- DISK_LIBS="$DISK_LIBS libDiskDaemon.a"
- DISK_MODULES="$DISK_MODULES DiskDaemon"
- DISK_PROGRAMS="$DISK_PROGRAMS DiskIO/DiskDaemon/diskd"
- DISK_LINKOBJS="$DISK_LINKOBJS DiskIO/DiskDaemon/DiskDaemonDiskIOModule.o"
-fi
-
-if test -z "$FOUND_DISKTHREADS" && test -n "$NEED_DISKTHREADS"; then
- AC_MSG_NOTICE([adding DiskThreads, as it is used by an active, legacy Store Module])
- DISK_LIBS="$DISK_LIBS libDiskThreads.a"
- DISK_MODULES="$DISK_MODULES DiskThreads"
- DISK_LINKOBJS="$DISK_LINKOBJS DiskIO/DiskThreads/DiskThreadsDiskIOModule.o"
-fi
-
-if test -z "$FOUND_AIO" && test -n "$NEED_AIO"; then
- AC_MSG_NOTICE([adding AIO, as it is used by an active, legacy Store Module])
- DISK_LIBS="$DISK_LIBS libAIO.a"
- DISK_MODULES="$DISK_MODULES AIO"
- DISK_LINKOBJS="$DISK_LINKOBJS DiskIO/AIO/AIODiskIOModule.o"
-fi
-AC_MSG_NOTICE([IO Modules built: $DISK_MODULES])
-USE_AIOPS_WIN32=0
-USE_AIO_WIN32=0
-dnl we know what is being built. now add dependencies.
-for fs in $DISK_MODULES none; do
- case "$fs" in
- DiskThreads)
- if test -z "$with_pthreads"; then
- case "$host_os" in
- mingw|mingw32)
- USE_AIOPS_WIN32=1
- AC_MSG_NOTICE([DiskThreads IO Module used, Windows threads support automatically enabled])
- ;;
- *)
- AC_MSG_NOTICE([DiskThreads IO Module used, pthreads support automatically enabled])
- with_pthreads=yes
- ;;
- esac
- fi
- ;;
- AIO)
- if test -z "$with_aio"; then
- case "$host_os" in
- mingw|mingw32)
- USE_AIO_WIN32=1
- AC_MSG_NOTICE([Aio IO Module used, Windows overlapped I/O support automatically enabled])
- ;;
- *)
- AC_MSG_NOTICE([Aio IO Module used, aio support automatically enabled])
- with_aio=yes
- ;;
- esac
- fi
- ;;
- esac
-done
-
-dnl Check for librt
-dnl We use AIO in the coss store
-if test "$with_aio" = "yes"; then
- dnl On some systems POSIX AIO functions are in libaio
- AC_CHECK_LIB(rt, aio_read,
- [DISK_LIBS="$DISK_LIBS -lrt"],
- AC_CHECK_LIB(aio, aio_read,[DISK_LIBS="$DISK_LIBS -laio"])
- )
-fi
-
-dnl Check for pthreads
-dnl We use pthreads when doing ASYNC I/O
-if test "$with_pthreads" = "yes"; then
- SQUID_CFLAGS="$SQUID_CFLAGS -D_REENTRANT"
- SQUID_CXXFLAGS="$SQUID_CXXFLAGS -D_REENTRANT"
- case "$host" in
- i386-unknown-freebsd*)
- if test "$GCC" = "yes" ; then
- if test -z "$PRESET_LDFLAGS"; then
- LDFLAGS="$LDFLAGS -pthread"
- fi
- fi
- ;;
- *-solaris2.*)
- if test "$GCC" = "yes" ; then
- SQUID_CFLAGS="$SQUID_CFLAGS -pthreads"
- SQUID_CXXFLAGS="$SQUID_CXXFLAGS -pthreads"
- else
- SQUID_CFLAGS="$SQUID_CFLAGS -mt"
- SQUID_CXXFLAGS="$SQUID_CXXFLAGS -mt"
- fi
- ;;
- esac
- AC_CHECK_LIB(pthread, main,[DISK_LIBS="$DISK_LIBS -lpthread"],
- [ AC_MSG_ERROR(pthread library required but cannot be found.) ])
-fi
-
-AC_SUBST(DISK_MODULES)
-AC_SUBST(DISK_LIBS)
-AC_SUBST(DISK_PROGRAMS)
-AC_SUBST(DISK_LINKOBJS)
-AM_CONDITIONAL([USE_AIOPS_WIN32], [test "$USE_AIOPS_WIN32" = 1])
-AM_CONDITIONAL([USE_AIO_WIN32], [test "$USE_AIO_WIN32" = 1])
-
dnl At lest one removal policy is always needed.
dnl 'lru' removal policy is currently hard-coded by name for tests
dnl so we must set it as default.
REPL_POLICIES="lru"
AC_ARG_ENABLE(removal-policies,
- AC_HELP_STRING([--enable-removal-policies="list of policies"],
- [Build support for the list of removal policies.
+ AS_HELP_STRING([--enable-removal-policies="list of policies"],[Build support for the list of removal policies.
The default is only to build the "lru" module.
See src/repl for a list of available modules, or
Programmers Guide section 9.9 for details on how
@@ -767,7 +805,7 @@
AM_CONDITIONAL(ENABLE_PINGER, false)
AC_ARG_ENABLE(icmp,
- AC_HELP_STRING([--enable-icmp],[Enable ICMP pinging and Network Measurement]),
+ AS_HELP_STRING([--enable-icmp],[Enable ICMP pinging and Network Measurement]),
[ if test "$enableval" = "yes" ; then
AC_MSG_NOTICE([ICMP enabled])
AC_DEFINE(USE_ICMP,1,[Define to use Squid's ICMP and Network Measurement features (highly recommended!)])
@@ -777,8 +815,7 @@
AM_CONDITIONAL(USE_DELAY_POOLS, false)
AC_ARG_ENABLE(delay-pools,
- AC_HELP_STRING([--enable-delay-pools],
- [Enable delay pools to limit bandwidth usage]),
+ AS_HELP_STRING([--enable-delay-pools],[Enable delay pools to limit bandwidth usage]),
[ if test "$enableval" = "yes" ; then
AC_MSG_NOTICE([Delay pools enabled])
AC_DEFINE([DELAY_POOLS],1,[Traffic management via "delay pools".])
@@ -791,8 +828,7 @@
AM_CONDITIONAL(USE_ESI, false)
AC_ARG_ENABLE(esi,
- AC_HELP_STRING([--enable-esi],
- [Enable ESI for accelerators. Requires libexpat.
+ AS_HELP_STRING([--enable-esi],[Enable ESI for accelerators. Requires libexpat.
Enabling ESI will cause squid to follow the
Edge Acceleration Specification (www.esi.org).
This causes squid to IGNORE client Cache-Control headers.
@@ -821,7 +857,7 @@
AM_CONDITIONAL(USE_ICAP_CLIENT, false)
AC_ARG_ENABLE(icap-client,
- AC_HELP_STRING([--enable-icap-client],[Enable the ICAP client.]),
+ AS_HELP_STRING([--enable-icap-client],[Enable the ICAP client.]),
use_icap_client=$enableval, use_icap_client=no)
if test "$use_icap_client" = "yes" ; then
AC_DEFINE(ICAP_CLIENT,1,[Enable ICAP client features in Squid])
@@ -837,8 +873,7 @@
use_ecap=1
AC_MSG_CHECKING(whether to support eCAP)
AC_ARG_ENABLE(ecap,
- AC_HELP_STRING([--enable-ecap],
- [support loadable content adaptation modules]),
+ AS_HELP_STRING([--enable-ecap],[support loadable content adaptation modules]),
[
case "${enableval}" in
yes) use_ecap=yes ;;
@@ -910,8 +945,7 @@
dnl ])
AC_ARG_ENABLE(useragent-log,
- AC_HELP_STRING([--enable-useragent-log],
- [Enable logging of User-Agent header]),
+ AS_HELP_STRING([--enable-useragent-log],[Enable logging of User-Agent header]),
[ if test "$enableval" = "yes" ; then
AC_MSG_NOTICE([User-Agent logging enabled])
AC_DEFINE(USE_USERAGENT_LOG,1,[If you want to log User-Agent request header values, define this.
@@ -921,7 +955,7 @@
])
AC_ARG_ENABLE(referer-log,
- AC_HELP_STRING([--enable-referer-log],[Enable logging of Referer header]),
+ AS_HELP_STRING([--enable-referer-log],[Enable logging of Referer header]),
[ if test "$enableval" = "yes" ; then
AC_MSG_NOTICE([Referer logging enabled])
AC_DEFINE(USE_REFERER_LOG,1,[If you want to log Referer request header values, define this.
@@ -932,7 +966,7 @@
USE_WCCP=1
AC_ARG_ENABLE(wccp,
- AC_HELP_STRING([--disable-wccp],[Disable Web Cache Coordination Protocol]),
+ AS_HELP_STRING([--disable-wccp],[Disable Web Cache Coordination Protocol]),
[ if test "$enableval" = "no" ; then
AC_MSG_NOTICE([Web Cache Coordination Protocol disabled])
USE_WCCP=0
@@ -944,8 +978,7 @@
USE_WCCPv2=1
AC_ARG_ENABLE(wccpv2,
- AC_HELP_STRING([--disable-wccpv2],
- [Disable Web Cache Coordination V2 Protocol]),
+ AS_HELP_STRING([--disable-wccpv2],[Disable Web Cache Coordination V2 Protocol]),
[ if test "$enableval" = "no" ; then
AC_MSG_NOTICE(["Web Cache Coordination V2 Protocol disabled])
USE_WCCPv2=0
@@ -956,7 +989,7 @@
fi
AC_ARG_ENABLE(kill-parent-hack,
- AC_HELP_STRING([--enable-kill-parent-hack],[Kill parent on shutdown]),
+ AS_HELP_STRING([--enable-kill-parent-hack],[Kill parent on shutdown]),
[ if test "$enableval" = "yes" ; then
AC_MSG_NOTICE([Kill parent on shutdown])
AC_DEFINE(KILL_PARENT_OPT,1,[A dangerous feature which causes Squid to kill its parent process
@@ -967,7 +1000,7 @@
USE_SNMP=true
AC_ARG_ENABLE(snmp,
- AC_HELP_STRING([--disable-snmp],[Disable SNMP monitoring support]),
+ AS_HELP_STRING([--disable-snmp],[Disable SNMP monitoring support]),
[ if test "$enableval" = "no" ; then
AC_MSG_NOTICE([SNMP monitoring disabled])
USE_SNMP=
@@ -983,8 +1016,7 @@
AC_SUBST(makesnmplib)
AC_ARG_ENABLE(cachemgr-hostname,
- AC_HELP_STRING([--enable-cachemgr-hostname=hostname],
- [Make cachemgr.cgi default to this host.
+ AS_HELP_STRING([--enable-cachemgr-hostname=hostname],[Make cachemgr.cgi default to this host.
If unspecified, uses the name of the build-host]),
[ case $enableval in
yes)
@@ -1004,8 +1036,7 @@
AM_CONDITIONAL(ENABLE_ARP_ACL, false)
AC_ARG_ENABLE(arp-acl,
- AC_HELP_STRING([--enable-arp-acl],
- [Enable use of ARP ACL lists (ether address)]),
+ AS_HELP_STRING([--enable-arp-acl],[Enable use of ARP ACL lists (ether address)]),
[ if test "$enableval" = "yes" ; then
AC_MSG_NOTICE([ARP ACL lists enabled (ether address)])
case "$host" in
@@ -1038,7 +1069,7 @@
USE_HTCP=true
AM_CONDITIONAL(ENABLE_HTCP, false)
AC_ARG_ENABLE(htcp,
- AC_HELP_STRING([--disable-htcp],[Disable HTCP protocol support]),
+ AS_HELP_STRING([--disable-htcp],[Disable HTCP protocol support]),
[ if test "$enableval" = "no" ; then
AC_MSG_NOTICE([HTCP support disabled])
fi
@@ -1053,7 +1084,7 @@
dnl Default is to use OpenSSL when available
AC_ARG_ENABLE(ssl,
- AC_HELP_STRING([--enable-ssl],[Enable ssl gatewaying support using OpenSSL]),
+ AS_HELP_STRING([--enable-ssl],[Enable ssl gatewaying support using OpenSSL]),
[ if test "$enableval" != "no"; then
AC_MSG_NOTICE([SSL gatewaying using OpenSSL enabled])
AC_DEFINE(USE_SSL,1,[Define this to include code for SSL encryption.])
@@ -1073,8 +1104,7 @@
dnl User may specify OpenSSL is needed from a non-standard location
AC_ARG_WITH(openssl,
- AC_HELP_STRING([--with-openssl{=PATH}],
- [Compile with the OpenSSL libraries. The path to
+ AS_HELP_STRING([--with-openssl{=PATH}],[Compile with the OpenSSL libraries. The path to
the OpenSSL development libraries and headers
installation can be specified if outside of the
system standard directories]),
@@ -1112,7 +1142,7 @@
AC_ARG_ENABLE(forw-via-db,
- AC_HELP_STRING([--enable-forw-via-db],[Enable Forw/Via database]),
+ AS_HELP_STRING([--enable-forw-via-db],[Enable Forw/Via database]),
[ if test "$enableval" = "yes" ; then
AC_MSG_NOTICE([FORW-VIA enabled])
AC_DEFINE(FORW_VIA_DB,1,[Enable Forw/Via database])
@@ -1120,8 +1150,7 @@
])
AC_ARG_ENABLE(cache-digests,
- AC_HELP_STRING([--enable-cache-digests],
- [Use Cache Digests.
+ AS_HELP_STRING([--enable-cache-digests],[Use Cache Digests.
See http://wiki.squid-cache.org/SquidFaq/CacheDigests]),
[ if test "$enableval" = "yes" ; then
AC_MSG_NOTICE([Cache Disgests enabled])
@@ -1131,8 +1160,7 @@
dnl Size of COSS memory buffer
AC_ARG_WITH(coss-membuf-size,
- AC_HELP_STRING([--with-coss-membuf-size=size],
- [COSS membuf size (default 1048576 bytes)]),
+ AS_HELP_STRING([--with-coss-membuf-size=size],[COSS membuf size (default 1048576 bytes)]),
[ if test -n "$withval" -a "x$withval" != "xno" ; then
AC_MSG_NOTICE([Setting COSS membuf size to $with_coss_membuf_size bytes])
AC_DEFINE_UNQUOTED(COSS_MEMBUF_SZ, $with_coss_membuf_size,[Define if you want to set the COSS membuf size])
@@ -1144,7 +1172,7 @@
dnl Enable poll()
disable_poll=
AC_ARG_ENABLE(poll,
- AC_HELP_STRING([--disable-poll],[Disable poll(2) support.]),
+ AS_HELP_STRING([--disable-poll],[Disable poll(2) support.]),
[
case "$enableval" in
yes)
@@ -1161,7 +1189,7 @@
dnl Enable select()
disable_select=
AC_ARG_ENABLE(select,
- AC_HELP_STRING([--disable-select],[Disable select(2) support.]),
+ AS_HELP_STRING([--disable-select],[Disable select(2) support.]),
[
case "$enableval" in
yes)
@@ -1179,7 +1207,7 @@
dnl kqueue support is still experiemntal and unstable. Not enabled by default.
disable_kqueue=true
AC_ARG_ENABLE(kqueue,
- AC_HELP_STRING([--enable-kqueue],[Enable kqueue(2) support (experimental).]),
+ AS_HELP_STRING([--enable-kqueue],[Enable kqueue(2) support (experimental).]),
[
case "$enableval" in
yes)
@@ -1202,7 +1230,7 @@
disable_epoll=
force_epoll="no"
AC_ARG_ENABLE(epoll,
- AC_HELP_STRING([--disable-epoll],[Disable Linux epoll(2) support.]),
+ AS_HELP_STRING([--disable-epoll],[Disable Linux epoll(2) support.]),
[
case "$enableval" in
yes)
@@ -1241,7 +1269,7 @@
dnl Verify that epoll really works
if test $ac_cv_func_epoll_ctl = yes; then
AC_CACHE_CHECK(if epoll works, ac_cv_epoll_works,
- AC_TRY_RUN([
+ AC_RUN_IFELSE([AC_LANG_SOURCE([[
 #include <sys/epoll.h>
 #include <stdlib.h>
 #include <stdio.h>
@@ -1250,11 +1278,11 @@
int fd = epoll_create(256);
if (fd < 0) {
perror("epoll_create:");
- exit(1);
+ return 1;
}
- exit(0);
+ return 0;
}
- ], [ac_cv_epoll_works=yes], [ac_cv_epoll_works=no]))
+ ]])],[ac_cv_epoll_works=yes],[ac_cv_epoll_works=no],[]))
fi
if test "$force_epoll" = "yes" && test "$ac_cv_epoll_works" = "no" ; then
@@ -1265,8 +1293,7 @@
dnl Disable HTTP violations
http_violations=1
AC_ARG_ENABLE(http-violations,
- AC_HELP_STRING([--disable-http-violations],
- [This allows you to remove code which is known to
+ AS_HELP_STRING([--disable-http-violations],[This allows you to remove code which is known to
violate the HTTP protocol specification.]),
[ if test "$enableval" = "no" ; then
AC_MSG_NOTICE([Disabling HTTP Violations])
@@ -1281,8 +1308,7 @@
dnl Enable IPFW Transparent Proxy
AC_ARG_ENABLE(ipfw-transparent,
- AC_HELP_STRING([--enable-ipfw-transparent],
- [Enable Transparent Proxy support for systems
+ AS_HELP_STRING([--enable-ipfw-transparent],[Enable Transparent Proxy support for systems
using FreeBSD IPFW style redirection.]),
[ if test "$enableval" = "yes" ; then
AC_MSG_NOTICE([IPFW Transparent Proxy enabled])
@@ -1295,8 +1321,7 @@
dnl Enable IP-Filter Transparent Proxy
AC_ARG_ENABLE(ipf-transparent,
- AC_HELP_STRING([--enable-ipf-transparent],
- [Enable Transparent Proxy support for systems
+ AS_HELP_STRING([--enable-ipf-transparent],[Enable Transparent Proxy support for systems
using IP-Filter network address redirection.]),
[ if test "$enableval" = "yes" ; then
AC_MSG_NOTICE([IP-Filter Transparent Proxy enabled])
@@ -1309,8 +1334,7 @@
dnl Enable PF Transparent Proxy
AC_ARG_ENABLE(pf-transparent,
- AC_HELP_STRING([--enable-pf-transparent],
- [Enable Transparent Proxy support for systems
+ AS_HELP_STRING([--enable-pf-transparent],[Enable Transparent Proxy support for systems
using PF network address redirection.]),
[ if test "$enableval" = "yes" ; then
AC_MSG_NOTICE([PF Transparent Proxy enabled])
@@ -1323,8 +1347,7 @@
dnl Enable Linux Netfilter Transparent Proxy
AC_ARG_ENABLE(linux-netfilter,
- AC_HELP_STRING([--enable-linux-netfilter],
- [Enable Transparent Proxy support for Linux (Netfilter)]),
+ AS_HELP_STRING([--enable-linux-netfilter],[Enable Transparent Proxy support for Linux (Netfilter)]),
[ if test "$enableval" = "yes" ; then
AC_MSG_NOTICE([Linux (Netfilter) Transparent Proxy enabled])
AC_DEFINE(LINUX_NETFILTER,1,[Enable support for Transparent Proxy on Linux (Netfilter) systems])
@@ -1339,14 +1362,14 @@
needlargefiles=
AC_ARG_WITH(large-files,
- AC_HELP_STRING([--with-large-files],[Enable support for large files (logs etc).]),
+ AS_HELP_STRING([--with-large-files],[Enable support for large files (logs etc).]),
[ if test "x$withval" = "xyes"; then
needlargefiles=1
fi
])
dnl UNIX Build environment
-dnl AC_HELP_STRING is not suited here because it doesn't allow to specify newlines
+dnl AS_HELP_STRING is not suited here because it doesn't allow to specify newlines
AC_ARG_WITH(build-environment,
[ --with-build-environment=model
The build environment to use. Normally one of
@@ -1446,8 +1469,7 @@
dnl Enable Linux transparent proxy support for obsolete TPROXY
AC_ARG_ENABLE(linux-tproxy,
- AC_HELP_STRING([--enable-linux-tproxy],
- [Enable real Transparent Proxy support for Netfilter TPROXY
+ AS_HELP_STRING([--enable-linux-tproxy],[Enable real Transparent Proxy support for Netfilter TPROXY
(version 2).]),
[ if test "$enableval" = "yes" ; then
AC_MSG_NOTICE(["Linux Netfilter/TPROXY v2 enabled])
@@ -1465,8 +1487,7 @@
AM_CONDITIONAL(MAKE_LEAKFINDER, false)
dnl Enable Leak Finding Functions
AC_ARG_ENABLE(leakfinder,
- AC_HELP_STRING([--enable-leakfinder],
- [Enable Leak Finding code. Enabling this alone
+ AS_HELP_STRING([--enable-leakfinder],[Enable Leak Finding code. Enabling this alone
does nothing; you also have to modify the source
code to use the leak finding functions. Probably
Useful for hackers only.]),
@@ -1480,8 +1501,7 @@
follow_xff=1
AC_ARG_ENABLE(follow-x-forwarded-for,
- AC_HELP_STRING([--enable-follow-x-forwarded-for],
- [Enable support for following the X-Forwarded-For
+ AS_HELP_STRING([--enable-follow-x-forwarded-for],[Enable support for following the X-Forwarded-For
HTTP header to try to find the IP address of the
original or indirect client when a request has
been forwarded through other proxies.]),
@@ -1498,8 +1518,7 @@
use_ident=1
AC_ARG_ENABLE(ident-lookups,
- AC_HELP_STRING([--disable-ident-lookups],
- [This allows you to remove code that performs Ident (RFC 931) lookups.]),
+ AS_HELP_STRING([--disable-ident-lookups],[This allows you to remove code that performs Ident (RFC 931) lookups.]),
[ if test "$enableval" = "no" ; then
AC_MSG_NOTICE([Disabling Ident Lookups])
use_ident=0
@@ -1514,8 +1533,7 @@
AM_CONDITIONAL(USE_DNSSERVER, false)
use_dnsserver=
AC_ARG_ENABLE(internal-dns,
- AC_HELP_STRING([--disable-internal-dns],
- [Prevents Squid from directly sending and receiving DNS messages,
+ AS_HELP_STRING([--disable-internal-dns],[Prevents Squid from directly sending and receiving DNS messages,
and instead enables the old external 'dnsserver' processes.]),
[ if test "$enableval" = "no" ; then
AC_MSG_WARN([Disabling Internal DNS queries])
@@ -1529,8 +1547,7 @@
dnl Select Default hosts file location
AC_ARG_ENABLE(default-hostsfile,
- AC_HELP_STRING([--enable-default-hostsfile=path],
- [Select default location for hosts file.
+ AS_HELP_STRING([--enable-default-hostsfile=path],[Select default location for hosts file.
See hosts_file directive in squid.conf for details]),
[
if test "$enableval" != "none" ; then
@@ -1549,8 +1566,7 @@
dnl Select auth schemes modules to build
AC_ARG_ENABLE(auth,
- AC_HELP_STRING([--enable-auth="list of auth scheme modules"],
- [Build support for the list of authentication schemes.
+ AS_HELP_STRING([--enable-auth="list of auth scheme modules"],[Build support for the list of authentication schemes.
The default is to build support for the Basic scheme.
See src/auth for a list of available modules, or
Programmers Guide section authentication schemes
@@ -1609,8 +1625,7 @@
BASIC_AUTH_HELPERS="all"
fi
AC_ARG_ENABLE(basic-auth-helpers,
- AC_HELP_STRING([--enable-basic-auth-helpers="list of helpers"],
- [This option selects which basic scheme proxy_auth
+ AS_HELP_STRING([--enable-basic-auth-helpers="list of helpers"],[This option selects which basic scheme proxy_auth
helpers to build and install as part of the normal
build process. For a list of available
helpers see the helpers/basic_auth directory.]),
@@ -1660,8 +1675,7 @@
NTLM_AUTH_HELPERS="all"
fi
AC_ARG_ENABLE(ntlm-auth-helpers,
- AC_HELP_STRING([--enable-ntlm-auth-helpers="list of helpers"],
- [This option selects which proxy_auth ntlm helpers
+ AS_HELP_STRING([--enable-ntlm-auth-helpers="list of helpers"],[This option selects which proxy_auth ntlm helpers
to build and install as part of the normal build
process. For a list of available helpers see
the helpers/ntlm_auth directory.]),
@@ -1703,8 +1717,7 @@
NEGOTIATE_AUTH_HELPERS="all"
fi
AC_ARG_ENABLE(negotiate-auth-helpers,
- AC_HELP_STRING([--enable-negotiate-auth-helpers="list of helpers"],
- [This option selects which proxy_auth negotiate helpers
+ AS_HELP_STRING([--enable-negotiate-auth-helpers="list of helpers"],[This option selects which proxy_auth negotiate helpers
to build and install as part of the normal build
process. For a list of available helpers see
the helpers/negotiate_auth directory.]),
@@ -1747,8 +1760,7 @@
DIGEST_AUTH_HELPERS=all
fi
AC_ARG_ENABLE(digest-auth-helpers,
- AC_HELP_STRING([--enable-digest-auth-helpers="list of helpers"],
- [This option selects which digest scheme authentication
+ AS_HELP_STRING([--enable-digest-auth-helpers="list of helpers"],[This option selects which digest scheme authentication
helpers to build and install as part of the normal build
process. For a list of available helpers see the
helpers/digest_auth directory.]),
@@ -1787,8 +1799,7 @@
dnl Enable "NTLM fail open"
AC_ARG_ENABLE(ntlm-fail-open,
- AC_HELP_STRING([--enable-ntlm-fail-open],
- [Enable NTLM fail open, where a helper that fails one of the
+ AS_HELP_STRING([--enable-ntlm-fail-open],[Enable NTLM fail open, where a helper that fails one of the
Authentication steps can allow squid to still authenticate
the user.]),
[ if test "$enableval" = "yes" ; then
@@ -1799,8 +1810,7 @@
dnl Select external_acl helpers to build
EXTERNAL_ACL_HELPERS=all
AC_ARG_ENABLE(external-acl-helpers,
- AC_HELP_STRING([--enable-external-acl-helpers="list of helpers"],
- [This option selects which external_acl helpers to
+ AS_HELP_STRING([--enable-external-acl-helpers="list of helpers"],[This option selects which external_acl helpers to
build and install as part of the normal build
process. For a list of available helpers see the
helpers/external_acl directory.]),
@@ -1838,8 +1848,7 @@
AC_SUBST(EXTERNAL_ACL_HELPERS)
AC_ARG_WITH(valgrind-debug,
- AC_HELP_STRING([--with-valgrind-debug],
- [Include debug instrumentation for use with valgrind]),
+ AS_HELP_STRING([--with-valgrind-debug],[Include debug instrumentation for use with valgrind]),
[ case $withval in
yes)
valgrind=1
@@ -1863,8 +1872,7 @@
dnl Disable "memPools" code
AC_ARG_ENABLE(mempools,
- AC_HELP_STRING([--disable-mempools],
- [Disable memPools. Note that this option now simply sets the
+ AS_HELP_STRING([--disable-mempools],[Disable memPools. Note that this option now simply sets the
default behaviour. Specific classes can override this at runtime, and
only lib/MemPool.c needs to be altered to change the squid-wide
default for all classes.]),
@@ -1880,8 +1888,7 @@
dnl Enable WIN32 Service compile mode
AC_ARG_ENABLE(win32-service,
- AC_HELP_STRING([--enable-win32-service],
- [Compile Squid as a WIN32 Service.
+ AS_HELP_STRING([--enable-win32-service],[Compile Squid as a WIN32 Service.
Works only on MS-Windows platforms (NT and up).]),
[ if test "$enableval" = "yes" ; then
AC_MSG_NOTICE([Enabling WIN32 run service mode])
@@ -1909,7 +1916,7 @@
dnl Disable "unlinkd" code
AC_ARG_ENABLE(unlinkd,
- AC_HELP_STRING([--disable-unlinkd],[Do not use unlinkd]),
+ AS_HELP_STRING([--disable-unlinkd],[Do not use unlinkd]),
[ if test "$enableval" = "no" ; then
use_unlinkd=no
else
@@ -1931,8 +1938,7 @@
dnl Enable backtraces on fatal errors
AC_ARG_ENABLE(stacktraces,
- AC_HELP_STRING([--enable-stacktraces],
- [Enable automatic call backtrace on fatal errors]),
+ AS_HELP_STRING([--enable-stacktraces],[Enable automatic call backtrace on fatal errors]),
[ if test "$enableval" = "yes" ; then
AC_MSG_NOTICE([Enabling automatic stack backtraces on fatal errors])
AC_DEFINE(PRINT_STACK_TRACE, 1,[Print stacktraces on fatal errors])
@@ -1942,8 +1948,7 @@
AM_CONDITIONAL(ENABLE_XPROF_STATS, false)
dnl Enable USE_XPROF_STATS
AC_ARG_ENABLE(cpu-profiling,
- AC_HELP_STRING([--enable-cpu-profiling],
- [Enable instrumentation to try and understand how CPU power
+ AS_HELP_STRING([--enable-cpu-profiling],[Enable instrumentation to try and understand how CPU power
is spent by squid, by enabling specific probes in selected
functions. New probes can only be added by modifying the source code.
It is meant to help developers in optimizing performance
@@ -1959,8 +1964,7 @@
dnl Enable X-Accelerator-Vary for Vary support within an accelerator setup
AC_ARG_ENABLE(x-accelerator-vary,
- AC_HELP_STRING([--enable-x-accelerator-vary],
- [Enable support for the X-Accelerator-Vary
+ AS_HELP_STRING([--enable-x-accelerator-vary],[Enable support for the X-Accelerator-Vary
HTTP header. Can be used to indicate
variance within an accelerator setup.
Typically used together with other code
@@ -1972,7 +1976,7 @@
])
AC_ARG_ENABLE(zph-qos,
- AC_HELP_STRING([--enable-zph-qos],[Enable ZPH QOS support]),
+ AS_HELP_STRING([--enable-zph-qos],[Enable ZPH QOS support]),
[ if test "$enableval" = "yes" ; then
AC_MSG_NOTICE([ZPH QOS enabled])
AC_DEFINE(USE_ZPH_QOS,1,
@@ -1995,8 +1999,7 @@
esac
])
AC_ARG_WITH(filedescriptors,
- AC_HELP_STRING([--with-filedescriptors=NUMBER],
- [Force squid to support NUMBER filedescriptors]),
+ AS_HELP_STRING([--with-filedescriptors=NUMBER],[Force squid to support NUMBER filedescriptors]),
[
case ${withval} in
[[0-9]]*)
@@ -2022,8 +2025,7 @@
fi
AC_ARG_WITH(cppunit-basedir,
- AC_HELP_STRING([--with-cppunit-basedir=PATH],
- [Path where the cppunit headers are libraries are found
+ AS_HELP_STRING([--with-cppunit-basedir=PATH],[Path where the cppunit headers are libraries are found
for unit testing.]),
[ if test -f $withval/include/cppunit/TestCase.h; then
AC_MSG_NOTICE([Using cppunit includes from $withval])
@@ -2037,7 +2039,6 @@
SQUID_CPPUNIT_LIBS='$(SQUID_CPPUNIT_LA)'
else
AC_MSG_ERROR(Cannot find cppunit at $withval)
- exit 1
fi
])
AC_SUBST(SQUID_CPPUNIT_LIBS)
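
Besides the help-string conversion, this hunk drops an `exit 1` that followed AC_MSG_ERROR; that macro already aborts configure with a non-zero status, so the extra exit was dead code. Use of the option itself is unchanged, for example (the path is purely illustrative):

    ./configure --with-cppunit-basedir=/usr/local
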
@@ -2108,7 +2109,6 @@
AC_HEADER_STDC
AC_CHECK_HEADERS( \
- aio.h \
arpa/inet.h \
arpa/nameser.h \
assert.h \
@@ -2136,6 +2136,7 @@
libc.h \
libgen.h \
limits.h \
+ linux/types.h \
machine/byte_swap.h \
malloc.h \
math.h \
@@ -2199,8 +2200,7 @@
inttypes.h \
grp.h \
db.h \
- db_185.h \
- sys/capability.h
+ db_185.h
)
AC_CHECK_HEADERS(
@@ -2243,10 +2243,12 @@
SAVED_CPPFLAGS="$CPPFLAGS"
CPPFLAGS="-I/usr/include/libxml2 $CPPFLAGS"
unset ac_cv_header_libxml_parser_h
- AC_CHECK_HEADERS([libxml/parser.h], [], [
- CPPFLAGS="$SAVED_CPPFLAGS"
- ])
+ AC_CHECK_HEADERS([libxml/parser.h], [ac_cv_libxml2_include=yes], [])
+ CPPFLAGS="$SAVED_CPPFLAGS"
])
+if test "x$ac_cv_libxml2_include" = "xyes"; then
+ SQUID_CXXFLAGS="-I/usr/include/libxml2 $SQUID_CXXFLAGS"
+fi
AC_C_CONST
AC_C_BIGENDIAN
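
The rewritten libxml2 probe no longer leaves -I/usr/include/libxml2 in CPPFLAGS as a side effect; CPPFLAGS is restored unconditionally and the include path is recorded in SQUID_CXXFLAGS only when the header was found. The general "probe with temporary flags, then restore" idiom looks roughly like this sketch (found_hdr is a placeholder variable, not the cache variable the patch uses):

    SAVED_CPPFLAGS="$CPPFLAGS"
    CPPFLAGS="-I/usr/include/libxml2 $CPPFLAGS"
    AC_CHECK_HEADERS([libxml/parser.h], [found_hdr=yes], [])
    CPPFLAGS="$SAVED_CPPFLAGS"
    if test "x$found_hdr" = "xyes"; then
      SQUID_CXXFLAGS="-I/usr/include/libxml2 $SQUID_CXXFLAGS"
    fi
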
@@ -2467,6 +2469,29 @@
#include
#endif])
+dnl Check for libcap header (assume its not broken unless
+use_caps=yes
+AC_ARG_ENABLE(caps, AS_HELP_STRING([--disable-caps],[disable usage of Linux capabilities library to control privileges]),
+[ if test "x$enableval" = "xyes" ; then
+ AC_MSG_RESULT(forced yes)
+ else
+ AC_MSG_RESULT(no)
+ use_caps=no
+ fi
+],[AC_MSG_RESULT(yes)])
+if test "x$use_caps" = "xyes"; then
+ dnl Check for libcap1 breakage or libcap2 fixed (assume broken unless found working)
+ libcap_broken=1
+ AC_CHECK_HEADERS(sys/capability.h)
+ AC_CACHE_CHECK([for operational libcap2], $libcap_broken,
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include <sys/capability.h>]], [[
+ capget(NULL, NULL);
+ capset(NULL, NULL);
+ ]])],[libcap_broken=0],[])
+ )
+ AC_DEFINE_UNQUOTED([LIBCAP_BROKEN],$libcap_broken,[if libcap2 is available and not clashing with libc])
+fi
+
AC_CHECK_TYPE(mtyp_t,AC_DEFINE(HAVE_MTYP_T,1,[mtyp_t is defined by the system headers]),,[#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>])
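
The new --disable-caps block only marks libcap as usable (defining LIBCAP_BROKEN to 0) when a small program calling capget() and capset() against <sys/capability.h> links cleanly; it is seeded with libcap_broken=1 and overridden only on success. Reduced to its essentials, such a link test is:

    AC_LINK_IFELSE(
      [AC_LANG_PROGRAM([[#include <sys/capability.h>]],
                       [[capget(NULL, NULL); capset(NULL, NULL);]])],
      [libcap_broken=0],
      [libcap_broken=1])
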
@@ -2481,14 +2506,12 @@
save_LIBS="$LIBS"
for curlib in ws2_32 wsock32; do
LIBS="$LIBS -l$curlib"
- AC_TRY_LINK([#include ],
- [
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include ]], [[
socket(0,0,0);
select(0,NULL,NULL,NULL,NULL);
closesocket(0);
gethostname(NULL,0);
- ],
- have_winsock=yes, have_winsock=no)
+ ]])],[have_winsock=yes],[have_winsock=no])
if test $have_winsock = yes; then
ac_cv_func_select='yes'
@@ -2510,17 +2533,15 @@
dnl Ripped from the Samba sources
AC_CACHE_CHECK([for unix domain sockets],squid_cv_unixsocket, [
- AC_TRY_COMPILE([
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#include
#include
#include
#include
-#include ],
-[
+#include ]], [[
struct sockaddr_un sunaddr;
sunaddr.sun_family = AF_UNIX;
-],
- squid_cv_unixsocket=yes,squid_cv_unixsocket=no)])
+]])],[squid_cv_unixsocket=yes],[squid_cv_unixsocket=no])])
if test x"$squid_cv_unixsocket" = x"yes"; then
AC_DEFINE(HAVE_UNIXSOCKET,1,[Do we have unix sockets? (required for the winbind ntlm helper])
fi
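
This and the following hunks retire AC_TRY_COMPILE in favour of AC_COMPILE_IFELSE. The old four-argument call maps onto AC_COMPILE_IFELSE([AC_LANG_PROGRAM(prologue, body)], action-if-ok, action-if-failed), with the prologue and body double-quoted ([[...]]) so that commas and brackets inside the C code survive m4 expansion. Schematically, with placeholder C code and a placeholder result variable:

    AC_COMPILE_IFELSE(
      [AC_LANG_PROGRAM([[#include <sys/socket.h>
    #include <sys/un.h>]],
                       [[struct sockaddr_un s; s.sun_family = AF_UNIX;]])],
      [have_unix_sockets=yes],
      [have_unix_sockets=no])
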
@@ -2573,7 +2594,7 @@
AC_MSG_CHECKING([whether to enable IPv6])
use_ipng=yes
AC_ARG_ENABLE(ipv6,
- AC_HELP_STRING([--disable-ipv6],[Disable IPv6 support]),
+ AS_HELP_STRING([--disable-ipv6],[Disable IPv6 support]),
[ if test "x$enableval" = "xyes" ; then
AC_MSG_RESULT(yes)
else
@@ -2593,21 +2614,19 @@
;;
esac
AC_CACHE_CHECK([if PF_INET6 is available], $use_ipng,
- AC_TRY_RUN([ /* PF_INET6 available check */
+ AC_RUN_IFELSE([AC_LANG_SOURCE([[ /* PF_INET6 available check */
# include
# include
- int main() {
+ int main(int argc, char **argv) {
if (socket(PF_INET6, SOCK_STREAM, 0) < 0)
return 1;
else
return 0;
}
- ],
- [ AC_MSG_RESULT(yes)
+ ]])],[ AC_MSG_RESULT(yes)
use_ipng=yes
- SAVED_LIBS="$LIBS" ],
- [ AC_MSG_RESULT(no)
- use_ipng=no ])
+ SAVED_LIBS="$LIBS" ],[ AC_MSG_RESULT(no)
+ use_ipng=no ],[])
)
LIBS="$SAVED_LIBS"
fi
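
Runtime probes follow the same pattern: AC_TRY_RUN(program, ok, fail, cross) becomes AC_RUN_IFELSE with the program wrapped in AC_LANG_SOURCE and each action bracket-quoted; the fourth slot supplies the answer when cross-compiling (left empty in this hunk). A generic skeleton, with have_feature standing in for whatever variable the real check sets:

    AC_RUN_IFELSE(
      [AC_LANG_SOURCE([[int main(int argc, char **argv) { return 0; }]])],
      [have_feature=yes],
      [have_feature=no],
      [have_feature=no])  dnl assumed result when cross-compiling
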
@@ -2619,8 +2638,7 @@
dnl Check for forced split-stack mode
AC_MSG_CHECKING([for IPv6 split-stack requirement])
AC_ARG_WITH(ipv6-split-stack,
- AC_HELP_STRING([--with-ipv6-split-stack],
- [Force-Enable experimental split-stack support for Windows XP and *BSD. Requires IPv6.]),
+ AS_HELP_STRING([--with-ipv6-split-stack],[Force-Enable experimental split-stack support for Windows XP and *BSD. Requires IPv6.]),
[ use_v4mapped="no"
AC_MSG_RESULT(yes)],
[ AC_MSG_RESULT(no) ])
@@ -2630,14 +2648,14 @@
dnl But only usable if it actually works...
if test "$use_v4mapped" = "yes" ; then
AC_MSG_CHECKING([for IPv6 v4-mapping ability])
- AC_TRY_RUN([ /* IPPROTO_V4MAPPED is usable check */
+ AC_RUN_IFELSE([AC_LANG_SOURCE([[ /* IPPROTO_V4MAPPED is usable check */
# include
# include
# include
#if HAVE_NETINET_IN6_H
# include <netinet/in6.h>
#endif
- int main() {
+ int main(int argc, char **argv) {
int s = socket(PF_INET6, SOCK_STREAM, 0);
int tos = 0;
if (setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, (char *) &tos, sizeof(int)) < 0)
@@ -2645,15 +2663,13 @@
else
return 0;
}
- ],
- [ AC_MSG_RESULT(yes)
+ ]])],[ AC_MSG_RESULT(yes)
use_v4mapped=yes
AC_DEFINE(IPV6_SPECIAL_V4MAPPED, 1, [Enable v4-mapping through v6 sockets])
- ],
- [ AC_MSG_RESULT(no)
+ ],[ AC_MSG_RESULT(no)
AC_DEFINE(IPV6_SPECIAL_V4MAPPED, 0, [Enable v4-mapping through v6 sockets])
use_v4mapped=no
- ])
+ ],[])
fi
dnl if we can't defer v4-mapping to the OS we are forced to split-stack the FD table.
@@ -2671,15 +2687,12 @@
dnl Check whether this OS defines sin6_len as a member of sockaddr_in6 as a backup to ss_len
AC_CACHE_CHECK([for sin6_len field in struct sockaddr_in6],
ac_cv_have_sin6_len_in_struct_sai, [
- AC_TRY_COMPILE([
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#include
#include
#include
- ],
- [ struct sockaddr_in6 s; s.sin6_len = 1; ],
- [ ac_cv_have_sin6_len_in_struct_sai="yes" ],
- [ ac_cv_have_sin6_len_in_struct_sai="no" ]
- )
+ ]], [[ struct sockaddr_in6 s; s.sin6_len = 1; ]])],[ ac_cv_have_sin6_len_in_struct_sai="yes" ],[ ac_cv_have_sin6_len_in_struct_sai="no"
+ ])
])
if test "x$ac_cv_have_sin6_len_in_struct_sai" = "xyes" ; then
AC_DEFINE(HAVE_SIN6_LEN_IN_SAI, 1, [Does struct sockaddr_in6 have sin6_len? 1: Yes, 0: No])
@@ -2698,15 +2711,12 @@
dnl Check whether this OS defines ss_len as a member of sockaddr_storage
AC_CACHE_CHECK([for ss_len field in struct sockaddr_storage],
ac_cv_have_ss_len_in_struct_ss, [
- AC_TRY_COMPILE([
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#include
#include
#include
- ],
- [ struct sockaddr_storage s; s.ss_len = 1; ],
- [ ac_cv_have_ss_len_in_struct_ss="yes" ],
- [ ac_cv_have_ss_len_in_struct_ss="no" ]
- )
+ ]], [[ struct sockaddr_storage s; s.ss_len = 1; ]])],[ ac_cv_have_ss_len_in_struct_ss="yes" ],[ ac_cv_have_ss_len_in_struct_ss="no"
+ ])
])
if test "x$ac_cv_have_ss_len_in_struct_ss" = "xyes" ; then
AC_DEFINE(HAVE_SS_LEN_IN_SS, 1, [Does struct sockaddr_storage have ss_len? 1: Yes, 0: No])
@@ -2717,15 +2727,12 @@
dnl Check whether this OS defines sin_len as a member of sockaddr_in as a backup to ss_len
AC_CACHE_CHECK([for sin_len field in struct sockaddr_in],
ac_cv_have_sin_len_in_struct_sai, [
- AC_TRY_COMPILE([
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#include
#include
#include
- ],
- [ struct sockaddr_in s; s.sin_len = 1; ],
- [ ac_cv_have_sin_len_in_struct_sai="yes" ],
- [ ac_cv_have_sin_len_in_struct_sai="no" ]
- )
+ ]], [[ struct sockaddr_in s; s.sin_len = 1; ]])],[ ac_cv_have_sin_len_in_struct_sai="yes" ],[ ac_cv_have_sin_len_in_struct_sai="no"
+ ])
])
if test "x$ac_cv_have_sin_len_in_struct_sai" = "xyes" ; then
AC_DEFINE(HAVE_SIN_LEN_IN_SAI, 1, [Does struct sockaddr_in have sin_len? 1: Yes, 0: No])
@@ -2776,7 +2783,7 @@
dnl 1.85
AC_CACHE_CHECK(if dbopen needs -ldb,ac_cv_dbopen_libdb, [
SAVED_LIBS="$LIBS"; LIBS="$LIBS -ldb"
- AC_TRY_LINK([
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([[
#if HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
@@ -2787,10 +2794,7 @@
#include <db_185.h>
#elif HAVE_DB_H
#include <db.h>
-#endif],
- [dbopen("", 0, 0, DB_HASH, (void *)0L)],
- ac_cv_dbopen_libdb="yes",
- ac_cv_dbopen_libdb="no")
+#endif]], [[dbopen("", 0, 0, DB_HASH, (void *)0L)]])],[ac_cv_dbopen_libdb="yes"],[ac_cv_dbopen_libdb="no"])
LIBS="$SAVED_LIBS"
])
if test $ac_cv_dbopen_libdb = yes; then
@@ -3051,16 +3055,16 @@
dnl setresuid() but doesn't implement it.
dnl
AC_CACHE_CHECK(if setresuid is implemented, ac_cv_func_setresuid,
- AC_TRY_RUN([
+ AC_RUN_IFELSE([AC_LANG_SOURCE([[
#include
- int main() {
+ int main(int argc, char **argv) {
if(setresuid(-1,-1,-1)) {
perror("setresuid:");
- exit(1);
+ return 1;
}
- exit(0);
+ return 0;
}
- ],ac_cv_func_setresuid="yes",ac_cv_func_setresuid="no")
+ ]])],[ac_cv_func_setresuid="yes"],[ac_cv_func_setresuid="no"],[])
)
if test "$ac_cv_func_setresuid" = "yes" ; then
AC_DEFINE(HAVE_SETRESUID,1,[Yay! Another Linux brokenness. Its not good enough to know that setresuid() exists, because RedHat 5.0 declare setresuid() but doesn't implement it.])
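
As in this setresuid probe, run tests are usually wrapped in AC_CACHE_CHECK so the verdict lands in an ac_cv_* cache variable and is not recomputed on every configure run. A generic sketch with an invented function name:

    AC_CACHE_CHECK([if frobnicate is implemented], [ac_cv_func_frobnicate],
      [AC_RUN_IFELSE(
         [AC_LANG_SOURCE([[int main(void) { return 0; }]])],
         [ac_cv_func_frobnicate=yes],
         [ac_cv_func_frobnicate=no],
         [ac_cv_func_frobnicate=no])])
    if test "$ac_cv_func_frobnicate" = "yes" ; then
      AC_DEFINE(HAVE_FROBNICATE, 1, [frobnicate() is usable])
    fi
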
@@ -3071,7 +3075,7 @@
dnl copy that crashes with a buffer over-run!
dnl
AC_CACHE_CHECK(if strnstr is well implemented, ac_cv_func_strnstr,
- AC_TRY_RUN([
+ AC_RUN_IFELSE([AC_LANG_SOURCE([[
#include
#include
#include
@@ -3085,7 +3089,7 @@
strnstr(str, "fubar", size);
return 0;
}
- ],ac_cv_func_strnstr="yes",ac_cv_func_strnstr="no")
+ ]])],[ac_cv_func_strnstr="yes"],[ac_cv_func_strnstr="no"],[])
)
if test "$ac_cv_func_strnstr" = "yes" ; then
AC_DEFINE(HAVE_STRNSTR,1,[Yay! We have a working strnstr!])
@@ -3097,22 +3101,20 @@
dnl Test for va_copy
dnl
AC_CACHE_CHECK(if va_copy is implemented, ac_cv_func_va_copy,
- AC_TRY_RUN([
+ AC_RUN_IFELSE([AC_LANG_SOURCE([[
#include
#include
- void f (int i, ...) {
+ int f (int i, ...) {
va_list args1, args2;
va_start (args1, i);
va_copy (args2, args1);
if (va_arg (args2, int) != 42 || va_arg (args1, int) != 42)
- exit (1);
+ return 1;
va_end (args1); va_end (args2);
- }
- int main() {
- f (0, 42);
return 0;
}
- ],ac_cv_func_va_copy="yes",ac_cv_func_va_copy="no")
+ int main(int argc, char **argv) { return f (0, 42); }
+ ]])],[ac_cv_func_va_copy="yes"],[ac_cv_func_va_copy="no"],[])
)
if test "$ac_cv_func_va_copy" = "yes" ; then
AC_DEFINE(HAVE_VA_COPY, 1, [If your system have va_copy])
@@ -3122,22 +3124,20 @@
dnl Some systems support __va_copy
dnl
AC_CACHE_CHECK(if __va_copy is implemented, ac_cv_func___va_copy,
- AC_TRY_RUN([
+ AC_RUN_IFELSE([AC_LANG_SOURCE([[
#include
#include
- void f (int i, ...) {
+ int f (int i, ...) {
va_list args1, args2;
va_start (args1, i);
__va_copy (args2, args1);
if (va_arg (args2, int) != 42 || va_arg (args1, int) != 42)
- exit (1);
+ return 1;
va_end (args1); va_end (args2);
- }
- int main() {
- f (0, 42);
return 0;
}
- ],ac_cv_func___va_copy="yes",ac_cv_func___va_copy="no")
+ int main(int argc, char **argv) { return f (0, 42); }
+ ]])],[ac_cv_func___va_copy="yes"],[ac_cv_func___va_copy="no"],[])
)
if test "$ac_cv_func___va_copy" = "yes" ; then
AC_DEFINE(HAVE___VA_COPY, 1, [Some systems have __va_copy instead of va_copy])
@@ -3220,16 +3220,24 @@
AC_DEFINE(LINUX_NETFILTER, 0)
fi
AC_MSG_RESULT($LINUX_NETFILTER)
-fi
+fi
if test "$LINUX_NETFILTER" = "no" ; then
AC_MSG_WARN([Cannot find necessary Linux kernel (Netfilter) header files])
AC_MSG_WARN([Linux Transparent and Intercepting Proxy support WILL NOT be enabled])
sleep 10
fi
+dnl Netfilter TPROXY depends on libcap but the NAT parts can still work.
+if test "$LINUX_NETFILTER" = "yes" && test "$use_caps" != "yes" ; then
+ AC_MSG_WARN([Missing needed capabilities (libcap or libcap2) for TPROXY])
+ AC_MSG_WARN([Linux Transparent Proxy support WILL NOT be enabled])
+ AC_MSG_WARN([Reduced support to Interception Proxy])
+ sleep 10
+fi
dnl Linux Netfilter/TPROXYv2 support requires some specific header files
-dnl Shamelessly copied from shamelessly copied from above
-if test "$LINUX_TPROXY2" ; then
+dnl Shamelessly copied from above
+if test "$LINUX_TPROXY2"; then
+ if test "$use_caps" = "yes"; then
AC_MSG_CHECKING(if TPROXYv2 header files are installed)
# hold on to your hats...
if test "$ac_cv_header_linux_netfilter_ipv4_ip_tproxy_h" = "yes" && test "$LINUX_NETFILTER" = "yes"; then
@@ -3247,11 +3255,15 @@
AC_MSG_WARN([Or select the '--enable-linux-netfilter' option instead for Netfilter support.])
sleep 10
fi
+ else
+ AC_MSG_WARN([Missing needed capabilities (libcap or libcap2) for TPROXY v2])
+ AC_MSG_WARN([Linux Transparent Proxy support WILL NOT be enabled])
+ sleep 10
+ fi
fi
AC_ARG_ENABLE(gnuregex,
- AC_HELP_STRING([--enable-gnuregex],
- [Compile GNUregex. Unless you have reason to use
+ AS_HELP_STRING([--enable-gnuregex],[Compile GNUregex. Unless you have reason to use
this option, you should not enable it.
This library file is usually only required on Windows and
very old Unix boxes which do not have their own regex
@@ -3272,10 +3284,8 @@
if test "$ac_cv_func_regcomp" = "no" || test "$USE_GNUREGEX" = "yes" ; then
USE_GNUREGEX="yes"
else
- AC_TRY_COMPILE([#include
-#include ],[regex_t t; regcomp(&t,"",0);],
- USE_GNUREGEX="no",
- USE_GNUREGEX="yes")
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include
+#include ]], [[regex_t t; regcomp(&t,"",0);]])],[USE_GNUREGEX="no"],[USE_GNUREGEX="yes"])
fi
fi
AC_MSG_RESULT($USE_GNUREGEX)
@@ -3289,7 +3299,7 @@
dnl Not cached since people are likely to tune this
AC_MSG_CHECKING(Default FD_SETSIZE value)
-AC_TRY_RUN([
+AC_RUN_IFELSE([AC_LANG_SOURCE([[
#if HAVE_STDIO_H
#include <stdio.h>
#endif
@@ -3314,15 +3324,12 @@
#if HAVE_WINSOCK2_H
#include <winsock2.h>
#endif
-main() {
+int main(int argc, char **argv) {
FILE *fp = fopen("conftestval", "w");
fprintf (fp, "%d\n", FD_SETSIZE);
- exit(0);
+ return 0;
}
-],
-DEFAULT_FD_SETSIZE=`cat conftestval`,
-DEFAULT_FD_SETSIZE=256,
-DEFAULT_FD_SETSIZE=256)
+]])],[DEFAULT_FD_SETSIZE=`cat conftestval`],[DEFAULT_FD_SETSIZE=256],[DEFAULT_FD_SETSIZE=256])
AC_MSG_RESULT($DEFAULT_FD_SETSIZE)
AC_DEFINE_UNQUOTED(DEFAULT_FD_SETSIZE, $DEFAULT_FD_SETSIZE, [Default FD_SETSIZE value])
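
The FD_SETSIZE and buffer-size probes below all share one idiom: the test program writes the detected number to a file named conftestval, configure reads it back with cat, and both failure slots (run failed, cross-compiling) fall back to a fixed default. Stripped to the bare pattern (PROBED_VALUE is an illustrative name):

    AC_RUN_IFELSE([AC_LANG_SOURCE([[
    #include <stdio.h>
    int main(int argc, char **argv) {
        FILE *fp = fopen("conftestval", "w");
        fprintf(fp, "%d\n", 42);  /* stand-in for the probed value */
        fclose(fp);
        return 0;
    }
    ]])],
    [PROBED_VALUE=`cat conftestval`],
    [PROBED_VALUE=256],
    [PROBED_VALUE=256])
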
@@ -3341,14 +3348,14 @@
LDFLAGS=`echo $LDFLAGS | sed -e "s/-pthread//"`
fi
esac
- AC_TRY_RUN([
+ AC_RUN_IFELSE([AC_LANG_SOURCE([[
#include
#include
#include
#include /* needed on FreeBSD */
#include
#include
-main() {
+int main(int argc, char **argv) {
FILE *fp;
int i,j;
#if defined(__CYGWIN32__) || defined (__CYGWIN__)
@@ -3401,12 +3408,9 @@
#endif /* IF !DEF CYGWIN */
fp = fopen("conftestval", "w");
fprintf (fp, "%d\n", i & ~0x3F);
- exit(0);
+ return 0;
}
- ],
- SQUID_MAXFD=`cat conftestval`,
- SQUID_MAXFD=256,
- SQUID_MAXFD=256)
+ ]])],[SQUID_MAXFD=`cat conftestval`],[SQUID_MAXFD=256],[SQUID_MAXFD=256])
dnl Microsoft MSVCRT.DLL supports 2048 maximum FDs
case "$host_os" in
mingw|mingw32)
@@ -3432,7 +3436,7 @@
dnl Not cached since people are likely to tune this
AC_MSG_CHECKING(Default UDP send buffer size)
-AC_TRY_RUN([
+AC_RUN_IFELSE([AC_LANG_SOURCE([[
#include
#include
#include
@@ -3448,7 +3452,7 @@
#if HAVE_WINSOCK2_H
#include <winsock2.h>
#endif
-main ()
+int main(int argc, char **argv)
{
FILE *fp;
int fd,val=0;
@@ -3459,28 +3463,25 @@
#else
socklen_t len=sizeof(socklen_t);
#endif
- if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) < 0) exit(1);
+ if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) < 0) return 1;
#if (defined(WIN32) || defined(__WIN32__) || defined(__WIN32)) && !(defined(__CYGWIN32__) || defined(__CYGWIN__))
- if (getsockopt(fd, SOL_SOCKET, SO_SNDBUF, (char *)&val, &len) < 0) exit(1);
+ if (getsockopt(fd, SOL_SOCKET, SO_SNDBUF, (char *)&val, &len) < 0) return 1;
WSACleanup();
#else
- if (getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, &len) < 0) exit(1);
+ if (getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, &len) < 0) return 1;
#endif
- if (val<=0) exit(1);
+ if (val<=0) return 1;
fp = fopen("conftestval", "w");
fprintf (fp, "%d\n", val);
- exit(0);
+ return 0;
}
-],
-SQUID_DETECT_UDP_SO_SNDBUF=`cat conftestval`,
-SQUID_DETECT_UDP_SO_SNDBUF=16384,
-SQUID_DETECT_UDP_SO_SNDBUF=16384)
+]])],[SQUID_DETECT_UDP_SO_SNDBUF=`cat conftestval`],[SQUID_DETECT_UDP_SO_SNDBUF=16384],[SQUID_DETECT_UDP_SO_SNDBUF=16384])
AC_MSG_RESULT($SQUID_DETECT_UDP_SO_SNDBUF)
AC_DEFINE_UNQUOTED(SQUID_DETECT_UDP_SO_SNDBUF, $SQUID_DETECT_UDP_SO_SNDBUF,[UDP send buffer size])
dnl Not cached since people are likely to tune this
AC_MSG_CHECKING(Default UDP receive buffer size)
-AC_TRY_RUN([
+AC_RUN_IFELSE([AC_LANG_SOURCE([[
#include
#include
#include
@@ -3496,7 +3497,7 @@
#if HAVE_WINSOCK2_H
#include <winsock2.h>
#endif
-main ()
+int main(int argc, char **argv)
{
FILE *fp;
int fd,val=0;
@@ -3507,28 +3508,25 @@
#else
socklen_t len=sizeof(socklen_t);
#endif
- if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) < 0) exit(1);
+ if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) < 0) return 1;
#if (defined(WIN32) || defined(__WIN32__) || defined(__WIN32)) && !(defined(__CYGWIN32__) || defined(__CYGWIN__))
- if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, (char *)&val, &len) < 0) exit(1);
+ if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, (char *)&val, &len) < 0) return 1;
WSACleanup();
#else
- if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len) < 0) exit(1);
+ if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len) < 0) return 1;
#endif
- if (val <= 0) exit(1);
+ if (val <= 0) return 1;
fp = fopen("conftestval", "w");
fprintf (fp, "%d\n", val);
- exit(0);
+ return 0;
}
-],
-SQUID_DETECT_UDP_SO_RCVBUF=`cat conftestval`,
-SQUID_DETECT_UDP_SO_RCVBUF=16384,
-SQUID_DETECT_UDP_SO_RCVBUF=16384)
+]])],[SQUID_DETECT_UDP_SO_RCVBUF=`cat conftestval`],[SQUID_DETECT_UDP_SO_RCVBUF=16384],[SQUID_DETECT_UDP_SO_RCVBUF=16384])
AC_MSG_RESULT($SQUID_DETECT_UDP_SO_RCVBUF)
AC_DEFINE_UNQUOTED(SQUID_DETECT_UDP_SO_RCVBUF, $SQUID_DETECT_UDP_SO_RCVBUF,[UDP receive buffer size])
dnl Not cached since people are likely to tune this
AC_MSG_CHECKING(Default TCP send buffer size)
-AC_TRY_RUN([
+AC_RUN_IFELSE([AC_LANG_SOURCE([[
#include
#include
#include
@@ -3544,7 +3542,7 @@
#if HAVE_WINSOCK2_H
#include <winsock2.h>
#endif
-main ()
+int main(int argc, char **argv)
{
FILE *fp;
int fd,val=0;
@@ -3555,22 +3553,19 @@
#else
socklen_t len=sizeof(socklen_t);
#endif
- if ((fd = socket(AF_INET, SOCK_STREAM, 0)) < 0) exit(1);
+ if ((fd = socket(AF_INET, SOCK_STREAM, 0)) < 0) return 1;
#if (defined(WIN32) || defined(__WIN32__) || defined(__WIN32)) && !(defined(__CYGWIN32__) || defined(__CYGWIN__))
- if (getsockopt(fd, SOL_SOCKET, SO_SNDBUF, (char *)&val, &len) < 0) exit(1);
+ if (getsockopt(fd, SOL_SOCKET, SO_SNDBUF, (char *)&val, &len) < 0) return 1;
WSACleanup();
#else
- if (getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, &len) < 0) exit(1);
+ if (getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, &len) < 0) return 1;
#endif
- if (val <= 0) exit(1);
+ if (val <= 0) return 1;
fp = fopen("conftestval", "w");
fprintf (fp, "%d\n", val);
- exit(0);
+ return 0;
}
-],
-SQUID_TCP_SO_SNDBUF=`cat conftestval`,
-SQUID_TCP_SO_SNDBUF=16384,
-SQUID_TCP_SO_SNDBUF=16384)
+]])],[SQUID_TCP_SO_SNDBUF=`cat conftestval`],[SQUID_TCP_SO_SNDBUF=16384],[SQUID_TCP_SO_SNDBUF=16384])
AC_MSG_RESULT($SQUID_TCP_SO_SNDBUF)
if test $SQUID_TCP_SO_SNDBUF -gt 32768; then
AC_MSG_NOTICE([Limiting send buffer size to 32K])
@@ -3580,7 +3575,7 @@
dnl Not cached since people are likely to tune this
AC_MSG_CHECKING(Default TCP receive buffer size)
-AC_TRY_RUN([
+AC_RUN_IFELSE([AC_LANG_SOURCE([[
#include
#include
#include
@@ -3596,7 +3591,7 @@
#if HAVE_WINSOCK2_H
#include <winsock2.h>
#endif
-main ()
+int main(int argc, char **argv)
{
FILE *fp;
int fd,val=0;
@@ -3607,22 +3602,19 @@
#else
socklen_t len=sizeof(socklen_t);
#endif
- if ((fd = socket(AF_INET, SOCK_STREAM, 0)) < 0) exit(1);
+ if ((fd = socket(AF_INET, SOCK_STREAM, 0)) < 0) return 1;
#if (defined(WIN32) || defined(__WIN32__) || defined(__WIN32)) && !(defined(__CYGWIN32__) || defined(__CYGWIN__))
- if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, (char *)&val, &len) < 0) exit(1);
+ if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, (char *)&val, &len) < 0) return 1;
WSACleanup();
#else
- if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len) < 0) exit(1);
+ if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len) < 0) return 1;
#endif
- if (val <= 0) exit(1);
+ if (val <= 0) return 1;
fp = fopen("conftestval", "w");
fprintf (fp, "%d\n", val);
- exit(0);
+ return 0;
}
-],
-SQUID_TCP_SO_RCVBUF=`cat conftestval`,
-SQUID_TCP_SO_RCVBUF=16384,
-SQUID_TCP_SO_RCVBUF=16384)
+]])],[SQUID_TCP_SO_RCVBUF=`cat conftestval`],[SQUID_TCP_SO_RCVBUF=16384],[SQUID_TCP_SO_RCVBUF=16384])
AC_MSG_RESULT($SQUID_TCP_SO_RCVBUF)
if test $SQUID_TCP_SO_RCVBUF -gt 65535; then
AC_MSG_NOTICE([Limiting receive buffer size to 64K])
@@ -3630,9 +3622,7 @@
fi
AC_DEFINE_UNQUOTED(SQUID_TCP_SO_RCVBUF, $SQUID_TCP_SO_RCVBUF,[TCP receive buffer size])
AC_CACHE_CHECK(if sys_errlist is already defined, ac_cv_needs_sys_errlist,
- AC_TRY_COMPILE([#include ],[char *s = sys_errlist;],
- ac_cv_needs_sys_errlist="no",
- ac_cv_needs_sys_errlist="yes")
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], [[char *s = sys_errlist;]])],[ac_cv_needs_sys_errlist="no"],[ac_cv_needs_sys_errlist="yes"])
)
if test "$ac_cv_needs_sys_errlist" = "yes" ; then
AC_DEFINE(NEED_SYS_ERRLIST,1,[If we need to declare sys_errlist[] as external])
@@ -3640,27 +3630,22 @@
dnl Not cached since people are likely to change this
AC_MSG_CHECKING(for libresolv _dns_ttl_ hack)
-AC_TRY_LINK(extern int _dns_ttl_;,return _dns_ttl_;,
-[AC_MSG_RESULT(yes)
-AC_DEFINE(LIBRESOLV_DNS_TTL_HACK,1,[If libresolv.a has been hacked to export _dns_ttl_])],
-AC_MSG_RESULT(no))
+AC_LINK_IFELSE([AC_LANG_PROGRAM([[extern int _dns_ttl_;]], [[return _dns_ttl_;]])],[AC_MSG_RESULT(yes)
+AC_DEFINE(LIBRESOLV_DNS_TTL_HACK,1,[If libresolv.a has been hacked to export _dns_ttl_])],[AC_MSG_RESULT(no)])
if test "$ac_cv_header_sys_statvfs_h" = "yes" ; then
AC_MSG_CHECKING(for working statvfs() interface)
-AC_TRY_COMPILE([
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#include
#include
#include
#include
-],
-[
+]], [[
struct statvfs sfs;
sfs.f_blocks = sfs.f_bfree = sfs.f_frsize =
sfs.f_files = sfs.f_ffree = 0;
statvfs("/tmp", &sfs);
-],
- ac_cv_func_statvfs=yes,
- ac_cv_func_statvfs=no)
+]])],[ac_cv_func_statvfs=yes],[ac_cv_func_statvfs=no])
AC_MSG_RESULT($ac_cv_func_statvfs)
if test "$ac_cv_func_statvfs" = "yes" ; then
AC_DEFINE(HAVE_STATVFS,1,[If your system has statvfs(), and if it actually works!])
@@ -3669,7 +3654,7 @@
dnl Detect what resolver fields we have available to use...
AC_CACHE_CHECK(for _res_ext.nsaddr_list, ac_cv_have_res_ext_nsaddr_list,
-AC_TRY_COMPILE([
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#if HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
@@ -3685,10 +3670,7 @@
#if HAVE_RESOLV_H
#include <resolv.h>
#endif
-],
-[_res_ext.nsaddr_list[[0]].s_addr;],
-ac_cv_have_res_ext_nsaddr_list="yes",
-ac_cv_have_res_ext_nsaddr_list="no"))
+]], [[_res_ext.nsaddr_list[[0]].s_addr;]])],[ac_cv_have_res_ext_nsaddr_list="yes"],[ac_cv_have_res_ext_nsaddr_list="no"]))
if test "$ac_cv_have_res_ext_nsaddr_list" = "yes" ; then
AC_DEFINE(_SQUID_RES_NSADDR6_LARRAY,_res_ext.nsaddr_list,[If _res_ext structure has nsaddr_list member])
AC_DEFINE(_SQUID_RES_NSADDR6_COUNT,ns6count,[Nameserver Counter for IPv6 _res_ext])
@@ -3696,7 +3678,7 @@
if test "$_SQUID_RES_NSADDR6_LIST" = ""; then
AC_CACHE_CHECK(for _res._u._ext.nsaddrs, ac_cv_have_res_ext_nsaddrs,
-AC_TRY_COMPILE([
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#if HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
@@ -3712,10 +3694,7 @@
#if HAVE_RESOLV_H
#include <resolv.h>
#endif
-],
-[_res._u._ext.nsaddrs[[0]]->sin6_addr;],
-ac_cv_have_res_ext_nsaddrs="yes",
-ac_cv_have_res_ext_nsaddrs="no"))
+]], [[_res._u._ext.nsaddrs[[0]]->sin6_addr;]])],[ac_cv_have_res_ext_nsaddrs="yes"],[ac_cv_have_res_ext_nsaddrs="no"]))
if test "$ac_cv_have_res_ext_nsaddrs" = "yes" ; then
AC_DEFINE(_SQUID_RES_NSADDR6_LPTR,_res._u._ext.nsaddrs,[If _res structure has _ext.nsaddrs member])
AC_DEFINE(_SQUID_RES_NSADDR6_COUNT,_res._u._ext.nscount6,[Nameserver Counter for IPv6 _res])
@@ -3723,7 +3702,7 @@
fi
AC_CACHE_CHECK(for _res.nsaddr_list, ac_cv_have_res_nsaddr_list,
-AC_TRY_COMPILE([
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#if HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
@@ -3739,10 +3718,7 @@
#if HAVE_RESOLV_H
#include <resolv.h>
#endif
-],
-[_res.nsaddr_list[[0]];],
-ac_cv_have_res_nsaddr_list="yes",
-ac_cv_have_res_nsaddr_list="no"))
+]], [[_res.nsaddr_list[[0]];]])],[ac_cv_have_res_nsaddr_list="yes"],[ac_cv_have_res_nsaddr_list="no"]))
if test $ac_cv_have_res_nsaddr_list = "yes" ; then
AC_DEFINE(_SQUID_RES_NSADDR_LIST,_res.nsaddr_list,[If _res structure has nsaddr_list member])
AC_DEFINE(_SQUID_RES_NSADDR_COUNT,_res.nscount,[Nameserver counter for IPv4 _res])
@@ -3750,7 +3726,7 @@
if test "$_SQUID_RES_NSADDR_LIST" = ""; then
AC_CACHE_CHECK(for _res.ns_list, ac_cv_have_res_ns_list,
-AC_TRY_COMPILE([
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#if HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
@@ -3766,10 +3742,7 @@
#if HAVE_RESOLV_H
#include <resolv.h>
#endif
-],
-[_res.ns_list[[0]].addr;],
-ac_cv_have_res_ns_list="yes",
-ac_cv_have_res_ns_list="no"))
+]], [[_res.ns_list[[0]].addr;]])],[ac_cv_have_res_ns_list="yes"],[ac_cv_have_res_ns_list="no"]))
if test $ac_cv_have_res_ns_list = "yes" ; then
AC_DEFINE(_SQUID_RES_NSADDR_LIST,_res.ns_list,[If _res structure has ns_list member])
AC_DEFINE(_SQUID_RES_NSADDR_COUNT,_res.nscount,[Nameserver counter for IPv4 _res])
@@ -3779,8 +3752,7 @@
dnl Squid will usually attempt to translate when packaging or building from VCS
use_translation="yes"
AC_ARG_ENABLE(translation,
- AC_HELP_STRING([--disable-translation],
- [Prevent Squid generating localized error page templates and manuals.
+ AS_HELP_STRING([--disable-translation],[Prevent Squid generating localized error page templates and manuals.
Which is usually tried, but may not be needed.]),
[ if test "$enableval" = "no" ; then
use_translation=no
@@ -3798,8 +3770,7 @@
dnl on error pages
use_errlocale=yes
AC_ARG_ENABLE(auto-locale,
- AC_HELP_STRING([--disable-auto-locale],
- [This prevents Squid providing localized error pages based on the
+ AS_HELP_STRING([--disable-auto-locale],[This prevents Squid providing localized error pages based on the
clients request headers.
When disabled Squid requires explicit language configuration.]),
[ if test "$enableval" = "no" ; then
diff -u -r -N squid-3.1.0.13/contrib/Makefile.in squid-3.1.0.14/contrib/Makefile.in
--- squid-3.1.0.13/contrib/Makefile.in 2009-08-05 01:32:30.000000000 +1200
+++ squid-3.1.0.14/contrib/Makefile.in 2009-09-27 15:28:48.000000000 +1200
@@ -51,6 +51,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/doc/Makefile.in squid-3.1.0.14/doc/Makefile.in
--- squid-3.1.0.13/doc/Makefile.in 2009-08-05 01:32:30.000000000 +1200
+++ squid-3.1.0.14/doc/Makefile.in 2009-09-27 15:28:48.000000000 +1200
@@ -58,6 +58,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/errors/aliases squid-3.1.0.14/errors/aliases
--- squid-3.1.0.13/errors/aliases 2009-08-05 01:32:08.000000000 +1200
+++ squid-3.1.0.14/errors/aliases 2009-09-27 15:28:24.000000000 +1200
@@ -3,20 +3,21 @@
bg bg-bg
cs cs-cz
da da-dk
-de de-de
+de de-at de-ch de-de de-li de-lu
el el-gr
-en en-au en-ca en-gb en-in en-nz en-sg en-tt en-uk en-us en-za
-es es-ar es-pe es-es
+en en-au en-bz en-ca en-gb en-ie en-in en-jm en-nz en-ph en-sg en-tt en-uk en-us en-za en-zw
+es es-ar es-bo es-cl es-co es-cr es-do es-ec es-es es-gt es-hn es-mx es-ni es-pa es-pe es-pr es-py es-sv es-uy es-ve
et et-ee
+fa fa-fa fa-ir
fi fi-fi
-fr fr-fr
+fr fr-be fr-ca fr-ch fr-fr fr-lu fr-mc
he he-il
hu hu-hu
hy hy-am
id id-id
-it it-it
+it it-ch it-it
ja ja-jp
-ko ko-kr
+ko ko-kp ko-kr
lt lt-lt
lv lv-lv
ms ms-my
@@ -27,7 +28,7 @@
ru ru-ru
sk sk-sk
sr sr-sp
-sv sv-se
+sv sv-fi sv-se
th th-th
tr tr-tr
uk uk-ua
diff -u -r -N squid-3.1.0.13/errors/alias-link.sh squid-3.1.0.14/errors/alias-link.sh
--- squid-3.1.0.13/errors/alias-link.sh 2009-08-05 01:32:08.000000000 +1200
+++ squid-3.1.0.14/errors/alias-link.sh 2009-09-27 15:28:24.000000000 +1200
@@ -38,6 +38,6 @@
# Remove and replace any pre-existing content/link
for alia in ${aliases}; do
${RM} -f -r ${DIR}/${alia} || exit 1
- ${LN} -s ${DIR}/${base} ${DIR}/${alia} || exit 1
+ ${LN} -s ${base} ${DIR}/${alia} || exit 1
done
done
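
The symlink for each alias now points at the plain directory name instead of an absolute path, so the links stay valid when the errors tree is staged under DESTDIR or relocated. With base "de" and alias "de-at" the effect is roughly the following (the install prefix shown is illustrative):

    # before: absolute target, breaks if the tree is moved
    ln -s /usr/local/squid/share/errors/de /usr/local/squid/share/errors/de-at
    # after: target resolved relative to the link's own directory
    ln -s de /usr/local/squid/share/errors/de-at
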
diff -u -r -N squid-3.1.0.13/errors/ar/ERR_DIR_LISTING squid-3.1.0.14/errors/ar/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/ar/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/ar/ERR_DIR_LISTING 2009-09-27 15:30:32.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/ar/ERR_FTP_LISTING squid-3.1.0.14/errors/ar/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/ar/ERR_FTP_LISTING 2009-08-05 01:33:59.000000000 +1200
+++ squid-3.1.0.14/errors/ar/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/az/ERR_DIR_LISTING squid-3.1.0.14/errors/az/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/az/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/az/ERR_DIR_LISTING 2009-09-27 15:30:37.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/az/ERR_FTP_LISTING squid-3.1.0.14/errors/az/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/az/ERR_FTP_LISTING 2009-08-05 01:34:04.000000000 +1200
+++ squid-3.1.0.14/errors/az/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/bg/ERR_DIR_LISTING squid-3.1.0.14/errors/bg/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/bg/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/bg/ERR_DIR_LISTING 2009-09-27 15:30:42.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
Съдържание на директорията:
%z
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/bg/ERR_FTP_LISTING squid-3.1.0.14/errors/bg/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/bg/ERR_FTP_LISTING 2009-08-05 01:34:09.000000000 +1200
+++ squid-3.1.0.14/errors/bg/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP директория: %U FTP директория: %U/
Съдържание на директорията:
%z
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/ca/ERR_DIR_LISTING squid-3.1.0.14/errors/ca/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/ca/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/ca/ERR_DIR_LISTING 2009-09-27 15:30:47.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/ca/ERR_FTP_LISTING squid-3.1.0.14/errors/ca/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/ca/ERR_FTP_LISTING 2009-08-05 01:34:13.000000000 +1200
+++ squid-3.1.0.14/errors/ca/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/cs/ERR_DIR_LISTING squid-3.1.0.14/errors/cs/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/cs/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/cs/ERR_DIR_LISTING 2009-09-27 15:30:52.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/cs/ERR_FTP_LISTING squid-3.1.0.14/errors/cs/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/cs/ERR_FTP_LISTING 2009-08-05 01:34:18.000000000 +1200
+++ squid-3.1.0.14/errors/cs/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/da/ERR_DIR_LISTING squid-3.1.0.14/errors/da/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/da/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/da/ERR_DIR_LISTING 2009-09-27 15:30:57.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/da/ERR_FTP_LISTING squid-3.1.0.14/errors/da/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/da/ERR_FTP_LISTING 2009-08-05 01:34:22.000000000 +1200
+++ squid-3.1.0.14/errors/da/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/de/ERR_DIR_LISTING squid-3.1.0.14/errors/de/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/de/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/de/ERR_DIR_LISTING 2009-09-27 15:31:03.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/de/ERR_FTP_LISTING squid-3.1.0.14/errors/de/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/de/ERR_FTP_LISTING 2009-08-05 01:34:27.000000000 +1200
+++ squid-3.1.0.14/errors/de/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Verzeichnis: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/el/ERR_DIR_LISTING squid-3.1.0.14/errors/el/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/el/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/el/ERR_DIR_LISTING 2009-09-27 15:31:08.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/el/ERR_FTP_LISTING squid-3.1.0.14/errors/el/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/el/ERR_FTP_LISTING 2009-08-05 01:34:32.000000000 +1200
+++ squid-3.1.0.14/errors/el/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/en/ERR_DIR_LISTING squid-3.1.0.14/errors/en/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/en/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/en/ERR_DIR_LISTING 2009-09-27 15:31:14.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/en/ERR_FTP_LISTING squid-3.1.0.14/errors/en/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/en/ERR_FTP_LISTING 2009-08-05 01:34:36.000000000 +1200
+++ squid-3.1.0.14/errors/en/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/errorpage.css squid-3.1.0.14/errors/errorpage.css
--- squid-3.1.0.13/errors/errorpage.css 2009-08-05 01:32:08.000000000 +1200
+++ squid-3.1.0.14/errors/errorpage.css 2009-09-27 15:28:24.000000000 +1200
@@ -66,11 +66,11 @@
font-family:sans-serif;
}
-/* special event: FTP directory listing */
-#ftplisting tr.entry td.icon,td.filename,td.size,td.date {
+/* special event: FTP / Gopher directory listing */
+#dirlisting tr.entry td.icon,td.filename,td.size,td.date {
border-bottom: groove;
}
-#ftplisting td.size {
+#dirlisting td.size {
width: 50px;
text-align: right;
padding-right: 5px;
diff -u -r -N squid-3.1.0.13/errors/es/ERR_DIR_LISTING squid-3.1.0.14/errors/es/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/es/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/es/ERR_DIR_LISTING 2009-09-27 15:31:20.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
Contenido del Directorio:
%z
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/es/ERR_FTP_LISTING squid-3.1.0.14/errors/es/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/es/ERR_FTP_LISTING 2009-08-05 01:34:41.000000000 +1200
+++ squid-3.1.0.14/errors/es/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- Directorio FTP: %U
Contenido del Directorio:
%z
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/et/ERR_DIR_LISTING squid-3.1.0.14/errors/et/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/et/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/et/ERR_DIR_LISTING 2009-09-27 15:31:26.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/et/ERR_FTP_LISTING squid-3.1.0.14/errors/et/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/et/ERR_FTP_LISTING 2009-08-05 01:34:46.000000000 +1200
+++ squid-3.1.0.14/errors/et/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/fa/ERR_DIR_LISTING squid-3.1.0.14/errors/fa/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/fa/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/fa/ERR_DIR_LISTING 2009-09-27 15:31:31.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
محتویات مسیر:
%z
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/fa/ERR_FTP_LISTING squid-3.1.0.14/errors/fa/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/fa/ERR_FTP_LISTING 2009-08-05 01:34:50.000000000 +1200
+++ squid-3.1.0.14/errors/fa/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
محتویات مسیر:
%z
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/fi/ERR_DIR_LISTING squid-3.1.0.14/errors/fi/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/fi/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/fi/ERR_DIR_LISTING 2009-09-27 15:31:36.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/fi/ERR_FTP_LISTING squid-3.1.0.14/errors/fi/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/fi/ERR_FTP_LISTING 2009-08-05 01:34:55.000000000 +1200
+++ squid-3.1.0.14/errors/fi/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/fr/ERR_DIR_LISTING squid-3.1.0.14/errors/fr/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/fr/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/fr/ERR_DIR_LISTING 2009-09-27 15:31:42.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
Contenu du Répertoire :
%z
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/fr/ERR_FTP_LISTING squid-3.1.0.14/errors/fr/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/fr/ERR_FTP_LISTING 2009-08-05 01:34:59.000000000 +1200
+++ squid-3.1.0.14/errors/fr/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- Répertoire FTP : %U
Contenu du Répertoire :
%z
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/he/ERR_DIR_LISTING squid-3.1.0.14/errors/he/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/he/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/he/ERR_DIR_LISTING 2009-09-27 15:31:47.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/he/ERR_FTP_LISTING squid-3.1.0.14/errors/he/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/he/ERR_FTP_LISTING 2009-08-05 01:35:04.000000000 +1200
+++ squid-3.1.0.14/errors/he/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/hu/ERR_DIR_LISTING squid-3.1.0.14/errors/hu/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/hu/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/hu/ERR_DIR_LISTING 2009-09-27 15:31:52.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/hu/ERR_FTP_LISTING squid-3.1.0.14/errors/hu/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/hu/ERR_FTP_LISTING 2009-08-05 01:35:09.000000000 +1200
+++ squid-3.1.0.14/errors/hu/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/hy/ERR_DIR_LISTING squid-3.1.0.14/errors/hy/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/hy/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/hy/ERR_DIR_LISTING 2009-09-27 15:31:57.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/hy/ERR_FTP_LISTING squid-3.1.0.14/errors/hy/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/hy/ERR_FTP_LISTING 2009-08-05 01:35:13.000000000 +1200
+++ squid-3.1.0.14/errors/hy/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/id/ERR_DIR_LISTING squid-3.1.0.14/errors/id/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/id/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/id/ERR_DIR_LISTING 2009-09-27 15:32:02.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/id/ERR_FTP_LISTING squid-3.1.0.14/errors/id/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/id/ERR_FTP_LISTING 2009-08-05 01:35:18.000000000 +1200
+++ squid-3.1.0.14/errors/id/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_ACCESS_DENIED squid-3.1.0.14/errors/it/ERR_ACCESS_DENIED
--- squid-3.1.0.13/errors/it/ERR_ACCESS_DENIED 2009-08-05 01:35:21.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_ACCESS_DENIED 2009-09-27 15:32:07.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Accesso negato.
La configurazione di controllo d'accesso non consente di soddisfare la richiesta. Si prega di contattare il fornitore del servizio nel caso questo comportamento sia scorretto.
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Accesso negato.
La configurazione di controllo d'accesso non consente di soddisfare la richiesta. Si prega di contattare il fornitore del servizio nel caso questo comportamento sia scorretto.
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_CACHE_MGR_ACCESS_DENIED squid-3.1.0.14/errors/it/ERR_CACHE_MGR_ACCESS_DENIED
--- squid-3.1.0.13/errors/it/ERR_CACHE_MGR_ACCESS_DENIED 2009-08-05 01:35:21.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_CACHE_MGR_ACCESS_DENIED 2009-09-27 15:32:07.000000000 +1200
@@ -1 +1 @@
- ERROREE: l'accesso al Cache Manager è negato.
ERROR
L'accesso al cache manager è negato.
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Cache Manager Accesso negato.
Per richiedere %U da questo cache manager è necessario prima identificarsi.
Si prega di contattare il gestore del vostro proxy se avete diffcoltà nell'identificarvi per l'accesso al servizio o, se siete l'amministratore, di consultare la documentazione di Squid riguardante l'interfaccia del cache manager e di verificare il log del servizio alla ricerca informazioni più dettagliate sull'errore.
\ No newline at end of file
+ ERRORE: Cache Manager Access Denied
ERROR
Cache Manager Accesso negato.
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
L'accesso al cache manager è negato.
Per richiedere %U da questo cache manager è necessario prima identificarsi.
Si prega di contattare il gestore del vostro proxy se avete diffcoltà nell'identificarvi per l'accesso al servizio o, se siete l'amministratore, di consultare la documentazione di Squid riguardante l'interfaccia del cache manager e di verificare il log del servizio alla ricerca informazioni più dettagliate sull'errore.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_CANNOT_FORWARD squid-3.1.0.14/errors/it/ERR_CANNOT_FORWARD
--- squid-3.1.0.13/errors/it/ERR_CANNOT_FORWARD 2009-08-05 01:35:21.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_CANNOT_FORWARD 2009-09-27 15:32:07.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Non è possibile inoltrare la richiesta in questo momento.
Non è stato possibile inoltrare la richiesta al server di origine nè ad alcun proxy di tipo parent noto. La causa più probabile è che questo proxy sia configurato per non contattare mai direttamente il server di origine, e che tutti i proxy parent non siano disponibili in questo momento.
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Non è possibile inoltrare la richiesta in questo momento.
Non è stato possibile inoltrare la richiesta al server di origine nè ad alcun proxy di tipo parent noto. La causa più probabile è che questo proxy sia configurato per non contattare mai direttamente il server di origine, e che tutti i proxy parent non siano disponibili in questo momento.
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_CONNECT_FAIL squid-3.1.0.14/errors/it/ERR_CONNECT_FAIL
--- squid-3.1.0.13/errors/it/ERR_CONNECT_FAIL 2009-08-05 01:35:22.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_CONNECT_FAIL 2009-09-27 15:32:05.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
La connessione a %I non è riuscita.
Il sistema ha risposto: %E
Il server remoto o un tratto di rete necessario a raggiungerlo potrebbero essere guasti. Si prega di ritentare la richiesta tra qualche minuto.
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
La connessione a %I non è riuscita.
Il sistema ha risposto: %E
Il server remoto o un tratto di rete necessario a raggiungerlo potrebbero essere guasti. Si prega di ritentare la richiesta tra qualche minuto.
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_DIR_LISTING squid-3.1.0.14/errors/it/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/it/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_DIR_LISTING 2009-09-27 15:32:07.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
Contenuto della directory:
%z
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_DNS_FAIL squid-3.1.0.14/errors/it/ERR_DNS_FAIL
--- squid-3.1.0.13/errors/it/ERR_DNS_FAIL 2009-08-05 01:35:22.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_DNS_FAIL 2009-09-27 15:32:09.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Non è stato possibile risalire all'indirizzo IP corrispondente al nome host %H
Il server DNS ha risposto:
%z
Questo significa che il Proxy non è riuscito a tradurre il nome host nella URL nel relativo indirizzo. Verificate la correttezza dell'indirizzo e riprovate.
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Non è stato possibile risalire all'indirizzo IP corrispondente al nome host %H
Il server DNS ha risposto:
%z
Questo significa che il Proxy non è riuscito a tradurre il nome host nella URL nel relativo indirizzo. Verificate la correttezza dell'indirizzo e riprovate.
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_ESI squid-3.1.0.14/errors/it/ERR_ESI
--- squid-3.1.0.13/errors/it/ERR_ESI 2009-08-05 01:35:22.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_ESI 2009-09-27 15:32:05.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
L'elaborazione ESI è fallita.
Il sistema di gestione delle funzioni ESI ha risposto:
%Z
Questo significa che il surrogate non è stato in grado di processare il template ESI. Si prega di segnalare l'errore al webmaster.
Il webmaster è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
L'elaborazione ESI è fallita.
Il sistema di gestione delle funzioni ESI ha risposto:
%Z
Questo significa che il surrogate non è stato in grado di processare il template ESI. Si prega di segnalare l'errore al webmaster.
Il webmaster è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_FORWARDING_DENIED squid-3.1.0.14/errors/it/ERR_FORWARDING_DENIED
--- squid-3.1.0.13/errors/it/ERR_FORWARDING_DENIED 2009-08-05 01:35:22.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_FORWARDING_DENIED 2009-09-27 15:32:07.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Inoltro negato.
Il proxy non inoltrerà questa richiesta, perchè tenta di stabilire una relazione di parentela. Probabilmente il client all'indirizzo %i è una cache configurata in modo sbagliato.
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Inoltro negato.
Il proxy non inoltrerà questa richiesta, perchè tenta di stabilire una relazione di parentela. Probabilmente il client all'indirizzo %i è una cache configurata in modo sbagliato.
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_FTP_DISABLED squid-3.1.0.14/errors/it/ERR_FTP_DISABLED
--- squid-3.1.0.13/errors/it/ERR_FTP_DISABLED 2009-08-05 01:35:22.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_FTP_DISABLED 2009-09-27 15:32:09.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Il protocollo FTP è disabilitato.
Questo proxy non supporta il protocollo FTP.
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Il protocollo FTP è disabilitato.
Questo proxy non supporta il protocollo FTP.
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_FTP_FAILURE squid-3.1.0.14/errors/it/ERR_FTP_FAILURE
--- squid-3.1.0.13/errors/it/ERR_FTP_FAILURE 2009-08-05 01:35:22.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_FTP_FAILURE 2009-09-27 15:32:08.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Si è verificato un errore di protocollo FTP durante l'accesso alla URL %U.
Il comando FTP inviato da Squid era:
%f
Il server ha risposto:
%F
%g
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Si è verificato un errore di protocollo FTP durante l'accesso alla URL %U.
Il comando FTP inviato da Squid era:
%f
Il server ha risposto:
%F
%g
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_FTP_FORBIDDEN squid-3.1.0.14/errors/it/ERR_FTP_FORBIDDEN
--- squid-3.1.0.13/errors/it/ERR_FTP_FORBIDDEN 2009-08-05 01:35:22.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_FTP_FORBIDDEN 2009-09-27 15:32:07.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Le credenziali fornite per l'accesso al server FTP relativo alla URL %U sono invalide.
Il comando FTP inviato da Squid era:
%f
Il server ha risposto:
%F
%g
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Le credenziali fornite per l'accesso al server FTP relativo alla URL %U sono invalide.
Il comando FTP inviato da Squid era:
%f
Il server ha risposto:
%F
%g
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_FTP_LISTING squid-3.1.0.14/errors/it/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/it/ERR_FTP_LISTING 2009-08-05 01:35:23.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- Directory FTP: %U
Contenuto della directory:
%z
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_FTP_NOT_FOUND squid-3.1.0.14/errors/it/ERR_FTP_NOT_FOUND
--- squid-3.1.0.13/errors/it/ERR_FTP_NOT_FOUND 2009-08-05 01:35:23.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_FTP_NOT_FOUND 2009-09-27 15:32:06.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Non è stato possibile accedere alla URL: %U.
Il comando FTP inviato da Squid era:
%f
Il server ha risposto:
%F
%g
Questo potrebbe essere causato da una URL FTP con un percorso completo, comportamento questo non conforme allo standard imposto dalla RFC 1738. Se questo è il caso, il file è disponibile alla URL %B.
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Non è stato possibile accedere alla URL: %U.
Il comando FTP inviato da Squid era:
%f
Il server ha risposto:
%F
%g
Questo potrebbe essere causato da una URL FTP con un percorso completo, comportamento questo non conforme allo standard imposto dalla RFC 1738. Se questo è il caso, il file è disponibile alla URL %B.
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_FTP_PUT_ERROR squid-3.1.0.14/errors/it/ERR_FTP_PUT_ERROR
--- squid-3.1.0.13/errors/it/ERR_FTP_PUT_ERROR 2009-08-05 01:35:23.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_FTP_PUT_ERROR 2009-09-27 15:32:08.000000000 +1200
@@ -1 +1 @@
- ERROREE: l'invio del file via FTP non ha avuto successo.
ERROR
Comando FTP PUT non riuscito.
Mentre si cercava di effettuare un'operazione di FTP PUT verso la URL %U
Il comando FTP inviato da Squid era:
%f
Il server ha risposto:
%F
Questo significa che il server FTP potrebbe non avere i permessi o lo spazio per ricevere il file. Si prega di controllare il percorso, i permessi e lo spazio su disco e riprovare.
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: FTP upload failed
ERROR
Comando FTP PUT non riuscito.
Mentre si cercava di effettuare un'operazione di FTP PUT verso la URL %U
Il comando FTP inviato da Squid era:
%f
Il server ha risposto:
%F
Questo significa che il server FTP potrebbe non avere i permessi o lo spazio per ricevere il file. Si prega di controllare il percorso, i permessi e lo spazio su disco e riprovare.
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_FTP_UNAVAILABLE squid-3.1.0.14/errors/it/ERR_FTP_UNAVAILABLE
--- squid-3.1.0.13/errors/it/ERR_FTP_UNAVAILABLE 2009-08-05 01:35:23.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_FTP_UNAVAILABLE 2009-09-27 15:32:06.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Il server FTP era sovraccarico e non ha consentito di accedere alla URL: %U.
Il comando FTP inviato da Squid era:
%f
Il server ha risposto:
%F
%g
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Il server FTP era sovraccarico e non ha consentito di accedere alla URL: %U.
Il comando FTP inviato da Squid era:
%f
Il server ha risposto:
%F
%g
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_ICAP_FAILURE squid-3.1.0.14/errors/it/ERR_ICAP_FAILURE
--- squid-3.1.0.13/errors/it/ERR_ICAP_FAILURE 2009-08-05 01:35:23.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_ICAP_FAILURE 2009-09-27 15:32:07.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Si è verificato un errore di protocollo ICAP.
Il sistema ha risposto: %E
Questo significa che qualche aspetto della comunicazione ICAP non è stato completato regolarmente.
Alcuni dei possibili problemi sono:
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Si è verificato un errore di protocollo ICAP.
Il sistema ha risposto: %E
Questo significa che qualche aspetto della comunicazione ICAP non è stato completato regolarmente.
Alcuni dei possibili problemi sono:
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_INVALID_REQ squid-3.1.0.14/errors/it/ERR_INVALID_REQ
--- squid-3.1.0.13/errors/it/ERR_INVALID_REQ 2009-08-05 01:35:23.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_INVALID_REQ 2009-09-27 15:32:08.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Richiesta non valida. Si è verificato un errore durante l'elaborazione della richiesta:
%R
Alcuni dei possibili problemi sono:
Metodo della richiesta non specificato o sconoscito.
URL non specificata.
L'identificativo del protocollo HTTP è mancante (HTTP/1.0).
La richiesta è di dimensioni troppo grandi.
Una richiesta di tipo POST o PUT non contiene il campo Content-Length.
C'è un carattere irregolare nel nome host: gli underscore "_" non sono consentiti.
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Richiesta non valida. Si è verificato un errore durante l'elaborazione della richiesta:
%R
Alcuni dei possibili problemi sono:
Metodo della richiesta non specificato o sconoscito.
URL non specificata.
L'identificativo del protocollo HTTP è mancante (HTTP/1.0).
La richiesta è di dimensioni troppo grandi.
Una richiesta di tipo POST o PUT non contiene il campo Content-Length.
C'è un carattere irregolare nel nome host: gli underscore "_" non sono consentiti.
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_INVALID_RESP squid-3.1.0.14/errors/it/ERR_INVALID_RESP
--- squid-3.1.0.13/errors/it/ERR_INVALID_RESP 2009-08-05 01:35:24.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_INVALID_RESP 2009-09-27 15:32:09.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Risposta non valida. Si è verificato un errore durante l'elaborazione della richiesta:
%R
Il messaggio di risposta HTTP ricevuto dal server non era comprensibile o era irregolare. Si prega di contattare il gestore del sito per segnalargli l'errore.
L'amministratore del proxy potrebbe essere in grado di fornire ulteriori dettagli sul tipo di problema.
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Risposta non valida. Si è verificato un errore durante l'elaborazione della richiesta:
%R
Il messaggio di risposta HTTP ricevuto dal server non era comprensibile o era irregolare. Si prega di contattare il gestore del sito per segnalargli l'errore.
L'amministratore del proxy potrebbe essere in grado di fornire ulteriori dettagli sul tipo di problema.
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_INVALID_URL squid-3.1.0.14/errors/it/ERR_INVALID_URL
--- squid-3.1.0.13/errors/it/ERR_INVALID_URL 2009-08-05 01:35:24.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_INVALID_URL 2009-09-27 15:32:09.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
URL non valida
Qualche aspetto della URL richiesta è scorretto
Alcuni dei possibili problemi sono:
Protocollo di accesso mancante o non corretto (dovrebbe essere http://
o simile).
Nome host non specificato.
Doppia codifica ("double-escape") non valida nella path della URL.
C'è un carattere irregolare nel nome host: gli underscore "_" non sono consentiti.
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
URL non valida
Qualche aspetto della URL richiesta è scorretto
Alcuni dei possibili problemi sono:
Protocollo di accesso mancante o non corretto (dovrebbe essere http://
o simile).
Nome host non specificato.
Doppia codifica ("double-escape") non valida nella path della URL.
C'è un carattere irregolare nel nome host: gli underscore "_" non sono consentiti.
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_LIFETIME_EXP squid-3.1.0.14/errors/it/ERR_LIFETIME_EXP
--- squid-3.1.0.13/errors/it/ERR_LIFETIME_EXP 2009-08-05 01:35:24.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_LIFETIME_EXP 2009-09-27 15:32:06.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Il timeout di mantenimento della connessione è scaduto.
La richiesta è stata interrotta perchè è stato superato il limite di durata massimo della connessione.
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Il timeout di mantenimento della connessione è scaduto.
La richiesta è stata interrotta perchè è stato superato il limite di durata massimo della connessione.
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_NO_RELAY squid-3.1.0.14/errors/it/ERR_NO_RELAY
--- squid-3.1.0.13/errors/it/ERR_NO_RELAY 2009-08-05 01:35:24.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_NO_RELAY 2009-09-27 15:32:09.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Le funzioni di inoltro Wais non sono implementate.
Non è stato configurato alcun server di relay per il protocollo WAIS su questa Cache! Prenditela con l'amministratore.
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Le funzioni di inoltro Wais non sono implementate.
Non è stato configurato alcun server di relay per il protocollo WAIS su questa Cache! Prenditela con l'amministratore.
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_ONLY_IF_CACHED_MISS squid-3.1.0.14/errors/it/ERR_ONLY_IF_CACHED_MISS
--- squid-3.1.0.13/errors/it/ERR_ONLY_IF_CACHED_MISS 2009-08-05 01:35:24.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_ONLY_IF_CACHED_MISS 2009-09-27 15:32:06.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Non è stato possibile reperire un documento aggiornato nella cache e la richiesta contiene la direttiva only-if-cached
.
La richiesta contiene la direttiva di controllo della cache only-if-cached
. Non è stato possibile trovare il relativo documento nella cache, oppure richiedeva una operazione di verifica, non consentita dalla direttiva.
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Non è stato possibile reperire un documento aggiornato nella cache e la richiesta contiene la direttiva only-if-cached
.
La richiesta contiene la direttiva di controllo della cache only-if-cached
. Non è stato possibile trovare il relativo documento nella cache, oppure richiedeva una operazione di verifica, non consentita dalla direttiva.
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_READ_ERROR squid-3.1.0.14/errors/it/ERR_READ_ERROR
--- squid-3.1.0.13/errors/it/ERR_READ_ERROR 2009-08-05 01:35:24.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_READ_ERROR 2009-09-27 15:32:06.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Errore di lettura
Il sistema ha risposto: %E
Si è verificato un errore nella ricezione dei dati dalla rete. Si prega di ritentare la richiesta.
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Errore di lettura
Il sistema ha risposto: %E
Si è verificato un errore nella ricezione dei dati dalla rete. Si prega di ritentare la richiesta.
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_READ_TIMEOUT squid-3.1.0.14/errors/it/ERR_READ_TIMEOUT
--- squid-3.1.0.13/errors/it/ERR_READ_TIMEOUT 2009-08-05 01:35:24.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_READ_TIMEOUT 2009-09-27 15:32:06.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Timeout nella lettura
Il sistema ha risposto: %E
Si è verificato un timeout durante la ricezione dei dati dalla rete. La rete o il server potrebbero essere scollegati o in congestione. Riprovare più tardi.
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Timeout nella lettura
Il sistema ha risposto: %E
Si è verificato un timeout durante la ricezione dei dati dalla rete. La rete o il server potrebbero essere scollegati o in congestione. Riprovare più tardi.
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_SECURE_CONNECT_FAIL squid-3.1.0.14/errors/it/ERR_SECURE_CONNECT_FAIL
--- squid-3.1.0.13/errors/it/ERR_SECURE_CONNECT_FAIL 2009-08-05 01:35:24.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_SECURE_CONNECT_FAIL 2009-09-27 15:32:05.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Non è stato possibile stabilire una connessione sicura verso %I
Il sistema ha risposto: %E
Questo Proxy e il server remoto non sono riusciti a negoziare una connessione cifrata con caratteristiche mutuamente accettabili. È possibile che il server non supporti le connessioni cifrate, o che il proxy non sia soddisfatto dalle credenziali di sicurezza proposte dal server.
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Non è stato possibile stabilire una connessione sicura verso %I
Il sistema ha risposto: %E
Questo Proxy e il server remoto non sono riusciti a negoziare una connessione cifrata con caratteristiche mutuamente accettabili. È possibile che il server non supporti le connessioni cifrate, o che il proxy non sia soddisfatto dalle credenziali di sicurezza proposte dal server.
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_SHUTTING_DOWN squid-3.1.0.14/errors/it/ERR_SHUTTING_DOWN
--- squid-3.1.0.13/errors/it/ERR_SHUTTING_DOWN 2009-08-05 01:35:25.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_SHUTTING_DOWN 2009-09-27 15:32:05.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Questo Proxy non è in grado di soddisfare la richiesta perchè è in fase di spegnimento. si prega di riprovare tra qualche minuto.
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Questo Proxy non è in grado di soddisfare la richiesta perchè è in fase di spegnimento. si prega di riprovare tra qualche minuto.
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_SOCKET_FAILURE squid-3.1.0.14/errors/it/ERR_SOCKET_FAILURE
--- squid-3.1.0.13/errors/it/ERR_SOCKET_FAILURE 2009-08-05 01:35:25.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_SOCKET_FAILURE 2009-09-27 15:32:05.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
L'operazione di rete (socket) è fallita.
Il sistema ha risposto: %E
Squid non è in grado di aprire un socket TCP, probabilmente a causa del sovraccarico. Si prega di ritentare la richiesta.
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
L'operazione di rete (socket) è fallita.
Il sistema ha risposto: %E
Squid non è in grado di aprire un socket TCP, probabilmente a causa del sovraccarico. Si prega di ritentare la richiesta.
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_TOO_BIG squid-3.1.0.14/errors/it/ERR_TOO_BIG
--- squid-3.1.0.13/errors/it/ERR_TOO_BIG 2009-08-05 01:35:25.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_TOO_BIG 2009-09-27 15:32:08.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
La richesta o la risposta è di dimensioni troppo grandi.
Se la richiesta è di tipo PUT, ciò che si sta cercando di inviare è di dimensioni troppo grandi.
Se la richiesta è di tipo GET, ciò che si sta cercando di scaricare è di dimensioni troppo grandi.
Questi limiti sono stati imposti dal provider che gestisce questo Proxy. Se ritenete che questo sia scorretto si prega di contattarlo.
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
La richesta o la risposta è di dimensioni troppo grandi.
Se la richiesta è di tipo PUT, ciò che si sta cercando di inviare è di dimensioni troppo grandi.
Se la richiesta è di tipo GET, ciò che si sta cercando di scaricare è di dimensioni troppo grandi.
Questi limiti sono stati imposti dal provider che gestisce questo Proxy. Se ritenete che questo sia scorretto si prega di contattarlo.
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_UNSUP_HTTPVERSION squid-3.1.0.14/errors/it/ERR_UNSUP_HTTPVERSION
--- squid-3.1.0.13/errors/it/ERR_UNSUP_HTTPVERSION 2009-08-05 01:35:25.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_UNSUP_HTTPVERSION 2009-09-27 15:32:06.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Versione HTTP non supportata
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Unsupported HTTP version
Questa installazione di Squid non supporta la versione HTTP che si sta cercando di utilizzare.
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
Versione HTTP non supportata
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Unsupported HTTP version
Questa installazione di Squid non supporta la versione HTTP che si sta cercando di utilizzare.
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_UNSUP_REQ squid-3.1.0.14/errors/it/ERR_UNSUP_REQ
--- squid-3.1.0.13/errors/it/ERR_UNSUP_REQ 2009-08-05 01:35:25.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_UNSUP_REQ 2009-09-27 15:32:08.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Metodo e protocollo della richiesta non sono supportati.
Squid non consente di utilizzare qualsiasi tipo di richiesta per qualsiasi protocollo (a esempio non consente una richiesta POST su protocollo Gopher).
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Metodo e protocollo della richiesta non sono supportati.
Squid non consente di utilizzare qualsiasi tipo di richiesta per qualsiasi protocollo (a esempio non consente una richiesta POST su protocollo Gopher).
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_URN_RESOLVE squid-3.1.0.14/errors/it/ERR_URN_RESOLVE
--- squid-3.1.0.13/errors/it/ERR_URN_RESOLVE 2009-08-05 01:35:25.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_URN_RESOLVE 2009-09-27 15:32:05.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URN richiesta
ERROR
Non è stato possibile ottenere una URL corrispondente alla URN richiesta.
Mentre si cercava di accedere alla URN %U si è presentato il seguente errore:
Impossibile risolvere la URN.
Hey, non ci si deve aspettare granchè dalle URN su %T :)
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: The requested URN could not be retrieved
ERROR
Non è stato possibile ottenere una URL corrispondente alla URN richiesta.
Mentre si cercava di accedere alla URN %U si è presentato il seguente errore:
Impossibile risolvere la URN.
Hey, non ci si deve aspettare granchè dalle URN su %T :)
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_WRITE_ERROR squid-3.1.0.14/errors/it/ERR_WRITE_ERROR
--- squid-3.1.0.13/errors/it/ERR_WRITE_ERROR 2009-08-05 01:35:25.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_WRITE_ERROR 2009-09-27 15:32:09.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Errore durante l'operazione di scrittura
Il sistema ha risposto: %E
Si è verificato un errore durante la ricezione dei dati dalla rete. Si prega di ritentare la richiesta.
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Errore durante l'operazione di scrittura
Il sistema ha risposto: %E
Si è verificato un errore durante la ricezione dei dati dalla rete. Si prega di ritentare la richiesta.
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/it/ERR_ZERO_SIZE_OBJECT squid-3.1.0.14/errors/it/ERR_ZERO_SIZE_OBJECT
--- squid-3.1.0.13/errors/it/ERR_ZERO_SIZE_OBJECT 2009-08-05 01:35:26.000000000 +1200
+++ squid-3.1.0.14/errors/it/ERR_ZERO_SIZE_OBJECT 2009-09-27 15:32:05.000000000 +1200
@@ -1 +1 @@
- ERROREE: non è possibile accedere alla URL richiesta
ERROR
Non è stato possibile accedere alla URL richiesta.
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Risposta di dimensione nulla.
Squid non ha ricevuto risposta a questa richiesta.
L'amministratore del proxy è %w.
\ No newline at end of file
+ ERRORE: Non è stato possibile accedere alla URL richiesta.
ERROR
The requested URL could not be retrieved
Mentre si cercava di accedere alla URL %U si è presentato il seguente errore:
Risposta di dimensione nulla.
Squid non ha ricevuto risposta a questa richiesta.
L'amministratore del proxy è %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/ja/ERR_DIR_LISTING squid-3.1.0.14/errors/ja/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/ja/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/ja/ERR_DIR_LISTING 2009-09-27 15:32:12.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
ディレクトリの内容:
%z
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/ja/ERR_FTP_LISTING squid-3.1.0.14/errors/ja/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/ja/ERR_FTP_LISTING 2009-08-05 01:35:27.000000000 +1200
+++ squid-3.1.0.14/errors/ja/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP ディレクトリ: %U FTP ディレクトリ: %U/
ディレクトリの内容:
%z
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/ko/ERR_DIR_LISTING squid-3.1.0.14/errors/ko/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/ko/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/ko/ERR_DIR_LISTING 2009-09-27 15:32:16.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/ko/ERR_FTP_LISTING squid-3.1.0.14/errors/ko/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/ko/ERR_FTP_LISTING 2009-08-05 01:35:32.000000000 +1200
+++ squid-3.1.0.14/errors/ko/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/lt/ERR_DIR_LISTING squid-3.1.0.14/errors/lt/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/lt/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/lt/ERR_DIR_LISTING 2009-09-27 15:32:21.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/lt/ERR_FTP_LISTING squid-3.1.0.14/errors/lt/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/lt/ERR_FTP_LISTING 2009-08-05 01:35:36.000000000 +1200
+++ squid-3.1.0.14/errors/lt/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/lv/ERR_DIR_LISTING squid-3.1.0.14/errors/lv/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/lv/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/lv/ERR_DIR_LISTING 2009-09-27 15:32:26.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/lv/ERR_FTP_LISTING squid-3.1.0.14/errors/lv/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/lv/ERR_FTP_LISTING 2009-08-05 01:35:41.000000000 +1200
+++ squid-3.1.0.14/errors/lv/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directorijs: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/Makefile.am squid-3.1.0.14/errors/Makefile.am
--- squid-3.1.0.13/errors/Makefile.am 2009-08-05 01:32:08.000000000 +1200
+++ squid-3.1.0.14/errors/Makefile.am 2009-09-27 15:28:24.000000000 +1200
@@ -11,43 +11,155 @@
DEFAULT_STYLESHEET = $(sysconfdir)/errorpage.css
## List of automated translations possible:
-TRANSLATIONPO=`ls -1 $(top_srcdir)/errors/*.po | grep -o -E "[a-z\-]+\.po" | sed s/.po//`
-TRANSLATIONDIR=`ls -1 $(srcdir) $(builddir) | sed -e 's%$(srcdir)/%%' -e 's%$(builddir)/%%' -e 's%.po%%' `
+ERROR_TEMPLATES = \
+ templates/ERR_FTP_PUT_MODIFIED \
+ templates/ERR_ESI \
+ templates/ERR_SECURE_CONNECT_FAIL \
+ templates/ERR_ZERO_SIZE_OBJECT \
+ templates/ERR_SHUTTING_DOWN \
+ templates/ERR_URN_RESOLVE \
+ templates/ERR_CONNECT_FAIL \
+ templates/ERR_SOCKET_FAILURE \
+ templates/ERR_FTP_NOT_FOUND \
+ templates/ERR_FTP_UNAVAILABLE \
+ templates/ERR_LIFETIME_EXP \
+ templates/ERR_READ_ERROR \
+ templates/ERR_ONLY_IF_CACHED_MISS \
+ templates/ERR_UNSUP_HTTPVERSION \
+ templates/ERR_READ_TIMEOUT \
+ templates/ERR_ICAP_FAILURE \
+ templates/ERR_DIR_LISTING \
+ templates/ERR_FTP_FORBIDDEN \
+ templates/ERR_ACCESS_DENIED \
+ templates/ERR_FORWARDING_DENIED \
+ templates/ERR_CANNOT_FORWARD \
+ templates/ERR_CACHE_MGR_ACCESS_DENIED \
+ templates/ERR_INVALID_REQ \
+ templates/ERR_CACHE_ACCESS_DENIED \
+ templates/ERR_FTP_PUT_ERROR \
+ templates/ERR_FTP_PUT_CREATED \
+ templates/ERR_TOO_BIG \
+ templates/ERR_UNSUP_REQ \
+ templates/ERR_FTP_FAILURE \
+ templates/ERR_DNS_FAIL \
+ templates/ERR_FTP_DISABLED \
+ templates/ERR_NO_RELAY \
+ templates/ERR_INVALID_URL \
+ templates/ERR_INVALID_RESP \
+ templates/ERR_WRITE_ERROR
+
+TRANSLATE_LANGUAGES = \
+ ar.lang \
+ az.lang \
+ bg.lang \
+ ca.lang \
+ cs.lang \
+ da.lang \
+ de.lang \
+ el.lang \
+ en.lang \
+ es.lang \
+ et.lang \
+ fa.lang \
+ fi.lang \
+ fr.lang \
+ he.lang \
+ hu.lang \
+ hy.lang \
+ id.lang \
+ it.lang \
+ ja.lang \
+ ko.lang \
+ lt.lang \
+ lv.lang \
+ ms.lang \
+ nl.lang \
+ pl.lang \
+ pt-br.lang \
+ pt.lang \
+ ro.lang \
+ ru.lang \
+ sk.lang \
+ sr.lang \
+ sv.lang \
+ th.lang \
+ tr.lang \
+ uk.lang \
+ uz.lang \
+ zh-cn.lang \
+ zh-tw.lang
+
+CLEANFILES = $(TRANSLATE_LANGUAGES) translate-warn
+EXTRA_DIST = \
+ $(ERROR_TEMPLATES) \
+ aliases alias-link.sh alias-upgrade errorpage.css TRANSLATORS COPYRIGHT
+
+all: all-am
+
+translate: translate-warn $(TRANSLATE_LANGUAGES)
+
+translate-warn:
+ case "$(PO2HTML)" in \
+ off) \
+ echo "WARNING: Translation is disabled."; \
+ ;; \
+ ""|no) \
+ echo "WARNING: Translation toolkit was not detected."; \
+ ;; \
+ esac; \
+ touch translate-warn
-## TODO: prevent this loop installing everything twice when srcdir == builddir
-install-data-local:
- $(mkinstalldirs) $(DESTDIR)$(DEFAULT_ERROR_DIR) ; \
- for l in $(TRANSLATIONDIR) ; do \
- echo "Located $$l for install..."; \
- if test -d $(srcdir)/$$l; then \
- $(mkinstalldirs) $(DESTDIR)$(DEFAULT_ERROR_DIR)/$$l && \
- for f in $(srcdir)/$$l/ERR_*; do \
- echo "$(INSTALL_DATA) $$f $(DESTDIR)$(DEFAULT_ERROR_DIR)/$$l"; \
- $(INSTALL_DATA) $$f $(DESTDIR)$(DEFAULT_ERROR_DIR)/$$l; \
- done; \
- fi ; \
- if test -d $(builddir)/$$l; then \
- $(mkinstalldirs) $(DESTDIR)$(DEFAULT_ERROR_DIR)/$$l && \
- for f in $(builddir)/$$l/ERR_*; do \
- echo "$(INSTALL_DATA) $$f $(DESTDIR)$(DEFAULT_ERROR_DIR)/$$l"; \
- $(INSTALL_DATA) $$f $(DESTDIR)$(DEFAULT_ERROR_DIR)/$$l; \
+$(TRANSLATE_LANGUAGES): $(ERROR_TEMPLATES)
+
+.po.lang:
+ if test "$(PO2HTML)" != "" && test "$(PO2HTML)" != "no" && test "$(PO2HTML)" != "off" && test -f $(top_srcdir)/errors/en.po; then \
+ lang=`basename $@ .lang`; \
+ mkdir -p $(top_builddir)/errors/$$lang; \
+ echo -n "Translate '$$lang' ..."; \
+ for f in $(ERROR_TEMPLATES); do \
+ page=`basename $$f`; \
+ $(PO2HTML) --progress=none -i $(top_srcdir)/errors/$$lang.po -t $(top_srcdir)/errors/$$f >$(top_builddir)/errors/$$lang/$$page || exit 1; \
done; \
- fi \
- done; \
- $(INSTALL_DATA) $(srcdir)/TRANSLATORS $(DESTDIR)$(DEFAULT_ERROR_DIR)/TRANSLATORS; \
- $(INSTALL_DATA) $(srcdir)/COPYRIGHT $(DESTDIR)$(DEFAULT_ERROR_DIR)/COPYRIGHT; \
- $(INSTALL_DATA) $(srcdir)/errorpage.css $(DESTDIR)$(DEFAULT_STYLESHEET).default; \
+ echo "done."; \
+ fi; \
+ touch $@
+
+install-exec-local: translate
if test -f $(DESTDIR)$(DEFAULT_STYLESHEET) ; then \
echo "$@ will not overwrite existing $(DESTDIR)$(DEFAULT_STYLESHEET)" ; \
else \
+ $(mkinstalldirs) $(DESTDIR)`dirname $(DEFAULT_STYLESHEET)` ; \
echo "$(INSTALL_DATA) $(srcdir)/errorpage.css $(DESTDIR)$(DEFAULT_STYLESHEET)"; \
$(INSTALL_DATA) $(srcdir)/errorpage.css $(DESTDIR)$(DEFAULT_STYLESHEET); \
- fi ; \
- $(SHELL) $(srcdir)/alias-link.sh "$(LN)" "$(RM)" "$(DESTDIR)$(DEFAULT_ERROR_DIR)" "$(srcdir)/aliases" || exit 1 ;
+ fi
+install-data-local: translate
+ $(mkinstalldirs) $(DESTDIR)$(DEFAULT_ERROR_DIR) ; \
+ for l in $(TRANSLATE_LANGUAGES) templates; do \
+ l=`basename $$l .lang`; \
+ echo "Located $$l for install..."; \
+ if test -d $(srcdir)/$$l || test -d $(builddir)/$$l; then \
+ $(mkinstalldirs) $(DESTDIR)$(DEFAULT_ERROR_DIR)/$$l; \
+ fi; \
+ for f in $(ERROR_TEMPLATES); do \
+ page=`basename $$f`; \
+ if test -f $(builddir)/$$l/$$page; then \
+ echo "$(INSTALL_DATA) $(builddir)/$$l/$$page $(DESTDIR)$(DEFAULT_ERROR_DIR)/$$l"; \
+ $(INSTALL_DATA) $(builddir)/$$l/$$page $(DESTDIR)$(DEFAULT_ERROR_DIR)/$$l; \
+ elif test -f $(srcdir)/$$l/$$page; then \
+ echo "$(INSTALL_DATA) $(srcdir)/$$l/$$page $(DESTDIR)$(DEFAULT_ERROR_DIR)/$$l"; \
+ $(INSTALL_DATA) $(srcdir)/$$l/$$page $(DESTDIR)$(DEFAULT_ERROR_DIR)/$$l; \
+ fi; \
+ done; \
+ done; \
+ $(INSTALL_DATA) $(srcdir)/TRANSLATORS $(DESTDIR)$(DEFAULT_ERROR_DIR)/TRANSLATORS; \
+ $(INSTALL_DATA) $(srcdir)/COPYRIGHT $(DESTDIR)$(DEFAULT_ERROR_DIR)/COPYRIGHT; \
+ $(INSTALL_DATA) $(srcdir)/errorpage.css $(DESTDIR)$(DEFAULT_STYLESHEET).default; \
+ $(SHELL) $(srcdir)/alias-link.sh "$(LN)" "$(RM)" "$(DESTDIR)$(DEFAULT_ERROR_DIR)" "$(srcdir)/aliases" || exit 1 ;
uninstall-local:
- for l in $(TRANSLATIONDIR) ; do \
+ for l in $(TRANSLATE_LANGUAGES) templates; do \
+ l=`basename $$l .lang`; \
echo "Located $$l for uninstall ..."; \
if test -d $(srcdir)/$$l; then \
for f in $(srcdir)/$$l/ERR_*; do \
@@ -99,42 +211,21 @@
fi
dist-hook: translate
- for lang in $(TRANSLATIONPO) templates; do \
+ for lang in $(TRANSLATE_LANGUAGES); do \
+ lang=`basename $$lang .lang`; \
if test -d $$lang ; then \
- test -d $(distdir)/$$lang \
- || mkdir $(distdir)/$$lang \
- || exit 1; \
+ mkdir -p $(distdir)/$$lang; \
cp -p $(top_builddir)/errors/$$lang/ERR_* $(distdir)/$$lang \
|| exit 1; \
fi; \
- done; \
- for f in aliases alias-link.sh alias-upgrade errorpage.css TRANSLATORS COPYRIGHT; do \
- cp -p $(srcdir)/$$f $(distdir)/`basename $$f`; \
- done;
+ done
-translate:
- @if ! test -f $(top_srcdir)/errors/en.po; then \
- echo "Translation is not currently possible."; \
- exit 0; \
- fi; \
+clean: clean-am
if test "$(PO2HTML)" != "" && test "$(PO2HTML)" != "no" && test "$(PO2HTML)" != "off" && test -f $(top_srcdir)/errors/en.po; then \
- for lang in $(TRANSLATIONPO); do \
- test -d $(top_builddir)/errors/$$lang && $(RM) -r $(top_builddir)/errors/$$lang; \
- mkdir $(top_builddir)/errors/$$lang || exit 1; \
- echo -n "Translate '$$lang' ..."; \
- for f in `ls -1 $(top_srcdir)/errors/templates`; do \
- $(PO2HTML) --progress=none -i $(top_srcdir)/errors/$$lang.po -t $(top_srcdir)/errors/templates/$$f >$(top_builddir)/errors/$$lang/$$f || exit 1; \
- done; \
- echo "done."; \
+ for lang in $(TRANSLATE_LANGUAGES); do \
+ lang=`basename $$lang .lang`; \
+ rm -rf $$lang; \
done; \
- else \
- if test "$(PO2HTML)" = "off" ; then \
- echo "WARNING: Translation is disabled."; \
- else \
- echo "WARNING: Translation toolkit was not detected."; \
- fi; \
- echo "A drop-in bundle of pre-translated files is available from"; \
- echo "http://www.squid-cache.org/Versions/langpack/"; \
fi
all: translate
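
The rewritten translate machinery in this Makefile.am drives one <lang>.lang stamp file per language: the .po.lang suffix rule feeds that language's .po catalogue and each English template through $(PO2HTML), then touches the stamp so make can compare it against the templates on the next run. Below is a rough shell equivalent of what the rule does for a single language; it is a sketch only, assuming $(PO2HTML) points at the Translate Toolkit's po2html and using "it" purely as an example language code with simplified paths.

lang=it
mkdir -p errors/$lang
for f in errors/templates/ERR_*; do
    page=`basename $f`
    # merge the language catalogue with the English template, as the rule body does
    po2html --progress=none -i errors/$lang.po -t $f > errors/$lang/$page || exit 1
done
touch errors/$lang.lang   # stamp file that make checks against the templates

If en.po is missing, or PO2HTML is empty, "no" or "off", the whole if-block is skipped and only the stamp is touched, so builds without the translation toolkit still complete.
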
diff -u -r -N squid-3.1.0.13/errors/Makefile.in squid-3.1.0.14/errors/Makefile.in
--- squid-3.1.0.13/errors/Makefile.in 2009-08-05 01:32:30.000000000 +1200
+++ squid-3.1.0.14/errors/Makefile.in 2009-09-27 15:28:48.000000000 +1200
@@ -50,6 +50,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
@@ -224,11 +225,93 @@
errordir = $(datadir)/errors
DEFAULT_ERROR_DIR = $(errordir)
DEFAULT_STYLESHEET = $(sysconfdir)/errorpage.css
-TRANSLATIONPO = `ls -1 $(top_srcdir)/errors/*.po | grep -o -E "[a-z\-]+\.po" | sed s/.po//`
-TRANSLATIONDIR = `ls -1 $(srcdir) $(builddir) | sed -e 's%$(srcdir)/%%' -e 's%$(builddir)/%%' -e 's%.po%%' `
+ERROR_TEMPLATES = \
+ templates/ERR_FTP_PUT_MODIFIED \
+ templates/ERR_ESI \
+ templates/ERR_SECURE_CONNECT_FAIL \
+ templates/ERR_ZERO_SIZE_OBJECT \
+ templates/ERR_SHUTTING_DOWN \
+ templates/ERR_URN_RESOLVE \
+ templates/ERR_CONNECT_FAIL \
+ templates/ERR_SOCKET_FAILURE \
+ templates/ERR_FTP_NOT_FOUND \
+ templates/ERR_FTP_UNAVAILABLE \
+ templates/ERR_LIFETIME_EXP \
+ templates/ERR_READ_ERROR \
+ templates/ERR_ONLY_IF_CACHED_MISS \
+ templates/ERR_UNSUP_HTTPVERSION \
+ templates/ERR_READ_TIMEOUT \
+ templates/ERR_ICAP_FAILURE \
+ templates/ERR_DIR_LISTING \
+ templates/ERR_FTP_FORBIDDEN \
+ templates/ERR_ACCESS_DENIED \
+ templates/ERR_FORWARDING_DENIED \
+ templates/ERR_CANNOT_FORWARD \
+ templates/ERR_CACHE_MGR_ACCESS_DENIED \
+ templates/ERR_INVALID_REQ \
+ templates/ERR_CACHE_ACCESS_DENIED \
+ templates/ERR_FTP_PUT_ERROR \
+ templates/ERR_FTP_PUT_CREATED \
+ templates/ERR_TOO_BIG \
+ templates/ERR_UNSUP_REQ \
+ templates/ERR_FTP_FAILURE \
+ templates/ERR_DNS_FAIL \
+ templates/ERR_FTP_DISABLED \
+ templates/ERR_NO_RELAY \
+ templates/ERR_INVALID_URL \
+ templates/ERR_INVALID_RESP \
+ templates/ERR_WRITE_ERROR
+
+TRANSLATE_LANGUAGES = \
+ ar.lang \
+ az.lang \
+ bg.lang \
+ ca.lang \
+ cs.lang \
+ da.lang \
+ de.lang \
+ el.lang \
+ en.lang \
+ es.lang \
+ et.lang \
+ fa.lang \
+ fi.lang \
+ fr.lang \
+ he.lang \
+ hu.lang \
+ hy.lang \
+ id.lang \
+ it.lang \
+ ja.lang \
+ ko.lang \
+ lt.lang \
+ lv.lang \
+ ms.lang \
+ nl.lang \
+ pl.lang \
+ pt-br.lang \
+ pt.lang \
+ ro.lang \
+ ru.lang \
+ sk.lang \
+ sr.lang \
+ sv.lang \
+ th.lang \
+ tr.lang \
+ uk.lang \
+ uz.lang \
+ zh-cn.lang \
+ zh-tw.lang
+
+CLEANFILES = $(TRANSLATE_LANGUAGES) translate-warn
+EXTRA_DIST = \
+ $(ERROR_TEMPLATES) \
+ aliases alias-link.sh alias-upgrade errorpage.css TRANSLATORS COPYRIGHT
+
all: all-am
.SUFFIXES:
+.SUFFIXES: .lang .po
$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
@@ -321,6 +404,7 @@
mostlyclean-generic:
clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
@@ -328,8 +412,6 @@
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
-clean: clean-am
-
clean-am: clean-generic clean-libtool mostlyclean-am
distclean: distclean-am
@@ -350,7 +432,7 @@
install-dvi: install-dvi-am
-install-exec-am:
+install-exec-am: install-exec-local
install-html: install-html-am
@@ -389,46 +471,81 @@
distdir dvi dvi-am html html-am info info-am install \
install-am install-data install-data-am install-data-local \
install-dvi install-dvi-am install-exec install-exec-am \
- install-html install-html-am install-info install-info-am \
- install-man install-pdf install-pdf-am install-ps \
- install-ps-am install-strip installcheck installcheck-am \
- installdirs maintainer-clean maintainer-clean-generic \
- mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \
- ps ps-am uninstall uninstall-am uninstall-local
+ install-exec-local install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic \
+ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am \
+ uninstall-local
-install-data-local:
- $(mkinstalldirs) $(DESTDIR)$(DEFAULT_ERROR_DIR) ; \
- for l in $(TRANSLATIONDIR) ; do \
- echo "Located $$l for install..."; \
- if test -d $(srcdir)/$$l; then \
- $(mkinstalldirs) $(DESTDIR)$(DEFAULT_ERROR_DIR)/$$l && \
- for f in $(srcdir)/$$l/ERR_*; do \
- echo "$(INSTALL_DATA) $$f $(DESTDIR)$(DEFAULT_ERROR_DIR)/$$l"; \
- $(INSTALL_DATA) $$f $(DESTDIR)$(DEFAULT_ERROR_DIR)/$$l; \
- done; \
- fi ; \
- if test -d $(builddir)/$$l; then \
- $(mkinstalldirs) $(DESTDIR)$(DEFAULT_ERROR_DIR)/$$l && \
- for f in $(builddir)/$$l/ERR_*; do \
- echo "$(INSTALL_DATA) $$f $(DESTDIR)$(DEFAULT_ERROR_DIR)/$$l"; \
- $(INSTALL_DATA) $$f $(DESTDIR)$(DEFAULT_ERROR_DIR)/$$l; \
+all: all-am
+
+translate: translate-warn $(TRANSLATE_LANGUAGES)
+
+translate-warn:
+ case "$(PO2HTML)" in \
+ off) \
+ echo "WARNING: Translation is disabled."; \
+ ;; \
+ ""|no) \
+ echo "WARNING: Translation toolkit was not detected."; \
+ ;; \
+ esac; \
+ touch translate-warn
+
+$(TRANSLATE_LANGUAGES): $(ERROR_TEMPLATES)
+
+.po.lang:
+ if test "$(PO2HTML)" != "" && test "$(PO2HTML)" != "no" && test "$(PO2HTML)" != "off" && test -f $(top_srcdir)/errors/en.po; then \
+ lang=`basename $@ .lang`; \
+ mkdir -p $(top_builddir)/errors/$$lang; \
+ echo -n "Translate '$$lang' ..."; \
+ for f in $(ERROR_TEMPLATES); do \
+ page=`basename $$f`; \
+ $(PO2HTML) --progress=none -i $(top_srcdir)/errors/$$lang.po -t $(top_srcdir)/errors/$$f >$(top_builddir)/errors/$$lang/$$page || exit 1; \
done; \
- fi \
- done; \
- $(INSTALL_DATA) $(srcdir)/TRANSLATORS $(DESTDIR)$(DEFAULT_ERROR_DIR)/TRANSLATORS; \
- $(INSTALL_DATA) $(srcdir)/COPYRIGHT $(DESTDIR)$(DEFAULT_ERROR_DIR)/COPYRIGHT; \
- $(INSTALL_DATA) $(srcdir)/errorpage.css $(DESTDIR)$(DEFAULT_STYLESHEET).default; \
+ echo "done."; \
+ fi; \
+ touch $@
+
+install-exec-local: translate
if test -f $(DESTDIR)$(DEFAULT_STYLESHEET) ; then \
echo "$@ will not overwrite existing $(DESTDIR)$(DEFAULT_STYLESHEET)" ; \
else \
+ $(mkinstalldirs) $(DESTDIR)`dirname $(DEFAULT_STYLESHEET)` ; \
echo "$(INSTALL_DATA) $(srcdir)/errorpage.css $(DESTDIR)$(DEFAULT_STYLESHEET)"; \
$(INSTALL_DATA) $(srcdir)/errorpage.css $(DESTDIR)$(DEFAULT_STYLESHEET); \
- fi ; \
+ fi
+
+install-data-local: translate
+ $(mkinstalldirs) $(DESTDIR)$(DEFAULT_ERROR_DIR) ; \
+ for l in $(TRANSLATE_LANGUAGES) templates; do \
+ l=`basename $$l .lang`; \
+ echo "Located $$l for install..."; \
+ if test -d $(srcdir)/$$l || test -d $(builddir)/$$l; then \
+ $(mkinstalldirs) $(DESTDIR)$(DEFAULT_ERROR_DIR)/$$l; \
+ fi; \
+ for f in $(ERROR_TEMPLATES); do \
+ page=`basename $$f`; \
+ if test -f $(builddir)/$$l/$$page; then \
+ echo "$(INSTALL_DATA) $(builddir)/$$l/$$page $(DESTDIR)$(DEFAULT_ERROR_DIR)/$$l"; \
+ $(INSTALL_DATA) $(builddir)/$$l/$$page $(DESTDIR)$(DEFAULT_ERROR_DIR)/$$l; \
+ elif test -f $(srcdir)/$$l/$$page; then \
+ echo "$(INSTALL_DATA) $(srcdir)/$$l/$$page $(DESTDIR)$(DEFAULT_ERROR_DIR)/$$l"; \
+ $(INSTALL_DATA) $(srcdir)/$$l/$$page $(DESTDIR)$(DEFAULT_ERROR_DIR)/$$l; \
+ fi; \
+ done; \
+ done; \
+ $(INSTALL_DATA) $(srcdir)/TRANSLATORS $(DESTDIR)$(DEFAULT_ERROR_DIR)/TRANSLATORS; \
+ $(INSTALL_DATA) $(srcdir)/COPYRIGHT $(DESTDIR)$(DEFAULT_ERROR_DIR)/COPYRIGHT; \
+ $(INSTALL_DATA) $(srcdir)/errorpage.css $(DESTDIR)$(DEFAULT_STYLESHEET).default; \
$(SHELL) $(srcdir)/alias-link.sh "$(LN)" "$(RM)" "$(DESTDIR)$(DEFAULT_ERROR_DIR)" "$(srcdir)/aliases" || exit 1 ;
uninstall-local:
- for l in $(TRANSLATIONDIR) ; do \
+ for l in $(TRANSLATE_LANGUAGES) templates; do \
+ l=`basename $$l .lang`; \
echo "Located $$l for uninstall ..."; \
if test -d $(srcdir)/$$l; then \
for f in $(srcdir)/$$l/ERR_*; do \
@@ -479,42 +596,21 @@
fi
dist-hook: translate
- for lang in $(TRANSLATIONPO) templates; do \
+ for lang in $(TRANSLATE_LANGUAGES); do \
+ lang=`basename $$lang .lang`; \
if test -d $$lang ; then \
- test -d $(distdir)/$$lang \
- || mkdir $(distdir)/$$lang \
- || exit 1; \
+ mkdir -p $(distdir)/$$lang; \
cp -p $(top_builddir)/errors/$$lang/ERR_* $(distdir)/$$lang \
|| exit 1; \
fi; \
- done; \
- for f in aliases alias-link.sh alias-upgrade errorpage.css TRANSLATORS COPYRIGHT; do \
- cp -p $(srcdir)/$$f $(distdir)/`basename $$f`; \
- done;
+ done
-translate:
- @if ! test -f $(top_srcdir)/errors/en.po; then \
- echo "Translation is not currently possible."; \
- exit 0; \
- fi; \
+clean: clean-am
if test "$(PO2HTML)" != "" && test "$(PO2HTML)" != "no" && test "$(PO2HTML)" != "off" && test -f $(top_srcdir)/errors/en.po; then \
- for lang in $(TRANSLATIONPO); do \
- test -d $(top_builddir)/errors/$$lang && $(RM) -r $(top_builddir)/errors/$$lang; \
- mkdir $(top_builddir)/errors/$$lang || exit 1; \
- echo -n "Translate '$$lang' ..."; \
- for f in `ls -1 $(top_srcdir)/errors/templates`; do \
- $(PO2HTML) --progress=none -i $(top_srcdir)/errors/$$lang.po -t $(top_srcdir)/errors/templates/$$f >$(top_builddir)/errors/$$lang/$$f || exit 1; \
- done; \
- echo "done."; \
+ for lang in $(TRANSLATE_LANGUAGES); do \
+ lang=`basename $$lang .lang`; \
+ rm -rf $$lang; \
done; \
- else \
- if test "$(PO2HTML)" = "off" ; then \
- echo "WARNING: Translation is disabled."; \
- else \
- echo "WARNING: Translation toolkit was not detected."; \
- fi; \
- echo "A drop-in bundle of pre-translated files is available from"; \
- echo "http://www.squid-cache.org/Versions/langpack/"; \
fi
all: translate
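
The regenerated Makefile.in carries the same logic, so the install behaviour can be read off either copy: install-data-local walks $(TRANSLATE_LANGUAGES) plus the raw templates directory, derives each language directory name via basename with the .lang suffix stripped, and for every template name installs the freshly translated page from the build tree if one exists, otherwise the pre-translated copy shipped in the source tree. A minimal sketch of that per-page decision, with illustrative shell variables standing in for the make macros:

l=it                                   # from `basename it.lang .lang`
page=ERR_ACCESS_DENIED
dest="$DESTDIR$DEFAULT_ERROR_DIR/$l"
if test -f "$builddir/$l/$page"; then        # generated by the translate target
    $INSTALL_DATA "$builddir/$l/$page" "$dest"
elif test -f "$srcdir/$l/$page"; then        # pre-translated copy in the tarball
    $INSTALL_DATA "$srcdir/$l/$page" "$dest"
fi

Pages found in neither place are silently skipped, which is why a build without the translation toolkit still installs whatever pre-translated pages the distribution provides.
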
diff -u -r -N squid-3.1.0.13/errors/ms/ERR_DIR_LISTING squid-3.1.0.14/errors/ms/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/ms/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/ms/ERR_DIR_LISTING 2009-09-27 15:32:31.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/ms/ERR_FTP_LISTING squid-3.1.0.14/errors/ms/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/ms/ERR_FTP_LISTING 2009-08-05 01:35:46.000000000 +1200
+++ squid-3.1.0.14/errors/ms/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- Direktori FTP: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/nl/ERR_DIR_LISTING squid-3.1.0.14/errors/nl/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/nl/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/nl/ERR_DIR_LISTING 2009-09-27 15:32:35.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/nl/ERR_FTP_LISTING squid-3.1.0.14/errors/nl/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/nl/ERR_FTP_LISTING 2009-08-05 01:35:50.000000000 +1200
+++ squid-3.1.0.14/errors/nl/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Map: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/pl/ERR_DIR_LISTING squid-3.1.0.14/errors/pl/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/pl/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/pl/ERR_DIR_LISTING 2009-09-27 15:32:40.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/pl/ERR_FTP_LISTING squid-3.1.0.14/errors/pl/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/pl/ERR_FTP_LISTING 2009-08-05 01:35:55.000000000 +1200
+++ squid-3.1.0.14/errors/pl/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/pt/ERR_DIR_LISTING squid-3.1.0.14/errors/pt/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/pt/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/pt/ERR_DIR_LISTING 2009-09-27 15:32:49.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/pt/ERR_FTP_LISTING squid-3.1.0.14/errors/pt/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/pt/ERR_FTP_LISTING 2009-08-05 01:36:04.000000000 +1200
+++ squid-3.1.0.14/errors/pt/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/pt-br/ERR_DIR_LISTING squid-3.1.0.14/errors/pt-br/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/pt-br/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/pt-br/ERR_DIR_LISTING 2009-09-27 15:32:45.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
Conteúdo do diretório:
%z
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/pt-br/ERR_FTP_LISTING squid-3.1.0.14/errors/pt-br/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/pt-br/ERR_FTP_LISTING 2009-08-05 01:35:59.000000000 +1200
+++ squid-3.1.0.14/errors/pt-br/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- Diretório FTP: %U
Conteúdo do diretório:
%z
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/ro/ERR_DIR_LISTING squid-3.1.0.14/errors/ro/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/ro/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/ro/ERR_DIR_LISTING 2009-09-27 15:32:54.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
Conţinutul directorului:
%z
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/ro/ERR_FTP_LISTING squid-3.1.0.14/errors/ro/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/ro/ERR_FTP_LISTING 2009-08-05 01:36:09.000000000 +1200
+++ squid-3.1.0.14/errors/ro/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- Directorul FTP: %U
Conţinutul directorului:
%z
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/ru/ERR_CACHE_ACCESS_DENIED squid-3.1.0.14/errors/ru/ERR_CACHE_ACCESS_DENIED
--- squid-3.1.0.13/errors/ru/ERR_CACHE_ACCESS_DENIED 2009-08-05 01:36:13.000000000 +1200
+++ squid-3.1.0.14/errors/ru/ERR_CACHE_ACCESS_DENIED 2009-09-27 15:33:00.000000000 +1200
@@ -1 +1 @@
- ОШИБКА: Доступ к кэшу запрещён
ОШИБКА
Cache Доступ запрещён.
При получении URL %U произошла следующая ошибка
Доступ к кэшу запрещён.
Извините, Вы не можете запросить %U из этого кэша до тех пор, пока не пройдёте аутентификацию.
Если у Вас возникли проблемы с аутентификацией, пожалуйста, свяжитесь с администратором кэша или смените Ваш пароль по умолчанию.
\ No newline at end of file
+ ОШИБКА: Cache Access Denied
ERROR
Cache Доступ запрещён.
При получении URL %U произошла следующая ошибка
Доступ к кэшу запрещён.
Извините, Вы не можете запросить %U из этого кэша до тех пор, пока не пройдёте аутентификацию.
Если у Вас возникли проблемы с аутентификацией, пожалуйста, свяжитесь с администратором кэша или смените Ваш пароль по умолчанию.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/ru/ERR_CACHE_MGR_ACCESS_DENIED squid-3.1.0.14/errors/ru/ERR_CACHE_MGR_ACCESS_DENIED
--- squid-3.1.0.13/errors/ru/ERR_CACHE_MGR_ACCESS_DENIED 2009-08-05 01:36:13.000000000 +1200
+++ squid-3.1.0.14/errors/ru/ERR_CACHE_MGR_ACCESS_DENIED 2009-09-27 15:33:00.000000000 +1200
@@ -1 +1 @@
- ОШИБКА: Доступ к управлению кэшем запрещён.
ОШИБКА
Cache Manager Доступ запрещён.
При получении URL %U произошла следующая ошибка
Доступ к управлению кэшем запрещён.
Извините, Вы не можете запросить %U из системы управления кэшем до тех пор, пока не пройдете аутентификацию.
Если у Вас возникли проблемы с аутентификацией, пожалуйста, свяжитесь с администратором кэша, если же Вы - администратор, прочитайте документацию по интерфейсу управления кэшем и проверьте файл протокола на предмет более детальных сообщений об ошибках.
\ No newline at end of file
+ ОШИБКА: Cache Manager Access Denied
ERROR
Cache Manager Доступ запрещён.
При получении URL %U произошла следующая ошибка
Доступ к управлению кэшем запрещён.
Извините, Вы не можете запросить %U из системы управления кэшем до тех пор, пока не пройдете аутентификацию.
Если у Вас возникли проблемы с аутентификацией, пожалуйста, свяжитесь с администратором кэша, если же Вы - администратор, прочитайте документацию по интерфейсу управления кэшем и проверьте файл протокола на предмет более детальных сообщений об ошибках.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/ru/ERR_DIR_LISTING squid-3.1.0.14/errors/ru/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/ru/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/ru/ERR_DIR_LISTING 2009-09-27 15:32:59.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
Содержимое каталога:
%z
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/ru/ERR_FTP_LISTING squid-3.1.0.14/errors/ru/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/ru/ERR_FTP_LISTING 2009-08-05 01:36:14.000000000 +1200
+++ squid-3.1.0.14/errors/ru/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- Каталог FTP: %U
Содержимое каталога:
%z
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/ru/ERR_FTP_PUT_ERROR squid-3.1.0.14/errors/ru/ERR_FTP_PUT_ERROR
--- squid-3.1.0.13/errors/ru/ERR_FTP_PUT_ERROR 2009-08-05 01:36:14.000000000 +1200
+++ squid-3.1.0.14/errors/ru/ERR_FTP_PUT_ERROR 2009-09-27 15:33:00.000000000 +1200
@@ -1 +1 @@
- ОШИБКА: Отправка по FTP не удалась
ОШИБКА
Команда FTP PUT (отправка) завершилась аварийно
При попытке запроса PUT для следующего URL: %U
Squid послал следующую команду FTP:
%f
Сервер ответил:
%F
Это означает, что сервер FTP может не иметь прав или свободного места для хранения файла. Проверьте путь, права, свободное место и попробуйте снова.
Администратор Вашего кэша: %w.
\ No newline at end of file
+ ОШИБКА: FTP upload failed
ERROR
Команда FTP PUT (отправка) завершилась аварийно
При попытке запроса PUT для следующего URL: %U
Squid послал следующую команду FTP:
%f
Сервер ответил:
%F
Это означает, что сервер FTP может не иметь прав или свободного места для хранения файла. Проверьте путь, права, свободное место и попробуйте снова.
Администратор Вашего кэша: %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/sk/ERR_DIR_LISTING squid-3.1.0.14/errors/sk/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/sk/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/sk/ERR_DIR_LISTING 2009-09-27 15:33:04.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/sk/ERR_FTP_LISTING squid-3.1.0.14/errors/sk/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/sk/ERR_FTP_LISTING 2009-08-05 01:36:19.000000000 +1200
+++ squid-3.1.0.14/errors/sk/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/sr/ERR_DIR_LISTING squid-3.1.0.14/errors/sr/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/sr/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/sr/ERR_DIR_LISTING 2009-09-27 15:33:08.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/sr/ERR_FTP_LISTING squid-3.1.0.14/errors/sr/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/sr/ERR_FTP_LISTING 2009-08-05 01:36:23.000000000 +1200
+++ squid-3.1.0.14/errors/sr/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/sv/ERR_DIR_LISTING squid-3.1.0.14/errors/sv/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/sv/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/sv/ERR_DIR_LISTING 2009-09-27 15:33:13.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/sv/ERR_FTP_LISTING squid-3.1.0.14/errors/sv/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/sv/ERR_FTP_LISTING 2009-08-05 01:36:28.000000000 +1200
+++ squid-3.1.0.14/errors/sv/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/templates/ERR_DIR_LISTING squid-3.1.0.14/errors/templates/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/templates/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/templates/ERR_DIR_LISTING 2009-09-27 15:28:25.000000000 +1200
@@ -0,0 +1,41 @@
+
+
+
+Directory: %U
+
+
+
+
+
+
+
Directory Content:
+
+
+%z
+
+
+
+
+
+
+
+
diff -u -r -N squid-3.1.0.13/errors/templates/ERR_FTP_LISTING squid-3.1.0.14/errors/templates/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/templates/ERR_FTP_LISTING 2009-08-05 01:32:08.000000000 +1200
+++ squid-3.1.0.14/errors/templates/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1,41 +0,0 @@
-
-
-
-FTP Directory: %U
-
-
-
-
-
-
-
Directory Content:
-
-
-%z
-
-
-
-
-
-
-
-
diff -u -r -N squid-3.1.0.13/errors/th/ERR_DIR_LISTING squid-3.1.0.14/errors/th/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/th/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/th/ERR_DIR_LISTING 2009-09-27 15:33:18.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/th/ERR_FTP_LISTING squid-3.1.0.14/errors/th/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/th/ERR_FTP_LISTING 2009-08-05 01:36:33.000000000 +1200
+++ squid-3.1.0.14/errors/th/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/tr/ERR_DIR_LISTING squid-3.1.0.14/errors/tr/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/tr/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/tr/ERR_DIR_LISTING 2009-09-27 15:33:23.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/tr/ERR_FTP_LISTING squid-3.1.0.14/errors/tr/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/tr/ERR_FTP_LISTING 2009-08-05 01:36:37.000000000 +1200
+++ squid-3.1.0.14/errors/tr/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/TRANSLATORS squid-3.1.0.14/errors/TRANSLATORS
--- squid-3.1.0.13/errors/TRANSLATORS 2009-08-05 01:32:08.000000000 +1200
+++ squid-3.1.0.14/errors/TRANSLATORS 2009-09-27 15:28:24.000000000 +1200
@@ -37,12 +37,16 @@
Iker Sagasti Markina
juancarlospaco
karlag
+ Lyongb
MaXer
Maxim S.
Mohamad Faizul bin Zulkifli
+ Mr.Lodar
Ricardo Ichizo
smsoft
Tobias
+ Wang DaQing
+ Yinghua Wang
zhuravlik
Thanks also to all the original translators for their great
diff -u -r -N squid-3.1.0.13/errors/uk/ERR_DIR_LISTING squid-3.1.0.14/errors/uk/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/uk/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/uk/ERR_DIR_LISTING 2009-09-27 15:33:27.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/uk/ERR_FTP_LISTING squid-3.1.0.14/errors/uk/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/uk/ERR_FTP_LISTING 2009-08-05 01:36:42.000000000 +1200
+++ squid-3.1.0.14/errors/uk/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/uz/ERR_DIR_LISTING squid-3.1.0.14/errors/uz/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/uz/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/uz/ERR_DIR_LISTING 2009-09-27 15:33:32.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
Директория таркиби:
%z
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/uz/ERR_FTP_LISTING squid-3.1.0.14/errors/uz/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/uz/ERR_FTP_LISTING 2009-08-05 01:36:46.000000000 +1200
+++ squid-3.1.0.14/errors/uz/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Директория: %U FTP Директория: %U/
Директория таркиби:
%z
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_ACCESS_DENIED squid-3.1.0.14/errors/zh-cn/ERR_ACCESS_DENIED
--- squid-3.1.0.13/errors/zh-cn/ERR_ACCESS_DENIED 2009-08-05 01:36:50.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_ACCESS_DENIED 2009-09-27 15:33:37.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网址（URL）无法获取
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
访问被拒绝。
当前的存取控制设定禁止您的请求被接受,如果您觉得这是错误的,请与您网路服务的提供者联系。
缓存服务器的管理员 %w.
\ No newline at end of file
+ 错误: 您所请求的网址（URL）无法获取
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
访问被拒绝。
当前的存取控制设定禁止您的请求被接受,如果您觉得这是错误的,请与您网路服务的提供者联系。
缓存服务器的管理员 %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_CACHE_ACCESS_DENIED squid-3.1.0.14/errors/zh-cn/ERR_CACHE_ACCESS_DENIED
--- squid-3.1.0.13/errors/zh-cn/ERR_CACHE_ACCESS_DENIED 2009-08-05 01:36:50.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_CACHE_ACCESS_DENIED 2009-09-27 15:33:38.000000000 +1200
@@ -1 +1 @@
- 错误: Cache Access Denied
ERROR
Cache 访问被拒绝。
The following error was encountered while trying to retrieve the URL: %U
缓存访问被拒绝。
Sorry, you are not currently allowed to request %U from this cache until you have authenticated yourself.
如果您在身份验证上 发生困难,请与 管理者 联系。 或是更改您的密码。
\ No newline at end of file
+ 错误: Cache Access Denied
ERROR
Cache 访问被拒绝。
The following error was encountered while trying to retrieve the URL: %U
缓存访问被拒绝。
抱歉，您不被允许通过本网络缓存服务器访问下列位置 %U 除非您通过了我们的身份验证。
如果您在身份验证上 发生困难,请与 管理者 联系。 或是更改您的密码。
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_CACHE_MGR_ACCESS_DENIED squid-3.1.0.14/errors/zh-cn/ERR_CACHE_MGR_ACCESS_DENIED
--- squid-3.1.0.13/errors/zh-cn/ERR_CACHE_MGR_ACCESS_DENIED 2009-08-05 01:36:50.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_CACHE_MGR_ACCESS_DENIED 2009-09-27 15:33:37.000000000 +1200
@@ -1 +1 @@
- 错误: Cache Manager Access Denied
ERROR
Cache Manager 访问被拒绝。
The following error was encountered while trying to retrieve the URL: %U
缓存管理访问被拒绝。
Sorry, you are not currently allowed to request %U from this cache manager until you have authenticated yourself.
如果您是在身份验证上发生问题,请先确定您有权对缓存使用管理器。或是与管理者联系。如果您就是管理者,请详细阅读 Squid 所附文件中与 cache manager 有关部份或检查 cache log 以便得到更详尽的细节。
\ No newline at end of file
+ 错误: Cache Manager Access Denied
ERROR
Cache Manager 访问被拒绝。
The following error was encountered while trying to retrieve the URL: %U
缓存管理访问被拒绝。
抱歉，您不被允许通过本缓存管理器访问以下位置 %U 除非您通过我们的身份验证。
如果您是在身份验证上发生问题,请先确定您有权对缓存使用管理器。或是与管理者联系。如果您就是管理者,请详细阅读 Squid 所附文件中与 cache manager 有关部份或检查 cache log 以便得到更详尽的细节。
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_CANNOT_FORWARD squid-3.1.0.14/errors/zh-cn/ERR_CANNOT_FORWARD
--- squid-3.1.0.13/errors/zh-cn/ERR_CANNOT_FORWARD 2009-08-05 01:36:50.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_CANNOT_FORWARD 2009-09-27 15:33:37.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网址（URL）无法获取
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
目前无法将您的请求进行转送操作
This request could not be forwarded to the origin server or to any parent caches. The most likely cause for this error is that the cache administrator does not allow this cache to make direct connections to origin servers, and all configured parent caches are currently unreachable.
缓存服务器的管理员 %w.
\ No newline at end of file
+ 错误: 您所请求的网址（URL）无法获取
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
目前无法将您的请求进行转送操作
您的请求无法被转送到原始网络服务器或其他的上层缓存服务器，发生这个问题最可能的原因是缓存服务器管理员不允许本服务器与原始网络服务器直接连结,而 所有本服务器指定的上层缓存服务器都暂时无法连结。
缓存服务器的管理员 %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_CONNECT_FAIL squid-3.1.0.14/errors/zh-cn/ERR_CONNECT_FAIL
--- squid-3.1.0.13/errors/zh-cn/ERR_CONNECT_FAIL 2009-08-05 01:36:50.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_CONNECT_FAIL 2009-09-27 15:33:35.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
连接到 %I 失败。
系统返回以下内容:%E
您è¦è¿žç»“的网络æœåŠ¡å™¨æˆ–网络å¯èƒ½å‘生故障。请ç¨åŽå†è¯•ã€‚
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
+ 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
连接到 %I 失败。
系统返回以下内容:%E
您è¦è¿žç»“的网络æœåŠ¡å™¨æˆ–网络å¯èƒ½å‘生故障。请ç¨åŽå†è¯•ã€‚
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_DIR_LISTING squid-3.1.0.14/errors/zh-cn/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/zh-cn/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_DIR_LISTING 2009-09-27 15:33:37.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_DNS_FAIL squid-3.1.0.14/errors/zh-cn/ERR_DNS_FAIL
--- squid-3.1.0.13/errors/zh-cn/ERR_DNS_FAIL 2009-08-05 01:36:50.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_DNS_FAIL 2009-09-27 15:33:38.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网址（URL）无法获取
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
Unable to determine IP address from host name %H
The DNS server returned:
%z
This means that the cache was not able to resolve the hostname presented in the URL. Check if the address is correct.
缓存服务器的管理员 %w.
\ No newline at end of file
+ 错误: 您所请求的网址（URL）无法获取
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
不能由主机名 %H
确定 IP 地址。
DNS 服务器返回了:
%z
这表示 缓存服务器无法解析您输入网址（URL)中的主机名称, 请检查该名称是否正确。
缓存服务器的管理员 %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_ESI squid-3.1.0.14/errors/zh-cn/ERR_ESI
--- squid-3.1.0.13/errors/zh-cn/ERR_ESI 2009-08-05 01:36:50.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_ESI 2009-09-27 15:33:35.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网址（URL）无法获取
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
ESI Processing failed.
The ESI processor returned:
%Z
This means that the surrogate was not able to process the ESI template. Please report this error to the webmaster.
Your webmaster is %w.
\ No newline at end of file
+ 错误: 您所请求的网址（URL）无法获取
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
ESI 处理失败。
ESI 处理器返回了:
%Z
This means that the surrogate was not able to process the ESI template. Please report this error to the webmaster.
您的网站管理员是 %w。
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_FORWARDING_DENIED squid-3.1.0.14/errors/zh-cn/ERR_FORWARDING_DENIED
--- squid-3.1.0.13/errors/zh-cn/ERR_FORWARDING_DENIED 2009-08-05 01:36:51.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_FORWARDING_DENIED 2009-09-27 15:33:37.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
æ‹’ç»è½¬é€
本缓å˜æœåŠ¡å™¨æ— 法转é€æ‚¨çš„请求,也许您是通过一个并未被本缓å˜æœåŠ¡å™¨è®¾å®šä¸ºå…许å˜å–的缓å˜æœåŠ¡å™¨ %i æ¥è®¿é—®çš„。
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
+ 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
æ‹’ç»è½¬é€
本缓å˜æœåŠ¡å™¨æ— 法转é€æ‚¨çš„请求,也许您是通过一个并未被本缓å˜æœåŠ¡å™¨è®¾å®šä¸ºå…许å˜å–的缓å˜æœåŠ¡å™¨ %i æ¥è®¿é—®çš„。
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_FTP_DISABLED squid-3.1.0.14/errors/zh-cn/ERR_FTP_DISABLED
--- squid-3.1.0.13/errors/zh-cn/ERR_FTP_DISABLED 2009-08-05 01:36:51.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_FTP_DISABLED 2009-09-27 15:33:38.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
FTP 被ç¦ç”¨
本缓å˜æœåŠ¡å™¨æœªå¼€æ”¾æ–‡ä»¶ä¼ 输æœåŠ¡ã€‚
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
+ 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
FTP 被ç¦ç”¨
本缓å˜æœåŠ¡å™¨æœªå¼€æ”¾æ–‡ä»¶ä¼ 输æœåŠ¡ã€‚
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_FTP_FAILURE squid-3.1.0.14/errors/zh-cn/ERR_FTP_FAILURE
--- squid-3.1.0.13/errors/zh-cn/ERR_FTP_FAILURE 2009-08-05 01:36:51.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_FTP_FAILURE 2009-09-27 15:33:38.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
å°è¯•èŽ·å–该URL:%Uæ—¶å‘生一个FTPå议错误
本缓å˜æœåŠ¡å™¨å‘出以下 FTP 命令:
%f
The server responded with:
%F
%g
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
+ 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
å°è¯•èŽ·å–该URL:%Uæ—¶å‘生一个FTPå议错误
本缓å˜æœåŠ¡å™¨å‘出以下 FTP 命令:
%f
The server responded with:
%F
%g
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_FTP_FORBIDDEN squid-3.1.0.14/errors/zh-cn/ERR_FTP_FORBIDDEN
--- squid-3.1.0.13/errors/zh-cn/ERR_FTP_FORBIDDEN 2009-08-05 01:36:51.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_FTP_FORBIDDEN 2009-09-27 15:33:37.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
å°è¯•èŽ·å– URL: %U æ—¶å‘生一个FTP认è¯é”™è¯¯
本缓å˜æœåŠ¡å™¨å‘出以下 FTP 命令:
%f
The server responded with:
%F
%g
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
+ 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
å°è¯•èŽ·å– URL: %U æ—¶å‘生一个FTP认è¯é”™è¯¯
本缓å˜æœåŠ¡å™¨å‘出以下 FTP 命令:
%f
The server responded with:
%F
%g
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_FTP_LISTING squid-3.1.0.14/errors/zh-cn/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/zh-cn/ERR_FTP_LISTING 2009-08-05 01:36:51.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_FTP_NOT_FOUND squid-3.1.0.14/errors/zh-cn/ERR_FTP_NOT_FOUND
--- squid-3.1.0.13/errors/zh-cn/ERR_FTP_NOT_FOUND 2009-08-05 01:36:51.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_FTP_NOT_FOUND 2009-09-27 15:33:36.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网址（URL）无法获取
ERROR
The requested URL could not be retrieved
The following URL could not be retrieved: %U
本缓存服务器发出以下 FTP 命令:
%f
The server responded with:
%F
%g
这可能是由于 FTP 网址（URL)中包含了绝对路径所致(不符合 RFC1738）。如果真的是这样，文件可以从%B位置找到。
缓存服务器的管理员 %w.
\ No newline at end of file
+ 错误: 您所请求的网址（URL）无法获取
ERROR
The requested URL could not be retrieved
无法取回下面的 URL: %U
本缓存服务器发出以下 FTP 命令:
%f
The server responded with:
%F
%g
这可能是由于 FTP 网址（URL)中包含了绝对路径所致(不符合 RFC1738）。如果真的是这样，文件可以从%B位置找到。
缓存服务器的管理员 %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_FTP_PUT_CREATED squid-3.1.0.14/errors/zh-cn/ERR_FTP_PUT_CREATED
--- squid-3.1.0.13/errors/zh-cn/ERR_FTP_PUT_CREATED 2009-08-05 01:36:51.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_FTP_PUT_CREATED 2009-09-27 15:33:38.000000000 +1200
@@ -1 +1 @@
- FTP PUT 成功: 文件已创建
操作成功
文件已创建
\ No newline at end of file
+ FTP PUT 成功: 文件已创建
操作成功
文件已创建
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_FTP_PUT_ERROR squid-3.1.0.14/errors/zh-cn/ERR_FTP_PUT_ERROR
--- squid-3.1.0.13/errors/zh-cn/ERR_FTP_PUT_ERROR 2009-08-05 01:36:51.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_FTP_PUT_ERROR 2009-09-27 15:33:38.000000000 +1200
@@ -1 +1 @@
- 错误: FTP upload failed
ERROR
FTP PUT/ä¸Šä¼ å¤±è´¥
当å°è¯•ä¸Šä¼ (PUT)到以下ä½ç½®æ—¶ï¼š %U
本缓å˜æœåŠ¡å™¨å‘出以下 FTP 命令:
%f
The server responded with:
%F
This means that the FTP server may not have permission or space to store the file. Check the path, permissions, diskspace and try again.
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
+ 错误: FTP upload failed
ERROR
FTP PUT/ä¸Šä¼ å¤±è´¥
当å°è¯•ä¸Šä¼ (PUT)到以下ä½ç½®æ—¶ï¼š %U
本缓å˜æœåŠ¡å™¨å‘出以下 FTP 命令:
%f
The server responded with:
%F
This means that the FTP server may not have permission or space to store the file. Check the path, permissions, diskspace and try again.
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_FTP_PUT_MODIFIED squid-3.1.0.14/errors/zh-cn/ERR_FTP_PUT_MODIFIED
--- squid-3.1.0.13/errors/zh-cn/ERR_FTP_PUT_MODIFIED 2009-08-05 01:36:52.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_FTP_PUT_MODIFIED 2009-09-27 15:33:34.000000000 +1200
@@ -1 +1 @@
- FTP PUT 成功: 文件已更新
操作成功
文件已更新
\ No newline at end of file
+ FTP PUT 成功: 文件已更新
操作成功
文件已更新
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_FTP_UNAVAILABLE squid-3.1.0.14/errors/zh-cn/ERR_FTP_UNAVAILABLE
--- squid-3.1.0.13/errors/zh-cn/ERR_FTP_UNAVAILABLE 2009-08-05 01:36:52.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_FTP_UNAVAILABLE 2009-09-27 15:33:36.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网址（URL）无法获取
ERROR
The requested URL could not be retrieved
The FTP server was too busy to retrieve the URL: %U
本缓存服务器发出以下 FTP 命令:
%f
The server responded with:
%F
%g
缓存服务器的管理员 %w.
\ No newline at end of file
+ 错误: 您所请求的网址（URL）无法获取
ERROR
The requested URL could not be retrieved
FTP 服务器太忙，无法取回 URL: %U
本缓存服务器发出以下 FTP 命令:
%f
The server responded with:
%F
%g
缓存服务器的管理员 %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_ICAP_FAILURE squid-3.1.0.14/errors/zh-cn/ERR_ICAP_FAILURE
--- squid-3.1.0.13/errors/zh-cn/ERR_ICAP_FAILURE 2009-08-05 01:36:52.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_ICAP_FAILURE 2009-09-27 15:33:36.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网址（URL）无法获取
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
ICAP protocol error.
系统返回以下内容:%E
This means that some aspect of the ICAP communication failed.
Some possible problems are:
\ No newline at end of file
+ 错误: 您所请求的网址（URL）无法获取
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
ICAP 协议错误。
系统返回以下内容:%E
这意味着 ICAP 通信的某些地方失败了。
可能的问题包括:
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_INVALID_REQ squid-3.1.0.14/errors/zh-cn/ERR_INVALID_REQ
--- squid-3.1.0.13/errors/zh-cn/ERR_INVALID_REQ 2009-08-05 01:36:52.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_INVALID_REQ 2009-09-27 15:33:37.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网址（URL）无法获取
ERROR
The requested URL could not be retrieved
非法请求 尝试处理请求是发生错误:
%R
Some possible problems are:
缺少请求方式或未知的请求方式
缺少网址
缺少 HTTP 标识(HTTP/1.0)
请求命令过长
POST 或 PUT 请求中丢失内容长度(Content-Length)。
主机名称中包含不合法的字符;下划线是不允许的。
缓存服务器的管理员 %w.
\ No newline at end of file
+ 错误: 您所请求的网址（URL）无法获取
ERROR
The requested URL could not be retrieved
非法请求 尝试处理请求是发生错误:
%R
可能的问题包括:
缺少请求方式或未知的请求方式
缺少网址
缺少 HTTP 标识(HTTP/1.0)
请求命令过长
POST 或 PUT 请求中丢失内容长度(Content-Length)。
主机名称中包含不合法的字符;下划线是不允许的。
缓存服务器的管理员 %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_INVALID_RESP squid-3.1.0.14/errors/zh-cn/ERR_INVALID_RESP
--- squid-3.1.0.13/errors/zh-cn/ERR_INVALID_RESP 2009-08-05 01:36:52.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_INVALID_RESP 2009-09-27 15:33:39.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
éžæ³•å“应 å°è¯•å¤„ç†è¯·æ±‚æ—¶å‘生错误:
%R
The HTTP Response message received from the contacted server could not be understood or was otherwise malformed. Please contact the site operator.
Your cache administrator may be able to provide you with more details about the exact nature of the problem if needed.
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
+ 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
éžæ³•å“应 å°è¯•å¤„ç†è¯·æ±‚æ—¶å‘生错误:
%R
The HTTP Response message received from the contacted server could not be understood or was otherwise malformed. Please contact the site operator.
Your cache administrator may be able to provide you with more details about the exact nature of the problem if needed.
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_INVALID_URL squid-3.1.0.14/errors/zh-cn/ERR_INVALID_URL
--- squid-3.1.0.13/errors/zh-cn/ERR_INVALID_URL 2009-08-05 01:36:52.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_INVALID_URL 2009-09-27 15:33:39.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网址（URL）无法获取
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
无效的网址
Some aspect of the requested URL is incorrect.
Some possible problems are:
缺少或不正确的通讯协议(应该如 http://
或类似的开头)
缺少欲连结的主机名称
网址路径中有不合法双重转义符
主机名称中包含不合法的字符;下划线是不允许的。
缓存服务器的管理员 %w.
\ No newline at end of file
+ 错误: 您所请求的网址（URL）无法获取
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
无效的网址
Some aspect of the requested URL is incorrect.
可能的问题包括:
缺少或不正确的通讯协议(应该如 http://
或类似的开头)
缺少欲连结的主机名称
网址路径中有不合法双重转义符
主机名称中包含不合法的字符;下划线是不允许的。
缓存服务器的管理员 %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_LIFETIME_EXP squid-3.1.0.14/errors/zh-cn/ERR_LIFETIME_EXP
--- squid-3.1.0.13/errors/zh-cn/ERR_LIFETIME_EXP 2009-08-05 01:36:52.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_LIFETIME_EXP 2009-09-27 15:33:36.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
连接已过期
缓å˜æœåŠ¡å™¨å·²ç»ˆæ¢æ‚¨çš„è¿žæŽ¥è¯·æ±‚ï¼Œå› ä¸ºå·²ç»è¶…过了最大连接ç‰å¾…时间。
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
+ 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
连接已过期
缓å˜æœåŠ¡å™¨å·²ç»ˆæ¢æ‚¨çš„è¿žæŽ¥è¯·æ±‚ï¼Œå› ä¸ºå·²ç»è¶…过了最大连接ç‰å¾…时间。
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_NO_RELAY squid-3.1.0.14/errors/zh-cn/ERR_NO_RELAY
--- squid-3.1.0.13/errors/zh-cn/ERR_NO_RELAY 2009-08-05 01:36:52.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_NO_RELAY 2009-09-27 15:33:39.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
æ— Wais ä¸ç»§
本缓å˜æœåŠ¡å™¨æ²¡æœ‰è®¾å®š WAIS ä¸ç»§ä¸»æœºï¼å¦‚有疑问请与缓å˜æœåŠ¡å™¨ç®¡ç†å‘˜è”系。
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
+ 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
æ— Wais ä¸ç»§
本缓å˜æœåŠ¡å™¨æ²¡æœ‰è®¾å®š WAIS ä¸ç»§ä¸»æœºï¼å¦‚有疑问请与缓å˜æœåŠ¡å™¨ç®¡ç†å‘˜è”系。
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_ONLY_IF_CACHED_MISS squid-3.1.0.14/errors/zh-cn/ERR_ONLY_IF_CACHED_MISS
--- squid-3.1.0.13/errors/zh-cn/ERR_ONLY_IF_CACHED_MISS 2009-08-05 01:36:53.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_ONLY_IF_CACHED_MISS 2009-09-27 15:33:36.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
请求的文件在本缓å˜æœåŠ¡å™¨ä¸Šæœªæ‰¾åˆ°ï¼Œè€Œæ‚¨è®¾å®šäº†only-if-cached
(åªè¯»å–缓å˜ï¼‰æŒ‡ä»¤ã€‚
您é€å‡ºäº†ä¸€ä¸ªåŒ…å« only-if-cached
(åªè¯»å–缓å˜ï¼‰ç¼“å˜æŽ§åˆ¶æŒ‡ä»¤çš„连结请求。而所è¦çš„文件并未å˜åœ¨äºŽæœ¬ç¼“å˜æœåŠ¡å™¨ä¸ï¼Œæˆ–者这个连结请求需è¦åˆ·æ–°æ–‡ä»¶è€Œ only-if-cached
指令ç¦æ¢è¿™ä¹ˆåšã€‚
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
+ 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
请求的文件在本缓å˜æœåŠ¡å™¨ä¸Šæœªæ‰¾åˆ°ï¼Œè€Œæ‚¨è®¾å®šäº†only-if-cached
(åªè¯»å–缓å˜ï¼‰æŒ‡ä»¤ã€‚
您é€å‡ºäº†ä¸€ä¸ªåŒ…å« only-if-cached
(åªè¯»å–缓å˜ï¼‰ç¼“å˜æŽ§åˆ¶æŒ‡ä»¤çš„连结请求。而所è¦çš„文件并未å˜åœ¨äºŽæœ¬ç¼“å˜æœåŠ¡å™¨ä¸ï¼Œæˆ–者这个连结请求需è¦åˆ·æ–°æ–‡ä»¶è€Œ only-if-cached
指令ç¦æ¢è¿™ä¹ˆåšã€‚
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_READ_ERROR squid-3.1.0.14/errors/zh-cn/ERR_READ_ERROR
--- squid-3.1.0.13/errors/zh-cn/ERR_READ_ERROR 2009-08-05 01:36:53.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_READ_ERROR 2009-09-27 15:33:36.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
读å–错误
系统返回以下内容:%E
æ£åœ¨é€šè¿‡ç½‘络读å–æ•°æ®æ—¶å‘生了错误,请é‡æ–°å°è¯•ã€‚
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
+ 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
读å–错误
系统返回以下内容:%E
æ£åœ¨é€šè¿‡ç½‘络读å–æ•°æ®æ—¶å‘生了错误,请é‡æ–°å°è¯•ã€‚
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_READ_TIMEOUT squid-3.1.0.14/errors/zh-cn/ERR_READ_TIMEOUT
--- squid-3.1.0.13/errors/zh-cn/ERR_READ_TIMEOUT 2009-08-05 01:36:53.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_READ_TIMEOUT 2009-09-27 15:33:36.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
读å–超时
系统返回以下内容:%E
ç‰å¾…从网络读å–æ•°æ®æ—¶å‘生超时。 网络或æœåŠ¡å™¨ä¸‹çº¿æˆ–拥挤。 请é‡æ–°å‘é€ä½ 的请求。
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
+ 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
读å–超时
系统返回以下内容:%E
ç‰å¾…从网络读å–æ•°æ®æ—¶å‘生超时。 网络或æœåŠ¡å™¨ä¸‹çº¿æˆ–拥挤。 请é‡æ–°å‘é€ä½ 的请求。
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_SECURE_CONNECT_FAIL squid-3.1.0.14/errors/zh-cn/ERR_SECURE_CONNECT_FAIL
--- squid-3.1.0.13/errors/zh-cn/ERR_SECURE_CONNECT_FAIL 2009-08-05 01:36:53.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_SECURE_CONNECT_FAIL 2009-09-27 15:33:35.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网址（URL）无法获取
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
Failed to establish a secure connection to %I
系统返回以下内容:%E
This proxy and the remote host failed to negotiate a mutually acceptable security settings for handling your request. It is possible that the remote host does not support secure connections, or the proxy is not satisfied with the host security credentials.
缓存服务器的管理员 %w.
\ No newline at end of file
+ 错误: 您所请求的网址（URL）无法获取
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
建立到 %I 的安全连接失败
系统返回以下内容:%E
This proxy and the remote host failed to negotiate a mutually acceptable security settings for handling your request. It is possible that the remote host does not support secure connections, or the proxy is not satisfied with the host security credentials.
缓存服务器的管理员 %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_SHUTTING_DOWN squid-3.1.0.14/errors/zh-cn/ERR_SHUTTING_DOWN
--- squid-3.1.0.13/errors/zh-cn/ERR_SHUTTING_DOWN 2009-08-05 01:36:53.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_SHUTTING_DOWN 2009-09-27 15:33:35.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
本缓å˜æœåŠ¡å™¨æ£åœ¨å…³é—ï¼Œæš‚æ—¶æ— æ³•ä¸ºæ‚¨æœåŠ¡ã€‚请ç¨ä¾¯å†è¯•ã€‚
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
+ 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
本缓å˜æœåŠ¡å™¨æ£åœ¨å…³é—ï¼Œæš‚æ—¶æ— æ³•ä¸ºæ‚¨æœåŠ¡ã€‚请ç¨ä¾¯å†è¯•ã€‚
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_SOCKET_FAILURE squid-3.1.0.14/errors/zh-cn/ERR_SOCKET_FAILURE
--- squid-3.1.0.13/errors/zh-cn/ERR_SOCKET_FAILURE 2009-08-05 01:36:53.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_SOCKET_FAILURE 2009-09-27 15:33:35.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
Socket 建立失败
系统返回以下内容:%E
Squid (缓å˜æœåŠ¡å™¨ï¼‰æ— 法建立 TCP socketï¼ˆæ— æ³•å‘系统申请建立新的网络连接),å¯èƒ½æ˜¯å› 为负è·è¿‡é‡ï¼Œè¯·é‡æ–°å°è¯•ã€‚
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
+ 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
Socket 建立失败
系统返回以下内容:%E
Squid (缓å˜æœåŠ¡å™¨ï¼‰æ— 法建立 TCP socketï¼ˆæ— æ³•å‘系统申请建立新的网络连接),å¯èƒ½æ˜¯å› 为负è·è¿‡é‡ï¼Œè¯·é‡æ–°å°è¯•ã€‚
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_TOO_BIG squid-3.1.0.14/errors/zh-cn/ERR_TOO_BIG
--- squid-3.1.0.13/errors/zh-cn/ERR_TOO_BIG 2009-08-05 01:36:53.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_TOO_BIG 2009-09-27 15:33:38.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网址（URL）无法获取
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
请求或响应太大
If you are making a POST or PUT request, then the item you are trying to upload is too large.
If you are making a GET request, then the item you are trying to download is too large.
These limits have been established by the Internet Service Provider who operates this cache. Please contact them directly if you feel this is an error.
缓存服务器的管理员 %w.
\ No newline at end of file
+ 错误: 您所请求的网址（URL）无法获取
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
请求或响应太大
如果您在执行 POST 或 PUT 请求,那是您要上传的东西太大。
如果您在执行 GET 请求,那是您要下载的项目太大。
这些长度限制由操作本缓存服务器的服务提供者制定。如果您觉得这是错误的,请与他们直接联系。
缓存服务器的管理员 %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_UNSUP_HTTPVERSION squid-3.1.0.14/errors/zh-cn/ERR_UNSUP_HTTPVERSION
--- squid-3.1.0.13/errors/zh-cn/ERR_UNSUP_HTTPVERSION 2009-08-05 01:36:54.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_UNSUP_HTTPVERSION 2009-09-27 15:33:36.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网址（URL）无法获取
ERROR
Unsupported HTTP version
The following error was encountered while trying to retrieve the URL: %U
Unsupported HTTP version
This Squid does not accept the HTTP version you are attempting to use.
缓存服务器的管理员 %w.
\ No newline at end of file
+ 错误: 您所请求的网址（URL）无法获取
ERROR
不支持的 HTTP 版本
The following error was encountered while trying to retrieve the URL: %U
Unsupported HTTP version
此版本的 Squid 不接受您试图使用的 HTTP 版本。
缓存服务器的管理员 %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_UNSUP_REQ squid-3.1.0.14/errors/zh-cn/ERR_UNSUP_REQ
--- squid-3.1.0.13/errors/zh-cn/ERR_UNSUP_REQ 2009-08-05 01:36:54.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_UNSUP_REQ 2009-09-27 15:33:38.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
ä¸æ”¯æŒçš„请求方å¼å’Œåè®®
Squid (缓å˜æœåŠ¡å™¨ï¼‰ä¸èƒ½å¯¹æ‰€æœ‰çš„å˜å–å议支æŒæ‰€æœ‰çš„请求方å¼ã€‚æ¯”å¦‚è¯´ï¼Œä½ ä¸èƒ½å¯¹ GOPHER 进行一个 POST 请求。
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
+ 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
ä¸æ”¯æŒçš„请求方å¼å’Œåè®®
Squid (缓å˜æœåŠ¡å™¨ï¼‰ä¸èƒ½å¯¹æ‰€æœ‰çš„å˜å–å议支æŒæ‰€æœ‰çš„请求方å¼ã€‚æ¯”å¦‚è¯´ï¼Œä½ ä¸èƒ½å¯¹ GOPHER 进行一个 POST 请求。
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_URN_RESOLVE squid-3.1.0.14/errors/zh-cn/ERR_URN_RESOLVE
--- squid-3.1.0.13/errors/zh-cn/ERR_URN_RESOLVE 2009-08-05 01:36:54.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_URN_RESOLVE 2009-09-27 15:33:35.000000000 +1200
@@ -1 +1 @@
- 错误: The requested URN not be retrieved
ERROR
URN ä¸çš„一个网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
当å°è¯•è¯»å–以下 URN 时: %U
ä¸èƒ½è§£æž URN
抱æ‰ï¼æ‚¨ä¸èƒ½å¯¹ %T 上的 URNs 期待太多 :)
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
+ 错误: The requested URN not be retrieved
ERROR
URN ä¸çš„一个网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
当å°è¯•è¯»å–以下 URN 时: %U
ä¸èƒ½è§£æž URN
抱æ‰ï¼æ‚¨ä¸èƒ½å¯¹ %T 上的 URNs 期待太多 :)
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_WRITE_ERROR squid-3.1.0.14/errors/zh-cn/ERR_WRITE_ERROR
--- squid-3.1.0.13/errors/zh-cn/ERR_WRITE_ERROR 2009-08-05 01:36:54.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_WRITE_ERROR 2009-09-27 15:33:39.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
写入错误
系统返回以下内容:%E
通过网络写入数æ®æ—¶å‘生了错误,请é‡æ–°å°è¯•ã€‚
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
+ 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
写入错误
系统返回以下内容:%E
通过网络写入数æ®æ—¶å‘生了错误,请é‡æ–°å°è¯•ã€‚
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-cn/ERR_ZERO_SIZE_OBJECT squid-3.1.0.14/errors/zh-cn/ERR_ZERO_SIZE_OBJECT
--- squid-3.1.0.13/errors/zh-cn/ERR_ZERO_SIZE_OBJECT 2009-08-05 01:36:54.000000000 +1200
+++ squid-3.1.0.14/errors/zh-cn/ERR_ZERO_SIZE_OBJECT 2009-09-27 15:33:35.000000000 +1200
@@ -1 +1 @@
- 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
å“应内容长度为零
本缓å˜æœåŠ¡å™¨ä»Žè¢«è¿žæŽ¥çš„æœåŠ¡å™¨ä¸Šæ²¡æœ‰æ”¶åˆ°ä»»ä½•æ•°æ®ã€‚
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
+ 错误: 您所请求的网å€ï¼ˆURLï¼‰æ— æ³•èŽ·å–
ERROR
The requested URL could not be retrieved
The following error was encountered while trying to retrieve the URL: %U
å“应内容长度为零
本缓å˜æœåŠ¡å™¨ä»Žè¢«è¿žæŽ¥çš„æœåŠ¡å™¨ä¸Šæ²¡æœ‰æ”¶åˆ°ä»»ä½•æ•°æ®ã€‚
缓å˜æœåŠ¡å™¨çš„管ç†å‘˜ %w.
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-tw/ERR_DIR_LISTING squid-3.1.0.14/errors/zh-tw/ERR_DIR_LISTING
--- squid-3.1.0.13/errors/zh-tw/ERR_DIR_LISTING 1970-01-01 12:00:00.000000000 +1200
+++ squid-3.1.0.14/errors/zh-tw/ERR_DIR_LISTING 2009-09-27 15:33:41.000000000 +1200
@@ -0,0 +1 @@
+ Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/errors/zh-tw/ERR_FTP_LISTING squid-3.1.0.14/errors/zh-tw/ERR_FTP_LISTING
--- squid-3.1.0.13/errors/zh-tw/ERR_FTP_LISTING 2009-08-05 01:36:56.000000000 +1200
+++ squid-3.1.0.14/errors/zh-tw/ERR_FTP_LISTING 1970-01-01 12:00:00.000000000 +1200
@@ -1 +0,0 @@
- FTP Directory: %U
\ No newline at end of file
diff -u -r -N squid-3.1.0.13/helpers/basic_auth/DB/Makefile.in squid-3.1.0.14/helpers/basic_auth/DB/Makefile.in
--- squid-3.1.0.13/helpers/basic_auth/DB/Makefile.in 2009-08-05 01:32:30.000000000 +1200
+++ squid-3.1.0.14/helpers/basic_auth/DB/Makefile.in 2009-09-27 15:28:49.000000000 +1200
@@ -61,6 +61,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/basic_auth/getpwnam/Makefile.in squid-3.1.0.14/helpers/basic_auth/getpwnam/Makefile.in
--- squid-3.1.0.13/helpers/basic_auth/getpwnam/Makefile.in 2009-08-05 01:32:32.000000000 +1200
+++ squid-3.1.0.14/helpers/basic_auth/getpwnam/Makefile.in 2009-09-27 15:28:50.000000000 +1200
@@ -82,6 +82,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/basic_auth/LDAP/Makefile.in squid-3.1.0.14/helpers/basic_auth/LDAP/Makefile.in
--- squid-3.1.0.13/helpers/basic_auth/LDAP/Makefile.in 2009-08-05 01:32:30.000000000 +1200
+++ squid-3.1.0.14/helpers/basic_auth/LDAP/Makefile.in 2009-09-27 15:28:49.000000000 +1200
@@ -86,6 +86,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/basic_auth/Makefile.in squid-3.1.0.14/helpers/basic_auth/Makefile.in
--- squid-3.1.0.13/helpers/basic_auth/Makefile.in 2009-08-05 01:32:31.000000000 +1200
+++ squid-3.1.0.14/helpers/basic_auth/Makefile.in 2009-09-27 15:28:49.000000000 +1200
@@ -62,6 +62,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/basic_auth/MSNT/Makefile.in squid-3.1.0.14/helpers/basic_auth/MSNT/Makefile.in
--- squid-3.1.0.13/helpers/basic_auth/MSNT/Makefile.in 2009-08-05 01:32:31.000000000 +1200
+++ squid-3.1.0.14/helpers/basic_auth/MSNT/Makefile.in 2009-09-27 15:28:49.000000000 +1200
@@ -95,6 +95,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/basic_auth/MSNT/rfcnb-util.c squid-3.1.0.14/helpers/basic_auth/MSNT/rfcnb-util.c
--- squid-3.1.0.13/helpers/basic_auth/MSNT/rfcnb-util.c 2009-08-05 01:32:09.000000000 +1200
+++ squid-3.1.0.14/helpers/basic_auth/MSNT/rfcnb-util.c 2009-09-27 15:28:25.000000000 +1200
@@ -208,7 +208,7 @@
/* Get a packet of size n */
struct RFCNB_Pkt *
- RFCNB_Alloc_Pkt(int n) {
+RFCNB_Alloc_Pkt(int n) {
RFCNB_Pkt *pkt;
if ((pkt = malloc(sizeof(struct RFCNB_Pkt))) == NULL) {
diff -u -r -N squid-3.1.0.13/helpers/basic_auth/mswin_sspi/Makefile.in squid-3.1.0.14/helpers/basic_auth/mswin_sspi/Makefile.in
--- squid-3.1.0.13/helpers/basic_auth/mswin_sspi/Makefile.in 2009-08-05 01:32:32.000000000 +1200
+++ squid-3.1.0.14/helpers/basic_auth/mswin_sspi/Makefile.in 2009-09-27 15:28:50.000000000 +1200
@@ -82,6 +82,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/basic_auth/multi-domain-NTLM/Makefile.in squid-3.1.0.14/helpers/basic_auth/multi-domain-NTLM/Makefile.in
--- squid-3.1.0.13/helpers/basic_auth/multi-domain-NTLM/Makefile.in 2009-08-05 01:32:32.000000000 +1200
+++ squid-3.1.0.14/helpers/basic_auth/multi-domain-NTLM/Makefile.in 2009-09-27 15:28:50.000000000 +1200
@@ -58,6 +58,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/basic_auth/NCSA/crypt_md5.c squid-3.1.0.14/helpers/basic_auth/NCSA/crypt_md5.c
--- squid-3.1.0.13/helpers/basic_auth/NCSA/crypt_md5.c 2009-08-05 01:32:09.000000000 +1200
+++ squid-3.1.0.14/helpers/basic_auth/NCSA/crypt_md5.c 2009-09-27 15:28:25.000000000 +1200
@@ -188,7 +188,7 @@
SquidMD5Update(&ctx,(const unsigned char *)s,strlen(s));
SquidMD5Final(digest,&ctx);
- for (idx=0;idx<16;idx++)
+ for (idx=0; idx<16; idx++)
sprintf(&sum[idx*2],"%02x",digest[idx]);
sum[32]='\0';
diff -u -r -N squid-3.1.0.13/helpers/basic_auth/NCSA/Makefile.in squid-3.1.0.14/helpers/basic_auth/NCSA/Makefile.in
--- squid-3.1.0.13/helpers/basic_auth/NCSA/Makefile.in 2009-08-05 01:32:31.000000000 +1200
+++ squid-3.1.0.14/helpers/basic_auth/NCSA/Makefile.in 2009-09-27 15:28:49.000000000 +1200
@@ -86,6 +86,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/basic_auth/PAM/Makefile.in squid-3.1.0.14/helpers/basic_auth/PAM/Makefile.in
--- squid-3.1.0.13/helpers/basic_auth/PAM/Makefile.in 2009-08-05 01:32:31.000000000 +1200
+++ squid-3.1.0.14/helpers/basic_auth/PAM/Makefile.in 2009-09-27 15:28:49.000000000 +1200
@@ -85,6 +85,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/basic_auth/POP3/Makefile.in squid-3.1.0.14/helpers/basic_auth/POP3/Makefile.in
--- squid-3.1.0.13/helpers/basic_auth/POP3/Makefile.in 2009-08-05 01:32:31.000000000 +1200
+++ squid-3.1.0.14/helpers/basic_auth/POP3/Makefile.in 2009-09-27 15:28:49.000000000 +1200
@@ -58,6 +58,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/basic_auth/SASL/Makefile.in squid-3.1.0.14/helpers/basic_auth/SASL/Makefile.in
--- squid-3.1.0.13/helpers/basic_auth/SASL/Makefile.in 2009-08-05 01:32:31.000000000 +1200
+++ squid-3.1.0.14/helpers/basic_auth/SASL/Makefile.in 2009-09-27 15:28:50.000000000 +1200
@@ -82,6 +82,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/basic_auth/SMB/Makefile.in squid-3.1.0.14/helpers/basic_auth/SMB/Makefile.in
--- squid-3.1.0.13/helpers/basic_auth/SMB/Makefile.in 2009-08-05 01:32:31.000000000 +1200
+++ squid-3.1.0.14/helpers/basic_auth/SMB/Makefile.in 2009-09-27 15:28:50.000000000 +1200
@@ -88,6 +88,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/basic_auth/squid_radius_auth/Makefile.in squid-3.1.0.14/helpers/basic_auth/squid_radius_auth/Makefile.in
--- squid-3.1.0.13/helpers/basic_auth/squid_radius_auth/Makefile.in 2009-08-05 01:32:32.000000000 +1200
+++ squid-3.1.0.14/helpers/basic_auth/squid_radius_auth/Makefile.in 2009-09-27 15:28:50.000000000 +1200
@@ -86,6 +86,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/basic_auth/squid_radius_auth/radius-util.c squid-3.1.0.14/helpers/basic_auth/squid_radius_auth/radius-util.c
--- squid-3.1.0.13/helpers/basic_auth/squid_radius_auth/radius-util.c 2009-08-05 01:32:09.000000000 +1200
+++ squid-3.1.0.14/helpers/basic_auth/squid_radius_auth/radius-util.c 2009-09-27 15:28:25.000000000 +1200
@@ -121,7 +121,7 @@
int cur_byte;
ipaddr = (u_int32_t)0;
- for (i = 0;i < 4;i++) {
+ for (i = 0; i < 4; i++) {
ptr = buf;
count = 0;
*ptr = '\0';
diff -u -r -N squid-3.1.0.13/helpers/basic_auth/YP/Makefile.in squid-3.1.0.14/helpers/basic_auth/YP/Makefile.in
--- squid-3.1.0.13/helpers/basic_auth/YP/Makefile.in 2009-08-05 01:32:32.000000000 +1200
+++ squid-3.1.0.14/helpers/basic_auth/YP/Makefile.in 2009-09-27 15:28:50.000000000 +1200
@@ -81,6 +81,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/digest_auth/eDirectory/Makefile.in squid-3.1.0.14/helpers/digest_auth/eDirectory/Makefile.in
--- squid-3.1.0.13/helpers/digest_auth/eDirectory/Makefile.in 2009-08-05 01:32:32.000000000 +1200
+++ squid-3.1.0.14/helpers/digest_auth/eDirectory/Makefile.in 2009-09-27 15:28:51.000000000 +1200
@@ -85,6 +85,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/digest_auth/ldap/ldap_backend.c squid-3.1.0.14/helpers/digest_auth/ldap/ldap_backend.c
--- squid-3.1.0.13/helpers/digest_auth/ldap/ldap_backend.c 2009-08-05 01:32:09.000000000 +1200
+++ squid-3.1.0.14/helpers/digest_auth/ldap/ldap_backend.c 2009-09-27 15:28:25.000000000 +1200
@@ -361,10 +361,12 @@
}
if (use_tls) {
#ifdef LDAP_OPT_X_TLS
- if ((version == LDAP_VERSION3) && (ldap_start_tls_s(ld, NULL, NULL) == LDAP_SUCCESS)) {
+ if (version != LDAP_VERSION3) {
+ fprintf(stderr, "TLS requires LDAP version 3\n");
+ exit(1);
+ } else if (ldap_start_tls_s(ld, NULL, NULL) != LDAP_SUCCESS) {
fprintf(stderr, "Could not Activate TLS connection\n");
- ldap_unbind(ld);
- ld = NULL;
+ exit(1);
}
#else
fprintf(stderr, "TLS not supported with your LDAP library\n");
diff -u -r -N squid-3.1.0.13/helpers/digest_auth/ldap/Makefile.in squid-3.1.0.14/helpers/digest_auth/ldap/Makefile.in
--- squid-3.1.0.13/helpers/digest_auth/ldap/Makefile.in 2009-08-05 01:32:32.000000000 +1200
+++ squid-3.1.0.14/helpers/digest_auth/ldap/Makefile.in 2009-09-27 15:28:51.000000000 +1200
@@ -85,6 +85,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/digest_auth/Makefile.in squid-3.1.0.14/helpers/digest_auth/Makefile.in
--- squid-3.1.0.13/helpers/digest_auth/Makefile.in 2009-08-05 01:32:32.000000000 +1200
+++ squid-3.1.0.14/helpers/digest_auth/Makefile.in 2009-09-27 15:28:50.000000000 +1200
@@ -62,6 +62,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/digest_auth/password/Makefile.in squid-3.1.0.14/helpers/digest_auth/password/Makefile.in
--- squid-3.1.0.13/helpers/digest_auth/password/Makefile.in 2009-08-05 01:32:33.000000000 +1200
+++ squid-3.1.0.14/helpers/digest_auth/password/Makefile.in 2009-09-27 15:28:51.000000000 +1200
@@ -84,6 +84,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/external_acl/ip_user/dict.c squid-3.1.0.14/helpers/external_acl/ip_user/dict.c
--- squid-3.1.0.13/helpers/external_acl/ip_user/dict.c 2009-08-05 01:32:09.000000000 +1200
+++ squid-3.1.0.14/helpers/external_acl/ip_user/dict.c 2009-09-27 15:28:25.000000000 +1200
@@ -42,7 +42,7 @@
* It returns a pointer to the first entry of the linked list
*/
struct ip_user_dict *
- load_dict (FILE * FH) {
+load_dict (FILE * FH) {
struct ip_user_dict *current_entry; /* the structure used to
store data */
struct ip_user_dict *first_entry = NULL; /* the head of the
diff -u -r -N squid-3.1.0.13/helpers/external_acl/ip_user/Makefile.in squid-3.1.0.14/helpers/external_acl/ip_user/Makefile.in
--- squid-3.1.0.13/helpers/external_acl/ip_user/Makefile.in 2009-08-05 01:32:33.000000000 +1200
+++ squid-3.1.0.14/helpers/external_acl/ip_user/Makefile.in 2009-09-27 15:28:51.000000000 +1200
@@ -81,6 +81,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/external_acl/ldap_group/Makefile.in squid-3.1.0.14/helpers/external_acl/ldap_group/Makefile.in
--- squid-3.1.0.13/helpers/external_acl/ldap_group/Makefile.in 2009-08-05 01:32:33.000000000 +1200
+++ squid-3.1.0.14/helpers/external_acl/ldap_group/Makefile.in 2009-09-27 15:28:51.000000000 +1200
@@ -85,6 +85,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/external_acl/ldap_group/squid_ldap_group.c squid-3.1.0.14/helpers/external_acl/ldap_group/squid_ldap_group.c
--- squid-3.1.0.13/helpers/external_acl/ldap_group/squid_ldap_group.c 2009-08-05 01:32:09.000000000 +1200
+++ squid-3.1.0.14/helpers/external_acl/ldap_group/squid_ldap_group.c 2009-09-27 15:28:25.000000000 +1200
@@ -233,6 +233,7 @@
case 'd':
case 'g':
case 'S':
+ case 'K':
break;
default:
if (strlen(argv[1]) > 2) {
diff -u -r -N squid-3.1.0.13/helpers/external_acl/Makefile.in squid-3.1.0.14/helpers/external_acl/Makefile.in
--- squid-3.1.0.13/helpers/external_acl/Makefile.in 2009-08-05 01:32:33.000000000 +1200
+++ squid-3.1.0.14/helpers/external_acl/Makefile.in 2009-09-27 15:28:51.000000000 +1200
@@ -62,6 +62,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/external_acl/mswin_ad_group/Makefile.in squid-3.1.0.14/helpers/external_acl/mswin_ad_group/Makefile.in
--- squid-3.1.0.13/helpers/external_acl/mswin_ad_group/Makefile.in 2009-08-05 01:32:33.000000000 +1200
+++ squid-3.1.0.14/helpers/external_acl/mswin_ad_group/Makefile.in 2009-09-27 15:28:52.000000000 +1200
@@ -79,6 +79,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/external_acl/mswin_lm_group/Makefile.in squid-3.1.0.14/helpers/external_acl/mswin_lm_group/Makefile.in
--- squid-3.1.0.13/helpers/external_acl/mswin_lm_group/Makefile.in 2009-08-05 01:32:33.000000000 +1200
+++ squid-3.1.0.14/helpers/external_acl/mswin_lm_group/Makefile.in 2009-09-27 15:28:52.000000000 +1200
@@ -79,6 +79,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/external_acl/session/Makefile.in squid-3.1.0.14/helpers/external_acl/session/Makefile.in
--- squid-3.1.0.13/helpers/external_acl/session/Makefile.in 2009-08-05 01:32:33.000000000 +1200
+++ squid-3.1.0.14/helpers/external_acl/session/Makefile.in 2009-09-27 15:28:52.000000000 +1200
@@ -83,6 +83,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/external_acl/unix_group/Makefile.in squid-3.1.0.14/helpers/external_acl/unix_group/Makefile.in
--- squid-3.1.0.13/helpers/external_acl/unix_group/Makefile.in 2009-08-05 01:32:34.000000000 +1200
+++ squid-3.1.0.14/helpers/external_acl/unix_group/Makefile.in 2009-09-27 15:28:52.000000000 +1200
@@ -83,6 +83,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/external_acl/wbinfo_group/Makefile.in squid-3.1.0.14/helpers/external_acl/wbinfo_group/Makefile.in
--- squid-3.1.0.13/helpers/external_acl/wbinfo_group/Makefile.in 2009-08-05 01:32:34.000000000 +1200
+++ squid-3.1.0.14/helpers/external_acl/wbinfo_group/Makefile.in 2009-09-27 15:28:52.000000000 +1200
@@ -58,6 +58,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/Makefile.in squid-3.1.0.14/helpers/Makefile.in
--- squid-3.1.0.13/helpers/Makefile.in 2009-08-05 01:32:30.000000000 +1200
+++ squid-3.1.0.14/helpers/Makefile.in 2009-09-27 15:28:48.000000000 +1200
@@ -58,6 +58,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/negotiate_auth/Makefile.in squid-3.1.0.14/helpers/negotiate_auth/Makefile.in
--- squid-3.1.0.13/helpers/negotiate_auth/Makefile.in 2009-08-05 01:32:34.000000000 +1200
+++ squid-3.1.0.14/helpers/negotiate_auth/Makefile.in 2009-09-27 15:28:52.000000000 +1200
@@ -62,6 +62,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/negotiate_auth/mswin_sspi/libnegotiatessp.c squid-3.1.0.14/helpers/negotiate_auth/mswin_sspi/libnegotiatessp.c
--- squid-3.1.0.13/helpers/negotiate_auth/mswin_sspi/libnegotiatessp.c 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/helpers/negotiate_auth/mswin_sspi/libnegotiatessp.c 2009-09-27 15:28:26.000000000 +1200
@@ -22,7 +22,8 @@
#include
#endif
-void hex_dump(void *data, int size)
+void
+hex_dump(void *data, int size)
{
/* dumps size bytes of *data to stdout. Looks like:
* [0000] 75 6E 6B 6E 6F 77 6E 20
@@ -39,39 +40,37 @@
int n;
char bytestr[4] = {0};
char addrstr[10] = {0};
- char hexstr[ 16*3 + 5] = {0};
- char charstr[16*1 + 5] = {0};
- for (n=1;n<=size;n++) {
- if (n%16 == 1) {
+ char hexstr[16 * 3 + 5] = {0};
+ char charstr[16 * 1 + 5] = {0};
+ for (n = 1; n <= size; n++) {
+ if (n % 16 == 1) {
/* store address for this line */
snprintf(addrstr, sizeof(addrstr), "%.4x",
- ((unsigned int)p-(unsigned int)data) );
+ ((unsigned int) p - (unsigned int) data));
}
-
c = *p;
if (xisalnum(c) == 0) {
c = '.';
}
-
/* store hex str (for left side) */
snprintf(bytestr, sizeof(bytestr), "%02X ", *p);
- strncat(hexstr, bytestr, sizeof(hexstr)-strlen(hexstr)-1);
+ strncat(hexstr, bytestr, sizeof(hexstr) - strlen(hexstr) - 1);
/* store char str (for right side) */
snprintf(bytestr, sizeof(bytestr), "%c", c);
- strncat(charstr, bytestr, sizeof(charstr)-strlen(charstr)-1);
+ strncat(charstr, bytestr, sizeof(charstr) - strlen(charstr) - 1);
- if (n%16 == 0) {
+ if (n % 16 == 0) {
/* line completed */
fprintf(stderr, "[%4.4s] %-50.50s %s\n", addrstr, hexstr, charstr);
hexstr[0] = 0;
charstr[0] = 0;
- } else if (n%8 == 0) {
+ } else if (n % 8 == 0) {
/* half line: add whitespaces */
- strncat(hexstr, " ", sizeof(hexstr)-strlen(hexstr)-1);
- strncat(charstr, " ", sizeof(charstr)-strlen(charstr)-1);
+ strncat(hexstr, " ", sizeof(hexstr) - strlen(hexstr) - 1);
+ strncat(charstr, " ", sizeof(charstr) - strlen(charstr) - 1);
}
- p++; /* next byte */
+ p++; /* next byte */
}
if (strlen(hexstr) > 0) {
@@ -80,4 +79,3 @@
}
}
}
-
diff -u -r -N squid-3.1.0.13/helpers/negotiate_auth/mswin_sspi/Makefile.in squid-3.1.0.14/helpers/negotiate_auth/mswin_sspi/Makefile.in
--- squid-3.1.0.13/helpers/negotiate_auth/mswin_sspi/Makefile.in 2009-08-05 01:32:34.000000000 +1200
+++ squid-3.1.0.14/helpers/negotiate_auth/mswin_sspi/Makefile.in 2009-09-27 15:28:52.000000000 +1200
@@ -80,6 +80,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/negotiate_auth/mswin_sspi/negotiate_auth.c squid-3.1.0.14/helpers/negotiate_auth/mswin_sspi/negotiate_auth.c
--- squid-3.1.0.13/helpers/negotiate_auth/mswin_sspi/negotiate_auth.c 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/helpers/negotiate_auth/mswin_sspi/negotiate_auth.c 2009-09-27 15:28:26.000000000 +1200
@@ -1,3 +1,4 @@
+
/*
* mswin_negotiate_auth: helper for Negotiate Authentication for Squid Cache
*
@@ -79,15 +80,15 @@
helperfail(const char *reason)
{
#if FAIL_DEBUG
- fail_debug_enabled =1;
+ fail_debug_enabled = 1;
#endif
SEND2("BH %s", reason);
}
/*
- options:
- -d enable debugging.
- -v enable verbose Negotiate packet debugging.
+ * options:
+ * -d enable debugging.
+ * -v enable verbose Negotiate packet debugging.
*/
char *my_program_name = NULL;
@@ -108,7 +109,7 @@
{
int opt, had_error = 0;
- opterr =0;
+ opterr = 0;
while (-1 != (opt = getopt(argc, argv, "hdv"))) {
switch (opt) {
case 'd':
@@ -142,15 +143,15 @@
char *c, *decoded;
int plen, status;
int oversized = 0;
- char * ErrorMessage;
- static char cred[SSP_MAX_CRED_LEN+1];
+ char *ErrorMessage;
+ static char cred[SSP_MAX_CRED_LEN + 1];
BOOL Done = FALSE;
try_again:
if (fgets(buf, BUFFER_SIZE, stdin) == NULL)
return 0;
- c = memchr(buf, '\n', BUFFER_SIZE); /* safer against overrun than strchr */
+ c = memchr(buf, '\n', BUFFER_SIZE); /* safer against overrun than strchr */
if (c) {
if (oversized) {
helperfail("illegal request received");
@@ -183,27 +184,30 @@
return 1;
}
/* Obtain server blob against SSPI */
- plen = (strlen(buf) - 3) * 3 / 4; /* we only need it here. Optimization */
+ plen = (strlen(buf) - 3) * 3 / 4; /* we only need it here. Optimization */
c = (char *) SSP_MakeNegotiateBlob(decoded, plen, &Done, &status, cred);
if (status == SSP_OK) {
if (Done) {
- lc(cred); /* let's lowercase them for our convenience */
+ lc(cred); /* let's lowercase them for our convenience */
have_serverblob = 0;
Done = FALSE;
if (Negotiate_packet_debug_enabled) {
- printf("AF %s %s\n",c,cred);
decoded = base64_decode(c);
debug("sending 'AF' %s to squid with data:\n", cred);
- hex_dump(decoded, (strlen(c) * 3) / 4);
+ if (c != NULL)
+ hex_dump(decoded, (strlen(c) * 3) / 4);
+ else
+ fprintf(stderr, "No data available.\n");
+ printf("AF %s %s\n", c, cred);
} else
SEND3("AF %s %s", c, cred);
} else {
if (Negotiate_packet_debug_enabled) {
- printf("TT %s\n",c);
decoded = base64_decode(c);
debug("sending 'TT' to squid with data:\n");
hex_dump(decoded, (strlen(c) * 3) / 4);
+ printf("TT %s\n", c);
} else {
SEND2("TT %s", c);
}
@@ -213,7 +217,6 @@
helperfail("can't obtain server blob");
return 1;
}
-
if (memcmp(buf, "KK ", 3) == 0) { /* authenticate-request */
if (!have_serverblob) {
helperfail("invalid server blob");
@@ -228,9 +231,8 @@
SEND("NA * Packet format error, couldn't base64-decode");
return 1;
}
-
/* check against SSPI */
- plen = (strlen(buf) - 3) * 3 / 4; /* we only need it here. Optimization */
+ plen = (strlen(buf) - 3) * 3 / 4; /* we only need it here. Optimization */
c = (char *) SSP_ValidateNegotiateCredentials(decoded, plen, &Done, &status, cred);
if (status == SSP_ERROR) {
@@ -241,8 +243,8 @@
FORMAT_MESSAGE_IGNORE_INSERTS,
NULL,
GetLastError(),
- MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), /* Default language */
- (LPTSTR) &ErrorMessage,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), /* Default language */
+ (LPTSTR) & ErrorMessage,
0,
NULL);
if (ErrorMessage[strlen(ErrorMessage) - 1] == '\n')
@@ -253,32 +255,34 @@
LocalFree(ErrorMessage);
return 1;
}
-
if (Done) {
lc(cred); /* let's lowercase them for our convenience */
have_serverblob = 0;
Done = FALSE;
if (Negotiate_packet_debug_enabled) {
- printf("AF %s %s\n",c,cred);
decoded = base64_decode(c);
debug("sending 'AF' %s to squid with data:\n", cred);
- hex_dump(decoded, (strlen(c) * 3) / 4);
+ if (c != NULL)
+ hex_dump(decoded, (strlen(c) * 3) / 4);
+ else
+ fprintf(stderr, "No data available.\n");
+ printf("AF %s %s\n", c, cred);
} else {
SEND3("AF %s %s", c, cred);
}
return 1;
} else {
if (Negotiate_packet_debug_enabled) {
- printf("TT %s\n",c);
decoded = base64_decode(c);
debug("sending 'TT' to squid with data:\n");
hex_dump(decoded, (strlen(c) * 3) / 4);
+ printf("TT %s\n", c);
} else
SEND2("TT %s", c);
return 1;
}
- } else { /* not an auth-request */
+ } else { /* not an auth-request */
helperfail("illegal request received");
fprintf(stderr, "Illegal request received: '%s'\n", buf);
return 1;
diff -u -r -N squid-3.1.0.13/helpers/negotiate_auth/mswin_sspi/negotiate.h squid-3.1.0.14/helpers/negotiate_auth/mswin_sspi/negotiate.h
--- squid-3.1.0.13/helpers/negotiate_auth/mswin_sspi/negotiate.h 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/helpers/negotiate_auth/mswin_sspi/negotiate.h 2009-09-27 15:28:26.000000000 +1200
@@ -67,21 +67,21 @@
#if FAIL_DEBUG
if (debug_enabled || fail_debug_enabled) {
#else
- if (debug_enabled) {
+if (debug_enabled) {
#endif
- va_list args;
+ va_list args;
- va_start(args,format);
- fprintf(stderr, "negotiate-auth[%d]: ",getpid());
- vfprintf(stderr, format, args);
- va_end(args);
+ va_start(args,format);
+ fprintf(stderr, "negotiate-auth[%d]: ",getpid());
+ vfprintf(stderr, format, args);
+ va_end(args);
#if FAIL_DEBUG
- fail_debug_enabled = 0;
+ fail_debug_enabled = 0;
#endif
- }
+ }
#endif /* _SQUID_MSWIN_ */
#endif /* DEBUG */
- }
+}
#endif /* __GNUC__ */
diff -u -r -N squid-3.1.0.13/helpers/negotiate_auth/squid_kerb_auth/base64.c squid-3.1.0.14/helpers/negotiate_auth/squid_kerb_auth/base64.c
--- squid-3.1.0.13/helpers/negotiate_auth/squid_kerb_auth/base64.c 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/helpers/negotiate_auth/squid_kerb_auth/base64.c 2009-09-27 15:28:26.000000000 +1200
@@ -42,7 +42,7 @@
ska_base64_init();
val = c = 0;
- for (j = 0; *data ;data++) {
+ for (j = 0; *data ; data++) {
unsigned int k = ((unsigned char) *data) % BASE64_VALUE_SZ;
if (base64_value[k] < 0)
continue;
@@ -144,7 +144,7 @@
int i,j;
j=0;
- for (i=strlen(data)-1;i>=0;i--) {
+ for (i=strlen(data)-1; i>=0; i--) {
if (data[i] == '=') j++;
if (data[i] != '=') break;
}
diff -u -r -N squid-3.1.0.13/helpers/negotiate_auth/squid_kerb_auth/configure squid-3.1.0.14/helpers/negotiate_auth/squid_kerb_auth/configure
--- squid-3.1.0.13/helpers/negotiate_auth/squid_kerb_auth/configure 2009-08-05 01:32:53.000000000 +1200
+++ squid-3.1.0.14/helpers/negotiate_auth/squid_kerb_auth/configure 2009-09-27 15:29:12.000000000 +1200
@@ -3766,7 +3766,7 @@
ac_includedir=""
ac_libdir=""
case $sys in
- Linux) if test "x$enableval" != "xyes" -a "x$enableval" != "x" ; then
+ Linux) if test "x$enableval" != "xyes" -a "x$enableval" != "x" -a "x$enableval" != "xno" ; then
ac_libdir=$enableval/lib
ac_includedir=$enableval/include
else
@@ -4479,7 +4479,7 @@
fi
fi
;;
- *) if test "x$enableval" != "xyes" -a "x$enableval" != "x" ; then
+ *) if test "x$enableval" != "xyes" -a "x$enableval" != "x" -a "x$enableval" != "xno" ; then
ac_libdir=$enableval/lib
ac_includedir=$enableval/include
CPPFLAGS="$CPPFLAGS -I$ac_includedir"
@@ -4972,7 +4972,7 @@
ac_includedir=""
ac_libdir=""
case $sys in
- Linux) if test "x$enableval" != "xyes" -a "x$enableval" != "x" ; then
+ Linux) if test "x$enableval" != "xyes" -a "x$enableval" != "x" -a "x$enableval" != "xno" ; then
ac_libdir=$enableval/lib
ac_includedir=$enableval/include
else
@@ -5303,7 +5303,7 @@
fi
fi
;;
- *) if test "x$enableval" != "xyes" -a "x$enableval" != "x" ; then
+ *) if test "x$enableval" != "xyes" -a "x$enableval" != "x" -a "x$enableval" != "xno" ; then
ac_libdir=$enableval/lib
ac_includedir=$enableval/include
CPPFLAGS="$CPPFLAGS -I$ac_includedir"
@@ -7033,8 +7033,7 @@
fi
-eval ac_p_include=$includedir
-CPPFLAGS="$CPPFLAGS -I$ac_p_include -I../../../ -I../../../include/ -I$squid_dir/include -I$squid_dir/src -I$squid_dir"
+CPPFLAGS="$CPPFLAGS -I../../../ -I../../../include/ -I$squid_dir/include -I$squid_dir/src -I$squid_dir"
{ $as_echo "$as_me:$LINENO: checking for SQUID at '$squid_dir' " >&5
$as_echo_n "checking for SQUID at '$squid_dir' ... " >&6; }
if test "${ac_cv_have_squid+set}" = set; then
@@ -7104,8 +7103,7 @@
fi
{ $as_echo "$as_me:$LINENO: result: $ac_cv_have_squid" >&5
$as_echo "$ac_cv_have_squid" >&6; }
-eval ac_p_lib=$libdir
-LDFLAGS="$LDFLAGS -L../../../lib -L$ac_p_lib $w_flag$ac_p_lib$w_flag_2"
+LDFLAGS="$LDFLAGS -L../../../lib"
if test "x$ac_cv_have_squid" = "xyes"; then
cat >>confdefs.h <<\_ACEOF
diff -u -r -N squid-3.1.0.13/helpers/negotiate_auth/squid_kerb_auth/configure.in squid-3.1.0.14/helpers/negotiate_auth/squid_kerb_auth/configure.in
--- squid-3.1.0.13/helpers/negotiate_auth/squid_kerb_auth/configure.in 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/helpers/negotiate_auth/squid_kerb_auth/configure.in 2009-09-27 15:28:26.000000000 +1200
@@ -69,7 +69,7 @@
ac_includedir=""
ac_libdir=""
case $sys in
- Linux) if test "x$enableval" != "xyes" -a "x$enableval" != "x" ; then
+ Linux) if test "x$enableval" != "xyes" -a "x$enableval" != "x" -a "x$enableval" != "xno" ; then
ac_libdir=$enableval/lib
ac_includedir=$enableval/include
else
@@ -101,7 +101,7 @@
fi
fi
;;
- *) if test "x$enableval" != "xyes" -a "x$enableval" != "x" ; then
+ *) if test "x$enableval" != "xyes" -a "x$enableval" != "x" -a "x$enableval" != "xno" ; then
ac_libdir=$enableval/lib
ac_includedir=$enableval/include
CPPFLAGS="$CPPFLAGS -I$ac_includedir"
@@ -142,7 +142,7 @@
ac_includedir=""
ac_libdir=""
case $sys in
- Linux) if test "x$enableval" != "xyes" -a "x$enableval" != "x" ; then
+ Linux) if test "x$enableval" != "xyes" -a "x$enableval" != "x" -a "x$enableval" != "xno" ; then
ac_libdir=$enableval/lib
ac_includedir=$enableval/include
else
@@ -179,7 +179,7 @@
fi
fi
;;
- *) if test "x$enableval" != "xyes" -a "x$enableval" != "x" ; then
+ *) if test "x$enableval" != "xyes" -a "x$enableval" != "x" -a "x$enableval" != "xno" ; then
ac_libdir=$enableval/lib
ac_includedir=$enableval/include
CPPFLAGS="$CPPFLAGS -I$ac_includedir"
@@ -424,8 +424,7 @@
[ squid_dir=$withval ]
)
-eval ac_p_include=$includedir
-CPPFLAGS="$CPPFLAGS -I$ac_p_include -I../../../ -I../../../include/ -I$squid_dir/include -I$squid_dir/src -I$squid_dir"
+CPPFLAGS="$CPPFLAGS -I../../../ -I../../../include/ -I$squid_dir/include -I$squid_dir/src -I$squid_dir"
AC_CACHE_CHECK([for SQUID at '$squid_dir' ],ac_cv_have_squid,[
AC_TRY_RUN([
#include
@@ -439,8 +438,7 @@
ac_cv_have_squid=yes,
ac_cv_have_squid=no)
])
-eval ac_p_lib=$libdir
-LDFLAGS="$LDFLAGS -L../../../lib -L$ac_p_lib $w_flag$ac_p_lib$w_flag_2"
+LDFLAGS="$LDFLAGS -L../../../lib"
if test "x$ac_cv_have_squid" = "xyes"; then
AC_DEFINE(HAVE_SQUID,1, [Define to 1 if you have SQUID])
AC_CHECK_HEADERS(getaddrinfo.h getnameinfo.h util.h)
diff -u -r -N squid-3.1.0.13/helpers/negotiate_auth/squid_kerb_auth/spnegohelp/derparse.h squid-3.1.0.14/helpers/negotiate_auth/squid_kerb_auth/spnegohelp/derparse.h
--- squid-3.1.0.13/helpers/negotiate_auth/squid_kerb_auth/spnegohelp/derparse.h 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/helpers/negotiate_auth/squid_kerb_auth/spnegohelp/derparse.h 2009-09-27 15:28:26.000000000 +1200
@@ -26,8 +26,7 @@
// C++ Specific
#if defined(__cplusplus)
-extern "C"
-{
+extern "C" {
#endif
/* Identifier Types */
diff -u -r -N squid-3.1.0.13/helpers/negotiate_auth/squid_kerb_auth/spnegohelp/spnego.h squid-3.1.0.14/helpers/negotiate_auth/squid_kerb_auth/spnegohelp/spnego.h
--- squid-3.1.0.13/helpers/negotiate_auth/squid_kerb_auth/spnegohelp/spnego.h 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/helpers/negotiate_auth/squid_kerb_auth/spnegohelp/spnego.h 2009-09-27 15:28:26.000000000 +1200
@@ -27,8 +27,7 @@
// C++ Specific
#if defined(__cplusplus)
-extern "C"
-{
+extern "C" {
#endif
// Type Definitions
diff -u -r -N squid-3.1.0.13/helpers/negotiate_auth/squid_kerb_auth/spnegohelp/spnegohelp.h squid-3.1.0.14/helpers/negotiate_auth/squid_kerb_auth/spnegohelp/spnegohelp.h
--- squid-3.1.0.13/helpers/negotiate_auth/squid_kerb_auth/spnegohelp/spnegohelp.h 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/helpers/negotiate_auth/squid_kerb_auth/spnegohelp/spnegohelp.h 2009-09-27 15:28:26.000000000 +1200
@@ -11,8 +11,7 @@
#define SPNEGOHELP_H
#ifdef __cplusplus
-extern "C"
-{
+extern "C" {
#endif
#include
@@ -29,9 +28,9 @@
*/
int makeNegTokenTarg (const unsigned char * kerberosToken,
- size_t kerberosTokenLength,
- const unsigned char ** negTokenTarg,
- size_t * negTokenTargLength);
+ size_t kerberosTokenLength,
+ const unsigned char ** negTokenTarg,
+ size_t * negTokenTargLength);
/* -----------------------------------------------------------------------------
* parseNegTokenInit parses an RFC 2478 SPNEGO NegTokenInit (token) to extract
diff -u -r -N squid-3.1.0.13/helpers/negotiate_auth/squid_kerb_auth/spnegohelp/spnegoparse.h squid-3.1.0.14/helpers/negotiate_auth/squid_kerb_auth/spnegohelp/spnegoparse.h
--- squid-3.1.0.13/helpers/negotiate_auth/squid_kerb_auth/spnegohelp/spnegoparse.h 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/helpers/negotiate_auth/squid_kerb_auth/spnegohelp/spnegoparse.h 2009-09-27 15:28:26.000000000 +1200
@@ -26,8 +26,7 @@
// C++ Specific
#if defined(__cplusplus)
-extern "C"
-{
+extern "C" {
#endif
// Indicates if we copy data when creating a SPNEGO_TOKEN structure or not
diff -u -r -N squid-3.1.0.13/helpers/negotiate_auth/squid_kerb_auth/squid_kerb_auth.c squid-3.1.0.14/helpers/negotiate_auth/squid_kerb_auth/squid_kerb_auth.c
--- squid-3.1.0.13/helpers/negotiate_auth/squid_kerb_auth/squid_kerb_auth.c 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/helpers/negotiate_auth/squid_kerb_auth/squid_kerb_auth.c 2009-09-27 15:28:26.000000000 +1200
@@ -511,10 +511,10 @@
goto cleanup;
user=xmalloc(output_token.length+1);
if (user == NULL) {
- if (debug)
- fprintf(stderr, "%s| %s: Not enough memory\n", LogTime(), PROGRAM);
- fprintf(stdout, "BH Not enough memory\n");
- goto cleanup;
+ if (debug)
+ fprintf(stderr, "%s| %s: Not enough memory\n", LogTime(), PROGRAM);
+ fprintf(stdout, "BH Not enough memory\n");
+ goto cleanup;
}
memcpy(user,output_token.value,output_token.length);
user[output_token.length]='\0';
@@ -544,10 +544,10 @@
*/
user=xmalloc(output_token.length+1);
if (user == NULL) {
- if (debug)
- fprintf(stderr, "%s| %s: Not enough memory\n", LogTime(), PROGRAM);
- fprintf(stdout, "BH Not enough memory\n");
- goto cleanup;
+ if (debug)
+ fprintf(stderr, "%s| %s: Not enough memory\n", LogTime(), PROGRAM);
+ fprintf(stdout, "BH Not enough memory\n");
+ goto cleanup;
}
memcpy(user,output_token.value,output_token.length);
user[output_token.length]='\0';
diff -u -r -N squid-3.1.0.13/helpers/ntlm_auth/fakeauth/Makefile.in squid-3.1.0.14/helpers/ntlm_auth/fakeauth/Makefile.in
--- squid-3.1.0.13/helpers/ntlm_auth/fakeauth/Makefile.in 2009-08-05 01:32:34.000000000 +1200
+++ squid-3.1.0.14/helpers/ntlm_auth/fakeauth/Makefile.in 2009-09-27 15:28:53.000000000 +1200
@@ -82,6 +82,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/ntlm_auth/fakeauth/ntlm.h squid-3.1.0.14/helpers/ntlm_auth/fakeauth/ntlm.h
--- squid-3.1.0.13/helpers/ntlm_auth/fakeauth/ntlm.h 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/helpers/ntlm_auth/fakeauth/ntlm.h 2009-09-27 15:28:26.000000000 +1200
@@ -131,21 +131,21 @@
#if FAIL_DEBUG
if (debug_enabled || fail_debug_enabled) {
#else
- if (debug_enabled) {
+if (debug_enabled) {
#endif
- va_list args;
+ va_list args;
- va_start(args, format);
- fprintf(stderr, "ntlm-auth[%ld]: ", (long)getpid());
- vfprintf(stderr, format, args);
- va_end(args);
+ va_start(args, format);
+ fprintf(stderr, "ntlm-auth[%ld]: ", (long)getpid());
+ vfprintf(stderr, format, args);
+ va_end(args);
#if FAIL_DEBUG
- fail_debug_enabled = 0;
+ fail_debug_enabled = 0;
#endif
- }
+ }
#endif /* _SQUID_MSWIN_ */
#endif /* DEBUG */
- }
+}
#endif /* __GNUC__ */
diff -u -r -N squid-3.1.0.13/helpers/ntlm_auth/Makefile.in squid-3.1.0.14/helpers/ntlm_auth/Makefile.in
--- squid-3.1.0.13/helpers/ntlm_auth/Makefile.in 2009-08-05 01:32:34.000000000 +1200
+++ squid-3.1.0.14/helpers/ntlm_auth/Makefile.in 2009-09-27 15:28:52.000000000 +1200
@@ -62,6 +62,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/ntlm_auth/mswin_sspi/libntlmssp.c squid-3.1.0.14/helpers/ntlm_auth/mswin_sspi/libntlmssp.c
--- squid-3.1.0.13/helpers/ntlm_auth/mswin_sspi/libntlmssp.c 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/helpers/ntlm_auth/mswin_sspi/libntlmssp.c 2009-09-27 15:28:26.000000000 +1200
@@ -340,7 +340,7 @@
char addrstr[10] = {0};
char hexstr[ 16*3 + 5] = {0};
char charstr[16*1 + 5] = {0};
- for (n=1;n<=size;n++) {
+ for (n=1; n<=size; n++) {
if (n%16 == 1) {
/* store address for this line */
snprintf(addrstr, sizeof(addrstr), "%.4x",
diff -u -r -N squid-3.1.0.13/helpers/ntlm_auth/mswin_sspi/Makefile.in squid-3.1.0.14/helpers/ntlm_auth/mswin_sspi/Makefile.in
--- squid-3.1.0.13/helpers/ntlm_auth/mswin_sspi/Makefile.in 2009-08-05 01:32:34.000000000 +1200
+++ squid-3.1.0.14/helpers/ntlm_auth/mswin_sspi/Makefile.in 2009-09-27 15:28:53.000000000 +1200
@@ -80,6 +80,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/ntlm_auth/mswin_sspi/ntlm.h squid-3.1.0.14/helpers/ntlm_auth/mswin_sspi/ntlm.h
--- squid-3.1.0.13/helpers/ntlm_auth/mswin_sspi/ntlm.h 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/helpers/ntlm_auth/mswin_sspi/ntlm.h 2009-09-27 15:28:26.000000000 +1200
@@ -68,21 +68,21 @@
#if FAIL_DEBUG
if (debug_enabled || fail_debug_enabled) {
#else
- if (debug_enabled) {
+if (debug_enabled) {
#endif
- va_list args;
+ va_list args;
- va_start(args,format);
- fprintf(stderr, "ntlm-auth[%d]: ",getpid());
- vfprintf(stderr, format, args);
- va_end(args);
+ va_start(args,format);
+ fprintf(stderr, "ntlm-auth[%d]: ",getpid());
+ vfprintf(stderr, format, args);
+ va_end(args);
#if FAIL_DEBUG
- fail_debug_enabled = 0;
+ fail_debug_enabled = 0;
#endif
- }
+ }
#endif /* _SQUID_MSWIN_ */
#endif /* DEBUG */
- }
+}
#endif /* __GNUC__ */
diff -u -r -N squid-3.1.0.13/helpers/ntlm_auth/no_check/Makefile.in squid-3.1.0.14/helpers/ntlm_auth/no_check/Makefile.in
--- squid-3.1.0.13/helpers/ntlm_auth/no_check/Makefile.in 2009-08-05 01:32:34.000000000 +1200
+++ squid-3.1.0.14/helpers/ntlm_auth/no_check/Makefile.in 2009-09-27 15:28:53.000000000 +1200
@@ -58,6 +58,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/ntlm_auth/smb_lm/Makefile.in squid-3.1.0.14/helpers/ntlm_auth/smb_lm/Makefile.in
--- squid-3.1.0.13/helpers/ntlm_auth/smb_lm/Makefile.in 2009-08-05 01:32:35.000000000 +1200
+++ squid-3.1.0.14/helpers/ntlm_auth/smb_lm/Makefile.in 2009-09-27 15:28:53.000000000 +1200
@@ -90,6 +90,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/ntlm_auth/smb_lm/smbval/Makefile.in squid-3.1.0.14/helpers/ntlm_auth/smb_lm/smbval/Makefile.in
--- squid-3.1.0.13/helpers/ntlm_auth/smb_lm/smbval/Makefile.in 2009-08-05 01:32:35.000000000 +1200
+++ squid-3.1.0.14/helpers/ntlm_auth/smb_lm/smbval/Makefile.in 2009-09-27 15:28:53.000000000 +1200
@@ -75,6 +75,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/helpers/ntlm_auth/smb_lm/smbval/rfcnb-util.c squid-3.1.0.14/helpers/ntlm_auth/smb_lm/smbval/rfcnb-util.c
--- squid-3.1.0.13/helpers/ntlm_auth/smb_lm/smbval/rfcnb-util.c 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/helpers/ntlm_auth/smb_lm/smbval/rfcnb-util.c 2009-09-27 15:28:26.000000000 +1200
@@ -186,7 +186,7 @@
/* Get a packet of size n */
struct RFCNB_Pkt *
- RFCNB_Alloc_Pkt(int n) {
+RFCNB_Alloc_Pkt(int n) {
RFCNB_Pkt *pkt;
if ((pkt = (struct RFCNB_Pkt *) malloc(sizeof(struct RFCNB_Pkt))) == NULL) {
diff -u -r -N squid-3.1.0.13/helpers/ntlm_auth/smb_lm/smbval/smblib-priv.h squid-3.1.0.14/helpers/ntlm_auth/smb_lm/smbval/smblib-priv.h
--- squid-3.1.0.13/helpers/ntlm_auth/smb_lm/smbval/smblib-priv.h 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/helpers/ntlm_auth/smb_lm/smbval/smblib-priv.h 2009-09-27 15:28:26.000000000 +1200
@@ -638,7 +638,7 @@
#endif
extern SMB_Tree_Handle SMB_TreeConnect(SMB_Handle_Type con, SMB_Tree_Handle tree,
- char *path, char *password, char const *dev);
+ char *path, char *password, char const *dev);
extern int SMB_Init(void);
extern void SMB_Get_My_Name(char *name, int len);
@@ -646,7 +646,7 @@
extern int SMB_Discon(SMB_Handle_Type Con_Handle, BOOL KeepHandle);
extern int SMB_Logon_Server(SMB_Handle_Type Con_Handle, char *UserName,
- char *PassWord, char *UserDomain, int precrypted);
+ char *PassWord, char *UserDomain, int precrypted);
extern int SMB_Get_Error_Msg(int msg, char *msgbuf, int len);
diff -u -r -N squid-3.1.0.13/icons/Makefile.in squid-3.1.0.14/icons/Makefile.in
--- squid-3.1.0.13/icons/Makefile.in 2009-08-05 01:32:35.000000000 +1200
+++ squid-3.1.0.14/icons/Makefile.in 2009-09-27 15:28:53.000000000 +1200
@@ -59,6 +59,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/include/asn1.h squid-3.1.0.14/include/asn1.h
--- squid-3.1.0.13/include/asn1.h 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/include/asn1.h 2009-09-27 15:28:26.000000000 +1200
@@ -69,8 +69,7 @@
#define IS_EXTENSION_ID(byte) (((byte) & ASN_EXTENSION_ID) == ASN_EXTENSION_ID)
#ifdef __cplusplus
-extern "C"
-{
+extern "C" {
#endif
u_char *asn_build_header(u_char *, int *, u_char, int);
diff -u -r -N squid-3.1.0.13/include/autoconf.h.in squid-3.1.0.14/include/autoconf.h.in
--- squid-3.1.0.13/include/autoconf.h.in 2009-08-05 01:32:28.000000000 +1200
+++ squid-3.1.0.14/include/autoconf.h.in 2009-09-27 15:28:47.000000000 +1200
@@ -333,6 +333,9 @@
*/
#undef HAVE_LINUX_NETFILTER_IPV4_IP_TPROXY_H
+/* Define to 1 if you have the <linux/types.h> header file. */
+#undef HAVE_LINUX_TYPES_H
+
/* long is defined in system headers */
#undef HAVE_LONG
@@ -859,6 +862,9 @@
with caution. */
#undef KILL_PARENT_OPT
+/* if libcap2 is available and not clashing with libc */
+#undef LIBCAP_BROKEN
+
/* If libresolv.a has been hacked to export _dns_ttl_ */
#undef LIBRESOLV_DNS_TTL_HACK
@@ -1017,6 +1023,12 @@
/* DiskIO modules are expected to be available. */
#undef USE_DISKIO
+/* Whether POSIX AIO support is needed. Automatic */
+#undef USE_DISKIO_AIO
+
+/* Whether pthreads support is needed. Automatic */
+#undef USE_DISKIO_DISKTHREADS
+
/* Use dnsserver processes instead of the internal DNS protocol support */
#undef USE_DNSSERVERS
diff -u -r -N squid-3.1.0.13/include/parse.h squid-3.1.0.14/include/parse.h
--- squid-3.1.0.13/include/parse.h 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/include/parse.h 2009-09-27 15:28:26.000000000 +1200
@@ -67,8 +67,7 @@
#define TYPE_NULL 10
#ifdef __cplusplus
-extern "C"
-{
+extern "C" {
#endif
void init_mib(char *);
diff -u -r -N squid-3.1.0.13/include/radix.h squid-3.1.0.14/include/radix.h
--- squid-3.1.0.13/include/radix.h 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/include/radix.h 2009-09-27 15:28:26.000000000 +1200
@@ -113,34 +113,31 @@
int rnh_pktsize; /* permit, but not require fixed keys */
struct squid_radix_node *(*rnh_addaddr) /* add based on sockaddr */
- (void *v, void *mask,
-
- struct squid_radix_node_head * head, struct squid_radix_node nodes[]);
+ (void *v, void *mask, struct squid_radix_node_head * head, struct squid_radix_node nodes[]);
struct squid_radix_node *(*rnh_addpkt) /* add based on packet hdr */
- (void *v, void *mask,
-
- struct squid_radix_node_head * head, struct squid_radix_node nodes[]);
+ (void *v, void *mask, struct squid_radix_node_head * head, struct squid_radix_node nodes[]);
struct squid_radix_node *(*rnh_deladdr) /* remove based on sockaddr */
- (void *v, void *mask, struct squid_radix_node_head * head);
+ (void *v, void *mask, struct squid_radix_node_head * head);
struct squid_radix_node *(*rnh_delpkt) /* remove based on packet hdr */
- (void *v, void *mask, struct squid_radix_node_head * head);
+ (void *v, void *mask, struct squid_radix_node_head * head);
struct squid_radix_node *(*rnh_matchaddr) /* locate based on sockaddr */
- (void *v, struct squid_radix_node_head * head);
+ (void *v, struct squid_radix_node_head * head);
struct squid_radix_node *(*rnh_lookup) /* locate based on sockaddr */
- (void *v, void *mask, struct squid_radix_node_head * head);
+ (void *v, void *mask, struct squid_radix_node_head * head);
struct squid_radix_node *(*rnh_matchpkt) /* locate based on packet hdr */
- (void *v, struct squid_radix_node_head * head);
+ (void *v, struct squid_radix_node_head * head);
+
int (*rnh_walktree) /* traverse tree */
(struct squid_radix_node_head * head, int (*f) (struct squid_radix_node *, void *), void *w);
diff -u -r -N squid-3.1.0.13/include/snmp_api_error.h squid-3.1.0.14/include/snmp_api_error.h
--- squid-3.1.0.13/include/snmp_api_error.h 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/include/snmp_api_error.h 2009-09-27 15:28:26.000000000 +1200
@@ -52,8 +52,7 @@
#define SNMPERR_LAST -16 /* Last error message */
#ifdef __cplusplus
-extern "C"
-{
+extern "C" {
#endif
/* extern int snmp_errno */
diff -u -r -N squid-3.1.0.13/include/snmp_api.h squid-3.1.0.14/include/snmp_api.h
--- squid-3.1.0.13/include/snmp_api.h 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/include/snmp_api.h 2009-09-27 15:28:26.000000000 +1200
@@ -52,22 +52,19 @@
#define SNMP_DEFAULT_MACREPEATERS 0
#ifdef __cplusplus
-extern "C"
-{
+extern "C" {
#endif
/* Parse the buffer pointed to by arg3, of length arg4, into pdu arg2.
*
* Returns the community of the incoming PDU, or NULL
*/
- u_char *snmp_parse(struct snmp_session *, struct snmp_pdu *,
- u_char *, int);
+ u_char *snmp_parse(struct snmp_session *, struct snmp_pdu *, u_char *, int);
/* Encode pdu arg2 into buffer arg3. arg4 contains the size of
* the buffer.
*/
- int snmp_build(struct snmp_session *, struct snmp_pdu *,
- u_char *, int *);
+ int snmp_build(struct snmp_session *, struct snmp_pdu *, u_char *, int *);
/*
* struct snmp_session *snmp_open(session)
diff -u -r -N squid-3.1.0.13/include/snmp_api_util.h squid-3.1.0.14/include/snmp_api_util.h
--- squid-3.1.0.13/include/snmp_api_util.h 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/include/snmp_api_util.h 2009-09-27 15:28:26.000000000 +1200
@@ -85,8 +85,7 @@
#endif /* HAVE_SRAND */
#ifdef __cplusplus
-extern "C"
-{
+extern "C" {
#endif
int snmp_get_socket_session(struct snmp_session *session_);
diff -u -r -N squid-3.1.0.13/include/snmp_client.h squid-3.1.0.14/include/snmp_client.h
--- squid-3.1.0.13/include/snmp_client.h 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/include/snmp_client.h 2009-09-27 15:28:26.000000000 +1200
@@ -41,8 +41,7 @@
};
#ifdef __cplusplus
-extern "C"
-{
+extern "C" {
#endif
extern struct synch_state snmp_synch_state;
diff -u -r -N squid-3.1.0.13/include/snmp_coexist.h squid-3.1.0.14/include/snmp_coexist.h
--- squid-3.1.0.13/include/snmp_coexist.h 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/include/snmp_coexist.h 2009-09-27 15:28:26.000000000 +1200
@@ -31,8 +31,7 @@
**********************************************************************/
#ifdef __cplusplus
-extern "C"
-{
+extern "C" {
#endif
int snmp_coexist_V2toV1(struct snmp_pdu *);
diff -u -r -N squid-3.1.0.13/include/snmp_error.h squid-3.1.0.14/include/snmp_error.h
--- squid-3.1.0.13/include/snmp_error.h 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/include/snmp_error.h 2009-09-27 15:28:26.000000000 +1200
@@ -57,8 +57,7 @@
#ifdef __cplusplus
-extern "C"
-{
+extern "C" {
#endif
const char *snmp_errstring(int);
diff -u -r -N squid-3.1.0.13/include/snmp_msg.h squid-3.1.0.14/include/snmp_msg.h
--- squid-3.1.0.13/include/snmp_msg.h 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/include/snmp_msg.h 2009-09-27 15:28:26.000000000 +1200
@@ -36,8 +36,7 @@
#define SNMP_VERSION_2 1 /* RFC 1901 */
#ifdef __cplusplus
-extern "C"
-{
+extern "C" {
#endif
u_char *snmp_msg_Encode(u_char *, int *, u_char *, int, int, struct snmp_pdu *);
diff -u -r -N squid-3.1.0.13/include/snmp_pdu.h squid-3.1.0.14/include/snmp_pdu.h
--- squid-3.1.0.13/include/snmp_pdu.h 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/include/snmp_pdu.h 2009-09-27 15:28:26.000000000 +1200
@@ -39,8 +39,7 @@
**********************************************************************/
#ifdef __cplusplus
-extern "C"
-{
+extern "C" {
#endif
typedef struct sockaddr_in ipaddr;
diff -u -r -N squid-3.1.0.13/include/snmp_util.h squid-3.1.0.14/include/snmp_util.h
--- squid-3.1.0.13/include/snmp_util.h 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/include/snmp_util.h 2009-09-27 15:28:26.000000000 +1200
@@ -7,8 +7,7 @@
#include "config.h"
#ifdef __cplusplus
-extern "C"
-{
+extern "C" {
#endif
/* call a function at regular intervals (in seconds): */
diff -u -r -N squid-3.1.0.13/include/snmp_vars.h squid-3.1.0.14/include/snmp_vars.h
--- squid-3.1.0.13/include/snmp_vars.h 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/include/snmp_vars.h 2009-09-27 15:28:26.000000000 +1200
@@ -32,8 +32,7 @@
#include "asn1.h"
#ifdef __cplusplus
-extern "C"
-{
+extern "C" {
#endif
struct variable_list {
diff -u -r -N squid-3.1.0.13/include/squid_mswin.h squid-3.1.0.14/include/squid_mswin.h
--- squid-3.1.0.13/include/squid_mswin.h 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/include/squid_mswin.h 1970-01-01 12:00:00.000000000 +1200
@@ -1,724 +0,0 @@
-/*
- * $Id$
- *
- * AUTHOR: Andrey Shorin
- * AUTHOR: Guido Serassio
- *
- * SQUID Web Proxy Cache http://www.squid-cache.org/
- * ----------------------------------------------------------
- *
- * Squid is the result of efforts by numerous individuals from
- * the Internet community; see the CONTRIBUTORS file for full
- * details. Many organizations have provided support for Squid's
- * development; see the SPONSORS file for full details. Squid is
- * Copyrighted (C) 2001 by the Regents of the University of
- * California; see the COPYRIGHT file for full details. Squid
- * incorporates software developed and/or copyrighted by other
- * sources; see the CREDITS file for full details.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
- *
- */
-#ifndef _INC_SQUID_MSWIN_H
-#define _INC_SQUID_MSWIN_H
-
-#include "config.h"
-
-#ifdef _SQUID_WIN32_
-
-#define ACL WindowsACL
-#if defined(_MSC_VER) /* Microsoft C Compiler ONLY */
-#if _MSC_VER == 1400
-#define _CRT_SECURE_NO_DEPRECATE
-#pragma warning( disable : 4290 )
-#pragma warning( disable : 4996 )
-#endif
-#endif
-
-#if defined _FILE_OFFSET_BITS && _FILE_OFFSET_BITS == 64
-# define __USE_FILE_OFFSET64 1
-#endif
-
-#if defined(_MSC_VER) /* Microsoft C Compiler ONLY */
-
-typedef unsigned char uint8_t;
-typedef unsigned short uint16_t;
-typedef unsigned int uint32_t;
-typedef unsigned __int64 uint64_t;
-
-typedef long pid_t;
-
-#if defined __USE_FILE_OFFSET64
-typedef int64_t off_t;
-typedef uint64_t ino_t;
-
-#else
-typedef long off_t;
-typedef unsigned long ino_t;
-
-#endif
-
-#define INT64_MAX _I64_MAX
-#define INT64_MIN _I64_MIN
-
-#include "default_config_file.h"
-/* Some tricks for MS Compilers */
-#define __STDC__ 1
-#define THREADLOCAL __declspec(thread)
-
-#elif defined(__GNUC__) /* gcc environment */
-
-#define THREADLOCAL __attribute__((section(".tls")))
-
-#endif
-
-#if defined(_MSC_VER) /* Microsoft C Compiler ONLY */
-#define alloca _alloca
-#endif
-#define chdir _chdir
-#define dup _dup
-#define dup2 _dup2
-#define fdopen _fdopen
-#if defined(_MSC_VER) /* Microsoft C Compiler ONLY */
-#define fileno _fileno
-#define fstat _fstati64
-#endif
-#define ftruncate WIN32_ftruncate
-#define getcwd _getcwd
-#define getpid _getpid
-#define getrusage WIN32_getrusage
-#if defined(_MSC_VER) /* Microsoft C Compiler ONLY */
-#define lseek _lseeki64
-#define memccpy _memccpy
-#define mkdir(p) _mkdir(p)
-#define mktemp _mktemp
-#endif
-#define pclose _pclose
-#define pipe WIN32_pipe
-#define popen _popen
-#define putenv _putenv
-#define setmode _setmode
-#define sleep(t) Sleep((t)*1000)
-#if defined(_MSC_VER) /* Microsoft C Compiler ONLY */
-#define snprintf _snprintf
-#define stat _stati64
-#define strcasecmp _stricmp
-#define strdup _strdup
-#define strlwr _strlwr
-#define strncasecmp _strnicmp
-#define tempnam _tempnam
-#endif
-#define truncate WIN32_truncate
-#define umask _umask
-#define unlink _unlink
-#if defined(_MSC_VER) /* Microsoft C Compiler ONLY */
-#define vsnprintf _vsnprintf
-#endif
-
-#define O_RDONLY _O_RDONLY
-#define O_WRONLY _O_WRONLY
-#define O_RDWR _O_RDWR
-#define O_APPEND _O_APPEND
-
-#define O_CREAT _O_CREAT
-#define O_TRUNC _O_TRUNC
-#define O_EXCL _O_EXCL
-
-#define O_TEXT _O_TEXT
-#define O_BINARY _O_BINARY
-#define O_RAW _O_BINARY
-#define O_TEMPORARY _O_TEMPORARY
-#define O_NOINHERIT _O_NOINHERIT
-#define O_SEQUENTIAL _O_SEQUENTIAL
-#define O_RANDOM _O_RANDOM
-#define O_NDELAY 0
-
-#define S_IFMT _S_IFMT
-#define S_IFDIR _S_IFDIR
-#define S_IFCHR _S_IFCHR
-#define S_IFREG _S_IFREG
-#define S_IREAD _S_IREAD
-#define S_IWRITE _S_IWRITE
-#define S_IEXEC _S_IEXEC
-
-#define S_IRWXO 007
-#if defined(_MSC_VER) /* Microsoft C Compiler ONLY */
-#define S_ISDIR(m) (((m) & _S_IFDIR) == _S_IFDIR)
-#endif
-
-#define SIGHUP 1 /* hangup */
-#define SIGKILL 9 /* kill (cannot be caught or ignored) */
-#define SIGBUS 10 /* bus error */
-#define SIGPIPE 13 /* write on a pipe with no one to read it */
-#define SIGCHLD 20 /* to parent on child stop or exit */
-#define SIGUSR1 30 /* user defined signal 1 */
-#define SIGUSR2 31 /* user defined signal 2 */
-
-typedef unsigned short int ushort;
-typedef int uid_t;
-typedef int gid_t;
-
-struct passwd {
- char *pw_name; /* user name */
- char *pw_passwd; /* user password */
- uid_t pw_uid; /* user id */
- gid_t pw_gid; /* group id */
- char *pw_gecos; /* real name */
- char *pw_dir; /* home directory */
- char *pw_shell; /* shell program */
-};
-
-struct group {
- char *gr_name; /* group name */
- char *gr_passwd; /* group password */
- gid_t gr_gid; /* group id */
- char **gr_mem; /* group members */
-};
-
-struct statfs {
- long f_type; /* type of filesystem (see below) */
- long f_bsize; /* optimal transfer block size */
- long f_blocks; /* total data blocks in file system */
- long f_bfree; /* free blocks in fs */
- long f_bavail; /* free blocks avail to non-superuser */
- long f_files; /* total file nodes in file system */
- long f_ffree; /* free file nodes in fs */
- long f_fsid; /* file system id */
- long f_namelen; /* maximum length of filenames */
- long f_spare[6]; /* spare for later */
-};
-
-#ifndef HAVE_GETTIMEOFDAY
-struct timezone {
- int tz_minuteswest; /* minutes west of Greenwich */
- int tz_dsttime; /* type of dst correction */
-};
-#endif
-
-#define CHANGE_FD_SETSIZE 1
-#if CHANGE_FD_SETSIZE && SQUID_MAXFD > DEFAULT_FD_SETSIZE
-#define FD_SETSIZE SQUID_MAXFD
-#endif
-
-#include
-#include
-#include
-#if defined(_MSC_VER) /* Microsoft C Compiler ONLY */
-#include
-#endif
-#include
-#if (EAI_NODATA == EAI_NONAME)
-#undef EAI_NODATA
-#define EAI_NODATA WSANO_DATA
-#endif
-#if defined(_MSC_VER) /* Microsoft C Compiler ONLY */
-/* Hack to suppress compiler warnings on FD_SET() & FD_CLR() */
-#pragma warning (push)
-#pragma warning (disable:4142)
-#endif
-/* prevent inclusion of wingdi.h */
-#define NOGDI
-#include <windows.h>
-#if defined(_MSC_VER) /* Microsoft C Compiler ONLY */
-#pragma warning (pop)
-#endif
-#include
-#include
-
-typedef char * caddr_t;
-
-#undef FD_CLOSE
-#undef FD_OPEN
-#undef FD_READ
-#undef FD_WRITE
-#define EISCONN WSAEISCONN
-#define EINPROGRESS WSAEINPROGRESS
-#define EWOULDBLOCK WSAEWOULDBLOCK
-#define EALREADY WSAEALREADY
-#define ETIMEDOUT WSAETIMEDOUT
-#define ECONNREFUSED WSAECONNREFUSED
-#define ECONNRESET WSAECONNRESET
-#define ENOTCONN WSAENOTCONN
-#define ERESTART WSATRY_AGAIN
-#define EAFNOSUPPORT WSAEAFNOSUPPORT
-
-#undef h_errno
-#define h_errno errno /* we'll set it ourselves */
-
-#undef FD_CLR
-#define FD_CLR(fd, set) do { \
- u_int __i; \
- SOCKET __sock = _get_osfhandle(fd); \
- for (__i = 0; __i < ((fd_set FAR *)(set))->fd_count ; __i++) { \
- if (((fd_set FAR *)(set))->fd_array[__i] == __sock) { \
- while (__i < ((fd_set FAR *)(set))->fd_count-1) { \
- ((fd_set FAR *)(set))->fd_array[__i] = \
- ((fd_set FAR *)(set))->fd_array[__i+1]; \
- __i++; \
- } \
- ((fd_set FAR *)(set))->fd_count--; \
- break; \
- } \
- } \
-} while(0)
-
-#undef FD_SET
-#define FD_SET(fd, set) do { \
- u_int __i; \
- SOCKET __sock = _get_osfhandle(fd); \
- for (__i = 0; __i < ((fd_set FAR *)(set))->fd_count; __i++) { \
- if (((fd_set FAR *)(set))->fd_array[__i] == (__sock)) { \
- break; \
- } \
- } \
- if (__i == ((fd_set FAR *)(set))->fd_count) { \
- if (((fd_set FAR *)(set))->fd_count < FD_SETSIZE) { \
- ((fd_set FAR *)(set))->fd_array[__i] = (__sock); \
- ((fd_set FAR *)(set))->fd_count++; \
- } \
- } \
-} while(0)
-
-#undef FD_ISSET
-#define FD_ISSET(fd, set) Win32__WSAFDIsSet(fd, (fd_set FAR *)(set))
-
-/* internal to Microsoft CRTLIB */
-typedef struct {
- long osfhnd; /* underlying OS file HANDLE */
- char osfile; /* attributes of file (e.g., open in text mode?) */
- char pipech; /* one char buffer for handles opened on pipes */
-#ifdef _MT
- int lockinitflag;
- CRITICAL_SECTION lock;
-#endif /* _MT */
-} ioinfo;
-#define IOINFO_L2E 5
-#define IOINFO_ARRAY_ELTS (1 << IOINFO_L2E)
-#define _pioinfo(i) ( __pioinfo[(i) >> IOINFO_L2E] + ((i) & (IOINFO_ARRAY_ELTS - 1)) )
-#define _osfile(i) ( _pioinfo(i)->osfile )
-#define _osfhnd(i) ( _pioinfo(i)->osfhnd )
-#define FOPEN 0x01 /* file handle open */
-
-#if defined(_MSC_VER) /* Microsoft C Compiler ONLY */
-
-SQUIDCEXTERN _CRTIMP ioinfo * __pioinfo[];
-SQUIDCEXTERN int __cdecl _free_osfhnd(int);
-
-#elif defined(__MINGW32__) /* MinGW environment */
-
-__MINGW_IMPORT ioinfo * __pioinfo[];
-SQUIDCEXTERN int _free_osfhnd(int);
-
-#endif
-
-SQUIDCEXTERN THREADLOCAL int ws32_result;
-
-#define strerror(e) WIN32_strerror(e)
-
-#ifdef __cplusplus
-
-inline
-int close(int fd)
-{
- char l_so_type[sizeof(int)];
- int l_so_type_siz = sizeof(l_so_type);
- SOCKET sock = _get_osfhandle(fd);
-
- if (::getsockopt(sock, SOL_SOCKET, SO_TYPE, l_so_type, &l_so_type_siz) == 0) {
- int result = 0;
- if (closesocket(sock) == SOCKET_ERROR) {
- errno = WSAGetLastError();
- result = 1;
- }
- _free_osfhnd(fd);
- _osfile(fd) = 0;
- return result;
- } else
- return _close(fd);
-}
-
-#if defined(_MSC_VER) /* Microsoft C Compiler ONLY */
-
-#ifndef _S_IREAD
-#define _S_IREAD 0x0100
-#endif
-
-#ifndef _S_IWRITE
-#define _S_IWRITE 0x0080
-#endif
-
-inline
-int open(const char *filename, int oflag, int pmode = 0)
-{
- return _open(filename, oflag, pmode & (_S_IREAD | _S_IWRITE));
-}
-#endif
-
-inline
-int read(int fd, void * buf, size_t siz)
-{
- char l_so_type[sizeof(int)];
- int l_so_type_siz = sizeof(l_so_type);
- SOCKET sock = _get_osfhandle(fd);
-
- if (::getsockopt(sock, SOL_SOCKET, SO_TYPE, l_so_type, &l_so_type_siz) == 0)
- return ::recv(sock, (char FAR *) buf, (int)siz, 0);
- else
- return _read(fd, buf, (unsigned int)siz);
-}
-
-inline
-int write(int fd, const void * buf, size_t siz)
-{
- char l_so_type[sizeof(int)];
- int l_so_type_siz = sizeof(l_so_type);
- SOCKET sock = _get_osfhandle(fd);
-
- if (::getsockopt(sock, SOL_SOCKET, SO_TYPE, l_so_type, &l_so_type_siz) == 0)
- return ::send(sock, (char FAR *) buf, siz, 0);
- else
- return _write(fd, buf, siz);
-}
-
-inline
-char *index(const char *s, int c)
-{
- return (char *)strchr(s,c);
-}
-
-/** \cond AUTODOCS-IGNORE */
-namespace Squid
-{
-/** \endcond */
-
-inline
-int accept(int s, struct sockaddr * a, size_t * l)
-{
- SOCKET result;
- if ((result = ::accept(_get_osfhandle(s), a, (int *)l)) == INVALID_SOCKET) {
- if (WSAEMFILE == (errno = WSAGetLastError()))
- errno = EMFILE;
- return -1;
- } else
- return _open_osfhandle(result, 0);
-}
-
-inline
-int bind(int s, struct sockaddr * n, int l)
-{
- if (::bind(_get_osfhandle(s),n,l) == SOCKET_ERROR) {
- errno = WSAGetLastError();
- return -1;
- } else
- return 0;
-}
-
-inline
-int connect(int s, const struct sockaddr * n, int l)
-{
- if (::connect(_get_osfhandle(s),n,l) == SOCKET_ERROR) {
- if (WSAEMFILE == (errno = WSAGetLastError()))
- errno = EMFILE;
- return -1;
- } else
- return 0;
-}
-
-inline
-struct hostent * gethostbyname (const char *n) {
- HOSTENT FAR * result;
- if ((result = ::gethostbyname(n)) == NULL)
- errno = WSAGetLastError();
- return result;
-}
-#define gethostbyname(n) Squid::gethostbyname(n)
-
-inline
-SERVENT FAR* getservbyname (const char * n, const char * p)
-{
- SERVENT FAR * result;
- if ((result = ::getservbyname(n, p)) == NULL)
- errno = WSAGetLastError();
- return result;
-}
-#define getservbyname(n,p) Squid::getservbyname(n,p)
-
-inline
-HOSTENT FAR * gethostbyaddr(const char * a, int l, int t)
-{
- HOSTENT FAR * result;
- if ((result = ::gethostbyaddr(a, l, t)) == NULL)
- errno = WSAGetLastError();
- return result;
-}
-#define gethostbyaddr(a,l,t) Squid::gethostbyaddr(a,l,t)
-
-inline
-int getsockname(int s, struct sockaddr * n, size_t * l)
-{
- if ((::getsockname(_get_osfhandle(s), n, (int *)l)) == SOCKET_ERROR) {
- errno = WSAGetLastError();
- return -1;
- } else
- return 0;
-}
-
-inline
-int gethostname(char * n, size_t l)
-{
- if ((::gethostname(n, l)) == SOCKET_ERROR) {
- errno = WSAGetLastError();
- return -1;
- } else
- return 0;
-}
-#define gethostname(n,l) Squid::gethostname(n,l)
-
-inline
-int getsockopt(int s, int l, int o, void * v, int * n)
-{
- Sleep(1);
- if ((::getsockopt(_get_osfhandle(s), l, o,(char *) v, n)) == SOCKET_ERROR) {
- errno = WSAGetLastError();
- return -1;
- } else
- return 0;
-}
-
-/* Simple ioctl() emulation */
-inline
-int ioctl(int s, int c, void * a)
-{
- if ((::ioctlsocket(_get_osfhandle(s), c, (u_long FAR *)a)) == SOCKET_ERROR) {
- errno = WSAGetLastError();
- return -1;
- } else
- return 0;
-}
-
-inline
-int ioctlsocket(int s, long c, u_long FAR * a)
-{
- if ((::ioctlsocket(_get_osfhandle(s), c, a)) == SOCKET_ERROR) {
- errno = WSAGetLastError();
- return -1;
- } else
- return 0;
-}
-
-inline
-int listen(int s, int b)
-{
- if (::listen(_get_osfhandle(s), b) == SOCKET_ERROR) {
- if (WSAEMFILE == (errno = WSAGetLastError()))
- errno = EMFILE;
- return -1;
- } else
- return 0;
-}
-#define listen(s,b) Squid::listen(s,b)
-
-inline
-int recv(int s, void * b, size_t l, int f)
-{
- int result;
- if ((result = ::recv(_get_osfhandle(s), (char *)b, l, f)) == SOCKET_ERROR) {
- errno = WSAGetLastError();
- return -1;
- } else
- return result;
-}
-
-inline
-int recvfrom(int s, void * b, size_t l, int f, struct sockaddr * fr, size_t * fl)
-{
- int result;
- if ((result = ::recvfrom(_get_osfhandle(s), (char *)b, l, f, fr, (int *)fl)) == SOCKET_ERROR) {
- errno = WSAGetLastError();
- return -1;
- } else
- return result;
-}
-
-inline
-int select(int n, fd_set * r, fd_set * w, fd_set * e, struct timeval * t)
-{
- int result;
- if ((result = ::select(n,r,w,e,t)) == SOCKET_ERROR) {
- errno = WSAGetLastError();
- return -1;
- } else
- return result;
-}
-#define select(n,r,w,e,t) Squid::select(n,r,w,e,t)
-
-inline
-int send(int s, const void * b, size_t l, int f)
-{
- int result;
- if ((result = ::send(_get_osfhandle(s), (char *)b, l, f)) == SOCKET_ERROR) {
- errno = WSAGetLastError();
- return -1;
- } else
- return result;
-}
-
-inline
-int sendto(int s, const void * b, size_t l, int f, const struct sockaddr * t, int tl)
-{
- int result;
- if ((result = ::sendto(_get_osfhandle(s), (char *)b, l, f, t, tl)) == SOCKET_ERROR) {
- errno = WSAGetLastError();
- return -1;
- } else
- return result;
-}
-
-inline
-int setsockopt(SOCKET s, int l, int o, const char * v, int n)
-{
- SOCKET socket;
-
- socket = ((s == INVALID_SOCKET) ? s : (SOCKET)_get_osfhandle((int)s));
-
- if (::setsockopt(socket, l, o, v, n) == SOCKET_ERROR) {
- errno = WSAGetLastError();
- return -1;
- } else
- return 0;
-}
-#define setsockopt(s,l,o,v,n) Squid::setsockopt(s,l,o,v,n)
-
-inline
-int shutdown(int s, int h)
-{
- if (::shutdown(_get_osfhandle(s),h) == SOCKET_ERROR) {
- errno = WSAGetLastError();
- return -1;
- } else
- return 0;
-}
-
-inline
-int socket(int f, int t, int p)
-{
- SOCKET result;
- if ((result = ::socket(f, t, p)) == INVALID_SOCKET) {
- if (WSAEMFILE == (errno = WSAGetLastError()))
- errno = EMFILE;
- return -1;
- } else
- return _open_osfhandle(result, 0);
-}
-#define socket(f,t,p) Squid::socket(f,t,p)
-
-inline
-int WSAAsyncSelect(int s, HWND h, unsigned int w, long e)
-{
- if (::WSAAsyncSelect(_get_osfhandle(s), h, w, e) == SOCKET_ERROR) {
- errno = WSAGetLastError();
- return -1;
- } else
- return 0;
-}
-
-#undef WSADuplicateSocket
-inline
-int WSADuplicateSocket(int s, DWORD n, LPWSAPROTOCOL_INFO l)
-{
-#ifdef UNICODE
- if (::WSADuplicateSocketW(_get_osfhandle(s), n, l) == SOCKET_ERROR) {
-#else
- if (::WSADuplicateSocketA(_get_osfhandle(s), n, l) == SOCKET_ERROR) {
-#endif
- errno = WSAGetLastError();
- return -1;
- } else
- return 0;
-}
-
-#undef WSASocket
-inline
-int WSASocket(int a, int t, int p, LPWSAPROTOCOL_INFO i, GROUP g, DWORD f) {
- SOCKET result;
-#ifdef UNICODE
- if ((result = ::WSASocketW(a, t, p, i, g, f)) == INVALID_SOCKET) {
-#else
- if ((result = ::WSASocketA(a, t, p, i, g, f)) == INVALID_SOCKET) {
-#endif
- if (WSAEMFILE == (errno = WSAGetLastError()))
- errno = EMFILE;
- return -1;
- } else
- return _open_osfhandle(result, 0);
-}
-
-} /* namespace Squid */
-
-#else /* #ifdef __cplusplus */
-#define connect(s,n,l) \
- (SOCKET_ERROR == connect(_get_osfhandle(s),n,l) ? \
- (WSAEMFILE == (errno = WSAGetLastError()) ? errno = EMFILE : -1, -1) : 0)
-#define gethostbyname(n) \
- (NULL == ((HOSTENT FAR*)(ws32_result = (int)gethostbyname(n))) ? \
- (errno = WSAGetLastError()), (HOSTENT FAR*)NULL : (HOSTENT FAR*)ws32_result)
-#define gethostname(n,l) \
- (SOCKET_ERROR == gethostname(n,l) ? \
- (errno = WSAGetLastError()), -1 : 0)
-#define recv(s,b,l,f) \
- (SOCKET_ERROR == (ws32_result = recv(_get_osfhandle(s),b,l,f)) ? \
- (errno = WSAGetLastError()), -1 : ws32_result)
-#define sendto(s,b,l,f,t,tl) \
- (SOCKET_ERROR == (ws32_result = sendto(_get_osfhandle(s),b,l,f,t,tl)) ? \
- (errno = WSAGetLastError()), -1 : ws32_result)
-#define select(n,r,w,e,t) \
- (SOCKET_ERROR == (ws32_result = select(n,r,w,e,t)) ? \
- (errno = WSAGetLastError()), -1 : ws32_result)
-#define socket(f,t,p) \
- (INVALID_SOCKET == ((SOCKET)(ws32_result = (int)socket(f,t,p))) ? \
- ((WSAEMFILE == (errno = WSAGetLastError()) ? errno = EMFILE : -1), -1) : \
- (SOCKET)_open_osfhandle(ws32_result,0))
-#define write _write /* Needed in util.c */
-#define open _open /* Needed in win32lib.c */
-#endif /* #ifdef __cplusplus */
-
-#define RUSAGE_SELF 0 /* calling process */
-#define RUSAGE_CHILDREN -1 /* terminated child processes */
-
-struct rusage {
- struct timeval ru_utime; /* user time used */
- struct timeval ru_stime; /* system time used */
- long ru_maxrss; /* integral max resident set size */
- long ru_ixrss; /* integral shared text memory size */
- long ru_idrss; /* integral unshared data size */
- long ru_isrss; /* integral unshared stack size */
- long ru_minflt; /* page reclaims */
- long ru_majflt; /* page faults */
- long ru_nswap; /* swaps */
- long ru_inblock; /* block input operations */
- long ru_oublock; /* block output operations */
- long ru_msgsnd; /* messages sent */
- long ru_msgrcv; /* messages received */
- long ru_nsignals; /* signals received */
- long ru_nvcsw; /* voluntary context switches */
- long ru_nivcsw; /* involuntary context switches */
-};
-
-#undef ACL
-
-
-#endif /* _SQUID_WIN32_ */
-
-#endif /* _INC_SQUID_MSWIN_H */
diff -u -r -N squid-3.1.0.13/include/squid_types.h squid-3.1.0.14/include/squid_types.h
--- squid-3.1.0.13/include/squid_types.h 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/include/squid_types.h 2009-09-27 15:28:26.000000000 +1200
@@ -60,6 +60,9 @@
#if HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
+#if HAVE_LINUX_TYPES_H
+#include <linux/types.h>
+#endif
#if STDC_HEADERS
#include <stdlib.h>
#include <stddef.h>
diff -u -r -N squid-3.1.0.13/include/version.h squid-3.1.0.14/include/version.h
--- squid-3.1.0.13/include/version.h 2009-08-05 01:32:53.000000000 +1200
+++ squid-3.1.0.14/include/version.h 2009-09-27 15:29:12.000000000 +1200
@@ -9,7 +9,7 @@
*/
#ifndef SQUID_RELEASE_TIME
-#define SQUID_RELEASE_TIME 1249392725
+#define SQUID_RELEASE_TIME 1254022102
#endif
#ifndef APP_SHORTNAME
diff -u -r -N squid-3.1.0.13/lib/dirent.c squid-3.1.0.14/lib/dirent.c
--- squid-3.1.0.13/lib/dirent.c 2009-08-05 01:32:10.000000000 +1200
+++ squid-3.1.0.14/lib/dirent.c 2009-09-27 15:28:27.000000000 +1200
@@ -155,7 +155,7 @@
* next entry in the directory.
*/
struct dirent *
- readdir(DIR * dirp) {
+readdir(DIR * dirp) {
errno = 0;
/* Check for valid DIR struct. */
diff -u -r -N squid-3.1.0.13/lib/getaddrinfo.c squid-3.1.0.14/lib/getaddrinfo.c
--- squid-3.1.0.13/lib/getaddrinfo.c 2009-08-05 01:32:11.000000000 +1200
+++ squid-3.1.0.14/lib/getaddrinfo.c 2009-09-27 15:28:27.000000000 +1200
@@ -83,7 +83,7 @@
#include "inet_pton.h"
static struct addrinfo *
- dup_addrinfo (struct addrinfo *info, void *addr, size_t addrlen) {
+dup_addrinfo (struct addrinfo *info, void *addr, size_t addrlen) {
struct addrinfo *ret;
ret = malloc (sizeof (struct addrinfo));
diff -u -r -N squid-3.1.0.13/lib/Makefile.in squid-3.1.0.14/lib/Makefile.in
--- squid-3.1.0.13/lib/Makefile.in 2009-08-05 01:32:35.000000000 +1200
+++ squid-3.1.0.14/lib/Makefile.in 2009-09-27 15:28:53.000000000 +1200
@@ -135,6 +135,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/lib/radix.c squid-3.1.0.14/lib/radix.c
--- squid-3.1.0.13/lib/radix.c 2009-08-05 01:32:11.000000000 +1200
+++ squid-3.1.0.14/lib/radix.c 2009-09-27 15:28:27.000000000 +1200
@@ -178,7 +178,7 @@
*/
struct squid_radix_node *
- squid_rn_search(void *v_arg, struct squid_radix_node *head) {
+squid_rn_search(void *v_arg, struct squid_radix_node *head) {
register struct squid_radix_node *x;
register caddr_t v;
@@ -192,7 +192,7 @@
}
struct squid_radix_node *
- squid_rn_search_m(void *v_arg, struct squid_radix_node *head, void *m_arg) {
+squid_rn_search_m(void *v_arg, struct squid_radix_node *head, void *m_arg) {
register struct squid_radix_node *x;
register caddr_t v = v_arg, m = m_arg;
@@ -233,7 +233,7 @@
}
struct squid_radix_node *
- squid_rn_lookup(void *v_arg, void *m_arg, struct squid_radix_node_head *head) {
+squid_rn_lookup(void *v_arg, void *m_arg, struct squid_radix_node_head *head) {
register struct squid_radix_node *x;
caddr_t netmask = 0;
@@ -271,7 +271,7 @@
}
struct squid_radix_node *
- squid_rn_match(void *v_arg, struct squid_radix_node_head *head) {
+squid_rn_match(void *v_arg, struct squid_radix_node_head *head) {
caddr_t v = v_arg;
register struct squid_radix_node *t = head->rnh_treetop, *x;
register caddr_t cp = v, cp2;
@@ -377,7 +377,7 @@
#endif
struct squid_radix_node *
- squid_rn_newpair(void *v, int b, struct squid_radix_node nodes[2]) {
+squid_rn_newpair(void *v, int b, struct squid_radix_node nodes[2]) {
register struct squid_radix_node *tt = nodes, *t = tt + 1;
t->rn_b = b;
t->rn_bmask = 0x80 >> (b & 7);
@@ -398,7 +398,7 @@
}
struct squid_radix_node *
- squid_rn_insert(void *v_arg, struct squid_radix_node_head *head, int *dupentry, struct squid_radix_node nodes[2]) {
+squid_rn_insert(void *v_arg, struct squid_radix_node_head *head, int *dupentry, struct squid_radix_node nodes[2]) {
caddr_t v = v_arg;
struct squid_radix_node *top = head->rnh_treetop;
int head_off = top->rn_off, vlen = (int) *((u_char *) v);
@@ -463,7 +463,7 @@
}
struct squid_radix_node *
- squid_rn_addmask(void *n_arg, int search, int skip) {
+squid_rn_addmask(void *n_arg, int search, int skip) {
caddr_t netmask = (caddr_t) n_arg;
register struct squid_radix_node *x;
register caddr_t cp, cplim;
@@ -548,7 +548,7 @@
}
static struct squid_radix_mask *
- rn_new_radix_mask(struct squid_radix_node *tt, struct squid_radix_mask *next) {
+rn_new_radix_mask(struct squid_radix_node *tt, struct squid_radix_mask *next) {
register struct squid_radix_mask *m;
squid_MKGet(m);
@@ -569,7 +569,7 @@
}
struct squid_radix_node *
- squid_rn_addroute(void *v_arg, void *n_arg, struct squid_radix_node_head *head, struct squid_radix_node treenodes[2]) {
+squid_rn_addroute(void *v_arg, void *n_arg, struct squid_radix_node_head *head, struct squid_radix_node treenodes[2]) {
caddr_t v = (caddr_t) v_arg, netmask = (caddr_t) n_arg;
register struct squid_radix_node *t, *x = NULL, *tt;
struct squid_radix_node *saved_tt, *top = head->rnh_treetop;
@@ -720,7 +720,7 @@
}
struct squid_radix_node *
- squid_rn_delete(void *v_arg, void *netmask_arg, struct squid_radix_node_head *head) {
+squid_rn_delete(void *v_arg, void *netmask_arg, struct squid_radix_node_head *head) {
register struct squid_radix_node *t, *p, *x, *tt;
struct squid_radix_mask *m, *saved_m, **mp;
struct squid_radix_node *dupedkey, *saved_tt, *top;
diff -u -r -N squid-3.1.0.13/lib/rfc1123.c squid-3.1.0.14/lib/rfc1123.c
--- squid-3.1.0.13/lib/rfc1123.c 2009-08-05 01:32:11.000000000 +1200
+++ squid-3.1.0.14/lib/rfc1123.c 2009-09-27 15:28:27.000000000 +1200
@@ -118,8 +118,8 @@
}
static struct tm *
- parse_date_elements(const char *day, const char *month, const char *year,
- const char *time, const char *zone) {
+parse_date_elements(const char *day, const char *month, const char *year,
+ const char *time, const char *zone) {
static struct tm tm;
char *t;
memset(&tm, 0, sizeof(tm));
@@ -150,7 +150,7 @@
}
static struct tm *
- parse_date(const char *str) {
+parse_date(const char *str) {
struct tm *tm;
static char tmp[64];
char *t;
@@ -224,7 +224,7 @@
#elif defined(_SQUID_MSWIN_)
#elif defined(_SQUID_SGI_)
#else
- extern long timezone;
+ extern long timezone;
#endif
/*
* The following assumes a fixed DST offset of 1 hour,
@@ -235,7 +235,7 @@
#if defined ( _timezone) || defined(_SQUID_WIN32_)
t -= (_timezone + dst);
#else
- t -= (timezone + dst);
+ t -= (timezone + dst);
#endif
}
#endif
diff -u -r -N squid-3.1.0.13/lib/rfc2617.c squid-3.1.0.14/lib/rfc2617.c
--- squid-3.1.0.13/lib/rfc2617.c 2009-08-05 01:32:11.000000000 +1200
+++ squid-3.1.0.14/lib/rfc2617.c 2009-09-27 15:28:27.000000000 +1200
@@ -168,7 +168,7 @@
SquidMD5Update(&Md5Ctx, pszMethod, strlen(pszMethod));
SquidMD5Update(&Md5Ctx, ":", 1);
SquidMD5Update(&Md5Ctx, pszDigestUri, strlen(pszDigestUri));
- if (strcasecmp(pszQop, "auth-int") == 0) {
+ if (pszQop && strcasecmp(pszQop, "auth-int") == 0) {
SquidMD5Update(&Md5Ctx, ":", 1);
SquidMD5Update(&Md5Ctx, HEntity, HASHHEXLEN);
}
@@ -182,7 +182,7 @@
SquidMD5Update(&Md5Ctx, ":", 1);
SquidMD5Update(&Md5Ctx, pszNonce, strlen(pszNonce));
SquidMD5Update(&Md5Ctx, ":", 1);
- if (*pszQop) {
+ if (pszQop) {
SquidMD5Update(&Md5Ctx, pszNonceCount, strlen(pszNonceCount));
SquidMD5Update(&Md5Ctx, ":", 1);
SquidMD5Update(&Md5Ctx, pszCNonce, strlen(pszCNonce));
diff -u -r -N squid-3.1.0.13/lib/util.c squid-3.1.0.14/lib/util.c
--- squid-3.1.0.13/lib/util.c 2009-08-05 01:32:11.000000000 +1200
+++ squid-3.1.0.14/lib/util.c 2009-09-27 15:28:27.000000000 +1200
@@ -106,7 +106,9 @@
#define DBG_GRAIN_SM (4)
#define DBG_OFFSET (DBG_SPLIT/DBG_GRAIN_SM - DBG_SPLIT/DBG_GRAIN )
#define DBG_MAXINDEX (DBG_MAXSIZE/DBG_GRAIN + DBG_OFFSET)
-// #define DBG_INDEX(sz) (sz
- Squid 3.0.STABLE16 release notes
+ Squid 3.1.0.13 release notes
-Squid 3.0.STABLE16 release notes
+Squid 3.1.0.13 release notes
Squid Developers
-This document contains the release notes for version 3.0 of Squid.
+This document contains the release notes for version 3.1 of Squid.
Squid is a WWW Cache application developed by the National Laboratory
for Applied Network Research and members of the Web Caching community.
-The Squid Team are pleased to announce the release of Squid-3.0.STABLE16.
+The Squid Team are pleased to announce the release of Squid-3.1.0.13 for testing.
This new release is available for download from
-http://www.squid-cache.org/Versions/v3/3.0/ or the
+http://www.squid-cache.org/Versions/v3/3.1/ or the
mirrors.
-A large number of the show-stopper bugs have been fixed along with general improvements to the ICAP support and additional Languages.
+A large number of the show-stopper bugs have been fixed along with general improvements to the ICAP support.
+While this release is not deemed ready for production use, we believe it is ready for wider testing by the community.
We welcome feedback and bug reports. If you find a bug, please see
http://wiki.squid-cache.org/SquidFaq/TroubleShooting#head-7067fc0034ce967e67911becaabb8c95a34d576d for how to submit a report with a stack trace.
-
+
Although this release is deemed good enough for use in many setups, please note the existence of
-open bugs against Squid-3.0.
-
-
+open bugs against Squid-3.1.
-The 3.0 change history can be
-viewed here.
+
-
+The 3.1 change history can be
+viewed here.
-
+
-Squid 3.0 represents a major rewrite of Squid and has a number of new features.
-The most important of these are:
+Squid 3.1 represents a new feature release above 3.0.
+The most important of these new features are:
-- Code converted to C++, with significant internal restructuring and rewrites.
-- ICAP implementation (RFC 3507 and www.icap-forum.org)
-- Edge Side Includes (ESI) implementation (www.esi.org)
+- New Version Numbering System
+- Minimal squid.conf improvements
+- Native IPv6 Support
+- Error Page Localization
+- Connection Pinning (for NTLM Auth Passthrough)
+- Quality of Service (QoS) Flow support
+- SSL Bump (for HTTPS Filtering and Adaptation)
+- eCAP Adaptation Module support
+- ICAP Bypass and Retry enhancements
Most user-facing changes are reflected in squid.conf (see below).
-Internet Content Adaptation Protocol (ICAP)
+
+
+Beginning with 3.1 the Squid Developers are trialling a new release numbering system.
+
+We have decided, based on input from interested users, to drop the Squid-2 terminology of
+(DEVEL, PRE, RC, and STABLE) from the release package names.
+These are replaced with a simpler 3-tier system based around the natural code development cycle.
+
+Daily generated snapshots of all current versions are provided as testing (old DEVEL) and bug-fix releases.
+These are numbered from their last release with a date appended.
+Snapshots generated from 3.HEAD continue to be highly volatile.
+
+Regular feature releases from Squid-3 will be branched out as sub-versions, such as this Squid-3.1.
+
+All of this is existing policy you should already be accustomed to. Now we get to the new numbering change.
+
+Initial branch packages will be generated with a 3.X.0.Z version as testing packages.
+Packages and Snapshots generated with these 3-dot numbers are expected to be relatively stable regarding feature behaviors.
+They are suitable for testing, but come without any guarantees under production loads. This replaces both the old PRE and RC packages.
+
+If a large number of bugs are found, several *.0.Z packages may be attempted before any is considered production-ready.
-Squid 3.0 supports ICAP/1.0. To enable ICAP support, use the --enable-icap-client ./configure option and icap_enable squid.conf option. You will also need to configure ICAP services in your squid.conf using icap_service, icap_class, and icap_access options. The following example instructs Squid to talk to two ICAP services, one for request and one for response adaptation:
+When one of these Squid-3.X.0.Z packages passes our bug-free standards a 3.X.Y numbered release will be made.
+
+We can only hope enough testing has been done to consider these ready for production use.
+As always we are fully dependent on people testing the previous packages and reporting all bugs.
+
+In support of all this are several squid-dev process changes which have been worked out over the last year.
+
+- We no longer accept new features into branches.
+Those are reserved for the next feature release.
+The cycle for major releases is hoped to be fast enough to suit some people's need for new features
+and others' need for stability in the branched releases.
+
+- We now audit and vote on all feature and major code additions,
+requiring at least two sets of developer eyes on any new feature before it is committed to 3.HEAD.
+This vastly reduces the number of bugs in all code.
+
+- We have implemented and continue to add more testing infrastructure.
+
+
+
+
+
+
+squid.conf has undergone a facelift.
+
+Don't worry, few operational changes have been made.
+Older configs are still expected to run in 3.1 with only the usual minor
+changes seen between major releases. Details on those are listed below.
+
+New users will be relieved to see a short squid.conf of 32 lines or fewer on clean installs.
+Many of the options have reasonable defaults but previously needed to be configured explicitly!
+These are now proper built-in defaults and no longer need to be in squid.conf unless changed.
+
+All of the option documentation has been offloaded to another file, squid.conf.documented, which
+contains a fully documented set of options previously cluttering up squid.conf itself.
+
+Package maintainers are provided with a second file squid.conf.default which as always contains the default
+config options provided on a clean install.
+
+
+
+
+Squid 3.1 supports IPv6.
+Details in
+The Squid wiki
+
+New Features for IPv6
+
+Squid handles localhost values separately. For the purpose of ACLs and also external
+connections ::1 is considered a separate IP from 127.0.0.1. This means all ACLs which
+define behaviour for localhost may need ::1/128 included.
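+
+For example, a dual-family localhost ACL might look like the following (a sketch only; adjust to match the ACL names your existing config already uses):
+
+  acl localhost src 127.0.0.1/32 ::1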
+
+Pinger has been upgraded to perform both ICMP and ICMPv6 as required.
+As a result of this and due to a change in the binary protocol format between them,
+new builds of squid are no longer backwards-compatible with old pinger binaries.
+You will need to perform "make install-pinger" again after installing squid.
+
+Peer and Client SNMP tables have been altered to handle IPv6 addresses.
+As a side effect of this the long-missing fix to show separate named peers on one IP
+has been integrated, making the SNMP peer table now produce correct output.
+The table structure change is identical for both IPv4-only and Dual modes but with
+IPv4-only simply not including any IPv6 entries. This means any third-party SNMP
+software which hard-coded the MIB paths needs to be upgraded for this Squid release.
+
+
+Limitations of IPv6 Support
+
+Specify a specific tcp_outgoing_address and the clients who match its ACL are limited
+to the IPv4 or IPv6 network that address belongs to. They are not permitted over the
+IPv4-IPv6 boundary. Some ACL voodoo can however be applied to explicitly route the
+IPv6/IPv4 bound traffic (DIRECT access) out an appropriate interface.
-icap_enable on
-icap_service service_req reqmod_precache 1 icap://127.0.0.1:1344/request
-icap_service service_resp respmod_precache 0 icap://127.0.0.1:1344/response
-icap_class class_req service_req
-icap_class class_resp service_resp
-icap_access class_req allow all
-icap_access class_resp allow all
+ acl toIP6 dst ipv6
+ tcp_outgoing_address 2001::1 toIP6
+ tcp_outgoing_address 10.0.0.1 !toIP6
-Please see squid.conf.default for more details about these and many other icap_* options.
+WCCP is not available (neither version 1 nor 2). It remains built into squid for use with IPv4 traffic but IPv6 cannot use it.
-Squid supports pre-cache request and pre-cache response vectoring points. The following ICAP features are supported: message preview, 204 responses outside of preview, request satisfaction, X-Transfer-* negotiation, persistent ICAP connections, client IP/credentials sharing, and optional bypass of certain service failures.
+Transparent Interception is done via NAT at the OS level and is not available in IPv6.
+Squid will ensure that any port set with the transparent, intercept, or tproxy options is an IPv4-only
+listening address. A wildcard can still be used but will not open as IPv6.
+To ensure that squid can accept IPv6 traffic on its default port, an alternative should
+be chosen to handle transparently intercepted traffic.
+
+ http_port 3128
+ http_port 8080 intercept
+
+
-No more than one ICAP service can be applied to an HTTP message. In other words, chaining or load balancing multiple services is not yet supported.
+The bundled NTLM Auth helper is IPv4-native between itself and the NTLM server.
+A new one will be needed for IPv6 traffic between the helper and server.
-Proxy-directed data trickling and patience pages are not supported yet.
+The bundled RADIUS Auth helper is IPv4-native, both in traffic between and data storage
+with the RADIUS server. A new helper will be needed for IPv6 RADIUS protocol.
-Following ICAP requirements, Squid never performs HTTP message adaptation without a successful and fresh ICAP OPTIONS response on file. A REQMOD or RESPMOD request will not be sent to a configured ICAP service until Squid receives a valid OPTIONS response from that service. If a service malfunctions or goes down, Squid may stop talking to the service for a while. Several squid.conf options can be used to tune the failure bypass algorithm (e.g., icap_service_failure_limit and icap_service_revival_delay).
-The bypass parameter of the icap_service squid.conf option determines whether Squid will try to bypass service failures. Most connectivity and preview-stage failures can be bypassed.
+
+
+Details in
+The Squid wiki
+
+Localization
+
+The error pages presented by squid may now be localized per-request to match the visitor's preferred language.
+
+The error_directory option needs to be removed from squid.conf for localization to take effect.
+
+For best coverage of languages, using the latest language pack of error files is recommended.
+Updates can be downloaded from
+www.squid-cache.org/Versions/langpack/
+
+The squid developers are interested in making squid available in a wide variety of languages.
+Contribution of new languages is encouraged.
+
+CSS Stylesheet controls
+
+To further enhance the visitor experience, all new translations have embedded CSS hooks for scalable per-site localization of the display.
+
+CSS display is controlled by updating the errorpage.css file installed into Squid's configuration directory
+or the err_page_stylesheet option in squid.conf.
+
+Custom error pages can also embed the CSS content by adding the %l tag to their headers.
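+
+For example, to point all error pages at a customized stylesheet (the path shown is illustrative only; see the err_page_stylesheet option described later in these notes):
+
+  err_page_stylesheet /etc/squid/errorpage.css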
+
+
+
-More information about ICAP can be found from the ICAP-forum website
-http://www.icap-forum.org
+Details in
+The Squid wiki
-Edge Side Includes (ESI)
+Squid 3.1 includes the much asked for Connection Pinning feature from Squid 2.6.
-ESI is an open specification of an markup language enabling reverse proxies
-to perform some simple XML based processing, offloading the final page assembly from the webserver and similar tasks.
+This feature is often called 'NTLM Passthru' since it is a giant workaround which permits Web servers to use
+Microsoft NTLM Authentication instead of HTTP standard authentication through a web proxy.
-More information about ESI can be found from the ESI website
-http://www.esi.org
-Some of the features found in Squid-2.6 is not available in Squid-3.
-Some have been dropped as they are not needed. Some have not yet been forward-ported to Squid-3 and may appear in a later release.
+Details in
+The Squid wiki
+
+The Zero Penalty Hit (ZPH) project created a patch to set QoS markers on outgoing traffic.
-- refresh_stale_hit option. Not yet ported.
-- ability to follow X-Forwarded-For. Not yet ported.
-- Full caching of Vary/ETag using If-None-Match. Only basic Vary cache supported. Not yet ported.
-- Mapping of server error messages. Not yet ported.
-- http_access2 access directive. Not yet ported.
-- Location header rewrites. Not yet ported.
-- wais_relay. Feature dropped as it's equivalent to cache_peer + cache_peer_access.
-- urlgroup. Not yet ported.
-- collapsed forwarding. Not yet ported.
-- stable Windows support. Irregularly maintained.
+- Allows you to select a TOS/Diffserv value to mark local hits.
+- Allows you to select a TOS/Diffserv value to mark peer hits.
+- Allows you to selectively mark only sibling or parent requests
+- Allows any HTTP response towards clients to have the TOS value of the response coming from
+the remote server preserved.
+For this to work correctly, you will need to patch your linux kernel with the TOS preserving ZPH patch.
+The kernel patch can be downloaded from
+http://zph.bratcheda.org
+- Allows you to mask certain bits in the TOS received from the remote server,
+before copying the value to the TOS sent towards clients.
-4.3 Logging changes
-
+Squid Configuration
-access.log
+Squid 3.1 needs to be configured with --enable-zph-qos for the ZPH QoS controls to be available.
-The TCP_REFRESH_HIT and TCP_REFRESH_MISS log types have been replaced because they were misleading (all refreshes need to query the origin server, so they could never be hits). The following log types have been introduced to replace them:
+The configuration options for 2.7 and 3.1 are based on different ZPH patches.
+The two releases' configuration differs and only the TOS mode settings are directly translatable.
-
-- TCP_REFRESH_UNMODIFIED
-
-
The requested object was cached but STALE. The IMS query for the object resulted in "304 not modified".
- - TCP_REFRESH_MODIFIED
-
-
The requested object was cached but STALE. The IMS query returned the new content.
-
+
+- qos_flows local-hit=0xff Responses found as a HIT in the local cache
+- qos_flows sibling-hit=0xff Responses found as a HIT in a sibling peer
+- qos_flows parent-hit=0xff Responses found as a HIT in a parent peer
+
-See
-http://www.squid-cache.org/Doc/FAQ/FAQ-6.html#ss6.7 for a definition of all log types.
+The lines above are separated for documentation purposes. qos_flows may be configured with all options on one line, or separated as shown.
+Also options may be repeated as many times as desired. Only the final configured value for any option will be used.
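+
+For example, all three hit markings may be given on a single line (the hex values are illustrative only):
+
+  qos_flows local-hit=0x30 sibling-hit=0x20 parent-hit=0x10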
+
+The legacy Option and Priority modes available in Squid-2.7 are no longer supported.
+
+
+2.7 SSL Bump (for HTTPS Filtering and Adaptation)
+
+
+Details in
+The Squid wiki
+
+Squid-in-the-middle decryption and encryption of straight CONNECT and transparently redirected SSL traffic,
+using configurable client- and server-side certificates.
+While decrypted, the traffic can be inspected using ICAP.
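+
+A minimal sketch of enabling SslBump, assuming a suitable certificate file and using illustrative ACL names (see the http_port sslBump flag and the ssl_bump directive described later in these notes):
+
+  http_port 3128 sslBump cert=/usr/local/squid/etc/proxy.pem
+  acl broken_sites dstdomain .example.com
+  ssl_bump deny broken_sites
+  ssl_bump allow all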
+
+
+2.8 eCAP Adaptation Module support
+
+
+Details in
+The Squid wiki
+
+2.9 ICAP Bypass and Retry enhancements
+
+
+Details in
+The Squid wiki
+
+ICAP is now extended with full bypass and dynamic chain routing to handle multiple
+adaptation services.
+
+ICAP Adaptation Service Sets and Chains
-5. Windows support
+An adaptation service set contains similar, interchangeable services. No more
+than one service is successfully applied. If one service is down or fails,
+Squid can use another service. Think "hot standby" or "spare" ICAP servers.
+
+Sets may seem similar to the existing "service bypass" feature, but they allow
+the failed adaptation to be retried and succeed if a replacement service is
+available. The services in a set may be all optional or all essential,
+depending on whether ignoring the entire set is acceptable. The mixture of
+optional and essential services in a set is supported, but yields results that
+may be difficult for a human to anticipate or interpret. Squid warns when it
+detects such a mixture.
+
+When performing adaptations with a set, failures at a service (optional or
+essential, does not matter) are retried with a different service if possible.
+If there are no more replacement services left to try, the failure is treated
+depending on whether the last service tried was optional or essential: Squid
+either tries to ignore the failure and proceed or terminates the master
+transaction.
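+
+As a rough sketch, a hot-standby pair of ICAP services could be configured along these lines (service names and URLs are illustrative only; the adaptation_service_set and adaptation_access directives are documented in section 4.1 below):
+
+  icap_service svc_primary reqmod_precache bypass=0 icap://icap1.example.com:1344/reqmod
+  icap_service svc_backup reqmod_precache bypass=0 icap://icap2.example.com:1344/reqmod
+  adaptation_service_set reqFilters svc_primary svc_backup
+  adaptation_access reqFilters allow all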
+
+An adaptation chain is a list of different services applied one after another,
+forming an adaptation pipeline. Services in a chain may be optional or
+essential. When performing adaptations, failures at an optional service are
+ignored as if the service did not exist in the chain.
+
+Request satisfaction terminates the adaptation chain.
+
+When forming a set or chain for a given transaction, optional down services are ignored as if they did not exist.
+
+ICAP and eCAP services can be mixed and matched in an adaptation set or chain.
+
+Dynamically form adaptation chains based on the ICAP X-Next-Services header.
+
+If an ICAP service with the routing=1 option in squid.conf returns an ICAP
+X-Next-Services response header during a successful REQMOD or RESPMOD
+transaction, Squid abandons the original adaptation plan and forms a new
+adaptation chain consisting of services identified in the X-Next-Services
+header value (using a comma-separated list of adaptation service names from
+squid.conf). The dynamically created chain is destroyed once the new plan is
+completed or replaced.
+
+This feature is useful when a custom adaptation service knows which other
+services are applicable to the message being adapted.
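+
+A service allowed to perform such routing might be declared as follows (the name and URL are illustrative only; see the icap_service routing= option in section 4.2):
+
+  icap_service router reqmod_precache routing=1 icap://icap.example.com:1344/route
+  adaptation_access router allow all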
+
+Limit adaptation iterations to adaptation_service_iteration_limit to protect
+Squid from infinite adaptation loops caused by ICAP services constantly
+including themselves in the dynamic adaptation chain they request. When the
+limit is exceeded, the master transaction fails. The default limit of 16
+should be large enough to not require an explicit configuration in most
+environments yet may be small enough to limit side-effects of loops.
+
+
+3. Windows support
This Squid version can run on Windows as a system service using the Cygwin emulation environment,
or can be compiled in Windows native mode using the MinGW + MSYS development environment. Windows NT 4 SP4 and later are supported.
On Windows 2000 and later the service is configured to use the Windows Service Recovery option
-restarting automatically after 60 seconds.
-
+restarting automatically after 60 seconds.
+
+3.1 Usage
+
-- Usage
-
Some new command line options were added for the Windows service support:
+
The service installation is made with the -i command line switch; it's possible to use the -f switch at
the same time to specify different config-file settings for the Squid Service, which will be
stored in the Windows Registry.
+
A new -n switch specifies the Windows Service Name, so multiple Squid instances are allowed.
"Squid" is the default when the switch is not used.
+
So, to install the service, the syntax is:
squid -i [-f file] [-n name]
+
Service uninstallation is made with -r command line switch with the appropriate -n switch.
+
The -k switch family must be used with the appropriate -f and -n switches, so the syntax is:
@@ -152,55 +442,58 @@
where service-name is the name specified with -n options at service install time.
+
To use the Squid original command line, the new -O switch must be used ONCE, the syntax is:
squid -O cmdline [-n service-name]
-
-If multiple service command line options must be specified, use quote. The -n switch is
+
+If multiple service command line options must be specified, use quotes. The -n switch is
needed only when a non default service name is in use.
+
Don't use the "Start parameters" in the Windows 2000/XP/2003 Service applet: they are
specific to Windows services functionality and Squid is not designed to understand them.
+
In the following example the command line of the "squidsvc" Squid service is set to "-D -u 3130":
squid -O "-D -u 3130" -n squidsvc
-
-
-
-
-- PSAPI.DLL (Process Status Helper) Considerations
-
+
+
3.2 PSAPI.DLL (Process Status Helper) Considerations
+
+
The process status helper functions make it easier for you to obtain information about
processes and device drivers running on Microsoft® Windows NT®/Windows® 2000. These
functions are available in PSAPI.DLL, which is distributed in the Microsoft® Platform
Software Development Kit (SDK). The same information is generally available through the
performance data in the registry, but it is more difficult to get to it. PSAPI.DLL is
freely redistributable.
+
PSAPI.DLL is available only on Windows NT, 2000, XP and 2003. The implementation in Squid is
aware of this, and tries to use it only on the right platform.
+
On Windows NT, PSAPI.DLL can be found as a component of many applications; if you need it,
you can find it in the Windows NT Resource Kit. If you have problems, it can be
downloaded from here:
http://download.microsoft.com/download/platformsdk/Redist/4.0.1371.1/NT4/EN-US/psinst.EXE
+
On Windows 2000 and later it is available by installing the Windows Support Tools, located in the
Support\Tools folder of the Windows installation CD-ROM.
-
-
-
-
-- Registry DNS lookup
-
+
+
3.3 Registry DNS lookup
+
+
On Windows platforms, if no value is specified in the dns_nameservers option in
squid.conf or in the /etc/resolv.conf file, the list of DNS name servers is
taken from the Windows registry; both static and dynamic DHCP configurations
are supported.
-
-
-
-
-- Compatibility Notes
-
+
+
3.4 Compatibility Notes
+
+
- It's recommended to use '/' char in Squid paths instead of '\'
@@ -224,14 +517,14 @@
- When Squid runs in command line mode, the launching user account must have administrative privilege on the system
- "Start parameters" in the Windows 2000/XP/2003 Service applet cannot be used
-- Building with MinGW, when the configure option --enable-truncate is used, Squid cannot run on Windows NT, only Windows 2000 and later are supported
+- On Windows Vista and later, User Account Control (UAC) must be disabled before running service installation
-
-
-
-
-- Known Limitations
-
+
+
+
3.5 Known Limitations
+
+
- Squid features not operational:
@@ -246,11 +539,10 @@
- File Descriptors number hard-limited to 2048 when building with MinGW.
-
-
-
-
-- Building Squid on Windows
-
+
+
3.6 Building Squid on Windows
+
+
A reasonably recent release of
Cygwin or
MinGW is needed.
@@ -262,8 +554,6 @@
MinGW packages repository
db-1.85:
TinyCOBOL download area
-uudecode:
-Native Win32 ports of some GNU utilities
When running configure, the --disable-wccp and --disable-wccpv2 options should always be specified to avoid compile errors.
- New configure options:
@@ -298,11 +588,9 @@
-
-
-
-
-- Using cache manager on Windows:
-
+
3.7 Using cache manager on Windows:
+
+
On Windows, cache manager (cachemgr.cgi) can be used with Microsoft IIS or Apache.
Some specific configuration could be needed:
-
-
-
-
-6. Changes to squid.conf since Squid-2.6
-There have been many changes to Squid's configuration file since Squid-2.6.
+4. Changes to squid.conf since Squid-3.0
-This section gives a detailed account of those changes in three categories:
+There have been changes to Squid's configuration file since Squid-3.0.
+This section gives a thorough account of those changes in three categories:
-
@@ -362,253 +646,470 @@
-
6.1 New tags
+
+ 4.1 New tags
-- minimum_icp_query_timeout (msec)
-
-
+
- acl_uses_indirect_client
-
+
Whether to use any result found by follow_x_forwarded_for in further ACL processing.
+Default: ON
-Default: 5
-
-Normally the ICP query timeout is determined dynamically. But
-sometimes it can lead to very small timeouts, even lower than
-the normal latency variance on your link due to traffic.
-Use this option to put an lower limit on the dynamic timeout
-value. Do NOT use this option to always use a fixed (instead
-of a dynamic) timeout value. To set a fixed timeout see the
-'icp_query_timeout' directive.
+ Controls whether the indirect client address
+ (see follow_x_forwarded_for) is used instead of the
+ direct client address in acl matching.
- - background_ping_rate
-
-
+
- adaptation_access
-
+
Sends an HTTP transaction to an ICAP or eCAP adaptation service.
-Default: 10 seconds
+ adaptation_access service_name allow|deny [!]aclname...
+ adaptation_access set_name allow|deny [!]aclname...
+
+ At each supported vectoring point, the adaptation_access
+ statements are processed in the order they appear in this
+ configuration file. Statements pointing to the following services
+ are ignored (i.e., skipped without checking their ACL):
+
+ - services serving different vectoring points
+ - "broken-but-bypassable" services
+ - "up" services configured to ignore such transactions
+ (e.g., based on the ICAP Transfer-Ignore header).
-Controls how often the ICP pings are sent to siblings that
-have background-ping set.
+ When a set_name is used, all services in the set are checked
+ using the same rules, to find the first applicable one. See
+ adaptation_service_set for details.
+
+ If an access list is checked and there is a match, the
+ processing stops: For an "allow" rule, the corresponding
+ adaptation service is used for the transaction. For a "deny"
+ rule, no adaptation service is activated.
+
+ It is currently not possible to apply more than one adaptation
+ service at the same vectoring point to the same HTTP transaction.
- - httpd_accel_surrogate_id
-
+
- adaptation_masterx_shared_names
-
-Default: unset
+ For each master transaction (i.e., the HTTP request and response
+ sequence, including all related ICAP and eCAP exchanges), Squid
+ maintains a table of metadata. The table entries are (name, value)
+ pairs shared among eCAP and ICAP exchanges. The table is destroyed
+ with the master transaction.
+
+ This option specifies the table entry names that Squid must accept
+ from and forward to the adaptation transactions.
+
+ An ICAP REQMOD or RESPMOD transaction may set an entry in the
+ shared table by returning an ICAP header field with a name
+ specified in adaptation_masterx_shared_names. Squid will store
+ and forward that ICAP header field to subsequent ICAP
+ transactions within the same master transaction scope.
-Surrogates (http://www.esi.org/architecture_spec_1.0.html)
-need an identification token to allow control targeting. Because
-a farm of surrogates may all perform the same tasks, they may share
-an identification token.
+ Only one shared entry name is supported at this time.
- - http_accel_surrogate_remote on|off
-
+
- adaptation_service_chain
-
-Default: off
+ Configures a list of complementary services that will be applied
+ one-by-one, forming an adaptation chain or pipeline. This is useful
+ when Squid must perform different adaptations on the same message.
+
+ adaptation_service_chain chain_name service_name1 svc_name2 ...
+
+ The named services are used in the chain declaration order. The first
+ applicable adaptation service from the chain is used first. The next
+ applicable service is applied to the successful adaptation results of
+ the previous service in the chain.
+
+ When adaptation starts, broken services are ignored as if they were
+ not a part of the chain. A broken service is a down optional service.
-Remote surrogates (such as those in a CDN) honour Surrogate-Control: no-store-remote.
-Set this to on to have squid behave as a remote surrogate.
+ Request satisfaction terminates the adaptation chain because Squid
+ does not currently allow declaration of RESPMOD services at the
+ "reqmod_precache" vectoring point (see icap_service or ecap_service).
+
+ The services in a chain must be attached to the same vectoring point
+ (e.g., pre-cache) and use the same adaptation method (e.g., REQMOD).
+
+ A chain may contain a mix of optional and essential services. If an
+ essential adaptation fails (or the failure cannot be bypassed for
+ other reasons), the master transaction fails. Otherwise, the failure
+ is bypassed as if the failed adaptation service was not in the chain.
- - esi_parser libxml2|expat|custom
-
+
- adaptation_service_iteration_limit
-
-Default: custom
+ Limits the number of iterations allowed when applying adaptation
+ services to a message. If your longest adaptation set or chain
+ may have more than 16 services, increase the limit beyond its
+ default value of 16. If detecting infinite iteration loops sooner
+ is critical, make the iteration limit match the actual number
+ of services in your longest adaptation set or chain.
-ESI markup is not strictly XML compatible. The custom ESI parser
-will give higher performance, but cannot handle non ASCII character
-encodings.
+ Infinite adaptation loops are most likely with routing services.
- - email_err_data on|off
-
+
- adaptation_service_set
-
-Default: on
+ Configures an ordered set of similar, redundant services. This is
+ useful when hot standby or backup adaptation servers are available.
+
+ adaptation_service_set set_name service_name1 service_name2 ...
+
+ The named services are used in the set declaration order. The first
+ applicable adaptation service from the set is used first. The next
+ applicable service is tried if and only if the transaction with the
+ previous service fails and the message waiting to be adapted is still
+ intact.
+
+ When adaptation starts, broken services are ignored as if they were
+ not a part of the set. A broken service is a down optional service.
+
+ The services in a set must be attached to the same vectoring point
+ (e.g., pre-cache) and use the same adaptation method (e.g., REQMOD).
+
+ If all services in a set are optional then adaptation failures are
+ bypassable. If all services in the set are essential, then a
+ transaction failure with one service may still be retried using
+ another service from the set, but when all services fail, the master
+ transaction fails as well.
-If enabled, information about the occurred error will be
-included in the mailto links of the ERR pages (if %W is set)
-so that the email body contains the data.
-Syntax is <A HREF="mailto:%w%W">%w</A>
+ A set may contain a mix of optional and essential services, but that
+ is likely to lead to surprising results because broken services become
+ ignored (see above), making previously bypassable failures fatal.
+ Technically, it is the bypassability of the last failed service that
+ matters.
- - refresh_all_ims on|off
-
-
+
- chunked_request_body_max_size
-
+
New option to enable handling of broken HTTP/1.1 clients sending chunked requests.
-Default: off
+ A broken or confused HTTP/1.1 client may send a chunked HTTP
+ request to Squid. Squid does not have full support for that
+ feature yet. To cope with such requests, Squid buffers the
+ entire request and then dechunks request body to create a
+ plain HTTP/1.0 request with a known content length. The plain
+ request is then used by the rest of Squid code as usual.
+
+ The option value specifies the maximum size of the buffer used
+ to hold the request before the conversion. If the chunked
+ request size exceeds the specified limit, the conversion
+ fails, and the client receives an "unsupported request" error,
+ as if dechunking was disabled.
-When you enable this option, squid will always check
-the origin server for an update when a client sends an
-If-Modified-Since request. Many browsers use IMS
-requests when the user requests a reload, and this
-ensures those clients receive the latest version.
+ Dechunking is enabled by default. To disable conversion of
+ chunked requests, set the maximum to zero.
-By default (off), squid may return a Not Modified response
-based on the age of the cached version.
+ Request dechunking feature and this option in particular are a
+ temporary hack. When chunking requests and responses are fully
+ supported, there will be no need to buffer a chunked request.
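+
+ For example, to raise the buffering limit to 128 KB (an illustrative
+ value only; setting it to zero disables dechunking entirely):
+
+ chunked_request_body_max_size 128 KB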
- - request_header_access
-
-
Replaces the header_access directive of Squid-2.6 and earlier, but applies to requests only.
- - reply_header_access
-
-
Replaces the header_access directive of Squid-2.6 and earlier, but applies to replies only.
- - icap_enable on|off
-
-
+
- delay_pool_uses_indirect_client
-
+
Whether to use any result found by follow_x_forwarded_for in delay_pool assignment.
+Default: ON
-Default: off
-
-If you want to enable the ICAP module support, set this to on.
+ Controls whether the indirect client address
+ (see follow_x_forwarded_for) is used instead of the
+ direct client address in delay pools.
- - icap_preview_enable on|off
-
-
+
+
- dns_v4_fallback
-
+
New option to prevent squid from always looking up IPv4 regardless of whether IPv6 addresses are found.
+Squid will follow a policy of preferring IPv6 links, keeping IPv4 only as a safety net behind IPv6.
-Default: off
+ Standard practice with DNS is to look up either A or AAAA records
+ and use the results if the lookup succeeds, only looking up the other if
+ the first attempt fails or otherwise produces no results.
-Set this to 'on' if you want to enable the ICAP preview
-feature in Squid.
+ That policy however will cause squid to produce error pages for some
+ servers that advertise AAAA but are unreachable over IPv6.
+
+ If this is ON squid will always lookup both AAAA and A, using both.
+ If this is OFF squid will lookup AAAA and only try A if none found.
+
+ WARNING: There are some possibly unwanted side-effects with this on:
+ *) Doubles the load placed by squid on the DNS network.
+ *) May negatively impact connection delay times.
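+
+ For example, to use strict AAAA-then-A lookups and avoid the extra DNS
+ load described above (a sketch only):
+
+ dns_v4_fallback off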
- - icap_preview_size
-
-
+
+
- ecap_enable
-
+
Controls whether eCAP support is enabled. Default: OFF
+
+ - ecap_service
-
+
Defines a single eCAP service
-Default: -1
+ ecap_service servicename vectoring_point bypass service_url
+
+ vectoring_point = reqmod_precache|reqmod_postcache|respmod_precache|respmod_postcache
+ This specifies at which point of transaction processing the
+ eCAP service should be activated. *_postcache vectoring points
+ are not yet supported.
+
+ bypass = 1|0
+ If set to 1, the eCAP service is treated as optional. If the
+ service cannot be reached or malfunctions, Squid will try to
+ ignore any errors and process the message as if the service
+ was not enabled. Not all eCAP errors can be bypassed.
+ If set to 0, the eCAP service is treated as essential and all
+ eCAP errors will result in an error page returned to the
+ HTTP client.
-The default size of preview data to be sent to the ICAP server.
--1 means no preview. This value might be overwritten on a per server
-basis by OPTIONS requests.
+ service_url = ecap://vendor/service_name?custom&cgi=style&parameters=optional
+
+ Example:
+ ecap_service service_1 reqmod_precache 0 ecap://filters-R-us/leakDetector?on_error=block
+ ecap_service service_2 respmod_precache 1 ecap://filters-R-us/virusFilter?config=/etc/vf.cfg
- - icap_default_options_ttl (seconds)
-
-
+
+
- err_page_stylesheet
-
+
New option to configure location for CSS stylesheet controlling error page display.
+
+ - error_default_language
-
+
New option to replace the old configure option --enable-default-err-language
+New translations can be downloaded from http://www.squid-cache.org/Versions/langpack/
-Default: 60
+ Set the default language which squid will send error pages in
+ if no existing translation matches the clients language
+ preferences.
-The default TTL value for ICAP OPTIONS responses that don't have
-an Options-TTL header.
+ If unset (default) generic English will be used.
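+
+ For example (the language code is purely illustrative):
+
+ error_default_language de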
- - icap_persistent_connections on|off
-
+
+
- error_log_languages
-
-Default: on
+ Log to cache.log what languages users are attempting to
+ auto-negotiate for translations.
-Whether or not Squid should use persistent connections to
-an ICAP server.
+ Successful negotiations are not logged. Only failures
+ have meaning to indicate that Squid may need an upgrade
+ of its error page translations.
- - icap_send_client_ip on|off
-
-
+
+
- follow_x_forwarded_for
-
+
Enable processing of the X-Forwarded-for header for various administration tasks.
-Default: off
+ Allowing or Denying the X-Forwarded-For header to be followed to
+ find the original source of a request.
+
+ Requests may pass through a chain of several other proxies
+ before reaching us. The X-Forwarded-For header will contain a
+ comma-separated list of the IP addresses in the chain, with the
+ rightmost address being the most recent.
+
+ If a request reaches us from a source that is allowed by this
+ configuration item, then we consult the X-Forwarded-For header
+ to see where that host received the request from. If the
+ X-Forwarded-For header contains multiple addresses, and if
+ acl_uses_indirect_client is on, then we continue backtracking
+ until we reach an address for which we are not allowed to
+ follow the X-Forwarded-For header, or until we reach the first
+ address in the list. (If acl_uses_indirect_client is off, then
+ it's impossible to backtrack through more than one level of
+ X-Forwarded-For addresses.)
+
+ The end result of this process is an IP address that we will
+ refer to as the indirect client address. This address may
+ be treated as the client address for access control, delay
+ pools and logging, depending on the acl_uses_indirect_client,
+ delay_pool_uses_indirect_client and log_uses_indirect_client
+ options.
-This adds the header "X-Client-IP" to ICAP requests.
+ SECURITY CONSIDERATIONS:
+ Any host for which we follow the X-Forwarded-For header
+ can place incorrect information in the header, and Squid
+ will use the incorrect information as if it were the
+ source address of the request. This may enable remote
+ hosts to bypass any access control restrictions that are
+ based on the client's source addresses.
+
+ For example:
+
+ acl localhost src 127.0.0.1
+ acl my_other_proxy srcdomain .proxy.example.com
+ follow_x_forwarded_for allow localhost
+ follow_x_forwarded_for allow my_other_proxy
- - icap_send_client_username on|off
-
+
+
- ftp_epsv
-
-Default: off
+ FTP Protocol extensions permit the use of a special "EPSV" command.
-This adds the header "X-Client-Username" to ICAP requests
-if proxy access is authentified.
+ NATs may be able to put the connection on a "fast path" through the
+ translator using EPSV, as the EPRT command will never be used and therefore,
+ translation of the data portion of the segments will never be needed.
+
+ Turning this OFF will prevent EPSV being attempted.
+
+ WARNING: Doing so will convert Squid back to the old behavior with all
+ the related problems with external NAT devices/layers.
+
+ Requires ftp_passive to be ON (default) for any effect.
- - icap_service
-
+
+
- ftp_epsv_all
-
-Default: none
+ FTP Protocol extensions permit the use of a special "EPSV ALL" command.
-Defines a single ICAP service
+ NATs may be able to put the connection on a "fast path" through the
+ translator, as the EPRT command will never be used and therefore,
+ translation of the data portion of the segments will never be needed.
-icap_service servicename vectoring_point bypass service_url
+ When a client only expects to do two-way FTP transfers this may be useful.
+ If squid finds that it must do a three-way FTP transfer after issuing
+ an EPSV ALL command, the FTP session will fail.
-vectoring_point = reqmod_precache|reqmod_postcache|respmod_precache|respmod_postcache
-This specifies at which point of request processing the ICAP
-service should be plugged in.
-bypass = 1|0
-If set to 1 and the ICAP server cannot be reached, the request will go
-through without being processed by an ICAP server
-service_url = icap://servername:port/service
+ If you have any doubts about this option do not use it.
+ Squid will nicely attempt all other connection methods.
-Note: reqmod_postcache and respmod_postcache is not yet implemented
-
-Example:
-icap_service service_1 reqmod_precache 0 icap://icap1.mydomain.net:1344/reqmod
-icap_service service_2 respmod_precache 0 icap://icap2.mydomain.net:1344/respmod
+ Requires ftp_passive to be ON (default)
- - icap_class
-
-
+
+
- forward_max_tries
-
+
Controls how many different forward paths Squid will try
+before giving up. Default: 10
+
+ - icap_log
-
+
New option to write ICAP log files recording ICAP transaction summaries, one line per
+transaction. Similar to access.log.
-Default: none
+ The icap_log option format is:
+ icap_log <filepath> [<logformat name> [acl acl ...]]
+ icap_log none [acl acl ...]
+
+ Please see access_log option documentation for details. The two
+ kinds of logs share the overall configuration approach and many
+ features.
+
+ ICAP processing of a single HTTP message or transaction may
+ require multiple ICAP transactions. In such cases, multiple
+ ICAP transaction log lines will correspond to a single access
+ log line.
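+
+ For example, a simple setup relying on the default icap_squid format
+ might be (the log path is illustrative only):
+
+ icap_log /var/log/squid/icap.log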
+
+ ICAP log uses logformat codes that make sense for an ICAP
+ transaction. Header-related codes are applied to the HTTP header
+ embedded in an ICAP server response, with the following caveats:
+ For REQMOD, there is no HTTP response header unless the ICAP
+ server performed request satisfaction. For RESPMOD, the HTTP
+ request header is the header sent to the ICAP server. For
+ OPTIONS, there are no HTTP headers.
+
+ The following format codes are also available for ICAP logs:
+
+ icap::<A ICAP server IP address. Similar to <A.
+
+ icap::<service_name ICAP service name from the icap_service
+ option in Squid configuration file.
+
+ icap::ru ICAP Request-URI. Similar to ru.
+
+ icap::rm ICAP request method (REQMOD, RESPMOD, or
+ OPTIONS). Similar to existing rm.
+
+ icap::>st Bytes sent to the ICAP server (TCP payload
+ only; i.e., what Squid writes to the socket).
+
+ icap::<st Bytes received from the ICAP server (TCP
+ payload only; i.e., what Squid reads from
+ the socket).
+
+ icap::tr Transaction response time (in
+ milliseconds). The timer starts when
+ the ICAP transaction is created and
+ stops when the transaction is completed.
+ Similar to tr.
+
+ icap::tio Transaction I/O time (in milliseconds). The
+ timer starts when the first ICAP request
+ byte is scheduled for sending. The timers
+ stops when the last byte of the ICAP response
+ is received.
+
+ icap::to Transaction outcome: ICAP_ERR* for all
+ transaction errors, ICAP_OPT for OPTION
+ transactions, ICAP_ECHO for 204
+ responses, ICAP_MOD for message
+ modification, and ICAP_SAT for request
+ satisfaction. Similar to Ss.
-Defines an ICAP service chain. If there are multiple services per
-vectoring point, they are processed in the specified order.
+ icap::Hs ICAP response status code. Similar to Hs.
-icap_class classname servicename...
+ icap::>h ICAP request header(s). Similar to >h.
-Example:
-icap_class class_1 service_1 service_2
-icap class class_2 service_1 service_3
+ icap::<h ICAP response header(s). Similar to <h.
+
+ The default ICAP log format, which can be used without an explicit
+ definition, is called icap_squid:
+
+logformat icap_squid %ts.%03tu %6icap::tr %>a %icap::to/%03icap::Hs %icap::<size %icap::rm %icap::ru% %un -/%icap::<A -
- - icap_access
-
-
-
-Default: none
-
-Redirects a request through an ICAP service class, depending
-on given acls
-
-icap_access classname allow|deny [!]aclname...
-The icap_access statements are processed in the order they appear in
-this configuration file. If an access list matches, the processing stops.
-For an "allow" rule, the specified class is used for the request. A "deny"
-rule simply stops processing without using the class. You can also use the
-special classname "None".
+
- icap_retry
-
+
New option to determine which retriable ICAP transactions are
+retried.
+
+ Transactions that received a complete ICAP response
+ and did not have to consume or produce HTTP bodies to receive
+ that response are usually retriable.
-For backward compatibility, it is also possible to use services
-directly here.
+ icap_retry allow|deny [!]aclname ...
-Example:
-icap_access class_1 allow all
+ Squid automatically retries some ICAP I/O timeouts and errors
+ due to persistent connection race conditions.
- - accept_filter
-
+
- icap_retry_limit
-
-The name of an accept(2) filter to install on Squid's
-listen socket(s). This feature is perhaps specific to
-FreeBSD and requires support in the kernel.
+ Limits the number of retries allowed. When set to zero (default),
+ no retries are allowed.
-The 'httpready' filter delays delivering new connections
-to Squid until a full HTTP request has been received.
-See the accf_http(9) man page.
+ Communication errors due to persistent connection race
+ conditions are unavoidable, automatically retried, and do not
+ count against this limit.
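+
+ A sketch enabling retries for all retriable transactions, with an
+ illustrative limit:
+
+ icap_retry allow all
+ icap_retry_limit 3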
@@ -629,246 +1130,555 @@
- - acl myportname
-
-
New acl type myportname, matching the name of the http(s)_port where the request was accepted
+
- loadable_modules
-
+
Instructs Squid to load the specified dynamic module(s) or activate
+preloaded module(s).
- acl aclname myportname 3128 ... # http(s)_port name
+ Example:
+ loadable_modules @DEFAULT_PREFIX@/lib/MinimalAdapter.so
- - umask
-
-
Ported from 2.6. Behaviour identical.
+
- log_icap aclname [aclname ...]
-
+
- Minimum umask which should be enforced while the proxy
- is running, in addition to the umask set at startup.
+ This option allows you to control which requests get logged
+ to icap.log. See the icap_log directive for ICAP log details.
+
+
+
- For a traditional octal representation of umasks, start
- your value with 0.
+ - log_uses_indirect_client
-
+
Whether to use any result found by follow_x_forwarded_for in access.log.
+Default: ON
+
+ Controls whether the indirect client address
+ (see follow_x_forwarded_for) is used instead of the
+ direct client address in the access log.
-
+- netdb_filename
-
+
+
+ A filename where Squid stores its netdb state between restarts.
+ To disable, enter "none".
+
+
+ - pinger_enable
-
+
New option to enable/disable the ICMP pinger helper with a reconfigure instead of a full rebuild.
+
+ Control whether the pinger is active at run-time.
+ Enables turning ICMP pinger on and off with a simple squid -k reconfigure.
+ default is on when --enable-icmp is compiled in.
+
+
+
- 6.2 Changes to existing tags
-
+ - ssl_bump
-
+
New Access control for which CONNECT requests to an http_port
+marked with an sslBump flag are actually "bumped". Please
+see the sslBump flag of an http_port option for more details
+about decoding proxied SSL connections.
+DEFAULT: No requests are bumped.
+
+NOCOMMENT_START
+# Example: Bump all requests except those originating from localhost and
+# those going to webax.com or example.com sites.
+#
+# acl broken_sites dstdomain .webax.com
+# acl broken_sites dstdomain .example.com
+# ssl_bump deny localhost
+# ssl_bump deny broken_sites
+# ssl_bump allow all
+
+
+
-
-
-- http_port
-
-
New options:
+
- sslproxy_cert_error
-
+
New Access Control to selectively bypass server certificate validation errors.
+DEFAULT: None bypassed.
- disable-pmtu-discovery=
- Control Path-MTU discovery usage:
- off lets OS decide on what to do (default).
- transparent disable PMTU discovery when transparent support is enabled.
- always disable always PMTU discovery.
-
- In many setups of transparently intercepting proxies Path-MTU
- discovery can not work on traffic towards the clients. This is
- the case when the intercepting device does not fully track
- connections and fails to forward ICMP must fragment messages
- to the cache server. If you have such setup and experience that
- certain clients sporadically hang or never complete requests set
- disable-pmtu-discovery option to 'transparent'.
+ For example, the following lines will bypass all validation errors
+ when talking to servers located at 172.16.0.0/16. All other
+ validation errors will result in ERR_SECURE_CONNECT_FAIL error.
+
+ acl BrokenServersAtTrustedIP dst 172.16.0.0/16
+ sslproxy_cert_error allow BrokenServersAtTrustedIP
+ sslproxy_cert_error deny all
+
+ This option must use fast ACL expressions only. Expressions that use
+ external lookups or communication result in unpredictable behavior or
+ crashes.
+
+ Without this option, all server certificate validation errors
+ terminate the transaction. Bypassing validation errors is dangerous
+ because an error usually implies that the server cannot be trusted and
+ the connection may be insecure.
- - cache_peer
-
-
New options:
+
- qos_flows local-hit= sibling-hit= parent-hit=
-
+
- basetime=n
+ Allows you to select a TOS/DSCP value to mark outgoing
+ connections with, based on where the reply was sourced.
+
+ TOS values really only have local significance - so you should
+ know what you're specifying. For more information, see RFC2474,
+ RFC2475, and RFC3260.
+
+ The TOS/DSCP byte must be exactly that - octet value 0x00-0xFF.
+ Note that in practice often only values up to 0x3F are usable
+ as the two highest bits have been redefined for use by ECN
+ (RFC3168).
+
+ This setting is configured by setting the source TOS values:
+
+ local-hit=0xFF Value to mark local cache hits.
+
+ sibling-hit=0xFF Value to mark hits from sibling peers.
- background-ping
+ parent-hit=0xFF Value to mark hits from parent peers.
- weighted-round-robin
- use 'basetime=n' to specify a base amount to
- be subtracted from round trip times of parents.
- It is subtracted before division by weight in calculating
- which parent to fectch from. If the rtt is less than the
- base time the rtt is set to a minimal value.
-
- use 'background-ping' to only send ICP queries to this
- neighbor infrequently. This is used to keep the neighbor
- round trip time updated and is usually used in
- conjunction with weighted-round-robin.
-
- use 'weighted-round-robin' to define a set of parents
- which should be used in a round-robin fashion with the
- frequency of each parent being based on the round trip
- time. Closer parents are used more often.
- Usually used for background-ping parents.
+ NOTE: 'miss' preserve feature is only possible on Linux at this time.
+
+ For the following to work correctly, you will need to patch your
+ linux kernel with the TOS preserving ZPH patch.
+ The kernel patch can be downloaded from http://zph.bratcheda.org
+
+ disable-preserve-miss
+ If set, any HTTP response towards clients will
+ have the TOS value of the response coming from the
+ remote server masked with the value of miss-mask.
+ miss-mask=0xFF
+ Allows you to mask certain bits in the TOS received from the
+ remote server, before copying the value to the TOS sent
+ towards clients.
+ Default: 0xFF (TOS from server is not changed).
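+
+ A sketch combining hit marking with miss masking (values are
+ illustrative only):
+
+ qos_flows local-hit=0x30 parent-hit=0x30 miss-mask=0xC0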
- - cache_dir
-
-
New common option no-store replaces the older read-only option.
- - auth_param
-
-
Removed Basic auth option
+
+
+
+ 4.2 Changes to existing tags
+
+
+
+
+- acl
-
+
New preset value ipv6 available in the src and dst ACL types, matching all of the public IPv6 network space.
+New acl type myportname, matching the name of the http_port or https_port where the request was accepted.
+New acl type tag, matching the tag= returned from the external_acl_type helper.
+New acl type peername, matching against a named cache_peer entry where the request will be attempted first.
+NP: peername currently is limited to only match the first peer possible.
- blankpasswor, not yet ported to squid-3.
- auth_param basic concurrency 0
+ acl aclname dst ipv6 # request for IPv6-enabled site
+ acl aclname src ipv6 # request from IPv6 address
+ acl aclname myportname 3128 ... # http(s)_port name
+ acl aclname peername myPeer ... # cache_peer ... name=myPeer
+ acl aclname tag value ... # tag= option from external ACL
- - external_acl_type
-
-
New format specifications:
+
- auth_param ntlm, basic, digest
-
+
BASIC, DIGEST: New parameter option utf8 on|off to permit helpers to selectively process UTF-8 characters even though
+HTTP accepts only ISO-8859-1.
+NTLM: The helper binary bundled with Squid under the name ntlm_auth has been renamed to accurately reflect
+its real behavior and to prevent confusion with the more useful Samba helper using the same name.
+Despite being used for NTLM, the helper does not in fact provide true NTLM function. What it does provide is
+SMB LanManager authentication through the NTLM interface without the need for a domain controller. Thus the
+new name is ntlm_smb_lm_auth.
+WARNING: due to the name clash with Samba helper, admin should be careful to only update their squid.conf if the
+squid bundled binary is used and needed. If the Samba helper is in use, the squid.conf should not be altered.
+
+ - balance_on_multiple_ip
-
+
The previous default behaviour (rotate per-request) of this setting causes failover clashes with IPv6 built-in mechanisms.
+It has thus been turned off by default, making the 'best choice' IP continue in use for any hostname until it encounters a connection failure, at which point failover drops to the next known IP.
- %URI Requested URI
+ Modern IP resolvers in squid sort lookup results by preferred access.
+ By default squid will use these IPs in order and only rotates to
+ the next listed when the most preferred fails.
+
+ Some load balancing servers based on round robin DNS have been
+ found not to preserve user session state across requests
+ to different IP addresses.
- %PATH Requested URL path
+ With this directive enabled, Squid rotates IPs per request.
-New result keywords:
+
+
- cache
-
+
Removed the 'QUERY' acl and 'cache deny QUERY' entries.
+Replaced by new refresh_pattern instead.
+
+ - cache_dir
-
+
Default changed to 256MB in-memory cache.
+see cache_mem and maximum_object_size_in_memory for size parameters.
+'null' storage type dropped. In-memory cache is always present. Remove all cache_dir options to prevent on-disk caching.
+
+ - cache_mem
-
+
Default size increased to 256MB.
+
+ - cache_peer htcp-no-clr htcp-no-purge-clr htcp-only-clr htcp-forward-clr connection-auth[=on|off|auto] connect-fail-limit=N no-tproxy
-
+
New Options.
- tag= Apply a tag to a request (for both ERR and OK results)
- Only sets a tag, does not alter existing tags.
+ use 'htcp-no-clr' to send HTCP to the neighbor but without
+ sending any CLR requests. This cannot be used with
+ htcp-only-clr.
+
+ use 'htcp-no-purge-clr' to send HTCP to the neighbor
+ including CLRs but only when they do not result from
+ PURGE requests.
+
+ use 'htcp-only-clr' to send HTCP to the neighbor but ONLY
+ CLR requests. This cannot be used with htcp-no-clr.
+
+ use 'htcp-forward-clr' to forward any HTCP CLR requests
+ this proxy receives to the peer.
+
+ use 'connection-auth=off' to tell Squid that this peer does
+ not support Microsoft connection oriented authentication,
+ and any such challenges received from there should be
+ ignored. Default is 'auto' to automatically determine the
+ status of the peer.
+
+ use 'connect-fail-limit=nn' to specify how many times
+ connecting to a peer must fail before it is marked as
+ down. Default is 10.
+
+ use 'no-tproxy' to specify that requests passed to this peer
+ are not to have the client IP spoofed. For use to prevent
+ packet routing issues with a cluster of peers behind WCCPv2.
- - refresh_pattern
-
-
New options:
+
- cache_store_log
-
+
Default changed to OFF, matching long-standing developer recommendations.
+
+ - error_directory
-
+
Now an optional entry in squid.conf. If present it will force all visitors to receive the error pages
+contained in the directory it points at. If absent, error page localization will be given a chance.
- ignore-no-store
- refresh-ims
+ If you wish to create your own versions of the default
+ error files to customize them to suit your company, COPY
+ the error/template files to another directory and point
+ this tag at them.
- ignore-no-store ignores any ``Cache-control: no-store''
- headers received from a server. Doing this VIOLATES
- the HTTP standard. Enabling this feature could make you
- liable for problems which it causes.
-
- refresh-ims causes squid to contact the origin server
- when a client issues an If-Modified-Since request. This
- ensures that the client will receive an updated version
- if one is available.
+ WARNING: This option will disable multi-language support
+ on error pages if used.
- - acl
-
-
The 'all' ACL is now provided as a built-in. Warnings will be displayed if any attempt is made to redefine it.
-New types:
+
- debug_options rotate=
-
+
New parameter rotate=N to control number of cache.log rotations independent of other logs.
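+
+For example, assuming rotate= may be combined with the usual section,level pairs (a sketch only; the values shown are illustrative):
+
+  debug_options ALL,1 rotate=5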
+
+
+ - external_acl_type
-
+
New options 'ipv4' and 'ipv6' are added to set the IPv4/v6 protocol between squid and its helpers.
+Please be aware of some limits to these options. These options only affect the transport protocol used
+to send data to and from the helpers. Squid in IPv6-mode may still send %SRC addresses in IPv4 or IPv6
+format, so all helpers will need to be checked and converted to cope with such information cleanly.
- acl aclname http_status 200 301 500- 400-403 ... # status code in reply
+ ipv4 / ipv6 IP-mode used to communicate to this helper.
+ For compatibility with older configurations and helpers
+ the default is 'ipv4'.
-
- - short_icon_urls
-
-
New default:
+
+New header input format specifiers to separate Request and Reply headers when both are passed back.
- Default: on
- (Old default: off)
+ %>{Header} HTTP request header
+ %>{Hdr:member} HTTP request header list member
+ %>{Hdr:;member} HTTP request header list member using ; as
+ list separator. ; can be any non-alphanumeric
+ character.
+
+ %<{Header} HTTP reply header
+ %<{Hdr:member} HTTP reply header list member
+ %<{Hdr:;member} HTTP reply header list member using ; as
+ list separator. ; can be any non-alphanumeric
+ character.
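+
+ A sketch using the new specifiers (the helper path and names are
+ illustrative only):
+
+ external_acl_type agent_check ipv6 %>{User-Agent} /usr/local/squid/libexec/my_helper
+ acl trusted_agent external agent_check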
- - delay_class
-
-
New delay classes:
+
+
- forwarded_for
-
+
+New setting options: transparent, truncate, delete.
- class 4 Everything in a class 3 delay pool, with an
- additional limit on a per user basis. This
- only takes effect if the username is established
- in advance - by forcing authentication in your
- http_access rules.
+ If set to "transparent", Squid will not alter the
+ X-Forwarded-For header in any way.
+
+ If set to "delete", Squid will delete the entire
+ X-Forwarded-For header.
- class 5 Requests are grouped according their tag (see
- external_acl's tag= reply).
+ If set to "truncate", Squid will remove all existing
+ X-Forwarded-For entries, and place itself as the sole entry.
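+
+ For example, to strip any inherited entries and advertise only this
+ proxy:
+
+ forwarded_for truncate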
- - htcp_port
-
-
New default to require the feature to be enabled in squid.conf:
+
- http_port transparent intercept sslbump connection-auth[=on|off] ignore-cc
-
+
Option 'transparent' is being deprecated in favour of 'intercept' which more clearly identifies what the option does.
+For now option 'tproxy' remains with old behaviour meaning fully-invisible proxy using TPROXY support.
+New port options
- Default: 0 (disabled)
- (Old default: 4827)
+ intercept Rename of old 'transparent' option to indicate proper functionality.
+
+ allow-direct Allow direct forwarding in accelerator mode. Normally
+ accelerated requests are denied direct forwarding as if
+ never_direct was used.
+
+ connection-auth[=on|off]
+ use connection-auth=off to tell Squid to prevent
+ forwarding Microsoft connection oriented authentication
+ (NTLM, Negotiate and Kerberos)
+
+ keepalive[=idle,interval,timeout]
+ Enable TCP keepalive probes of idle connections
+ idle is the initial time before TCP starts probing
+ the connection, interval how often to probe, and
+ timeout the time before giving up.
+
+ ignore-cc Ignore request Cache-Control headers.
+
+ Warning: This option violates HTTP specifications if
+ used in non-accelerator setups.
+
+ sslBump Intercept each CONNECT request matching ssl_bump ACL,
+ establish secure connection with the client and with
+ the server, decrypt HTTP messages as they pass through
+ Squid, and treat them as unencrypted HTTP messages,
+ becoming the man-in-the-middle.
+
+ When this option is enabled, additional options become
+ available to specify SSL-related properties of the
+ client-side connection: cert, key, version, cipher,
+ options, clientca, cafile, capath, crlfile, dhparams,
+ sslflags, and sslcontext. See the https_port directive
+ for more information on these options.
+
+ The ssl_bump option is required to fully enable
+ the SslBump feature.
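+
+ A sketch combining several of the new port options (values are
+ illustrative only):
+
+ http_port 3128 connection-auth=off keepalive=60,30,3
+ http_port 3129 intercept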
- - icp_port
-
-
New default to require the feature to be enabled in squid.conf:
+
- https_port intercept sslbump connection-auth[=on|off]
-
+
New port options. see http_port.
+
+ - icap_service bypass=on|off|1|0 routing=on|off|1|0
-
+
New options 'bypass=' and 'routing='.
- Default: 0 (disabled)
- (Old default: 3130)
+ bypass=on|off|1|0
+ If set to 'on' or '1', the ICAP service is treated as
+ optional. If the service cannot be reached or malfunctions,
+ Squid will try to ignore any errors and process the message as
+ if the service was not enabled. Not all ICAP errors can be
+ bypassed. If set to 0, the ICAP service is treated as
+ essential and all ICAP errors will result in an error page
+ returned to the HTTP client.
+
+ Bypass is off by default: services are treated as essential.
+
+ routing=on|off|1|0
+ If set to 'on' or '1', the ICAP service is allowed to
+ dynamically change the current message adaptation plan by
+ returning a chain of services to be used next. The services
+ are specified using the X-Next-Services ICAP response header
+ value, formatted as a comma-separated list of service names.
+ Each named service should be configured in squid.conf and
+ should have the same method and vectoring point as the current
+ ICAP transaction. Services violating these rules are ignored.
+ An empty X-Next-Services value results in an empty plan which
+ ends the current adaptation.
+
+ Routing is not allowed by default: the ICAP X-Next-Services
+ response header is ignored.
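For example (the service name and ICAP URI are hypothetical, and the 'all' ACL is assumed from the default configuration), a service marked optional and allowed to re-route adaptation might be declared as:

    icap_service svc_resp respmod_precache bypass=on routing=on icap://icap.example.net:1344/respmod
    adaptation_access svc_resp allow all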
- - snmp_port
-
-
New default to require the feature to be enabled in squid.conf:
+
- logfile_rotate
-
+
No longer controls cache.log rotation. Use debug_options rotate=N instead.
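A minimal sketch of the replacement; the 'ALL,1' debug level and the rotation count are just typical example values:

    debug_options ALL,1 rotate=10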
+
+ - logformat
-
+
New log format tag sets %icap::* %adapt::* for adaptation information.
+The %Hs tag is deprecated and replaced by the request/reply specific >Hs and <Hs tags.
+HTTP request/reply format tags may now be optionally prefixed with http::.
+Old forms will be deprecated in some as yet undecided future release.
+
+ dt Total time spent making DNS lookups (milliseconds)
+
+ [http::]>Hs HTTP status code sent to the client
+ [http::]<Hs HTTP status code received from the next hop
+ [http::]>sh Received HTTP request headers size
+ [http::]<sh Sent HTTP reply headers size
+ [http::]<pt Peer response time in milliseconds. The timer starts
+ when the last request byte is sent to the next hop
+ and stops when the last response byte is received.
+ [http::]<tt Total server-side time in milliseconds. The timer
+ starts with the first connect request (or write I/O)
+ sent to the first selected peer. The timer stops
+ with the last I/O with the last peer.
+
+ If ICAP is enabled, the following two codes become available (as
+ well as ICAP log codes documented with the icap_log option):
+
+ icap::tt Total ICAP processing time for the HTTP
+ transaction. The timer ticks when ICAP
+ ACLs are checked and when ICAP
+ transaction is in progress.
+
+ icap::<last_h The header of the last ICAP response
+ related to the HTTP transaction. Like
+ <h, accepts an optional header name
+ argument. Will not change semantics
+ when multiple ICAP transactions per HTTP
+ transaction are supported.
+
+ If adaptation is enabled the following two codes become available:
+
+ adapt::sum_trs Summed adaptation transaction response
+ times recorded as a comma-separated list in
+ the order of transaction start time. Each time
+ value is recorded as an integer number,
+ representing response time of one or more
+ adaptation (ICAP or eCAP) transaction in
+ milliseconds. When a failed transaction is
+ being retried or repeated, its time is not
+ logged individually but added to the
+ replacement (next) transaction.
+
+ adapt::all_trs All adaptation transaction response times.
+ Same as adapt::sum_trs but response times of
+ individual transactions are never added
+ together. Instead, all transaction response
+ times are recorded individually.
+
+ You can prefix adapt::*_trs format codes with adaptation
+ service name in curly braces to record response time(s) specific
+ to that service. For example: %{my_service}adapt::sum_trs
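As an illustrative sketch only (the format name, log path and the particular mix of codes are arbitrary), a custom format using the new adaptation codes might look like:

    logformat adaptinfo %ts.%03tu %>a %Ss/%03>Hs %<st %rm %ru %icap::tt %adapt::sum_trs
    access_log /var/log/squid/adaptinfo.log adaptinfo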
+
+
+
+
+ - maximum_object_size_in_memory
-
+
Default size limit increased to 512KB.
+
+ - negative_ttl
-
+
New default of 0 seconds, to prevent negative-caching of failure messages unless explicitly
+permitted by the message generating web server.
+Changing this is an RFC 2616 violation and now requires --enable-http-violations
+
+ - refresh_pattern
-
+
New option 'ignore-must-revalidate'.
- Default: 0 (disabled)
- (Old default: 3401)
+ ignore-must-revalidate ignores any ``Cache-Control: must-revalidate``
+ headers received from a server. Doing this VIOLATES
+ the HTTP standard. Enabling this feature could make you
+ liable for problems which it causes.
-
- - logformat
-
-
New format tags:
+
New set of basic patterns. These should always be listed after any custom patterns.
+They ensure RFC compliance with certain protocol and request handling in the absence
+of accurate Cache-Control: and Expires: information.
- rp Request URL-Path excluding hostname
-
- et Tag returned by external acl
-
- <sH Reply high offset sent
-
- <sS Upstream object size
+refresh_pattern ^ftp: 1440 20% 10080
+refresh_pattern ^gopher: 1440 0% 1440
+refresh_pattern -i (/cgi-bin/|\?) 0 0% 0
+refresh_pattern . 0 20% 4320
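A custom rule using the new option would be listed before the basic patterns above; the pattern and timings here are purely illustrative:

    refresh_pattern -i \.iso$ 10080 90% 43200 ignore-must-revalidate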
- - reply_body_max_size
-
-
Syntax changed:
+
- reply_header_max_size
-
+
Default limit increased to 64KB for RFC 2616 compliance.
+
+ - request_header_max_size
-
+
Default limit increased to 64KB for RFC 2616 compliance.
+
+ - tcp_outgoing_address
-
+
This option causes some problems when bridging IPv4 and IPv6. A workaround has been provided.
- reply_body_max_size size [acl acl...]
+ Squid is built with a capability of bridging the IPv4 and IPv6 internets.
+ tcp_outgoing_address as previously used breaks this bridging by forcing
+ all outbound traffic through a certain IPv4 address which may be on the wrong
+ side of the IPv4/IPv6 boundary.
+
+ To operate with tcp_outgoing_address and keep the bridging benefits
+ an additional ACL needs to be used which ensures the IPv6-bound traffic
+ is never forced or permitted out the IPv4 interface.
+
+ acl to_ipv6 dst ipv6
+ tcp_outgoing_address 2002::c001 good_service_net to_ipv6
+ tcp_outgoing_address 10.0.0.2 good_service_net !to_ipv6
+
+ tcp_outgoing_address 2002::beef normal_service_net to_ipv6
+ tcp_outgoing_address 10.0.0.1 normal_service_net !to_ipv6
+
+ tcp_outgoing_address 2002::1 to_ipv6
+ tcp_outgoing_address 10.0.0.3 !to_ipv6
-allow/deny no longer used.
- - url_rewrite_program
-
-
No urlgroup support in either requests or responses.
+ - wccp2_assignment_method hash mask
-
+
Method names are now accepted, replacing the old magic numbers:
+'1' becomes 'hash' and '2' becomes 'mask'
- - auth_param
-
-
fake_auth helper for NTLM now accepts the '-S' parameter to strip NTLM domain off the username string.
-This is useful for class 4 Delay Pools in Squid 3.x
+ - wccp2_forwarding_method gre l2
-
+
Method names are now accepted, replacing the old magic numbers:
+'1' becomes 'gre' and '2' becomes 'l2'
+
+ - wccp2_return_method gre l2
-
+
Method names are now accepted, replacing the old magic numbers:
+'1' becomes 'gre' and '2' becomes 'l2'
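Taken together, a configuration previously written with magic numbers might now read (values per the mappings above):

    wccp2_assignment_method hash
    wccp2_forwarding_method gre
    wccp2_return_method gre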
- 6.3 Removed tags
+
+ 4.3 Removed tags
-- header_access
-
-
This has been replaced by request_header_access and reply_header_access
- - httpd_accel_no_pmtu_disc
-
-
Replaced by disable-pmtu-discovery http_port option
+ - dns_testnames
-
+
Obsolete. This feature is no longer relevant to modern networks and was causing boot problems.
+The -D command line option used previously to suppress these tests is also obsolete.
+
+ - extension_methods
-
+
Obsolete. All possible methods are now accepted and handled properly.
- - wais_relay_*
-
-
Equivalent to cache_peer + cache_peer_access.
+ - icap_class
-
+
Replaced by adaptation_service_set.
+
+ - icap_access
-
+
Replaced by adaptation_access.
-7. Changes to ./configure Options since Squid-2.6
-
-There have been some changes to Squid's build configuration since Squid-2.6.
+5. Changes to ./configure options since Squid-3.0
+There have been some changes to Squid's build configuration since Squid-3.0.
This section gives an account of those changes in three categories:
@@ -882,198 +1692,248 @@
+ 5.1 New options
+
+
+
+
+- --enable-ecap
-
+
Build with support for loadable content adaptation modules.
+Cannot be used with --disable-loadable-modules.
+
+ - --enable-follow-x-forwarded-for
-
+
Support following the X-Forwarded-For HTTP header for determining the
+original or indirect client when a request has been forwarded through other
+proxies.
+
+ - --enable-zph-qos
-
+
Build with support for ZPH Quality of Service controls
- 7.1 New options
+- --disable-auto-locale
-
+
Disable error page localization for visitors.
+The error_directory option is required if this option is used.
+
+ - --disable-caps
-
+
Build without libcap support. The default is to auto-detect system capabilities
+and enable support when possible.
+NOTE: Disabling this or building without libcap support will break TPROXY support.
+
+ - --disable-ipv6
-
+
Build without IPv6 support. The default is to auto-detect system capabilities
+and build with IPv6 when possible.
+
+ - --disable-loadable-modules
-
+
Build without support for loadable modules.
+
+ - --disable-translation
-
+
Prevent Squid generating localized error page templates and manuals.
+This generation is usually attempted, but may not be needed.
+This is a development optimization for building from VCS when localization is
+not needed. Has no effect on pre-translated source bundles.
+
+ - --with-dns-cname
-
+
Enable CNAME recursion within the Internal DNS resolver stub squid uses.
+This has no effect on the external DNS helper.
+Please note this extension is still experimental and may encounter problems.
+To see if it is actually needed you can run squid without it for a period and
+check the CNAME-Only Requests statistics squid maintains.
+If it produces ongoing serious problems the external helper may be needed
+but please report the bugs anyway.
+
+ - --with-logdir=PATH
-
+
Allow build-time configuration of the default location for Squid logs.
+
+ - --with-ipv6-split-stack
-
+
Force enable special additions for IPv6 support in Windows XP and various BSD systems.
+See the IPv6 details above for a fuller description.
+
+ - --with-pidfile=PATH
-
+
Allow build-time configuration of the default location and name of the squid.pid file.
+
+ - --with-po2html=PATH
-
+
Absolute path to po2html executable.
+Default is to automatically detect the binary.
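Purely as a sketch of how several of the new switches combine (the paths are examples, not defaults):

    ./configure --enable-ecap --enable-follow-x-forwarded-for \
        --with-logdir=/var/log/squid --with-pidfile=/var/run/squid.pid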
+
+
+
+
+ 5.2 Changes to existing options
- --enable-shared[=PKGS]
-
-
Build shared libraries. The default is to build without.
+Default changed to yes.
- - --enable-static[=PKGS]
-
-
Build static libraries. The default is on.
+ - --enable-linux-netfilter
-
+
This option now enables support for all three netfilter interception targets.
+Adding TPROXY version 4+ support to squid through the netfilter TPROXY target.
+This option requires a Linux kernel 2.6.25 or later for embedded netfilter TPROXY targets.
+Older REDIRECT and DNAT targets work as before on HTTP ports marked 'intercept'.
+
+ - --enable-linux-tproxy
-
+
Deprecated. Remains only to support old TPROXY version 2.2 installations.
+
+ - --enable-ntlm-auth-helpers
-
+
The helper previously built as 'SMB' is now built as 'smb_lm'.
+It also has a new squid.conf name for usage, see auth_param above for details.
+
+ - --disable-internal-dns
-
+
Better support for Linux using the external DNS helper.
+The helper will now compile and work with dns_nameservers on more variants of Linux than previously.
+
+ - --with-aio
-
+
Deprecated. POSIX AIO is now auto-detected and enabled.
+Use --without-aio to disable, but only if you really have to.
+
+ - --with-pthreads
-
+
Deprecated. pthreads library is now auto-detected and enabled.
+Use --without-pthreads to disable, but only if you really have to.
+
+
+
+ 5.3 Removed options
+
-- --enable-fast-install[=PKGS]
-
-
-Optimize for fast installation
- default: yes
-
-
-
- - --disable-libtool-lock
-
-
Avoid locking (might break parallel builds)
-
- - --disable-optimizations
-
-
Don't compile Squid with compiler optimizations enabled.
-Optimization is good for production builds, but not
-good for debugging. During development, use
---disable-optimizations to reduce compilation times
-and allow easier debugging. This option implicitly
-also enables --disable-inline
-
- - --disable-inline
-
-
Don't compile trivial methods as inline. Squid
-is coded with much of the code able to be inlined.
-Inlining is good for production builds, but not
-good for development. During development, use
---disable-inline to reduce compilation times and
-allow incremental builds to be quick. For
-production builds, or load tests, use
---enable-inline to have squid make all trivial
-methods inlinable by the compiler.
-
- - --enable-debug-cbdata
-
-
Provide some debug information in cbdata
-
- - --enable-disk-io=\"list of modules\"
-
-
Build support for the list of disk I/O modules.
-The default is only to build the "Blocking" module.
-See src/DiskIO for a list of available modules, or
-Programmers Guide for details on how to build your
-custom disk module.
-
- - --enable-esi
-
-
Enable ESI for accelerators. Requires libexpat.
-Enabling ESI will cause squid to follow the Edge
-Acceleration Specification (www.esi.org). This
-causes squid to IGNORE client Cache-Control headers.
-DO NOT use this in a squid configured as a web
-proxy, ONLY use it in a squid configured for
-webserver acceleration.
-
- - --enable-icap-client
-
-
Enable the ICAP client.
-
- - --disable-snmp
-
-
Disable SNMP monitoring support which is now built by default.
-
- - --disable-htcp
-
-
Disable HTCP protocol support which is now built by default.
-
- - --enable-kqueue
-
-
Enable kqueue() support. Marked as experimental in 3.0.
-
- - --enable-ipfw-transparent
-
-
Enable Transparent Proxy support for systems
-using FreeBSD IPFW style redirection.
-
- - --disable-mempools
-
-
Disable memPools. Note that this option now simply sets the
-default behaviour. Specific classes can override this at runtime, and
-only lib/MemPool.c needs to be altered to change the squid-wide
-default for all classes.
-
- - --enable-cpu-profiling
-
-
This option allows you to see which internal functions
-in Squid are consuming how much CPU. Compiles in probes
-that measure time spent in probed functions. Needs
-source modifications to add new probes. This is meant
-for developers to assist in performance optimisations
-of Squid internal functions.
-If you are not developer and not interested in the stats
-you shouldn't enable this, as overhead added, although
-small, is still overhead. See lib/Profiler.c for more.
-
- - --with-gnu-ld
-
-
Assume the C compiler uses GNU ld. The default is to auto-detect.
-
- - --with-pic
-
-
Try to use only PIC/non-PIC objects. The default is to use both.
-
- - --with-tags[=TAGS]
-
-
Include additional configurations. The default is automatic.
-
- - --with-default-user=USER
-
-
Sets the default System User account for squid permissions.
-The default is 'nobody' as in other releases of squid.
-
- - --with-cppunit-basedir=[PATH]
-
-
Path where the cppunit headers and libraries are found
-for unit testing. The default is automatic detection.
-NOTE: Since 3.0-PRE6 and 2.6STABLE14 squid no longer comes
-bundled with CPPUnit. Compile-time validation will be disabled
-if it is not installed on your system.
+
+- --enable-default-err-language
-
+
Replaced by error_default_language squid.conf option
+ - --enable-err-languages
-
+
Removed. All languages are now used for error page localization.
+
+ - --disable-carp
-
+
Removed. CARP is required by several peering algorithms. Disabling it is not useful.
- 7.2 Changes to existing options
+
+
+6. Options Removed since Squid-2
+
+
Some squid.conf and ./configure options which were available in Squid-2.6 and Squid-2.7 are made obsolete in Squid-3.1.
+
+6.1 Removed squid.conf options since Squid-2.7
-- --enable-carp
-
-
CARP support is now built by default.
---disable-carp can be used to build without it.
+ - auth_param
-
+
blankpassword option for basic scheme removed.
- - --enable-htcp
-
-
HTCP protocol support is now built by default.
-Use --disable-htcp to build without it.
+ - external_acl_type
-
+
Format tag %{Header} replaced by %>{Header}
+Format tag %{Header:member} replaced by %>{Header:member}
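An updated definition using the new request-header tag might look like this (the helper path and ACL names are hypothetical):

    external_acl_type agent_check %>{User-Agent} /usr/local/bin/agent_helper
    acl good_agents external agent_check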
- - --enable-snmp
-
-
SNMP monitoring is now build by default.
-Use --disable-snmp to build without it.
+ - header_access
-
+
Replaced by request_header_access and reply_header_access
- - --enable-heap-replacement
-
-
Please use --enable-removal-policies directive instead.
+ - http_port
-
+
no-connection-auth replaced by connection-auth=[on|off]. Default is ON.
+transparent option replaced by intercept
- - --with-maxfd=N
-
-
Replaced by --with-filedescriptors=N
-Override maximum number of filedescriptors. Useful
-if you build as another user who is not privileged
-to use the number of filedescriptors you want the
-resulting binary to support
+ - httpd_accel_no_pmtu_disc
-
+
Replaced by http_port disable-pmtu-discovery= option
- - --enable-select
-
-
Deprecated.
-Automatic checks will enable best I/O loop method available.
+ - incoming_rate
-
+
Obsolete.
- - --enable-epoll
-
-
Deprecated.
-Automatic checks will enable best I/O loop method available.
+ - redirector_bypass
-
+
Replaced by url_rewrite_bypass
- - --enable-poll
-
-
Deprecated.
-Automatic checks will enable best I/O loop method available.
+ - zph_local
-
+
Replaced by qos_flows local-hit=
+
+ - zph_mode
-
+
Obsolete.
+
+ - zph_option
-
+
Obsolete.
- - --enable-kqueue
-
-
kqueue support is marked Experimental in Squid 3.0. Known to have some issues under load.
+ - zph_parent
-
+
Replaced by qos_flows parent-hit=
+
+ - zph_sibling
-
+
Replaced by qos_flows sibling-hit=
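A rough equivalent of the old zph_* trio; the TOS values shown are placeholders only:

    qos_flows local-hit=0x30 sibling-hit=0x20 parent-hit=0x10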
- 7.3 Removed options
+
+6.2 Removed squid.conf options since Squid-2.6
+
+
+
+
+- cache_dir
-
+
read-only option replaced by no-store.
+
+
+
+
+6.3 Removed ./configure options since Squid-2.7
-The following configure options have been removed.
-- --enable-dlmalloc
-
-
Most OS:es have good malloc implementations these days, and the version we used to ship with Squid was very very old..
- - --enable-mempool-debug
-
-
Debug option, not needed and therefore removed.
- - --enable-forward-log
-
-
Rarely used extra log file. Removed.
- - --enable-multicast-miss
-
-
Rarely used feature, and multicast ICP acheives almost the same result. Removed.
- --enable-coss-aio-ops
-
-
Specific to the COSS implementation in Squid-2
+Obsolete.
+
+ - --enable-devpoll
-
+
Replaced by automatic detection.
+
+ - --enable-dlmalloc=LIB
-
+
Obsolete.
+
+ - --enable-epoll
-
+
Replaced by automatic detection.
+
+ - --enable-forward-log
-
+
Obsolete.
+
+ - --enable-heap-replacement
-
+
Obsolete.
+
+ - --enable-htcp
-
+
Obsolete. Enabled by default.
+
- --enable-large-cache-files
-
-
Now enabled by default. Configure option was redundant and therefore removed.
+Obsolete.
+
+ - --enable-mempool-debug
-
+
Obsolete.
+
+ - --enable-multicast-miss
-
+
Obsolete.
+
+ - --enable-poll
-
+
Replaced by automatic detection.
+
+ - --enable-select
-
+
Replaced by automatic detection.
+
+ - --enable-select-simple
-
+
Replaced by automatic detection.
+
+ - --enable-snmp
-
+
Obsolete. Enabled by default.
+
- --enable-truncate
-
-
Known to cause race conditions where cache objects may get corrupted, and this for at most a marginal performance improvement. Removed.
+Obsolete.
+
+ - --disable-kqueue
-
+
Obsolete. Disabled by default.
-8. Regressions since Squid-2.7
+7. Regressions since Squid-2.7
-Some squid.conf and ./configure options which were available in Squid-2.7 are not yet available in Squid-3.0
+Some squid.conf and ./configure options which were available in Squid-2.7 are not yet available in Squid-3.1
If you need something to do then porting one of these from Squid-2 to Squid-3 is most welcome.
-8.1 Missing squid.conf options available in Squid-2.7
+7.1 Missing squid.conf options available in Squid-2.7
@@ -1082,9 +1942,6 @@
urllogin option not yet ported from 2.6
urlgroup option not yet ported from 2.6
- - acl_uses_indirect_client
-
-
Not yet ported from 2.6
-
- auth_param digest
-
concurrency option not yet ported from Squid-2
@@ -1109,7 +1966,6 @@
multicast-siblings not yet ported from 2.7
idle= not yet ported from 2.7
http11 not yet ported from 2.7
-connection-auth= not yet ported from 2.6
monitorinterval= not yet ported from 2.6
monitorsize= not yet ported from 2.6
monitortimeout= not yet ported from 2.6
@@ -1121,9 +1977,6 @@
- collapsed_forwarding
-
Not yet ported from 2.6
- - delay_pool_uses_indirect_client
-
-
Not yet ported from 2.6
-
- error_map
-
Not yet ported from 2.6
@@ -1134,18 +1987,13 @@
- external_refresh_check
-
Not yet ported from 2.7
- - follow_x_forwarded_for
-
-
Not yet ported from 2.6
-
- http_access2
-
Not yet ported from 2.6
- http_port
-
act-as-origin not yet ported from 2.7
-allow-direct not yet ported from 2.7
http11 not yet ported from 2.7
urlgroup= not yet ported from 2.6
-no-connection-auth not yet ported from 2.6
- ignore_expect_100
-
Not yet ported from 2.7
@@ -1165,9 +2013,6 @@
- location_rewrite_program
-
Not yet ported from 2.6
- - log_uses_indirect_client
-
-
Not yet ported from 2.6
-
- logfile_daemon
-
Not yet ported from 2.7
@@ -1217,21 +2062,11 @@
-8.2 Missing ./configure options available in Squid-2.7
+7.2 Missing ./configure options available in Squid-2.7
-- --enable-devpoll
-
-
Support for Solaris /dev/poll
-
- - --enable-select-simple
-
-
Basic POSIX select() loop without any binary fd_set optimizations.
-
- - --enable-follow-x-forwarded-for
-
-
Support following the X-Forwarded-For HTTP header for determining the
-client IP address
-
- --without-system-md5
-
diff -u -r -N squid-3.1.0.13/scripts/Makefile.in squid-3.1.0.14/scripts/Makefile.in
--- squid-3.1.0.13/scripts/Makefile.in 2009-08-05 01:32:35.000000000 +1200
+++ squid-3.1.0.14/scripts/Makefile.in 2009-09-27 15:28:54.000000000 +1200
@@ -52,6 +52,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/snmplib/Makefile.in squid-3.1.0.14/snmplib/Makefile.in
--- squid-3.1.0.13/snmplib/Makefile.in 2009-08-05 01:32:35.000000000 +1200
+++ squid-3.1.0.14/snmplib/Makefile.in 2009-09-27 15:28:54.000000000 +1200
@@ -70,6 +70,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/snmplib/mib.c squid-3.1.0.14/snmplib/mib.c
--- squid-3.1.0.13/snmplib/mib.c 2009-08-05 01:32:11.000000000 +1200
+++ squid-3.1.0.14/snmplib/mib.c 2009-09-27 15:28:28.000000000 +1200
@@ -100,7 +100,7 @@
static struct snmp_mib_tree *
- find_rfc1066_mib(struct snmp_mib_tree *root) {
+find_rfc1066_mib(struct snmp_mib_tree *root) {
oid *op = RFC1066_MIB;
struct snmp_mib_tree *tp;
int len;
@@ -283,8 +283,8 @@
}
static struct snmp_mib_tree *
- get_symbol(objid, objidlen, subtree, buf)
- oid *objid;
+get_symbol(objid, objidlen, subtree, buf)
+oid *objid;
int objidlen;
struct snmp_mib_tree *subtree;
char *buf;
diff -u -r -N squid-3.1.0.13/snmplib/parse.c squid-3.1.0.14/snmplib/parse.c
--- squid-3.1.0.13/snmplib/parse.c 2009-08-05 01:32:11.000000000 +1200
+++ squid-3.1.0.14/snmplib/parse.c 2009-09-27 15:28:28.000000000 +1200
@@ -428,7 +428,7 @@
static
#endif
struct snmp_mib_tree *
- build_tree(struct node *nodes) {
+build_tree(struct node *nodes) {
struct node *np;
struct snmp_mib_tree *tp;
int bucket, nodes_left = 0;
@@ -635,7 +635,7 @@
* Returns 0 on error.
*/
static struct node *
- parse_objectid(FILE *fp, char *name) {
+parse_objectid(FILE *fp, char *name) {
int type;
char token[64];
register int count;
@@ -744,7 +744,7 @@
* Returns 0 on error.
*/
static struct node *
- parse_objecttype(register FILE *fp, char *name) {
+parse_objecttype(register FILE *fp, char *name) {
register int type;
char token[64];
int count, length;
@@ -978,7 +978,7 @@
static
#endif
struct node *
- parse(FILE *fp) {
+parse(FILE *fp) {
char token[64];
char name[64];
int type = 1;
@@ -1062,7 +1062,7 @@
}
struct snmp_mib_tree *
- read_mib(char *filename) {
+read_mib(char *filename) {
FILE *fp;
struct node *nodes;
struct snmp_mib_tree *tree;
diff -u -r -N squid-3.1.0.13/snmplib/snmplib_debug.c squid-3.1.0.14/snmplib/snmplib_debug.c
--- squid-3.1.0.13/snmplib/snmplib_debug.c 2009-08-05 01:32:11.000000000 +1200
+++ squid-3.1.0.14/snmplib/snmplib_debug.c 2009-09-27 15:28:28.000000000 +1200
@@ -12,7 +12,7 @@
void (*snmplib_debug_hook) (int, char *,...) = NULL;
extern void
- snmplib_debug(int lvl, const char *fmt,...)
+snmplib_debug(int lvl, const char *fmt,...)
{
char buf[BUFSIZ];
va_list args;
diff -u -r -N squid-3.1.0.13/snmplib/snmp_pdu.c squid-3.1.0.14/snmplib/snmp_pdu.c
--- squid-3.1.0.13/snmplib/snmp_pdu.c 2009-08-05 01:32:11.000000000 +1200
+++ squid-3.1.0.14/snmplib/snmp_pdu.c 2009-09-27 15:28:28.000000000 +1200
@@ -104,7 +104,7 @@
*/
struct snmp_pdu *
- snmp_pdu_create(int command) {
+snmp_pdu_create(int command) {
struct snmp_pdu *pdu;
#ifdef DEBUG_PDU
@@ -138,7 +138,7 @@
/* Clone an existing PDU.
*/
struct snmp_pdu *
- snmp_pdu_clone(struct snmp_pdu *Src) {
+snmp_pdu_clone(struct snmp_pdu *Src) {
struct snmp_pdu *Dest;
#ifdef DEBUG_PDU
@@ -172,12 +172,12 @@
* be returned.
*/
struct snmp_pdu *
- snmp_pdu_fix(struct snmp_pdu *pdu, int command) {
+snmp_pdu_fix(struct snmp_pdu *pdu, int command) {
return (snmp_fix_pdu(pdu, command));
}
struct snmp_pdu *
- snmp_fix_pdu(struct snmp_pdu *pdu, int command) {
+snmp_fix_pdu(struct snmp_pdu *pdu, int command) {
struct variable_list *var, *newvar;
struct snmp_pdu *newpdu;
int i;
diff -u -r -N squid-3.1.0.13/snmplib/snmp_vars.c squid-3.1.0.14/snmplib/snmp_vars.c
--- squid-3.1.0.13/snmplib/snmp_vars.c 2009-08-05 01:32:11.000000000 +1200
+++ squid-3.1.0.14/snmplib/snmp_vars.c 2009-09-27 15:28:28.000000000 +1200
@@ -105,7 +105,7 @@
*/
struct variable_list *
- snmp_var_new(oid * Name, int Len) {
+snmp_var_new(oid * Name, int Len) {
struct variable_list *New;
#ifdef DEBUG_VARS
@@ -147,7 +147,7 @@
}
struct variable_list *
- snmp_var_new_integer(oid * Name, int Len, int ival, unsigned char type) {
+snmp_var_new_integer(oid * Name, int Len, int ival, unsigned char type) {
variable_list *v = snmp_var_new(Name, Len);
v->val_len = sizeof(int);
v->val.integer = xmalloc(sizeof(int));
@@ -162,7 +162,7 @@
*/
struct variable_list *
- snmp_var_clone(struct variable_list *Src) {
+snmp_var_clone(struct variable_list *Src) {
struct variable_list *Dest;
#ifdef DEBUG_VARS
diff -u -r -N squid-3.1.0.13/src/access_log.cc squid-3.1.0.14/src/access_log.cc
--- squid-3.1.0.13/src/access_log.cc 2009-08-05 01:32:15.000000000 +1200
+++ squid-3.1.0.14/src/access_log.cc 2009-09-27 15:28:32.000000000 +1200
@@ -401,7 +401,7 @@
LTF_ADAPTATION_ALL_XACT_TIMES,
#endif
-#if ICAP_CLIENT
+#if ICAP_CLIENT
LFT_ICAP_TOTAL_TIME,
LFT_ICAP_LAST_MATCHED_HEADER,
@@ -414,15 +414,15 @@
LFT_ICAP_REQUEST_METHOD,
LFT_ICAP_BYTES_SENT,
LFT_ICAP_BYTES_READ,
-
+
LFT_ICAP_REQ_HEADER,
LFT_ICAP_REQ_HEADER_ELEM,
LFT_ICAP_REQ_ALL_HEADERS,
-
+
LFT_ICAP_REP_HEADER,
LFT_ICAP_REP_HEADER_ELEM,
LFT_ICAP_REP_ALL_HEADERS,
-
+
LFT_ICAP_TR_RESPONSE_TIME,
LFT_ICAP_IO_TIME,
LFT_ICAP_OUTCOME,
@@ -622,7 +622,7 @@
if (al->cache.caddr.IsAnyAddr()) // e.g., ICAP OPTIONS lack client
out = "-";
else
- out = fqdncache_gethostbyaddr(al->cache.caddr, FQDN_LOOKUP_IF_MISS);
+ out = fqdncache_gethostbyaddr(al->cache.caddr, FQDN_LOOKUP_IF_MISS);
if (!out) {
out = al->cache.caddr.NtoA(tmp,1024);
}
@@ -703,7 +703,7 @@
doint = 1;
break;
- case LFT_PEER_RESPONSE_TIME:
+ case LFT_PEER_RESPONSE_TIME:
if (al->hier.peer_response_time < 0) {
out = "-";
} else {
@@ -769,7 +769,7 @@
break;
#endif
-#if ICAP_CLIENT
+#if ICAP_CLIENT
case LFT_ICAP_LAST_MATCHED_HEADER:
if (al->request) {
Adaptation::Icap::History::Pointer ih = al->request->icapHistory();
@@ -805,7 +805,7 @@
case LFT_ICAP_ADDR:
if (!out)
- out = al->icap.hostAddr.NtoA(tmp,1024);
+ out = al->icap.hostAddr.NtoA(tmp,1024);
break;
case LFT_ICAP_SERV_NAME:
@@ -880,7 +880,7 @@
break;
- case LFT_ICAP_REP_ALL_HEADERS:
+ case LFT_ICAP_REP_ALL_HEADERS:
if (al->icap.reply) {
HttpHeaderPos pos = HttpHeaderInitPos;
while (const HttpHeaderEntry *e = al->icap.reply->header.getEntry(&pos)) {
@@ -996,8 +996,8 @@
/* case LFT_USER_REALM: */
/* case LFT_USER_SCHEME: */
- // the fmt->type can not be LFT_HTTP_SENT_STATUS_CODE_OLD_30
- // but compiler complains if ommited
+ // the fmt->type can not be LFT_HTTP_SENT_STATUS_CODE_OLD_30
+ // but compiler complains if ommited
case LFT_HTTP_SENT_STATUS_CODE_OLD_30:
case LFT_HTTP_SENT_STATUS_CODE:
outint = al->http.code;
@@ -1007,10 +1007,9 @@
break;
case LFT_HTTP_RECEIVED_STATUS_CODE:
- if(al->hier.peer_reply_status == HTTP_STATUS_NONE) {
+ if (al->hier.peer_reply_status == HTTP_STATUS_NONE) {
out = "-";
- }
- else {
+ } else {
outint = al->hier.peer_reply_status;
doint = 1;
}
@@ -1316,12 +1315,12 @@
cur++;
}
- // For upward compatibility, assume "http::" prefix as default prefix
+ // For upward compatibility, assume "http::" prefix as default prefix
// for all log access formating codes, except those starting
// from "icap::", "adapt::" and "%"
if (strncmp(cur,"http::", 6) == 0 &&
- strncmp(cur+6, "icap::", 6) != 0 &&
- strncmp(cur+6, "adapt::", 12) != 0 && *(cur+6) != '%' ) {
+ strncmp(cur+6, "icap::", 6) != 0 &&
+ strncmp(cur+6, "adapt::", 12) != 0 && *(cur+6) != '%' ) {
cur += 6;
}
@@ -1349,7 +1348,7 @@
switch (lt->type) {
-#if ICAP_CLIENT
+#if ICAP_CLIENT
case LFT_ICAP_LAST_MATCHED_HEADER:
case LFT_ICAP_REQ_HEADER:
@@ -1375,29 +1374,51 @@
lt->data.header.element = cp;
- switch(lt->type) {
- case LFT_REQUEST_HEADER: lt->type = LFT_REQUEST_HEADER_ELEM; break;
- case LFT_REPLY_HEADER: lt->type = LFT_REPLY_HEADER_ELEM; break;
+ switch (lt->type) {
+ case LFT_REQUEST_HEADER:
+ lt->type = LFT_REQUEST_HEADER_ELEM;
+ break;
+ case LFT_REPLY_HEADER:
+ lt->type = LFT_REPLY_HEADER_ELEM;
+ break;
#if ICAP_CLIENT
- case LFT_ICAP_LAST_MATCHED_HEADER: lt->type = LFT_ICAP_LAST_MATCHED_HEADER_ELEM; break;
- case LFT_ICAP_REQ_HEADER: lt->type = LFT_ICAP_REQ_HEADER_ELEM; break;
- case LFT_ICAP_REP_HEADER: lt->type = LFT_ICAP_REP_HEADER_ELEM; break;
+ case LFT_ICAP_LAST_MATCHED_HEADER:
+ lt->type = LFT_ICAP_LAST_MATCHED_HEADER_ELEM;
+ break;
+ case LFT_ICAP_REQ_HEADER:
+ lt->type = LFT_ICAP_REQ_HEADER_ELEM;
+ break;
+ case LFT_ICAP_REP_HEADER:
+ lt->type = LFT_ICAP_REP_HEADER_ELEM;
+ break;
#endif
- default:break;
+ default:
+ break;
}
}
lt->data.header.header = header;
} else {
- switch(lt->type) {
- case LFT_REQUEST_HEADER: lt->type = LFT_REQUEST_ALL_HEADERS; break;
- case LFT_REPLY_HEADER: lt->type = LFT_REPLY_ALL_HEADERS; break;
+ switch (lt->type) {
+ case LFT_REQUEST_HEADER:
+ lt->type = LFT_REQUEST_ALL_HEADERS;
+ break;
+ case LFT_REPLY_HEADER:
+ lt->type = LFT_REPLY_ALL_HEADERS;
+ break;
#if ICAP_CLIENT
- case LFT_ICAP_LAST_MATCHED_HEADER: lt->type = LFT_ICAP_LAST_MATCHED_ALL_HEADERS; break;
- case LFT_ICAP_REQ_HEADER: lt->type = LFT_ICAP_REQ_ALL_HEADERS; break;
- case LFT_ICAP_REP_HEADER: lt->type = LFT_ICAP_REP_ALL_HEADERS; break;
+ case LFT_ICAP_LAST_MATCHED_HEADER:
+ lt->type = LFT_ICAP_LAST_MATCHED_ALL_HEADERS;
+ break;
+ case LFT_ICAP_REQ_HEADER:
+ lt->type = LFT_ICAP_REQ_ALL_HEADERS;
+ break;
+ case LFT_ICAP_REP_HEADER:
+ lt->type = LFT_ICAP_REP_ALL_HEADERS;
+ break;
#endif
- default:break;
+ default:
+ break;
}
Config.onoff.log_mime_hdrs = 1;
}
@@ -1426,7 +1447,7 @@
case LFT_HTTP_SENT_STATUS_CODE_OLD_30:
debugs(46, 0, "WARNING: the \"Hs\" formating code is deprecated use the \">Hs\" instead");
- lt->type = LFT_HTTP_SENT_STATUS_CODE;
+ lt->type = LFT_HTTP_SENT_STATUS_CODE;
break;
default:
break;
@@ -1505,15 +1526,26 @@
arg = argbuf;
- switch(type) {
- case LFT_REQUEST_HEADER_ELEM: type = LFT_REQUEST_HEADER_ELEM; break;
- case LFT_REPLY_HEADER_ELEM: type = LFT_REPLY_HEADER_ELEM; break;
+ switch (type) {
+ case LFT_REQUEST_HEADER_ELEM:
+ type = LFT_REQUEST_HEADER_ELEM;
+ break;
+ case LFT_REPLY_HEADER_ELEM:
+ type = LFT_REPLY_HEADER_ELEM;
+ break;
#if ICAP_CLIENT
- case LFT_ICAP_LAST_MATCHED_HEADER_ELEM: type = LFT_ICAP_LAST_MATCHED_HEADER; break;
- case LFT_ICAP_REQ_HEADER_ELEM: type = LFT_ICAP_REQ_HEADER; break;
- case LFT_ICAP_REP_HEADER_ELEM: type = LFT_ICAP_REP_HEADER; break;
+ case LFT_ICAP_LAST_MATCHED_HEADER_ELEM:
+ type = LFT_ICAP_LAST_MATCHED_HEADER;
+ break;
+ case LFT_ICAP_REQ_HEADER_ELEM:
+ type = LFT_ICAP_REQ_HEADER;
+ break;
+ case LFT_ICAP_REP_HEADER_ELEM:
+ type = LFT_ICAP_REP_HEADER;
+ break;
#endif
- default:break;
+ default:
+ break;
}
break;
@@ -1528,15 +1560,26 @@
case LFT_ICAP_REP_ALL_HEADERS:
#endif
- switch(type) {
- case LFT_REQUEST_ALL_HEADERS: type = LFT_REQUEST_HEADER; break;
- case LFT_REPLY_ALL_HEADERS: type = LFT_REPLY_HEADER; break;
+ switch (type) {
+ case LFT_REQUEST_ALL_HEADERS:
+ type = LFT_REQUEST_HEADER;
+ break;
+ case LFT_REPLY_ALL_HEADERS:
+ type = LFT_REPLY_HEADER;
+ break;
#if ICAP_CLIENT
- case LFT_ICAP_LAST_MATCHED_ALL_HEADERS: type = LFT_ICAP_LAST_MATCHED_HEADER; break;
- case LFT_ICAP_REQ_ALL_HEADERS: type = LFT_ICAP_REQ_HEADER; break;
- case LFT_ICAP_REP_ALL_HEADERS: type = LFT_ICAP_REP_HEADER; break;
+ case LFT_ICAP_LAST_MATCHED_ALL_HEADERS:
+ type = LFT_ICAP_LAST_MATCHED_HEADER;
+ break;
+ case LFT_ICAP_REQ_ALL_HEADERS:
+ type = LFT_ICAP_REQ_HEADER;
+ break;
+ case LFT_ICAP_REP_ALL_HEADERS:
+ type = LFT_ICAP_REP_HEADER;
+ break;
#endif
- default:break;
+ default:
+ break;
}
break;
@@ -1775,24 +1818,24 @@
if (user && !*user)
safe_free(user);
- logfilePrintf(logfile, "%9ld.%03d %6d %s -/%03d %"PRId64" %s %s %s -/%s -\n",
- (long int) current_time.tv_sec,
- (int) current_time.tv_usec / 1000,
+ logfilePrintf(logfile, "%9ld.%03d %6d %s -/%03d %"PRId64" %s %s %s -/%s -\n",
+ (long int) current_time.tv_sec,
+ (int) current_time.tv_usec / 1000,
- al->icap.trTime,
- client,
+ al->icap.trTime,
+ client,
- al->icap.resStatus,
- al->icap.bytesRead,
- Adaptation::Icap::ICAP::methodStr(al->icap.reqMethod),
- al->icap.reqUri.termedBuf(),
- user ? user : dash_str,
- al->icap.hostAddr.NtoA(tmp, MAX_IPSTRLEN));
+ al->icap.resStatus,
+ al->icap.bytesRead,
+ Adaptation::Icap::ICAP::methodStr(al->icap.reqMethod),
+ al->icap.reqUri.termedBuf(),
+ user ? user : dash_str,
+ al->icap.hostAddr.NtoA(tmp, MAX_IPSTRLEN));
safe_free(user);
}
#endif
-void
+void
accessLogLogTo(customlog* log, AccessLogEntry * al, ACLChecklist * checklist)
{
@@ -1873,7 +1916,7 @@
{
if (LogfileStatus != LOG_ENABLE)
return;
-
+
accessLogLogTo(Config.Log.accesslogs, al, checklist);
#if MULTICAST_MISS_STREAM
@@ -2009,19 +2052,17 @@
#if USE_ADAPTATION || ICAP_CLIENT
alLogformatHasAdaptToken = false;
alLogformatHasIcapToken = false;
- for (logformat_token * curr_token = (log->logFormat?log->logFormat->format:NULL); curr_token; curr_token = curr_token->next)
- {
+ for (logformat_token * curr_token = (log->logFormat?log->logFormat->format:NULL); curr_token; curr_token = curr_token->next) {
#if USE_ADAPTATION
if (curr_token->type == LTF_ADAPTATION_SUM_XACT_TIMES ||
- curr_token->type == LTF_ADAPTATION_ALL_XACT_TIMES) {
+ curr_token->type == LTF_ADAPTATION_ALL_XACT_TIMES) {
alLogformatHasAdaptToken = true;
}
-#endif
+#endif
#if ICAP_CLIENT
if (curr_token->type == LFT_ICAP_LAST_MATCHED_HEADER ||
- curr_token->type == LFT_ICAP_LAST_MATCHED_HEADER_ELEM ||
- curr_token->type == LFT_ICAP_LAST_MATCHED_ALL_HEADERS)
- {
+ curr_token->type == LFT_ICAP_LAST_MATCHED_HEADER_ELEM ||
+ curr_token->type == LFT_ICAP_LAST_MATCHED_ALL_HEADERS) {
alLogformatHasIcapToken = true;
}
#endif
diff -u -r -N squid-3.1.0.13/src/AccessLogEntry.h squid-3.1.0.14/src/AccessLogEntry.h
--- squid-3.1.0.13/src/AccessLogEntry.h 2009-08-05 01:32:11.000000000 +1200
+++ squid-3.1.0.14/src/AccessLogEntry.h 2009-09-27 15:28:28.000000000 +1200
@@ -127,9 +127,9 @@
public:
Headers() : request(NULL),
#if ICAP_CLIENT
- icap(NULL),
+ icap(NULL),
#endif
- reply(NULL) {}
+ reply(NULL) {}
char *request;
@@ -157,9 +157,10 @@
/** \brief This subclass holds log info for ICAP part of request
* \todo Inner class declarations should be moved outside
*/
- class IcapLogEntry {
+ class IcapLogEntry
+ {
public:
- IcapLogEntry():request(NULL),reply(NULL),outcome(Adaptation::Icap::xoUnknown),trTime(0),ioTime(0),resStatus(HTTP_STATUS_NONE){}
+ IcapLogEntry():request(NULL),reply(NULL),outcome(Adaptation::Icap::xoUnknown),trTime(0),ioTime(0),resStatus(HTTP_STATUS_NONE) {}
IpAddress hostAddr; ///< ICAP server IP address
String serviceName; ///< ICAP service name
@@ -178,7 +179,7 @@
int trTime;
/** \brief Transaction I/O time.
* The timer starts when the first ICAP request
- * byte is scheduled for sending and stops when the lastbyte of the
+ * byte is scheduled for sending and stops when the lastbyte of the
* ICAP response is received.
*/
int ioTime;
diff -u -r -N squid-3.1.0.13/src/acl/Arp.cc squid-3.1.0.14/src/acl/Arp.cc
--- squid-3.1.0.13/src/acl/Arp.cc 2009-08-05 01:32:15.000000000 +1200
+++ squid-3.1.0.14/src/acl/Arp.cc 2009-09-27 15:28:32.000000000 +1200
@@ -437,7 +437,7 @@
return (0 == splayLastResult);
}
-#elif defined(_SQUID_FREEBSD_) || defined(_SQUID_NETBSD_) || defined(_SQUID_OPENBSD_) || defined(_SQUID_DRAGONFLY_)
+#elif defined(_SQUID_FREEBSD_) || defined(_SQUID_NETBSD_) || defined(_SQUID_OPENBSD_) || defined(_SQUID_DRAGONFLY_) || defined(_SQUID_KFREEBSD_)
SplayNode **Top = dataptr;
diff -u -r -N squid-3.1.0.13/src/acl/Ip.cc squid-3.1.0.14/src/acl/Ip.cc
--- squid-3.1.0.13/src/acl/Ip.cc 2009-08-05 01:32:15.000000000 +1200
+++ squid-3.1.0.14/src/acl/Ip.cc 2009-09-27 15:28:32.000000000 +1200
@@ -303,18 +303,18 @@
// IPv6
} else if (sscanf(t, SCAN_ACL1_6, addr1, addr2, mask) == 3) {
- debugs(28, 9, "aclIpParseIpData: '" << t << "' matched: SCAN1-v4: " << SCAN_ACL1_6);
+ debugs(28, 9, "aclIpParseIpData: '" << t << "' matched: SCAN1-v6: " << SCAN_ACL1_6);
iptype=AF_INET6;
} else if (sscanf(t, SCAN_ACL2_6, addr1, addr2, &c) >= 2) {
- debugs(28, 9, "aclIpParseIpData: '" << t << "' matched: SCAN2-v4: " << SCAN_ACL2_6);
+ debugs(28, 9, "aclIpParseIpData: '" << t << "' matched: SCAN2-v6: " << SCAN_ACL2_6);
mask[0] = '\0';
iptype=AF_INET6;
} else if (sscanf(t, SCAN_ACL3_6, addr1, mask) == 2) {
- debugs(28, 9, "aclIpParseIpData: '" << t << "' matched: SCAN3-v4: " << SCAN_ACL3_6);
+ debugs(28, 9, "aclIpParseIpData: '" << t << "' matched: SCAN3-v6: " << SCAN_ACL3_6);
addr2[0] = '\0';
iptype=AF_INET6;
} else if (sscanf(t, SCAN_ACL4_6, addr1, mask) == 2) {
- debugs(28, 9, "aclIpParseIpData: '" << t << "' matched: SCAN4-v4: " << SCAN_ACL4_6);
+ debugs(28, 9, "aclIpParseIpData: '" << t << "' matched: SCAN4-v6: " << SCAN_ACL4_6);
addr2[0] = '\0';
iptype=AF_INET6;
diff -u -r -N squid-3.1.0.13/src/acl/Makefile.in squid-3.1.0.14/src/acl/Makefile.in
--- squid-3.1.0.13/src/acl/Makefile.in 2009-08-05 01:32:36.000000000 +1200
+++ squid-3.1.0.14/src/acl/Makefile.in 2009-09-27 15:28:55.000000000 +1200
@@ -120,6 +120,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/src/acl/MethodData.cc squid-3.1.0.14/src/acl/MethodData.cc
--- squid-3.1.0.13/src/acl/MethodData.cc 2009-08-05 01:32:15.000000000 +1200
+++ squid-3.1.0.14/src/acl/MethodData.cc 2009-09-27 15:28:32.000000000 +1200
@@ -91,7 +91,7 @@
for (Tail = &values; *Tail; Tail = &((*Tail)->next));
while ((t = strtokFile())) {
- if(strcmp(t, "PURGE") == 0)
+ if (strcmp(t, "PURGE") == 0)
++ThePurgeCount; // configuration code wants to know
CbDataList *q = new CbDataList (HttpRequestMethod(t, NULL));
*(Tail) = q;
diff -u -r -N squid-3.1.0.13/src/adaptation/AccessCheck.cc squid-3.1.0.14/src/adaptation/AccessCheck.cc
--- squid-3.1.0.13/src/adaptation/AccessCheck.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/AccessCheck.cc 2009-09-27 15:28:32.000000000 +1200
@@ -24,7 +24,7 @@
if (Config::Enabled) {
// the new check will call the callback and delete self, eventually
return AsyncStart(new AccessCheck(
- ServiceFilter(method, vp, req, rep), cb, cbdata));
+ ServiceFilter(method, vp, req, rep), cb, cbdata));
}
debugs(83, 3, HERE << "adaptation off, skipping");
@@ -34,10 +34,10 @@
Adaptation::AccessCheck::AccessCheck(const ServiceFilter &aFilter,
AccessCheckCallback *aCallback,
void *aCallbackData):
- AsyncJob("AccessCheck"), filter(aFilter),
- callback(aCallback),
- callback_data(cbdataReference(aCallbackData)),
- acl_checklist(NULL)
+ AsyncJob("AccessCheck"), filter(aFilter),
+ callback(aCallback),
+ callback_data(cbdataReference(aCallbackData)),
+ acl_checklist(NULL)
{
#if ICAP_CLIENT
Adaptation::Icap::History::Pointer h = filter.request->icapHistory();
@@ -46,7 +46,7 @@
#endif
debugs(93, 5, HERE << "AccessCheck constructed for " <<
- methodStr(filter.method) << " " << vectPointStr(filter.point));
+ methodStr(filter.method) << " " << vectPointStr(filter.point));
}
Adaptation::AccessCheck::~AccessCheck()
@@ -61,9 +61,10 @@
}
void
-Adaptation::AccessCheck::start() {
- AsyncJob::start();
- check();
+Adaptation::AccessCheck::start()
+{
+ AsyncJob::start();
+ check();
}
/// Walk the access rules list to find rules with applicable service groups
@@ -184,7 +185,7 @@
Adaptation::AccessCheck::isCandidate(AccessRule &r)
{
debugs(93,7,HERE << "checking candidacy of " << r.id << ", group " <<
- r.groupId);
+ r.groupId);
ServiceGroupPointer g = FindGroup(r.groupId);
diff -u -r -N squid-3.1.0.13/src/adaptation/Config.cc squid-3.1.0.14/src/adaptation/Config.cc
--- squid-3.1.0.13/src/adaptation/Config.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/Config.cc 2009-09-27 15:28:32.000000000 +1200
@@ -54,7 +54,7 @@
ServiceConfig *cfg = new ServiceConfig;
if (!cfg->parse()) {
fatalf("%s:%d: malformed adaptation service configuration",
- cfg_filename, config_lineno);
+ cfg_filename, config_lineno);
}
serviceConfigs.push_back(cfg);
}
diff -u -r -N squid-3.1.0.13/src/adaptation/ecap/Config.h squid-3.1.0.14/src/adaptation/ecap/Config.h
--- squid-3.1.0.13/src/adaptation/ecap/Config.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/ecap/Config.h 2009-09-27 15:28:32.000000000 +1200
@@ -11,7 +11,8 @@
namespace Adaptation
{
-namespace Ecap {
+namespace Ecap
+{
class Config: public Adaptation::Config
{
diff -u -r -N squid-3.1.0.13/src/adaptation/ecap/Host.h squid-3.1.0.14/src/adaptation/ecap/Host.h
--- squid-3.1.0.13/src/adaptation/ecap/Host.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/ecap/Host.h 2009-09-27 15:28:32.000000000 +1200
@@ -10,7 +10,8 @@
namespace Adaptation
{
-namespace Ecap {
+namespace Ecap
+{
// Squid wrapper, providing host application functionality to eCAP services.
class Host : public libecap::host::Host
diff -u -r -N squid-3.1.0.13/src/adaptation/ecap/Makefile.in squid-3.1.0.14/src/adaptation/ecap/Makefile.in
--- squid-3.1.0.13/src/adaptation/ecap/Makefile.in 2009-08-05 01:32:37.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/ecap/Makefile.in 2009-09-27 15:28:55.000000000 +1200
@@ -77,6 +77,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/src/adaptation/ecap/MessageRep.cc squid-3.1.0.14/src/adaptation/ecap/MessageRep.cc
--- squid-3.1.0.13/src/adaptation/ecap/MessageRep.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/ecap/MessageRep.cc 2009-09-27 15:28:32.000000000 +1200
@@ -348,11 +348,10 @@
if (HttpRequest *req = dynamic_cast(theMessage.header))
theFirstLineRep = new RequestLineRep(*req);
+ else if (HttpReply *rep = dynamic_cast(theMessage.header))
+ theFirstLineRep = new StatusLineRep(*rep);
else
- if (HttpReply *rep = dynamic_cast(theMessage.header))
- theFirstLineRep = new StatusLineRep(*rep);
- else
- Must(false); // unknown message header type
+ Must(false); // unknown message header type
theHeaderRep = new HeaderRep(*theMessage.header);
diff -u -r -N squid-3.1.0.13/src/adaptation/ecap/MessageRep.h squid-3.1.0.14/src/adaptation/ecap/MessageRep.h
--- squid-3.1.0.13/src/adaptation/ecap/MessageRep.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/ecap/MessageRep.h 2009-09-27 15:28:32.000000000 +1200
@@ -21,7 +21,8 @@
namespace Adaptation
{
-namespace Ecap {
+namespace Ecap
+{
class XactionRep;
diff -u -r -N squid-3.1.0.13/src/adaptation/ecap/ServiceRep.h squid-3.1.0.14/src/adaptation/ecap/ServiceRep.h
--- squid-3.1.0.13/src/adaptation/ecap/ServiceRep.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/ecap/ServiceRep.h 2009-09-27 15:28:32.000000000 +1200
@@ -13,7 +13,8 @@
namespace Adaptation
{
-namespace Ecap {
+namespace Ecap
+{
/* The eCAP service representative maintains information about a single eCAP
service that Squid communicates with. One eCAP module may register many
diff -u -r -N squid-3.1.0.13/src/adaptation/ecap/XactionRep.cc squid-3.1.0.14/src/adaptation/ecap/XactionRep.cc
--- squid-3.1.0.13/src/adaptation/ecap/XactionRep.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/ecap/XactionRep.cc 2009-09-27 15:28:32.000000000 +1200
@@ -20,7 +20,7 @@
theVirginRep(virginHeader), theCauseRep(NULL),
proxyingVb(opUndecided), proxyingAb(opUndecided),
adaptHistoryId(-1),
- canAccessVb(false),
+ canAccessVb(false),
abProductionFinished(false), abProductionAtEnd(false)
{
if (virginCause)
@@ -60,10 +60,10 @@
proxyingVb = opNever;
const HttpRequest *request = dynamic_cast (theCauseRep ?
- theCauseRep->raw().header : theVirginRep.raw().header);
+ theCauseRep->raw().header : theVirginRep.raw().header);
Must(request);
Adaptation::History::Pointer ah = request->adaptLogHistory();
- if (ah != NULL) {
+ if (ah != NULL) {
// retrying=false because ecap never retries transactions
adaptHistoryId = ah->recordXactStart(service().cfg().key, current_time, false);
}
@@ -96,7 +96,7 @@
terminateMaster();
const HttpRequest *request = dynamic_cast(theCauseRep ?
- theCauseRep->raw().header : theVirginRep.raw().header);
+ theCauseRep->raw().header : theVirginRep.raw().header);
Must(request);
Adaptation::History::Pointer ah = request->adaptLogHistory();
if (ah != NULL && adaptHistoryId >= 0)
@@ -188,11 +188,10 @@
stopConsumingFrom(vbody_pipe);
canAccessVb = false;
proxyingVb = opComplete;
- } else
- if (proxyingVb == opUndecided) {
- vbody_pipe = NULL; // it is not our pipe anymore
- proxyingVb = opNever;
- }
+ } else if (proxyingVb == opUndecided) {
+ vbody_pipe = NULL; // it is not our pipe anymore
+ proxyingVb = opNever;
+ }
sendAnswer(clone);
Must(done());
@@ -410,8 +409,7 @@
stopProducingFor(answer().body_pipe, abProductionAtEnd);
proxyingAb = opComplete;
debugs(93,5, HERE << "last adapted body data retrieved");
- } else
- if (c.size > 0) {
+ } else if (c.size > 0) {
if (const size_t used = answer().body_pipe->putMoreData(c.start, c.size))
theMaster->abContentShift(used);
}
diff -u -r -N squid-3.1.0.13/src/adaptation/ecap/XactionRep.h squid-3.1.0.14/src/adaptation/ecap/XactionRep.h
--- squid-3.1.0.13/src/adaptation/ecap/XactionRep.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/ecap/XactionRep.h 2009-09-27 15:28:32.000000000 +1200
@@ -18,13 +18,14 @@
namespace Adaptation
{
-namespace Ecap {
+namespace Ecap
+{
/* The eCAP xaction representative maintains information about a single eCAP
xaction that Squid communicates with. One eCAP module may register many
eCAP xactions. */
class XactionRep : public Adaptation::Initiate, public libecap::host::Xaction,
- public BodyConsumer, public BodyProducer
+ public BodyConsumer, public BodyProducer
{
public:
XactionRep(Adaptation::Initiator *anInitiator, HttpMsg *virginHeader, HttpRequest *virginCause, const Adaptation::ServicePointer &service);
diff -u -r -N squid-3.1.0.13/src/adaptation/History.cc squid-3.1.0.14/src/adaptation/History.cc
--- squid-3.1.0.13/src/adaptation/History.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/History.cc 2009-09-27 15:28:32.000000000 +1200
@@ -10,12 +10,12 @@
const static char *TheNullServices = ",null,";
Adaptation::History::Entry::Entry(const String &sid, const timeval &when):
- service(sid), start(when), theRptm(-1), retried(false)
+ service(sid), start(when), theRptm(-1), retried(false)
{
}
Adaptation::History::Entry::Entry():
- start(current_time), theRptm(-1), retried(false)
+ start(current_time), theRptm(-1), retried(false)
{
}
@@ -33,7 +33,8 @@
}
-Adaptation::History::History(): theNextServices(TheNullServices) {
+Adaptation::History::History(): theNextServices(TheNullServices)
+{
}
int Adaptation::History::recordXactStart(const String &sid, const timeval &when, bool retrying)
@@ -82,8 +83,7 @@
for (ECI i = theEntries.begin(); i != theEntries.end(); ++i) {
if (i->retried) { // do not log retried xact but accumulate their time
retriedRptm += i->rptm();
- } else
- if (!serviceId || i->service == serviceId) {
+ } else if (!serviceId || i->service == serviceId) {
if (s.size() > 0) // not the first logged time, must delimit
s.append(",");
@@ -99,7 +99,7 @@
}
// the last transaction is never retried or it would not be the last
- Must(!retriedRptm);
+ Must(!retriedRptm);
}
void Adaptation::History::updateXxRecord(const char *name, const String &value)
@@ -111,7 +111,7 @@
bool Adaptation::History::getXxRecord(String &name, String &value) const
{
if (theXxName.size() <= 0)
- return false;
+ return false;
name = theXxName;
value = theXxValue;
@@ -121,7 +121,7 @@
void Adaptation::History::updateNextServices(const String &services)
{
if (theNextServices != TheNullServices)
- debugs(93,3, HERE << "old services: " << theNextServices);
+ debugs(93,3, HERE << "old services: " << theNextServices);
debugs(93,3, HERE << "new services: " << services);
Must(services != TheNullServices);
theNextServices = services;
@@ -130,7 +130,7 @@
bool Adaptation::History::extractNextServices(String &value)
{
if (theNextServices == TheNullServices)
- return false;
+ return false;
value = theNextServices;
theNextServices = TheNullServices; // prevents resetting the plan twice
diff -u -r -N squid-3.1.0.13/src/adaptation/History.h squid-3.1.0.14/src/adaptation/History.h
--- squid-3.1.0.13/src/adaptation/History.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/History.h 2009-09-27 15:28:32.000000000 +1200
@@ -5,11 +5,13 @@
#include "Array.h"
#include "SquidString.h"
-namespace Adaptation {
+namespace Adaptation
+{
/// collects information about adaptations related to a master transaction
-class History: public RefCountable {
+class History: public RefCountable
+{
public:
typedef RefCount Pointer;
@@ -41,7 +43,8 @@
private:
/// single Xaction stats (i.e., a historical record entry)
- class Entry {
+ class Entry
+ {
public:
Entry(const String &serviceId, const timeval &when);
Entry(); // required by Vector<>
@@ -58,8 +61,8 @@
public:
bool retried; ///< whether the xaction was replaced by another
};
-
- typedef Vector Entries;
+
+ typedef Vector Entries;
Entries theEntries; ///< historical record, in the order of xact starts
// theXx* will become a map, but we only support one record
diff -u -r -N squid-3.1.0.13/src/adaptation/icap/Client.h squid-3.1.0.14/src/adaptation/icap/Client.h
--- squid-3.1.0.13/src/adaptation/icap/Client.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/icap/Client.h 2009-09-27 15:28:32.000000000 +1200
@@ -39,7 +39,8 @@
namespace Adaptation
{
-namespace Icap {
+namespace Icap
+{
extern void InitModule();
extern void CleanModule();
diff -u -r -N squid-3.1.0.13/src/adaptation/icap/Config.h squid-3.1.0.14/src/adaptation/icap/Config.h
--- squid-3.1.0.13/src/adaptation/icap/Config.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/icap/Config.h 2009-09-27 15:28:32.000000000 +1200
@@ -45,7 +45,8 @@
namespace Adaptation
{
-namespace Icap {
+namespace Icap
+{
class ConfigParser;
diff -u -r -N squid-3.1.0.13/src/adaptation/icap/Elements.cc squid-3.1.0.14/src/adaptation/icap/Elements.cc
--- squid-3.1.0.13/src/adaptation/icap/Elements.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/icap/Elements.cc 2009-09-27 15:28:32.000000000 +1200
@@ -2,8 +2,10 @@
#include "adaptation/icap/Elements.h"
// TODO: remove this file?
-namespace Adaptation {
-namespace Icap {
+namespace Adaptation
+{
+namespace Icap
+{
const XactOutcome xoUnknown = "ICAP_ERR_UNKNOWN";
const XactOutcome xoError = "ICAP_ERR_OTHER";
diff -u -r -N squid-3.1.0.13/src/adaptation/icap/Elements.h squid-3.1.0.14/src/adaptation/icap/Elements.h
--- squid-3.1.0.13/src/adaptation/icap/Elements.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/icap/Elements.h 2009-09-27 15:28:32.000000000 +1200
@@ -41,10 +41,12 @@
namespace Adaptation
{
-namespace Icap {
+namespace Icap
+{
//TODO: remove the ICAP namespace
-namespace ICAP {
+namespace ICAP
+{
using Adaptation::Method;
using Adaptation::methodNone;
using Adaptation::methodRespmod;
diff -u -r -N squid-3.1.0.13/src/adaptation/icap/History.cc squid-3.1.0.14/src/adaptation/icap/History.cc
--- squid-3.1.0.13/src/adaptation/icap/History.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/icap/History.cc 2009-09-27 15:28:32.000000000 +1200
@@ -4,8 +4,8 @@
#include "SquidTime.h"
Adaptation::Icap::History::History(): mergeOfIcapHeaders(hoRequest),
- lastIcapHeader(hoRequest), logType(LOG_TAG_NONE), req_sz(0),
- pastTime(0), concurrencyLevel(0)
+ lastIcapHeader(hoRequest), logType(LOG_TAG_NONE), req_sz(0),
+ pastTime(0), concurrencyLevel(0)
{
}
@@ -21,7 +21,7 @@
rfc931.clean();
#if USE_SSL
ssluser.clean();
-#endif
+#endif
log_uri.clean();
}
@@ -65,16 +65,16 @@
mergeOfIcapHeaders.compact();
}
-void Adaptation::Icap::History::start(const char *context)
+void Adaptation::Icap::History::start(const char *context)
{
if (!concurrencyLevel++)
currentStart = current_time;
debugs(93,4, HERE << "start " << context << " level=" << concurrencyLevel
- << " time=" << pastTime << ' ' << this);
+ << " time=" << pastTime << ' ' << this);
}
-void Adaptation::Icap::History::stop(const char *context)
+void Adaptation::Icap::History::stop(const char *context)
{
if (!concurrencyLevel) {
debugs(93,1, HERE << "Internal error: poor history accounting " << this);
@@ -83,8 +83,8 @@
const int current = currentTime();
debugs(93,4, HERE << "stop " << context << " level=" << concurrencyLevel <<
- " time=" << pastTime << '+' << current << ' ' << this);
-
+ " time=" << pastTime << '+' << current << ' ' << this);
+
if (!--concurrencyLevel)
pastTime += current;
}
@@ -99,5 +99,5 @@
int Adaptation::Icap::History::currentTime() const
{
return concurrencyLevel > 0 ?
- max(0, tvSubMsec(currentStart, current_time)) : 0;
+ max(0, tvSubMsec(currentStart, current_time)) : 0;
}
diff -u -r -N squid-3.1.0.13/src/adaptation/icap/History.h squid-3.1.0.14/src/adaptation/icap/History.h
--- squid-3.1.0.13/src/adaptation/icap/History.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/icap/History.h 2009-09-27 15:28:32.000000000 +1200
@@ -5,11 +5,14 @@
#include "HttpHeader.h"
#include "enums.h"
-namespace Adaptation {
-namespace Icap {
+namespace Adaptation
+{
+namespace Icap
+{
/// collects information about ICAP processing related to an HTTP transaction
-class History: public RefCountable {
+class History: public RefCountable
+{
public:
typedef RefCount Pointer;
diff -u -r -N squid-3.1.0.13/src/adaptation/icap/icap_log.cc squid-3.1.0.14/src/adaptation/icap/icap_log.cc
--- squid-3.1.0.13/src/adaptation/icap/icap_log.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/icap/icap_log.cc 2009-09-27 15:28:32.000000000 +1200
@@ -22,7 +22,7 @@
}
}
-void
+void
icapLogClose()
{
customlog *log;
diff -u -r -N squid-3.1.0.13/src/adaptation/icap/InOut.h squid-3.1.0.14/src/adaptation/icap/InOut.h
--- squid-3.1.0.13/src/adaptation/icap/InOut.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/icap/InOut.h 2009-09-27 15:28:32.000000000 +1200
@@ -46,7 +46,8 @@
namespace Adaptation
{
-namespace Icap {
+namespace Icap
+{
class InOut
{
diff -u -r -N squid-3.1.0.13/src/adaptation/icap/Launcher.cc squid-3.1.0.14/src/adaptation/icap/Launcher.cc
--- squid-3.1.0.13/src/adaptation/icap/Launcher.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/icap/Launcher.cc 2009-09-27 15:28:32.000000000 +1200
@@ -72,7 +72,7 @@
{
debugs(93,5, HERE << "launches: " << theLaunches << "; final: " << final);
clearAdaptation(theXaction);
-
+
Must(done()); // swanSong will notify the initiator
}
@@ -80,12 +80,11 @@
{
debugs(93,5, HERE << "theXaction:" << theXaction << " launches: " << theLaunches);
- // TODO: add more checks from FwdState::checkRetry()?
+ // TODO: add more checks from FwdState::checkRetry()?
if (canRetry(info)) {
clearAdaptation(theXaction);
launchXaction("retry");
- }
- else if (canRepeat(info)) {
+ } else if (canRepeat(info)) {
clearAdaptation(theXaction);
launchXaction("repeat");
} else {
@@ -93,7 +92,7 @@
clearAdaptation(theXaction);
tellQueryAborted(false); // caller decides based on bypass, consumption
Must(done());
- }
+ }
}
bool Adaptation::Icap::Launcher::doneAll() const
@@ -132,15 +131,15 @@
debugs(93,9, HERE << info.icapReply);
if (!info.icapReply) // did not get to read an ICAP reply; a timeout?
return true;
-
+
debugs(93,9, HERE << info.icapReply->sline.status);
if (!info.icapReply->sline.status) // failed to parse the reply; I/O err
return true;
-
+
ACLFilledChecklist *cl =
new ACLFilledChecklist(TheConfig.repeat, info.icapRequest, dash_str);
cl->reply = HTTPMSGLOCK(info.icapReply);
-
+
const bool result = cl->fastCheck();
delete cl;
return result;
@@ -149,17 +148,17 @@
/* ICAPXactAbortInfo */
Adaptation::Icap::XactAbortInfo::XactAbortInfo(HttpRequest *anIcapRequest,
- HttpReply *anIcapReply, bool beRetriable, bool beRepeatable):
- icapRequest(anIcapRequest ? HTTPMSGLOCK(anIcapRequest) : NULL),
- icapReply(anIcapReply ? HTTPMSGLOCK(anIcapReply) : NULL),
- isRetriable(beRetriable), isRepeatable(beRepeatable)
+ HttpReply *anIcapReply, bool beRetriable, bool beRepeatable):
+ icapRequest(anIcapRequest ? HTTPMSGLOCK(anIcapRequest) : NULL),
+ icapReply(anIcapReply ? HTTPMSGLOCK(anIcapReply) : NULL),
+ isRetriable(beRetriable), isRepeatable(beRepeatable)
{
}
Adaptation::Icap::XactAbortInfo::XactAbortInfo(const Adaptation::Icap::XactAbortInfo &i):
- icapRequest(i.icapRequest ? HTTPMSGLOCK(i.icapRequest) : NULL),
- icapReply(i.icapReply ? HTTPMSGLOCK(i.icapReply) : NULL),
- isRetriable(i.isRetriable), isRepeatable(i.isRepeatable)
+ icapRequest(i.icapRequest ? HTTPMSGLOCK(i.icapRequest) : NULL),
+ icapReply(i.icapReply ? HTTPMSGLOCK(i.icapReply) : NULL),
+ isRetriable(i.isRetriable), isRepeatable(i.isRepeatable)
{
}
diff -u -r -N squid-3.1.0.13/src/adaptation/icap/Launcher.h squid-3.1.0.14/src/adaptation/icap/Launcher.h
--- squid-3.1.0.13/src/adaptation/icap/Launcher.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/icap/Launcher.h 2009-09-27 15:28:32.000000000 +1200
@@ -61,7 +61,8 @@
namespace Adaptation
{
-namespace Icap {
+namespace Icap
+{
class Xaction;
class XactAbortInfo;
@@ -84,7 +85,7 @@
private:
bool canRetry(XactAbortInfo &info) const; //< true if can retry in the case of persistent connection failures
- bool canRepeat(XactAbortInfo &info) const; //< true if can repeat in the case of no or unsatisfactory response
+ bool canRepeat(XactAbortInfo &info) const; //< true if can repeat in the case of no or unsatisfactory response
virtual void noteAdaptationQueryAbort(bool final);
protected:
@@ -103,9 +104,10 @@
int theLaunches; // the number of transaction launches
};
-/// helper class to pass information about aborted ICAP requests to
+/// helper class to pass information about aborted ICAP requests to
/// the Adaptation::Icap::Launcher class
-class XactAbortInfo {
+class XactAbortInfo
+{
public:
XactAbortInfo(HttpRequest *anIcapRequest, HttpReply *anIcapReply,
bool beRetriable, bool beRepeatable);
@@ -116,7 +118,7 @@
HttpReply *icapReply;
bool isRetriable;
bool isRepeatable;
-
+
private:
XactAbortInfo &operator =(const XactAbortInfo &); // undefined
};
@@ -129,19 +131,19 @@
}
/// A Dialer class used to schedule the Adaptation::Icap::Launcher::noteXactAbort call
-class XactAbortCall: public UnaryMemFunT {
+class XactAbortCall: public UnaryMemFunT
+{
public:
typedef void (Adaptation::Icap::Launcher::*DialMethod)(Adaptation::Icap::XactAbortInfo &);
- XactAbortCall(Adaptation::Icap::Launcher *launcer, DialMethod aMethod,
+ XactAbortCall(Adaptation::Icap::Launcher *launcer, DialMethod aMethod,
const Adaptation::Icap::XactAbortInfo &info):
- UnaryMemFunT(launcer, NULL, info),
- dialMethod(aMethod)
- {}
+ UnaryMemFunT(launcer, NULL, info),
+ dialMethod(aMethod) {}
virtual void print(std::ostream &os) const { os << '(' << "retriable:" << arg1.isRetriable << ", repeatable:" << arg1.isRepeatable << ')'; }
public:
DialMethod dialMethod;
-
+
protected:
virtual void doDial() { (object->*dialMethod)(arg1); }
};
diff -u -r -N squid-3.1.0.13/src/adaptation/icap/Makefile.in squid-3.1.0.14/src/adaptation/icap/Makefile.in
--- squid-3.1.0.13/src/adaptation/icap/Makefile.in 2009-08-05 01:32:37.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/icap/Makefile.in 2009-09-27 15:28:55.000000000 +1200
@@ -78,6 +78,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/src/adaptation/icap/ModXact.cc squid-3.1.0.14/src/adaptation/icap/ModXact.cc
--- squid-3.1.0.13/src/adaptation/icap/ModXact.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/icap/ModXact.cc 2009-09-27 15:28:32.000000000 +1200
@@ -163,13 +163,12 @@
// determine next step
if (preview.enabled())
state.writing = preview.done() ? State::writingPaused : State::writingPreview;
- else
- if (virginBody.expected())
- state.writing = State::writingPrime;
- else {
- stopWriting(true);
- return;
- }
+ else if (virginBody.expected())
+ state.writing = State::writingPrime;
+ else {
+ stopWriting(true);
+ return;
+ }
writeMore();
}
@@ -319,7 +318,7 @@
const HttpRequest &Adaptation::Icap::ModXact::virginRequest() const
{
const HttpRequest *request = virgin.cause ?
-                         virgin.cause : dynamic_cast<const HttpRequest*>(virgin.header);
+                                 virgin.cause : dynamic_cast<const HttpRequest*>(virgin.header);
Must(request);
return *request;
}
@@ -656,7 +655,7 @@
if (gotEncapsulated("res-hdr")) {
adapted.setHeader(new HttpReply);
setOutcome(service().cfg().method == ICAP::methodReqmod ?
- xoSatisfied : xoModified);
+ xoSatisfied : xoModified);
} else if (gotEncapsulated("req-hdr")) {
adapted.setHeader(new HttpRequest);
setOutcome(xoModified);
@@ -763,7 +762,7 @@
// We need to store received ICAP headers for icapHistory();
if (h != NULL) {
h->mergeIcapHeaders(&icapReply->header);
@@ -857,7 +856,7 @@
    if (const HttpRequest *oldR = dynamic_cast<const HttpRequest*>(oldHead)) {
HttpRequest *newR = new HttpRequest;
newR->canonical = oldR->canonical ?
- xstrdup(oldR->canonical) : NULL; // parse() does not set it
+ xstrdup(oldR->canonical) : NULL; // parse() does not set it
newHead = newR;
    } else if (dynamic_cast<const HttpReply*>(oldHead)) {
HttpReply *newRep = new HttpReply;
@@ -1110,57 +1109,56 @@
{
HttpRequest * request_ = NULL;
HttpReply * reply_ = NULL;
-    if(!(request_ = dynamic_cast<HttpRequest*>(adapted.header)))
-    {
+    if (!(request_ = dynamic_cast<HttpRequest*>(adapted.header))) {
        request_ = (virgin.cause? virgin.cause: dynamic_cast<HttpRequest*>(virgin.header));
        reply_ = dynamic_cast<HttpReply*>(adapted.header);
}
Adaptation::Icap::History::Pointer h = request_->icapHistory();
- Must(h != NULL); // ICAPXaction::maybeLog calls only if there is a log
- al.icp.opcode = ICP_INVALID;
- al.url = h->log_uri.termedBuf();
- const Adaptation::Icap::ServiceRep &s = service();
- al.icap.reqMethod = s.cfg().method;
-
- al.cache.caddr = request_->client_addr;
-
- al.request = HTTPMSGLOCK(request_);
- if(reply_)
- al.reply = HTTPMSGLOCK(reply_);
- else
- al.reply = NULL;
+ Must(h != NULL); // ICAPXaction::maybeLog calls only if there is a log
+ al.icp.opcode = ICP_INVALID;
+ al.url = h->log_uri.termedBuf();
+ const Adaptation::Icap::ServiceRep &s = service();
+ al.icap.reqMethod = s.cfg().method;
+
+ al.cache.caddr = request_->client_addr;
+
+ al.request = HTTPMSGLOCK(request_);
+ if (reply_)
+ al.reply = HTTPMSGLOCK(reply_);
+ else
+ al.reply = NULL;
- if (h->rfc931.size())
- al.cache.rfc931 = h->rfc931.termedBuf();
+ if (h->rfc931.size())
+ al.cache.rfc931 = h->rfc931.termedBuf();
#if USE_SSL
- if (h->ssluser.size())
- al.cache.ssluser = h->ssluser.termedBuf();
+ if (h->ssluser.size())
+ al.cache.ssluser = h->ssluser.termedBuf();
#endif
- al.cache.code = h->logType;
- al.cache.requestSize = h->req_sz;
- if (reply_) {
- al.http.code = reply_->sline.status;
- al.http.content_type = reply_->content_type.termedBuf();
- al.cache.replySize = replyBodySize + reply_->hdr_sz;
- al.cache.highOffset = replyBodySize;
- //don't set al.cache.objectSize because it hasn't exist yet
-
- Packer p;
- MemBuf mb;
-
- mb.init();
- packerToMemInit(&p, &mb);
-
- reply_->header.packInto(&p);
- al.headers.reply = xstrdup(mb.buf);
-
- packerClean(&p);
- mb.clean();
- }
- prepareLogWithRequestDetails(request_, &al);
- Xaction::finalizeLogInfo();
+ al.cache.code = h->logType;
+ al.cache.requestSize = h->req_sz;
+ if (reply_) {
+ al.http.code = reply_->sline.status;
+ al.http.content_type = reply_->content_type.termedBuf();
+ al.cache.replySize = replyBodySize + reply_->hdr_sz;
+ al.cache.highOffset = replyBodySize;
+        //don't set al.cache.objectSize because it doesn't exist yet
+
+ Packer p;
+ MemBuf mb;
+
+ mb.init();
+ packerToMemInit(&p, &mb);
+
+ reply_->header.packInto(&p);
+ al.headers.reply = xstrdup(mb.buf);
+
+ packerClean(&p);
+ mb.clean();
+ }
+ prepareLogWithRequestDetails(request_, &al);
+ Xaction::finalizeLogInfo();
}
@@ -1198,12 +1196,12 @@
if (ah != NULL) {
String name, value;
if (ah->getXxRecord(name, value)) {
- buf.Printf(SQUIDSTRINGPH ": " SQUIDSTRINGPH "\r\n",
- SQUIDSTRINGPRINT(name), SQUIDSTRINGPRINT(value));
+ buf.Printf(SQUIDSTRINGPH ": " SQUIDSTRINGPH "\r\n",
+ SQUIDSTRINGPRINT(name), SQUIDSTRINGPRINT(value));
}
}
}
-
+
buf.Printf("Encapsulated: ");
@@ -1221,9 +1219,8 @@
urlPath = request->urlpath;
if (ICAP::methodRespmod == m)
encapsulateHead(buf, "req-hdr", httpBuf, request);
- else
- if (ICAP::methodReqmod == m)
- encapsulateHead(buf, "req-hdr", httpBuf, virgin.header);
+ else if (ICAP::methodReqmod == m)
+ encapsulateHead(buf, "req-hdr", httpBuf, virgin.header);
}
if (ICAP::methodRespmod == m)
@@ -1359,9 +1356,8 @@
if (!virginBody.expected())
ad = 0;
- else
- if (virginBody.knownSize())
- ad = min(static_cast(ad), virginBody.size()); // not more than we have
+ else if (virginBody.knownSize())
+ ad = min(static_cast(ad), virginBody.size()); // not more than we have
debugs(93, 5, HERE << "should offer " << ad << "-byte preview " <<
"(service wanted " << wantedSize << ")");
@@ -1513,11 +1509,10 @@
if (virgin.cause)
method = virgin.cause->method;
+    else if (HttpRequest *req = dynamic_cast<HttpRequest*>(msg))
+        method = req->method;
    else
-        if (HttpRequest *req = dynamic_cast<HttpRequest*>(msg))
- method = req->method;
- else
- method = METHOD_NONE;
+ method = METHOD_NONE;
int64_t size;
// expectingBody returns true for zero-sized bodies, but we will not
@@ -1664,9 +1659,8 @@
if (wroteEof)
theState = stIeof; // written size is irrelevant
- else
- if (theWritten >= theAd)
- theState = stDone;
+ else if (theWritten >= theAd)
+ theState = stDone;
}
bool Adaptation::Icap::ModXact::fillVirginHttpHeader(MemBuf &mb) const
@@ -1699,24 +1693,26 @@
return new Adaptation::Icap::ModXact(this, virgin.header, virgin.cause, s);
}
-void Adaptation::Icap::ModXactLauncher::swanSong() {
+void Adaptation::Icap::ModXactLauncher::swanSong()
+{
debugs(93, 5, HERE << "swan sings");
updateHistory(false);
Adaptation::Icap::Launcher::swanSong();
}
-void Adaptation::Icap::ModXactLauncher::updateHistory(bool start) {
- HttpRequest *r = virgin.cause ?
-        virgin.cause : dynamic_cast<HttpRequest*>(virgin.header);
-
- // r should never be NULL but we play safe; TODO: add Should()
- if (r) {
- Adaptation::Icap::History::Pointer h = r->icapHistory();
- if (h != NULL) {
- if (start)
- h->start("ICAPModXactLauncher");
- else
- h->stop("ICAPModXactLauncher");
- }
- }
+void Adaptation::Icap::ModXactLauncher::updateHistory(bool start)
+{
+ HttpRequest *r = virgin.cause ?
+                    virgin.cause : dynamic_cast<HttpRequest*>(virgin.header);
+
+ // r should never be NULL but we play safe; TODO: add Should()
+ if (r) {
+ Adaptation::Icap::History::Pointer h = r->icapHistory();
+ if (h != NULL) {
+ if (start)
+ h->start("ICAPModXactLauncher");
+ else
+ h->stop("ICAPModXactLauncher");
+ }
+ }
}
diff -u -r -N squid-3.1.0.13/src/adaptation/icap/ModXact.h squid-3.1.0.14/src/adaptation/icap/ModXact.h
--- squid-3.1.0.13/src/adaptation/icap/ModXact.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/icap/ModXact.h 2009-09-27 15:28:32.000000000 +1200
@@ -54,7 +54,8 @@
namespace Adaptation
{
-namespace Icap {
+namespace Icap
+{
// estimated future presence and size of something (e.g., HTTP body)
diff -u -r -N squid-3.1.0.13/src/adaptation/icap/Options.h squid-3.1.0.14/src/adaptation/icap/Options.h
--- squid-3.1.0.13/src/adaptation/icap/Options.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/icap/Options.h 2009-09-27 15:28:32.000000000 +1200
@@ -41,7 +41,8 @@
namespace Adaptation
{
-namespace Icap {
+namespace Icap
+{
/* Maintains options supported by a given ICAP service.
* See RFC 3507, Section "4.10.2 OPTIONS Response". */
diff -u -r -N squid-3.1.0.13/src/adaptation/icap/OptXact.h squid-3.1.0.14/src/adaptation/icap/OptXact.h
--- squid-3.1.0.13/src/adaptation/icap/OptXact.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/icap/OptXact.h 2009-09-27 15:28:32.000000000 +1200
@@ -39,7 +39,8 @@
namespace Adaptation
{
-namespace Icap {
+namespace Icap
+{
/* OptXact sends an ICAP OPTIONS request to the ICAP service,
* parses the ICAP response, and sends it to the initiator. A NULL response
diff -u -r -N squid-3.1.0.13/src/adaptation/icap/ServiceRep.cc squid-3.1.0.14/src/adaptation/icap/ServiceRep.cc
--- squid-3.1.0.13/src/adaptation/icap/ServiceRep.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/icap/ServiceRep.cc 2009-09-27 15:28:32.000000000 +1200
@@ -451,12 +451,10 @@
if (!theOptions)
buf.append(",!opt", 5);
- else
- if (!theOptions->valid())
- buf.append(",!valid", 7);
- else
- if (!theOptions->fresh())
- buf.append(",stale", 6);
+ else if (!theOptions->valid())
+ buf.append(",!valid", 7);
+ else if (!theOptions->fresh())
+ buf.append(",stale", 6);
}
if (theOptionsFetcher)
diff -u -r -N squid-3.1.0.13/src/adaptation/icap/ServiceRep.h squid-3.1.0.14/src/adaptation/icap/ServiceRep.h
--- squid-3.1.0.13/src/adaptation/icap/ServiceRep.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/icap/ServiceRep.h 2009-09-27 15:28:32.000000000 +1200
@@ -43,7 +43,8 @@
namespace Adaptation
{
-namespace Icap {
+namespace Icap
+{
class Options;
class OptXact;
@@ -78,7 +79,7 @@
class ServiceRep : public RefCountable, public Adaptation::Service,
- public Adaptation::Initiator
+ public Adaptation::Initiator
{
public:
diff -u -r -N squid-3.1.0.13/src/adaptation/icap/Xaction.cc squid-3.1.0.14/src/adaptation/icap/Xaction.cc
--- squid-3.1.0.13/src/adaptation/icap/Xaction.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/icap/Xaction.cc 2009-09-27 15:28:32.000000000 +1200
@@ -432,7 +432,7 @@
{
if (al.icap.outcome != xoUnknown) {
debugs(93, 3, HERE << "Warning: reseting outcome: from " <<
- al.icap.outcome << " to " << xo);
+ al.icap.outcome << " to " << xo);
} else {
debugs(93, 4, HERE << xo);
}
@@ -462,19 +462,20 @@
Adaptation::Initiate::swanSong();
}
-void Adaptation::Icap::Xaction::tellQueryAborted() {
+void Adaptation::Icap::Xaction::tellQueryAborted()
+{
    Adaptation::Icap::Launcher *l = dynamic_cast<Adaptation::Icap::Launcher*>(theInitiator.ptr());
Adaptation::Icap::XactAbortInfo abortInfo(icapRequest, icapReply, retriable(), repeatable());
- CallJob(91, 5, __FILE__, __LINE__,
+ CallJob(91, 5, __FILE__, __LINE__,
"Adaptation::Icap::Launcher::noteXactAbort",
XactAbortCall(l, &Adaptation::Icap::Launcher::noteXactAbort, abortInfo) );
clearInitiator();
}
-void Adaptation::Icap::Xaction::maybeLog() {
- if(IcapLogfileStatus == LOG_ENABLE)
- {
+void Adaptation::Icap::Xaction::maybeLog()
+{
+ if (IcapLogfileStatus == LOG_ENABLE) {
ACLChecklist *checklist = new ACLFilledChecklist(::Config.accessList.icap, al.request, dash_str);
if (!::Config.accessList.icap || checklist->fastCheck()) {
finalizeLogInfo();
@@ -489,12 +490,12 @@
{
//prepare log data
al.icp.opcode = ICP_INVALID;
-
+
const Adaptation::Icap::ServiceRep &s = service();
al.icap.hostAddr = s.cfg().host.termedBuf();
al.icap.serviceName = s.cfg().key;
al.icap.reqUri = s.cfg().uri;
-
+
al.icap.ioTime = tvSubMsec(icap_tio_start, icap_tio_finish);
al.icap.trTime = tvSubMsec(icap_tr_start, current_time);
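
The finalizeLogInfo() code above fills the ICAP access-log entry, deriving al.icap.ioTime and al.icap.trTime from pairs of struct timeval timestamps via tvSubMsec(). A small standalone sketch of that computation (assuming tvSubMsec() means "milliseconds from the first timestamp to the second"; msecBetween is an illustrative name, not Squid's helper):

    // Rough equivalent of the tvSubMsec() timing math used for al.icap.ioTime/trTime.
    #include <sys/time.h>
    #include <cstdio>

    static long msecBetween(const struct timeval &from, const struct timeval &to)
    {
        return (to.tv_sec - from.tv_sec) * 1000L + (to.tv_usec - from.tv_usec) / 1000L;
    }

    int main()
    {
        struct timeval tio_start, tio_finish;
        gettimeofday(&tio_start, NULL);
        // ... the ICAP request/response I/O would happen here ...
        gettimeofday(&tio_finish, NULL);
        printf("ioTime=%ldms\n", msecBetween(tio_start, tio_finish));
        return 0;
    }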
diff -u -r -N squid-3.1.0.13/src/adaptation/icap/Xaction.h squid-3.1.0.14/src/adaptation/icap/Xaction.h
--- squid-3.1.0.13/src/adaptation/icap/Xaction.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/icap/Xaction.h 2009-09-27 15:28:32.000000000 +1200
@@ -46,7 +46,8 @@
namespace Adaptation
{
-namespace Icap {
+namespace Icap
+{
/*
* The ICAP Xaction implements common tasks for ICAP OPTIONS, REQMOD, and
diff -u -r -N squid-3.1.0.13/src/adaptation/Initiate.cc squid-3.1.0.14/src/adaptation/Initiate.cc
--- squid-3.1.0.13/src/adaptation/Initiate.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/Initiate.cc 2009-09-27 15:28:32.000000000 +1200
@@ -30,7 +30,7 @@
/* Initiate */
Adaptation::Initiate::Initiate(const char *aTypeName, Initiator *anInitiator):
- AsyncJob(aTypeName), theInitiator(anInitiator)
+ AsyncJob(aTypeName), theInitiator(anInitiator)
{
assert(theInitiator);
}
diff -u -r -N squid-3.1.0.13/src/adaptation/Iterator.cc squid-3.1.0.14/src/adaptation/Iterator.cc
--- squid-3.1.0.13/src/adaptation/Iterator.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/Iterator.cc 2009-09-27 15:28:32.000000000 +1200
@@ -15,16 +15,16 @@
Adaptation::Iterator::Iterator(Adaptation::Initiator *anInitiator,
- HttpMsg *aMsg, HttpRequest *aCause,
- const ServiceGroupPointer &aGroup):
- AsyncJob("Iterator"),
- Adaptation::Initiate("Iterator", anInitiator),
- theGroup(aGroup),
- theMsg(HTTPMSGLOCK(aMsg)),
- theCause(aCause ? HTTPMSGLOCK(aCause) : NULL),
- theLauncher(0),
- iterations(0),
- adapted(false)
+ HttpMsg *aMsg, HttpRequest *aCause,
+ const ServiceGroupPointer &aGroup):
+ AsyncJob("Iterator"),
+ Adaptation::Initiate("Iterator", anInitiator),
+ theGroup(aGroup),
+ theMsg(HTTPMSGLOCK(aMsg)),
+ theCause(aCause ? HTTPMSGLOCK(aCause) : NULL),
+ theLauncher(0),
+ iterations(0),
+ adapted(false)
{
}
@@ -58,9 +58,9 @@
if (iterations > Adaptation::Config::service_iteration_limit) {
debugs(93,DBG_CRITICAL, "Adaptation iterations limit (" <<
- Adaptation::Config::service_iteration_limit << ") exceeded:\n" <<
- "\tPossible service loop with " <<
- theGroup->kind << " " << theGroup->id << ", plan=" << thePlan);
+ Adaptation::Config::service_iteration_limit << ") exceeded:\n" <<
+ "\tPossible service loop with " <<
+ theGroup->kind << " " << theGroup->id << ", plan=" << thePlan);
throw TexcHere("too many adaptations");
}
@@ -69,7 +69,7 @@
debugs(93,5, HERE << "using adaptation service: " << service->cfg().key);
theLauncher = initiateAdaptation(
- service->makeXactLauncher(this, theMsg, theCause));
+ service->makeXactLauncher(this, theMsg, theCause));
Must(theLauncher);
Must(!done());
}
@@ -113,8 +113,8 @@
updatePlan(false);
// can we replace the failed service (group-level bypass)?
- const bool srcIntact = !theMsg->body_pipe ||
- !theMsg->body_pipe->consumedSize();
+ const bool srcIntact = !theMsg->body_pipe ||
+ !theMsg->body_pipe->consumedSize();
// can we ignore the failure (compute while thePlan is not exhausted)?
Must(!thePlan.exhausted());
const bool canIgnore = thePlan.current()->cfg().bypass;
@@ -176,7 +176,7 @@
debugs(85,3, HERE << "rejecting service-proposed plan");
return false;
}
-
+
debugs(85,3, HERE << "retiring old plan: " << thePlan);
theGroup = new DynamicServiceChain(services, theGroup); // refcounted
thePlan = ServicePlan(theGroup, filter());
@@ -196,8 +196,7 @@
method = methodReqmod;
req = r;
rep = NULL;
- } else
-        if (HttpReply *r = dynamic_cast<HttpReply*>(theMsg)) {
+    } else if (HttpReply *r = dynamic_cast<HttpReply*>(theMsg)) {
method = methodRespmod;
req = theCause;
rep = r;
diff -u -r -N squid-3.1.0.13/src/adaptation/Iterator.h squid-3.1.0.14/src/adaptation/Iterator.h
--- squid-3.1.0.13/src/adaptation/Iterator.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/Iterator.h 2009-09-27 15:28:32.000000000 +1200
@@ -14,16 +14,16 @@
Note: Initiate must be the first parent for cbdata to work. We use
a temporary InitiatorHolder/toCbdata hacks and do not call cbdata
- operations on the initiator directly.
+ operations on the initiator directly.
*/
/// iterates services in ServiceGroup, starting adaptation launchers
class Iterator: public Initiate, public Initiator
{
public:
- Iterator(Adaptation::Initiator *anInitiator,
- HttpMsg *virginHeader, HttpRequest *virginCause,
- const Adaptation::ServiceGroupPointer &aGroup);
+ Iterator(Adaptation::Initiator *anInitiator,
+ HttpMsg *virginHeader, HttpRequest *virginCause,
+ const Adaptation::ServiceGroupPointer &aGroup);
virtual ~Iterator();
// Adaptation::Initiate: asynchronous communication with the initiator
diff -u -r -N squid-3.1.0.13/src/adaptation/Makefile.in squid-3.1.0.14/src/adaptation/Makefile.in
--- squid-3.1.0.13/src/adaptation/Makefile.in 2009-08-05 01:32:37.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/Makefile.in 2009-09-27 15:28:55.000000000 +1200
@@ -90,6 +90,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/src/adaptation/ServiceConfig.cc squid-3.1.0.14/src/adaptation/ServiceConfig.cc
--- squid-3.1.0.13/src/adaptation/ServiceConfig.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/ServiceConfig.cc 2009-09-27 15:28:32.000000000 +1200
@@ -7,8 +7,8 @@
#include "adaptation/ServiceConfig.h"
Adaptation::ServiceConfig::ServiceConfig():
- port(-1), method(methodNone), point(pointNone),
- bypass(false), routing(false)
+ port(-1), method(methodNone), point(pointNone),
+ bypass(false), routing(false)
{}
const char *
@@ -91,12 +91,11 @@
bool grokked = false;
if (strcmp(name, "bypass") == 0)
grokked = grokBool(bypass, name, value);
- else
- if (strcmp(name, "routing") == 0)
+ else if (strcmp(name, "routing") == 0)
grokked = grokBool(routing, name, value);
else {
debugs(3, 0, cfg_filename << ':' << config_lineno << ": " <<
- "unknown adaptation service option: " << name << '=' << value);
+ "unknown adaptation service option: " << name << '=' << value);
}
if (!grokked)
return false;
@@ -109,15 +108,15 @@
// there should be nothing else left
if (const char *tail = strtok(NULL, w_space)) {
debugs(3, 0, cfg_filename << ':' << config_lineno << ": " <<
- "garbage after adaptation service URI: " << tail);
+ "garbage after adaptation service URI: " << tail);
return false;
}
debugs(3,5, cfg_filename << ':' << config_lineno << ": " <<
- "adaptation_service " << key << ' ' <<
- methodStr() << "_" << vectPointStr() << ' ' <<
- bypass << routing << ' ' <<
- uri);
+ "adaptation_service " << key << ' ' <<
+ methodStr() << "_" << vectPointStr() << ' ' <<
+ bypass << routing << ' ' <<
+ uri);
return true;
}
@@ -210,8 +209,7 @@
{
if (!strcmp(value, "0") || !strcmp(value, "off"))
var = false;
- else
- if (!strcmp(value, "1") || !strcmp(value, "on"))
+ else if (!strcmp(value, "1") || !strcmp(value, "on"))
var = true;
else {
debugs(3, 0, HERE << cfg_filename << ':' << config_lineno << ": " <<
diff -u -r -N squid-3.1.0.13/src/adaptation/ServiceConfig.h squid-3.1.0.14/src/adaptation/ServiceConfig.h
--- squid-3.1.0.13/src/adaptation/ServiceConfig.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/ServiceConfig.h 2009-09-27 15:28:32.000000000 +1200
@@ -37,7 +37,7 @@
protected:
Method parseMethod(const char *buf) const;
VectPoint parseVectPoint(const char *buf) const;
-
+
/// interpret parsed values
bool grokBool(bool &var, const char *name, const char *value);
bool grokUri(const char *value);
diff -u -r -N squid-3.1.0.13/src/adaptation/ServiceFilter.cc squid-3.1.0.14/src/adaptation/ServiceFilter.cc
--- squid-3.1.0.13/src/adaptation/ServiceFilter.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/ServiceFilter.cc 2009-09-27 15:28:32.000000000 +1200
@@ -5,18 +5,18 @@
Adaptation::ServiceFilter::ServiceFilter(Method aMethod, VectPoint aPoint,
-HttpRequest *aReq, HttpReply *aRep): method(aMethod), point(aPoint),
- request(HTTPMSGLOCK(aReq)),
- reply(aRep ? HTTPMSGLOCK(aRep) : NULL)
+ HttpRequest *aReq, HttpReply *aRep): method(aMethod), point(aPoint),
+ request(HTTPMSGLOCK(aReq)),
+ reply(aRep ? HTTPMSGLOCK(aRep) : NULL)
{
// a lot of code assumes that there is always a virgin request or cause
assert(request);
}
Adaptation::ServiceFilter::ServiceFilter(const ServiceFilter &f):
- method(f.method), point(f.point),
- request(HTTPMSGLOCK(f.request)),
- reply(f.reply ? HTTPMSGLOCK(f.reply) : NULL)
+ method(f.method), point(f.point),
+ request(HTTPMSGLOCK(f.request)),
+ reply(f.reply ? HTTPMSGLOCK(f.reply) : NULL)
{
}
diff -u -r -N squid-3.1.0.13/src/adaptation/ServiceGroups.cc squid-3.1.0.14/src/adaptation/ServiceGroups.cc
--- squid-3.1.0.13/src/adaptation/ServiceGroups.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/ServiceGroups.cc 2009-09-27 15:28:32.000000000 +1200
@@ -11,8 +11,8 @@
#define ServiceGroup ServiceGroup
Adaptation::ServiceGroup::ServiceGroup(const String &aKind, bool allSame):
- kind(aKind), method(methodNone), point(pointNone),
- allServicesSame(allSame)
+ kind(aKind), method(methodNone), point(pointNone),
+ allServicesSame(allSame)
{
}
@@ -48,7 +48,7 @@
const String &sid = services[pos];
ServicePointer service = at(pos);
if (service != NULL) {
- if (method == methodNone) {
+ if (method == methodNone) {
// optimization: cache values that should be the same
method = service->cfg().method;
point = service->cfg().point;
@@ -61,18 +61,17 @@
checkUniqueness(pos);
- if (allServicesSame) {
+ if (allServicesSame) {
if (!baselineKey.size()) {
baselineKey = service->cfg().key;
baselineBypass = service->cfg().bypass;
- } else
- if (baselineBypass != service->cfg().bypass) {
+ } else if (baselineBypass != service->cfg().bypass) {
debugs(93,0, "WARNING: Inconsistent bypass in " << kind <<
- ' ' << id << " may produce surprising results: " <<
- baselineKey << " vs. " << sid);
+ ' ' << id << " may produce surprising results: " <<
+ baselineKey << " vs. " << sid);
}
}
- } else {
+ } else {
finalizeMsg("ERROR: Unknown adaptation name", sid, true);
}
}
@@ -91,8 +90,7 @@
ServicePointer s = at(p);
if (s != NULL && s->cfg().key == checkedService->cfg().key)
finalizeMsg("duplicate service name", s->cfg().key, false);
- else
- if (s != NULL && s->cfg().uri == checkedService->cfg().uri)
+ else if (s != NULL && s->cfg().uri == checkedService->cfg().uri)
finalizeMsg("duplicate service URI", s->cfg().uri, false);
}
}
@@ -100,15 +98,16 @@
/// emits a formatted warning or error message at the appropriate dbg level
void
Adaptation::ServiceGroup::finalizeMsg(const char *msg, const String &culprit,
- bool error) const
+ bool error) const
{
- const int level = error ? DBG_CRITICAL : DBG_IMPORTANT;
+    const int level = error ? DBG_CRITICAL : DBG_IMPORTANT;
const char *pfx = error ? "ERROR: " : "WARNING: ";
debugs(93,level, pfx << msg << ' ' << culprit << " in " << kind << " '" <<
- id << "'");
+ id << "'");
}
-Adaptation::ServicePointer Adaptation::ServiceGroup::at(const Pos pos) const {
+Adaptation::ServicePointer Adaptation::ServiceGroup::at(const Pos pos) const
+{
return FindService(services[pos]);
}
@@ -213,7 +212,7 @@
/* ServiceChain */
Adaptation::DynamicServiceChain::DynamicServiceChain(const String &ids,
- const ServiceGroupPointer prev)
+ const ServiceGroupPointer prev)
{
kind = "dynamic adaptation chain"; // TODO: optimize by using String const
id = ids; // use services ids as the dynamic group ID
@@ -241,8 +240,8 @@
}
Adaptation::ServicePlan::ServicePlan(const ServiceGroupPointer &g,
- const ServiceFilter &filter):
- group(g), pos(0), atEof(!g || !g->has(pos))
+ const ServiceFilter &filter):
+ group(g), pos(0), atEof(!g || !g->has(pos))
{
// this will find the first service because starting pos is zero
if (!atEof && !group->findService(filter, pos))
@@ -257,14 +256,16 @@
}
Adaptation::ServicePointer
-Adaptation::ServicePlan::replacement(const ServiceFilter &filter) {
+Adaptation::ServicePlan::replacement(const ServiceFilter &filter)
+{
if (!atEof && !group->findReplacement(filter, ++pos))
atEof = true;
return current();
}
Adaptation::ServicePointer
-Adaptation::ServicePlan::next(const ServiceFilter &filter) {
+Adaptation::ServicePlan::next(const ServiceFilter &filter)
+{
if (!atEof && !group->findLink(filter, ++pos))
atEof = true;
return current();
@@ -277,7 +278,7 @@
return os << "[nil]";
return os << group->id << '[' << pos << ".." << group->services.size() <<
- (atEof ? ".]" : "]");
+ (atEof ? ".]" : "]");
}
diff -u -r -N squid-3.1.0.13/src/adaptation/ServiceGroups.h squid-3.1.0.14/src/adaptation/ServiceGroups.h
--- squid-3.1.0.13/src/adaptation/ServiceGroups.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/adaptation/ServiceGroups.h 2009-09-27 15:28:32.000000000 +1200
@@ -81,7 +81,7 @@
SingleService(const String &aServiceKey);
protected:
- virtual bool replace(Pos &pos) const { return false; }
+ virtual bool replace(Pos &pos) const { return false; }
virtual bool advance(Pos &pos) const { return false; }
};
@@ -92,7 +92,7 @@
ServiceChain();
protected:
- virtual bool replace(Pos &pos) const { return false; }
+ virtual bool replace(Pos &pos) const { return false; }
virtual bool advance(Pos &pos) const { return has(++pos); }
};
@@ -106,7 +106,8 @@
/** iterates services stored in a group; iteration is not linear because we
need to both replace failed services and advance to the next chain link */
-class ServicePlan {
+class ServicePlan
+{
public:
    typedef unsigned int Pos; // Vector<>::position_type
diff -u -r -N squid-3.1.0.13/src/auth/AclProxyAuth.cc squid-3.1.0.14/src/auth/AclProxyAuth.cc
--- squid-3.1.0.13/src/auth/AclProxyAuth.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/auth/AclProxyAuth.cc 2009-09-27 15:28:32.000000000 +1200
@@ -146,7 +146,8 @@
assert(checklist->auth_user_request != NULL);
auth_user_request = checklist->auth_user_request;
- assert(authenticateValidateUser(auth_user_request));
+ int validated = authenticateValidateUser(auth_user_request);
+ assert(validated);
auth_user_request->start(LookupDone, checklist);
}
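
The AclProxyAuth change above matters because assert() is a macro: when Squid is built with NDEBUG, the whole argument expression is removed, so authenticateValidateUser() would never run. Evaluating the call first and asserting the stored result keeps the validation in release builds. A minimal illustration of the pattern (validate() is a made-up stand-in):

    // Why assert(f(x)) is unsafe when f() must always be evaluated.
    #include <cassert>

    static bool validate() { return true; }  // imagine required side effects or checks here

    void broken()
    {
        assert(validate());                  // with -DNDEBUG the call vanishes entirely
    }

    void fixed()
    {
        const bool validated = validate();   // always evaluated
        assert(validated);                   // only the check is compiled out
    }

    int main() { broken(); fixed(); }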
diff -u -r -N squid-3.1.0.13/src/auth/digest/auth_digest.cc squid-3.1.0.14/src/auth/digest/auth_digest.cc
--- squid-3.1.0.13/src/auth/digest/auth_digest.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/auth/digest/auth_digest.cc 2009-09-27 15:28:33.000000000 +1200
@@ -1036,7 +1036,7 @@
authDigestNonceLink(nonce);
/* ping this nonce to this auth user */
- assert((nonce->user == NULL) || (nonce->user = user));
+ assert((nonce->user == NULL) || (nonce->user == user));
/* we don't lock this reference because removing the user removes the
* hash too. Of course if that changes we're stuffed so read the code huh?
@@ -1106,6 +1106,7 @@
/* quote mark */
p++;
+ safe_free(username);
username = xstrndup(p, strchr(p, '"') + 1 - p);
debugs(29, 9, "authDigestDecodeAuth: Found Username '" << username << "'");
@@ -1118,6 +1119,7 @@
/* quote mark */
p++;
+ safe_free(digest_request->realm);
digest_request->realm = xstrndup(p, strchr(p, '"') + 1 - p);
debugs(29, 9, "authDigestDecodeAuth: Found realm '" << digest_request->realm << "'");
@@ -1131,6 +1133,7 @@
/* quote mark */
p++;
+ safe_free(digest_request->qop);
digest_request->qop = xstrndup(p, strcspn(p, "\" \t\r\n()<>@,;:\\/[]?={}") + 1);
debugs(29, 9, "authDigestDecodeAuth: Found qop '" << digest_request->qop << "'");
@@ -1144,6 +1147,7 @@
/* quote mark */
p++;
+ safe_free(digest_request->algorithm);
digest_request->algorithm = xstrndup(p, strcspn(p, "\" \t\r\n()<>@,;:\\/[]?={}") + 1);
debugs(29, 9, "authDigestDecodeAuth: Found algorithm '" << digest_request->algorithm << "'");
@@ -1156,6 +1160,7 @@
/* quote mark */
p++;
+ safe_free(digest_request->uri);
digest_request->uri = xstrndup(p, strchr(p, '"') + 1 - p);
debugs(29, 9, "authDigestDecodeAuth: Found uri '" << digest_request->uri << "'");
@@ -1168,6 +1173,7 @@
/* quote mark */
p++;
+ safe_free(digest_request->nonceb64);
digest_request->nonceb64 = xstrndup(p, strchr(p, '"') + 1 - p);
debugs(29, 9, "authDigestDecodeAuth: Found nonce '" << digest_request->nonceb64 << "'");
@@ -1189,6 +1195,7 @@
/* quote mark */
p++;
+ safe_free(digest_request->cnonce);
digest_request->cnonce = xstrndup(p, strchr(p, '"') + 1 - p);
debugs(29, 9, "authDigestDecodeAuth: Found cnonce '" << digest_request->cnonce << "'");
@@ -1201,6 +1208,7 @@
/* quote mark */
p++;
+ safe_free(digest_request->response);
digest_request->response = xstrndup(p, strchr(p, '"') + 1 - p);
debugs(29, 9, "authDigestDecodeAuth: Found response '" << digest_request->response << "'");
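
Two distinct fixes land in auth_digest.cc above: the nonce/user assertion now compares with == instead of accidentally assigning with =, and each parsed field is released with safe_free() before xstrndup() so that a directive repeated inside the Digest Authorization header no longer leaks the earlier allocation. A small standalone sketch of the leak pattern and the fix (dupValue() stands in for xstrndup(); free()+NULL approximates safe_free()):

    // Re-parsing the same field twice: without freeing first, the first copy leaks.
    #include <cstdlib>
    #include <cstring>

    static char *dupValue(const char *v) { return strdup(v); }   // stand-in for xstrndup()

    int main()
    {
        char *realm = NULL;
        const char *seen[] = { "example.com", "example.org" };   // same field appears twice
        for (const char *v : seen) {
            free(realm);            // roughly what safe_free(digest_request->realm) does
            realm = NULL;           // safe_free() also resets the pointer
            realm = dupValue(v);    // keep only the latest value, nothing leaks
        }
        free(realm);
        return 0;
    }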
diff -u -r -N squid-3.1.0.13/src/auth/Makefile.in squid-3.1.0.14/src/auth/Makefile.in
--- squid-3.1.0.13/src/auth/Makefile.in 2009-08-05 01:32:37.000000000 +1200
+++ squid-3.1.0.14/src/auth/Makefile.in 2009-09-27 15:28:56.000000000 +1200
@@ -95,6 +95,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/src/auth/negotiate/auth_negotiate.cc squid-3.1.0.14/src/auth/negotiate/auth_negotiate.cc
--- squid-3.1.0.13/src/auth/negotiate/auth_negotiate.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/auth/negotiate/auth_negotiate.cc 2009-09-27 15:28:33.000000000 +1200
@@ -379,13 +379,12 @@
debugs(29, 5, "NegotiateUser::~NegotiateUser: doing nothing to clearNegotiate scheme data for '" << this << "'");
}
-static stateful_helper_callback_t
+static void
authenticateNegotiateHandleReply(void *data, void *lastserver, char *reply)
{
    authenticateStateData *r = static_cast<authenticateStateData *>(data);
int valid;
- stateful_helper_callback_t result = S_HELPER_UNKNOWN;
char *blob, *arg = NULL;
AuthUserRequest *auth_user_request;
@@ -397,11 +396,10 @@
valid = cbdataReferenceValid(r->data);
if (!valid) {
- debugs(29, 1, "authenticateNegotiateHandleReply: invalid callback data. Releasing helper '" << lastserver << "'.");
+ debugs(29, 1, "authenticateNegotiateHandleReply: invalid callback data. helper '" << lastserver << "'.");
cbdataReferenceDone(r->data);
authenticateStateFree(r);
- debugs(29, 9, "authenticateNegotiateHandleReply: telling stateful helper : " << S_HELPER_RELEASE);
- return S_HELPER_RELEASE;
+ return;
}
if (!reply) {
@@ -451,11 +449,9 @@
negotiate_request->auth_state = AUTHENTICATE_STATE_IN_PROGRESS;
auth_user_request->denyMessage("Authentication in progress");
debugs(29, 4, "authenticateNegotiateHandleReply: Need to challenge the client with a server blob '" << blob << "'");
- result = S_HELPER_RESERVE;
} else {
negotiate_request->auth_state = AUTHENTICATE_STATE_FAILED;
auth_user_request->denyMessage("NTLM authentication requires a persistent connection");
- result = S_HELPER_RELEASE;
}
} else if (strncasecmp(reply, "AF ", 3) == 0 && arg != NULL) {
/* we're finished, release the helper */
@@ -475,8 +471,6 @@
negotiate_request->auth_state = AUTHENTICATE_STATE_DONE;
- result = S_HELPER_RELEASE;
-
debugs(29, 4, "authenticateNegotiateHandleReply: Successfully validated user via Negotiate. Username '" << blob << "'");
/* connection is authenticated */
@@ -522,8 +516,6 @@
negotiate_request->releaseAuthServer();
- result = S_HELPER_RELEASE;
-
debugs(29, 4, "authenticateNegotiateHandleReply: Failed validating user via Negotiate. Error returned '" << blob << "'");
} else if (strncasecmp(reply, "BH ", 3) == 0) {
/* TODO kick off a refresh process. This can occur after a YR or after
@@ -535,7 +527,6 @@
negotiate_request->auth_state = AUTHENTICATE_STATE_FAILED;
safe_free(negotiate_request->server_blob);
negotiate_request->releaseAuthServer();
- result = S_HELPER_RELEASE;
debugs(29, 1, "authenticateNegotiateHandleReply: Error validating user via Negotiate. Error returned '" << reply << "'");
} else {
/* protocol error */
@@ -549,8 +540,6 @@
r->handler(r->data, NULL);
cbdataReferenceDone(r->data);
authenticateStateFree(r);
- debugs(29, 9, "authenticateNegotiateHandleReply: telling stateful helper : " << result);
- return result;
}
static void
@@ -613,8 +602,7 @@
debugs(29, 6, HERE << "releasing Negotiate auth server '" << authserver << "'");
helperStatefulReleaseServer(authserver);
authserver = NULL;
- }
- else
+ } else
debugs(29, 6, HERE << "No Negotiate auth server to release.");
}
diff -u -r -N squid-3.1.0.13/src/auth/ntlm/auth_ntlm.cc squid-3.1.0.14/src/auth/ntlm/auth_ntlm.cc
--- squid-3.1.0.13/src/auth/ntlm/auth_ntlm.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/auth/ntlm/auth_ntlm.cc 2009-09-27 15:28:33.000000000 +1200
@@ -325,13 +325,12 @@
debugs(29, 5, "NTLMUser::~NTLMUser: doing nothing to clearNTLM scheme data for '" << this << "'");
}
-static stateful_helper_callback_t
+static void
authenticateNTLMHandleReply(void *data, void *lastserver, char *reply)
{
    authenticateStateData *r = static_cast<authenticateStateData *>(data);
int valid;
- stateful_helper_callback_t result = S_HELPER_UNKNOWN;
char *blob;
AuthUserRequest *auth_user_request;
@@ -343,11 +342,10 @@
valid = cbdataReferenceValid(r->data);
if (!valid) {
- debugs(29, 1, "authenticateNTLMHandleReply: invalid callback data. Releasing helper '" << lastserver << "'.");
+ debugs(29, 1, "authenticateNTLMHandleReply: invalid callback data. helper '" << lastserver << "'.");
cbdataReferenceDone(r->data);
authenticateStateFree(r);
- debugs(29, 9, "authenticateNTLMHandleReply: telling stateful helper : " << S_HELPER_RELEASE);
- return S_HELPER_RELEASE;
+ return;
}
if (!reply) {
@@ -391,11 +389,9 @@
ntlm_request->auth_state = AUTHENTICATE_STATE_IN_PROGRESS;
auth_user_request->denyMessage("Authentication in progress");
debugs(29, 4, "authenticateNTLMHandleReply: Need to challenge the client with a server blob '" << blob << "'");
- result = S_HELPER_RESERVE;
} else {
ntlm_request->auth_state = AUTHENTICATE_STATE_FAILED;
auth_user_request->denyMessage("NTLM authentication requires a persistent connection");
- result = S_HELPER_RELEASE;
}
} else if (strncasecmp(reply, "AF ", 3) == 0) {
/* we're finished, release the helper */
@@ -403,7 +399,6 @@
auth_user_request->denyMessage("Login successful");
safe_free(ntlm_request->server_blob);
- result = S_HELPER_RELEASE;
debugs(29, 4, "authenticateNTLMHandleReply: Successfully validated user via NTLM. Username '" << blob << "'");
/* connection is authenticated */
debugs(29, 4, "AuthNTLMUserRequest::authenticate: authenticated user " << ntlm_user->username());
@@ -437,7 +432,6 @@
ntlm_request->auth_state = AUTHENTICATE_STATE_FAILED;
safe_free(ntlm_request->server_blob);
ntlm_request->releaseAuthServer();
- result = S_HELPER_RELEASE;
debugs(29, 4, "authenticateNTLMHandleReply: Failed validating user via NTLM. Error returned '" << blob << "'");
} else if (strncasecmp(reply, "BH ", 3) == 0) {
/* TODO kick off a refresh process. This can occur after a YR or after
@@ -449,7 +443,6 @@
ntlm_request->auth_state = AUTHENTICATE_STATE_FAILED;
safe_free(ntlm_request->server_blob);
ntlm_request->releaseAuthServer();
- result = S_HELPER_RELEASE;
debugs(29, 1, "authenticateNTLMHandleReply: Error validating user via NTLM. Error returned '" << reply << "'");
} else {
/* protocol error */
@@ -463,8 +456,6 @@
r->handler(r->data, NULL);
cbdataReferenceDone(r->data);
authenticateStateFree(r);
- debugs(29, 9, "authenticateNTLMHandleReply: telling stateful helper : " << result);
- return result;
}
static void
@@ -525,10 +516,9 @@
{
if (authserver) {
debugs(29, 6, HERE << "releasing NTLM auth server '" << authserver << "'");
- helperStatefulReleaseServer(authserver);
- authserver = NULL;
- }
- else
+ helperStatefulReleaseServer(authserver);
+ authserver = NULL;
+ } else
debugs(29, 6, HERE << "No NTLM auth server to release.");
}
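
Both handler rewrites above (Negotiate and NTLM) change the helper reply callback from returning a stateful_helper_callback_t value such as S_HELPER_RELEASE or S_HELPER_RESERVE to returning void; reserving or releasing the stateful helper server is now done explicitly through releaseAuthServer()/helperStatefulReleaseServer() rather than through the return value. A schematic sketch of the signature change only (HandleReply is a placeholder name for authenticateNegotiateHandleReply/authenticateNTLMHandleReply):

    // Before: the helper framework acted on the returned code.
    enum stateful_helper_callback_t { S_HELPER_UNKNOWN, S_HELPER_RELEASE, S_HELPER_RESERVE };

    static stateful_helper_callback_t
    HandleReplyOld(void *data, void *lastserver, char *reply)
    {
        (void)data; (void)lastserver; (void)reply;
        return S_HELPER_RELEASE;             // caller released/reserved the helper for us
    }

    // After: nothing is returned; the request object calls releaseAuthServer()
    // (which wraps helperStatefulReleaseServer()) when the helper is no longer needed.
    static void
    HandleReplyNew(void *data, void *lastserver, char *reply)
    {
        (void)data; (void)lastserver; (void)reply;
    }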
diff -u -r -N squid-3.1.0.13/src/base/Makefile.in squid-3.1.0.14/src/base/Makefile.in
--- squid-3.1.0.13/src/base/Makefile.in 2009-08-05 01:32:37.000000000 +1200
+++ squid-3.1.0.14/src/base/Makefile.in 2009-09-27 15:28:56.000000000 +1200
@@ -76,6 +76,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/src/BodyPipe.cc squid-3.1.0.14/src/BodyPipe.cc
--- squid-3.1.0.13/src/BodyPipe.cc 2009-08-05 01:32:11.000000000 +1200
+++ squid-3.1.0.14/src/BodyPipe.cc 2009-09-27 15:28:28.000000000 +1200
@@ -189,9 +189,8 @@
if (atEof) {
if (!bodySizeKnown())
theBodySize = thePutSize;
- else
- if (bodySize() != thePutSize)
- debugs(91,3, HERE << "aborting on premature eof" << status());
+ else if (bodySize() != thePutSize)
+ debugs(91,3, HERE << "aborting on premature eof" << status());
} else {
        // assert that we can detect the abort if the consumer joins later
assert(!bodySizeKnown() || bodySize() != thePutSize);
@@ -321,9 +320,8 @@
const size_t currentSize = theBuf.contentSize();
if (checkout.checkedOutSize > currentSize)
postConsume(checkout.checkedOutSize - currentSize);
- else
- if (checkout.checkedOutSize < currentSize)
- postAppend(currentSize - checkout.checkedOutSize);
+ else if (checkout.checkedOutSize < currentSize)
+ postAppend(currentSize - checkout.checkedOutSize);
}
void
diff -u -r -N squid-3.1.0.13/src/cache_cf.cc squid-3.1.0.14/src/cache_cf.cc
--- squid-3.1.0.13/src/cache_cf.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/cache_cf.cc 2009-09-27 15:28:33.000000000 +1200
@@ -430,6 +430,12 @@
memConfigure();
/* Sanity checks */
+ if (Config.cacheSwap.swapDirs == NULL) {
+ /* Memory-only cache probably in effect. */
+ /* turn off the cache rebuild delays... */
+ StoreController::store_dirs_rebuilding = 0;
+ }
+
if (Debug::rotateNumber < 0) {
Debug::rotateNumber = Config.Log.rotateNumber;
}
@@ -1648,7 +1654,7 @@
return 0; /* NEVER REACHED */
}
/** Returns either the service port number from /etc/services */
- if( !isUnsignedNumeric(token, strlen(token)) )
+ if ( !isUnsignedNumeric(token, strlen(token)) )
port = getservbyname(token, proto);
if (port != NULL) {
return ntohs((u_short)port->s_port);
@@ -1724,6 +1730,8 @@
p->options.background_ping = 1;
} else if (!strcasecmp(token, "no-digest")) {
p->options.no_digest = 1;
+ } else if (!strcasecmp(token, "no-tproxy")) {
+ p->options.no_tproxy = 1;
} else if (!strcasecmp(token, "multicast-responder")) {
p->options.mcast_responder = 1;
} else if (!strncasecmp(token, "weight=", 7)) {
@@ -1889,7 +1897,7 @@
p->weight = 1;
if (p->connect_fail_limit < 1)
- p->connect_fail_limit = 1;
+ p->connect_fail_limit = 10;
p->icp.version = ICP_VERSION_CURRENT;
@@ -2996,6 +3004,14 @@
s->accel = 1;
} else if (strcmp(token, "allow-direct") == 0) {
s->allow_direct = 1;
+ } else if (strcmp(token, "ignore-cc") == 0) {
+ s->ignore_cc = 1;
+#if !HTTP_VIOLATIONS
+ if (!s->accel) {
+ debugs(3, DBG_CRITICAL, "FATAL: ignore-cc is only valid in accelerator mode");
+ self_destruct();
+ }
+#endif
} else if (strcmp(token, "no-connection-auth") == 0) {
s->connection_auth_disabled = true;
} else if (strcmp(token, "connection-auth=off") == 0) {
@@ -3038,16 +3054,12 @@
IpInterceptor.StartTransparency();
/* Log information regarding the port modes under transparency. */
debugs(3, DBG_IMPORTANT, "Starting IP Spoofing on port " << s->s);
- debugs(3, DBG_IMPORTANT, "Disabling Authentication on port " << s->s << " (Ip spoofing enabled)");
+ debugs(3, DBG_IMPORTANT, "Disabling Authentication on port " << s->s << " (IP spoofing enabled)");
-#if USE_IPV6
- /* INET6: until target TPROXY is known to work on IPv6 SOCKET, force wildcard to IPv4 */
- debugs(3, DBG_IMPORTANT, "Disabling IPv6 on port " << s->s << " (interception enabled)");
- if ( s->s.IsIPv6() && !s->s.SetIPv4() ) {
- debugs(3, DBG_CRITICAL, "http(s)_port: IPv6 addresses cannot be transparent (protocol does not provide NAT)" << s->s );
+ if (!IpInterceptor.ProbeForTproxy(s->s)) {
+ debugs(3, DBG_CRITICAL, "FATAL: http(s)_port: TPROXY support in the system does not work.");
self_destruct();
}
-#endif
} else if (strcmp(token, "ipv4") == 0) {
#if USE_IPV6
diff -u -r -N squid-3.1.0.13/src/cbdata.h squid-3.1.0.14/src/cbdata.h
--- squid-3.1.0.13/src/cbdata.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/cbdata.h 2009-09-27 15:28:33.000000000 +1200
@@ -334,8 +334,13 @@
*/
#define cbdataReferenceValidDone(var, ptr) cbdataInternalReferenceDoneValid((void **)&(var), (ptr))
-/// \ingroup CBDATAAPI
+/**
+ * \ingroup CBDATAAPI
+ *
+ * This needs to be defined LAST in the class definition. It plays with private/public states in C++.
+ */
#define CBDATA_CLASS2(type) \
+ private: \
static cbdata_type CBDATA_##type; \
public: \
void *operator new(size_t size) { \
@@ -346,8 +351,7 @@
void operator delete (void *address) { \
if (address) cbdataInternalFree(address);\
} \
- void *toCbdata() { return this; } \
- private:
+ void *toCbdata() { return this; }
#endif /* !CBDATA_DEBUG */
/**
@@ -459,6 +463,7 @@
\todo CODE: make this a private field.
*/
void *data; /* the wrapped data */
+
private:
CBDATA_CLASS2(generic_cbdata);
};
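
The cbdata.h change above makes CBDATA_CLASS2() open with "private:" and end while still inside a "public:" section, which is why the new comment insists the macro be the last thing in a class definition: any member declared after it would silently become public. A hedged usage sketch, assuming Squid's cbdata.h is included (ExampleJob is an invented class name):

    #include "cbdata.h"          // Squid header providing CBDATA_CLASS2/CBDATA_CLASS_INIT

    class ExampleJob
    {
    public:
        ExampleJob() {}
        void doWork() {}

    private:
        int state;               // ordinary members go before the macro

        CBDATA_CLASS2(ExampleJob);   // must remain the LAST thing in the class body
    };

    CBDATA_CLASS_INIT(ExampleJob);   // registers the cbdata type (in the .cc file)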
diff -u -r -N squid-3.1.0.13/src/cf.data.pre squid-3.1.0.14/src/cf.data.pre
--- squid-3.1.0.13/src/cf.data.pre 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/cf.data.pre 2009-09-27 15:28:33.000000000 +1200
@@ -688,7 +688,7 @@
#Recommended minimum configuration:
acl manager proto cache_object
acl localhost src 127.0.0.1/32
-acl to_localhost dst 127.0.0.0/8
+acl to_localhost dst 127.0.0.0/8 0.0.0.0/32
#
# Example rule allowing access from your local networks.
# Adapt to list your (internal) IP networks from where browsing
@@ -821,8 +821,8 @@
opposite of the last line in the list. If the last line was
deny, the default is allow. Conversely, if the last line
is allow, the default will be deny. For these reasons, it is a
- good idea to have an "deny all" or "allow all" entry at the end
- of your access lists to avoid potential confusion.
+	good idea to have a "deny all" entry at the end of your access
+ lists to avoid potential confusion.
This clause supports both fast and slow acl types.
See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
@@ -1105,6 +1105,11 @@
protocol= Protocol to reconstruct accelerated requests with.
Defaults to http.
+ ignore-cc Ignore request Cache-Control headers.
+
+ Warning: This option violates HTTP specifications if
+ used in non-accelerator setups.
+
connection-auth[=on|off]
use connection-auth=off to tell Squid to prevent
forwarding Microsoft connection oriented authentication
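
Since ignore-cc is only safe (and, without HTTP_VIOLATIONS, only accepted) on accelerator ports, an illustrative squid.conf fragment for the new option might look like this (hostnames and port numbers are examples only):

	# reverse-proxy port that ignores client Cache-Control headers
	http_port 80 accel defaultsite=www.example.com ignore-cc

	# ordinary forward-proxy port left untouched; ignore-cc is not used here
	http_port 3128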
@@ -1619,274 +1624,248 @@
LOC: Config.peers
DOC_START
To specify other caches in a hierarchy, use the format:
-
+
cache_peer hostname type http-port icp-port [options]
-
+
For example,
-
+
# proxy icp
# hostname type port port options
# -------------------- -------- ----- ----- -----------
- cache_peer parent.foo.net parent 3128 3130 proxy-only default
+ cache_peer parent.foo.net parent 3128 3130 default
cache_peer sib1.foo.net sibling 3128 3130 proxy-only
cache_peer sib2.foo.net sibling 3128 3130 proxy-only
-
- type: either 'parent', 'sibling', or 'multicast'.
-
- proxy-port: The port number where the cache listens for proxy
- requests.
-
- icp-port: Used for querying neighbor caches about
- objects. To have a non-ICP neighbor
- specify '0' for the ICP port.
- NOTE: Also requires icp_port option enabled to send/receive
- requests via this method.
-
- options: proxy-only
- weight=n
- basetime=n
- ttl=n
- no-query
- background-ping
- default
- round-robin
- weighted-round-robin
- carp
- userhash
- sourcehash
- multicast-responder
- closest-only
- no-digest
- no-netdb-exchange
- no-delay
- login=user:password | PASS | *:password
- connect-timeout=nn
- connect-fail-limit=nn
- digest-url=url
- allow-miss
- max-conn=n
- htcp
- htcp-oldsquid
- htcp-no-clr
- htcp-no-purge-clr
- htcp-only-clr
- htcp-forward-clr
- originserver
- name=xxx
- forceddomain=name
- ssl
- sslcert=/path/to/ssl/certificate
- sslkey=/path/to/ssl/key
- sslversion=1|2|3|4
- sslcipher=...
- ssloptions=...
- front-end-https[=on|auto]
- connection-auth[=on|off|auto]
-
- use 'proxy-only' to specify objects fetched
- from this cache should not be saved locally.
-
- use 'weight=n' to affect the selection of a peer
- during any weighted peer-selection mechanisms.
- The weight must be an integer; default is 1,
- larger weights are favored more.
- This option does not affect parent selection if a peering
- protocol is not in use.
-
- use 'basetime=n' to specify a base amount to
- be subtracted from round trip times of parents.
- It is subtracted before division by weight in calculating
- which parent to fectch from. If the rtt is less than the
- base time the rtt is set to a minimal value.
-
- use 'ttl=n' to specify a IP multicast TTL to use
- when sending an ICP queries to this address.
- Only useful when sending to a multicast group.
- Because we don't accept ICP replies from random
- hosts, you must configure other group members as
- peers with the 'multicast-responder' option below.
-
- use 'no-query' to NOT send ICP queries to this
- neighbor.
-
- use 'background-ping' to only send ICP queries to this
- neighbor infrequently. This is used to keep the neighbor
- round trip time updated and is usually used in
- conjunction with weighted-round-robin.
-
- use 'default' if this is a parent cache which can
- be used as a "last-resort" if a peer cannot be located
- by any of the peer-selection mechanisms.
- If specified more than once, only the first is used.
-
- use 'round-robin' to define a set of parents which
- should be used in a round-robin fashion in the
- absence of any ICP queries.
-
- use 'weighted-round-robin' to define a set of parents
- which should be used in a round-robin fashion with the
- frequency of each parent being based on the round trip
- time. Closer parents are used more often.
- Usually used for background-ping parents.
-
- use 'carp' to define a set of parents which should
- be used as a CARP array. The requests will be
- distributed among the parents based on the CARP load
- balancing hash function based on their weight.
-
- use 'userhash' to load-balance amongst a set of parents
- based on the client proxy_auth or ident username.
-
- use 'sourcehash' to load-balance amongst a set of parents
- based on the client source ip.
-
- 'multicast-responder' indicates the named peer
- is a member of a multicast group. ICP queries will
- not be sent directly to the peer, but ICP replies
- will be accepted from it.
-
- 'closest-only' indicates that, for ICP_OP_MISS
- replies, we'll only forward CLOSEST_PARENT_MISSes
- and never FIRST_PARENT_MISSes.
-
- use 'no-digest' to NOT request cache digests from
- this neighbor.
-
- 'no-netdb-exchange' disables requesting ICMP
- RTT database (NetDB) from the neighbor.
-
- use 'no-delay' to prevent access to this neighbor
- from influencing the delay pools.
-
- use 'login=user:password' if this is a personal/workgroup
- proxy and your parent requires proxy authentication.
- Note: The string can include URL escapes (i.e. %20 for
- spaces). This also means % must be written as %%.
-
- use 'login=PASS' if users must authenticate against
- the upstream proxy or in the case of a reverse proxy
- configuration, the origin web server. This will pass
- the users credentials as they are to the peer.
- This only works for the Basic HTTP authentication scheme.
- Note: To combine this with proxy_auth both proxies must
- share the same user database as HTTP only allows for
- a single login (one for proxy, one for origin server).
- Also be warned this will expose your users proxy
- password to the peer. USE WITH CAUTION
-
- use 'login=*:password' to pass the username to the
- upstream cache, but with a fixed password. This is meant
- to be used when the peer is in another administrative
- domain, but it is still needed to identify each user.
- The star can optionally be followed by some extra
- information which is added to the username. This can
- be used to identify this proxy to the peer, similar to
- the login=username:password option above.
-
- use 'connect-timeout=nn' to specify a peer
- specific connect timeout (also see the
- peer_connect_timeout directive)
-
- use 'connect-fail-limit=nn' to specify how many times
- connecting to a peer must fail before it is marked as
- down. Default is 10.
-
- use 'digest-url=url' to tell Squid to fetch the cache
- digest (if digests are enabled) for this host from
- the specified URL rather than the Squid default
- location.
-
- use 'allow-miss' to disable Squid's use of only-if-cached
- when forwarding requests to siblings. This is primarily
- useful when icp_hit_stale is used by the sibling. To
- extensive use of this option may result in forwarding
- loops, and you should avoid having two-way peerings
- with this option. (for example to deny peer usage on
- requests from peer by denying cache_peer_access if the
- source is a peer)
-
- use 'max-conn=n' to limit the amount of connections Squid
- may open to this peer.
-
- use 'htcp' to send HTCP, instead of ICP, queries
- to the neighbor. You probably also want to
- set the "icp port" to 4827 instead of 3130.
- You MUST also set htcp_access expicitly. The default of
- deny all will prevent peer traffic.
-
- use 'htcp-oldsquid' to send HTCP to old Squid versions
- You MUST also set htcp_access expicitly. The default of
- deny all will prevent peer traffic.
-
- use 'htcp-no-clr' to send HTCP to the neighbor but without
- sending any CLR requests. This cannot be used with
- htcp-only-clr.
-
- use 'htcp-no-purge-clr' to send HTCP to the neighbor
- including CLRs but only when they do not result from
- PURGE requests.
-
- use 'htcp-only-clr' to send HTCP to the neighbor but ONLY
- CLR requests. This cannot be used with htcp-no-clr.
-
- use 'htcp-forward-clr' to forward any HTCP CLR requests
- this proxy receives to the peer.
-
- 'originserver' causes this parent peer to be contacted as
- a origin server. Meant to be used in accelerator setups.
-
- use 'name=xxx' if you have multiple peers on the same
- host but different ports. This name can be used to
- differentiate the peers in cache_peer_access and similar
- directives. Including the peername ACL type.
-
- use 'forceddomain=name' to forcibly set the Host header
- of requests forwarded to this peer. Useful in accelerator
- setups where the server (peer) expects a certain domain
- name and using redirectors to feed this domain name
- is not feasible.
-
- use 'ssl' to indicate connections to this peer should
- be SSL/TLS encrypted.
-
- use 'sslcert=/path/to/ssl/certificate' to specify a client
- SSL certificate to use when connecting to this peer.
-
- use 'sslkey=/path/to/ssl/key' to specify the private SSL
- key corresponding to sslcert above. If 'sslkey' is not
- specified 'sslcert' is assumed to reference a
- combined file containing both the certificate and the key.
-
- use sslversion=1|2|3|4 to specify the SSL version to use
- when connecting to this peer
- 1 = automatic (default)
- 2 = SSL v2 only
- 3 = SSL v3 only
- 4 = TLS v1 only
-
- use sslcipher=... to specify the list of valid SSL ciphers
- to use when connecting to this peer.
-
- use ssloptions=... to specify various SSL engine options:
- NO_SSLv2 Disallow the use of SSLv2
- NO_SSLv3 Disallow the use of SSLv3
- NO_TLSv1 Disallow the use of TLSv1
- See src/ssl_support.c or the OpenSSL documentation for
- a more complete list.
-
- use sslcafile=... to specify a file containing
- additional CA certificates to use when verifying the
- peer certificate.
-
- use sslcapath=... to specify a directory containing
- additional CA certificates to use when verifying the
- peer certificate.
-
- use sslcrlfile=... to specify a certificate revocation
- list file to use when verifying the peer certificate.
-
- use sslflags=... to specify various flags modifying the
- SSL implementation:
+ cache_peer example.com parent 80 0 no-query default
+ cache_peer cdn.example.com sibling 3128 0
+
+ type: either 'parent', 'sibling', or 'multicast'.
+
+	proxy-port:	The port number where the peer accepts HTTP requests.
+ For other Squid proxies this is usually 3128
+ For web servers this is usually 80
+
+ icp-port: Used for querying neighbor caches about objects.
+ Set to 0 if the peer does not support ICP or HTCP.
+ See ICP and HTCP options below for additional details.
+
+
+ ==== ICP OPTIONS ====
+
+ You MUST also set icp_port and icp_access explicitly when using these options.
+ The defaults will prevent peer traffic using ICP.
+
+
+ no-query Disable ICP queries to this neighbor.
+
+ multicast-responder
+ Indicates the named peer is a member of a multicast group.
+ ICP queries will not be sent directly to the peer, but ICP
+ replies will be accepted from it.
+
+ closest-only Indicates that, for ICP_OP_MISS replies, we'll only forward
+ CLOSEST_PARENT_MISSes and never FIRST_PARENT_MISSes.
+
+ background-ping
+ To only send ICP queries to this neighbor infrequently.
+ This is used to keep the neighbor round trip time updated
+ and is usually used in conjunction with weighted-round-robin.
+
+
+ ==== HTCP OPTIONS ====
+
+ You MUST also set htcp_port and htcp_access explicitly when using these options.
+ The defaults will prevent peer traffic using HTCP.
+
+
+ htcp Send HTCP, instead of ICP, queries to the neighbor.
+ You probably also want to set the "icp-port" to 4827
+ instead of 3130.
+
+ htcp-oldsquid Send HTCP to old Squid versions.
+
+ htcp-no-clr Send HTCP to the neighbor but without
+ sending any CLR requests. This cannot be used with
+ htcp-only-clr.
+
+ htcp-only-clr Send HTCP to the neighbor but ONLY CLR requests.
+ This cannot be used with htcp-no-clr.
+
+ htcp-no-purge-clr
+ Send HTCP to the neighbor including CLRs but only when
+ they do not result from PURGE requests.
+
+ htcp-forward-clr
+ Forward any HTCP CLR requests this proxy receives to the peer.
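+
+	For example, to query a sibling with HTCP instead of ICP (the
+	hostname is illustrative; 4827 is the conventional HTCP port):
+
+		cache_peer sib.example.com sibling 3128 4827 htcp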
+
+
+ ==== PEER SELECTION METHODS ====
+
+ The default peer selection method is ICP, with the first responding peer
+ being used as source. These options can be used for better load balancing.
+
+
+ default This is a parent cache which can be used as a "last-resort"
+ if a peer cannot be located by any of the peer-selection methods.
+ If specified more than once, only the first is used.
+
+ round-robin Load-Balance parents which should be used in a round-robin
+ fashion in the absence of any ICP queries.
+ weight=N can be used to add bias.
+
+ weighted-round-robin
+ Load-Balance parents which should be used in a round-robin
+ fashion with the frequency of each parent being based on the
+ round trip time. Closer parents are used more often.
+ Usually used for background-ping parents.
+ weight=N can be used to add bias.
+
+ carp Load-Balance parents which should be used as a CARP array.
+			Requests will be distributed among the parents by the
+			CARP load balancing hash function, taking each parent's
+			weight into account.
+
+ userhash Load-balance parents based on the client proxy_auth or ident username.
+
+ sourcehash Load-balance parents based on the client source IP.
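+
+	A hypothetical three-parent setup combining these methods
+	(hostnames are examples only) could be:
+
+		cache_peer p1.example.com parent 3128 0 no-query round-robin weight=2
+		cache_peer p2.example.com parent 3128 0 no-query round-robin
+		cache_peer p3.example.com parent 3128 0 no-query default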
+
+
+ ==== PEER SELECTION OPTIONS ====
+
+ weight=N use to affect the selection of a peer during any weighted
+ peer-selection mechanisms.
+ The weight must be an integer; default is 1,
+ larger weights are favored more.
+ This option does not affect parent selection if a peering
+ protocol is not in use.
+
+ basetime=N Specify a base amount to be subtracted from round trip
+ times of parents.
+ It is subtracted before division by weight in calculating
+			which parent to fetch from. If the rtt is less than the
+ base time the rtt is set to a minimal value.
+
+	ttl=N	Specify an IP multicast TTL to use when sending ICP
+			queries to this address.
+ Only useful when sending to a multicast group.
+ Because we don't accept ICP replies from random
+ hosts, you must configure other group members as
+ peers with the 'multicast-responder' option.
+
+ no-delay To prevent access to this neighbor from influencing the
+ delay pools.
+
+ digest-url=URL Tell Squid to fetch the cache digest (if digests are
+ enabled) for this host from the specified URL rather
+ than the Squid default location.
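+
+	For example (all values are illustrative), a nearby parent
+	probed with background pings can be favoured like this:
+
+		cache_peer near.example.com parent 3128 3130 background-ping weighted-round-robin basetime=10 weight=2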
+
+
+ ==== ACCELERATOR / REVERSE-PROXY OPTIONS ====
+
+ originserver Causes this parent to be contacted as an origin server.
+ Meant to be used in accelerator setups when the peer
+ is a web server.
+
+ forceddomain=name
+ Set the Host header of requests forwarded to this peer.
+ Useful in accelerator setups where the server (peer)
+ expects a certain domain name but clients may request
+			others, e.g. example.com or www.example.com.
+
+ no-digest Disable request of cache digests.
+
+ no-netdb-exchange
+ Disables requesting ICMP RTT database (NetDB).
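+
+	In an accelerator setup these are typically combined, for
+	example (the address and domain are illustrative):
+
+		cache_peer 192.0.2.10 parent 80 0 no-query originserver forceddomain=www.example.com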
+
+
+ ==== AUTHENTICATION OPTIONS ====
+
+ login=user:password
+ If this is a personal/workgroup proxy and your parent
+ requires proxy authentication.
+
+ Note: The string can include URL escapes (i.e. %20 for
+ spaces). This also means % must be written as %%.
+
+ login=PROXYPASS
+ Send login details received from client to this peer.
+ Authentication is not required, nor changed.
+
+ Note: This will pass any form of authentication but
+ only Basic auth will work through a proxy unless the
+ connection-auth options are also used.
+
+ login=PASS Send login details received from client to this peer.
+ Authentication is not required by this option.
+ If there are no client-provided authentication headers
+ to pass on, but username and password are available
+			from either proxy login or external ACL user= and
+			password= result tags, they may be sent instead.
+
+ Note: To combine this with proxy_auth both proxies must
+ share the same user database as HTTP only allows for
+ a single login (one for proxy, one for origin server).
+			Also be warned this will expose your users' proxy
+			passwords to the peer. USE WITH CAUTION
+
+ login=*:password
+ Send the username to the upstream cache, but with a
+ fixed password. This is meant to be used when the peer
+ is in another administrative domain, but it is still
+ needed to identify each user.
+ The star can optionally be followed by some extra
+ information which is added to the username. This can
+ be used to identify this proxy to the peer, similar to
+ the login=username:password option above.
+
+ connection-auth=on|off
+			Tell Squid that this peer does or does not support Microsoft
+ connection oriented authentication, and any such
+ challenges received from there should be ignored.
+ Default is auto to automatically determine the status
+ of the peer.
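+
+	For example, relaying client credentials to an upstream parent
+	(the hostname is illustrative; both proxies must share the same
+	user database as noted above):
+
+		cache_peer upstream.example.com parent 3128 0 no-query login=PASS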
+
+
+ ==== SSL / HTTPS / TLS OPTIONS ====
+
+ ssl Encrypt connections to this peer with SSL/TLS.
+
+ sslcert=/path/to/ssl/certificate
+ A client SSL certificate to use when connecting to
+ this peer.
+
+ sslkey=/path/to/ssl/key
+ The private SSL key corresponding to sslcert above.
+ If 'sslkey' is not specified 'sslcert' is assumed to
+ reference a combined file containing both the
+ certificate and the key.
+
+ sslversion=1|2|3|4
+ The SSL version to use when connecting to this peer
+ 1 = automatic (default)
+ 2 = SSL v2 only
+ 3 = SSL v3 only
+ 4 = TLS v1 only
+
+ sslcipher=... The list of valid SSL ciphers to use when connecting
+ to this peer.
+
+ ssloptions=... Specify various SSL engine options:
+ NO_SSLv2 Disallow the use of SSLv2
+ NO_SSLv3 Disallow the use of SSLv3
+ NO_TLSv1 Disallow the use of TLSv1
+ See src/ssl_support.c or the OpenSSL documentation for
+ a more complete list.
+
+ sslcafile=... A file containing additional CA certificates to use
+ when verifying the peer certificate.
+
+ sslcapath=... A directory containing additional CA certificates to
+ use when verifying the peer certificate.
+
+ sslcrlfile=... A certificate revocation list file to use when
+ verifying the peer certificate.
+
+ sslflags=... Specify various flags modifying the SSL implementation:
+
DONT_VERIFY_PEER
Accept certificates even if they fail to
verify.
@@ -1896,24 +1875,54 @@
DONT_VERIFY_DOMAIN
Don't verify the peer certificate
matches the server name
-
- use ssldomain= to specify the peer name as advertised
- in it's certificate. Used for verifying the correctness
- of the received peer certificate. If not specified the
- peer hostname will be used.
-
- use front-end-https to enable the "Front-End-Https: On"
- header needed when using Squid as a SSL frontend in front
- of Microsoft OWA. See MS KB document Q307347 for details
- on this header. If set to auto the header will
- only be added if the request is forwarded as a https://
- URL.
-
- use connection-auth=off to tell Squid that this peer does
- not support Microsoft connection oriented authentication,
- and any such challenges received from there should be
- ignored. Default is auto to automatically determine the
- status of the peer.
+
+	ssldomain= 	The peer name as advertised in its certificate.
+ Used for verifying the correctness of the received peer
+ certificate. If not specified the peer hostname will be
+ used.
+
+ front-end-https
+ Enable the "Front-End-Https: On" header needed when
+			using Squid as an SSL frontend in front of Microsoft OWA.
+ See MS KB document Q307347 for details on this header.
+ If set to auto the header will only be added if the
+			request is forwarded as an https:// URL.
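+
+	A hypothetical TLS-encrypted origin peer (the hostname and file
+	paths are examples only) might be configured as:
+
+		cache_peer secure.example.com parent 443 0 no-query originserver ssl sslcafile=/etc/squid/peer-ca.pem ssldomain=secure.example.com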
+
+
+ ==== GENERAL OPTIONS ====
+
+ connect-timeout=N
+ A peer-specific connect timeout.
+ Also see the peer_connect_timeout directive.
+
+ connect-fail-limit=N
+ How many times connecting to a peer must fail before
+ it is marked as down. Default is 10.
+
+ allow-miss Disable Squid's use of only-if-cached when forwarding
+ requests to siblings. This is primarily useful when
+			icp_hit_stale is used by the sibling. Too extensive use
+			of this option may result in forwarding loops, and you
+			should avoid having two-way peerings with this option.
+			For example, deny peer usage on requests from a peer
+			by denying cache_peer_access if the source is a peer.
+
+	max-conn=N 	Limit the number of connections Squid may open to this
+			peer. see also
+
+ name=xxx Unique name for the peer.
+ Required if you have multiple peers on the same host
+ but different ports.
+ This name can be used in cache_peer_access and similar
+			directives to identify the peer.
+ Can be used by outgoing access controls through the
+ peername ACL type.
+
+ no-tproxy Do not use the client-spoof TPROXY support when forwarding
+ requests to this peer. Use normal address selection instead.
+
+	proxy-only	Objects fetched from the peer will not be stored locally.
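+
+	For example, two peers on the same host but different ports
+	(the hostname and values are illustrative) need distinct names:
+
+		cache_peer proxy.example.com parent 3128 0 no-query name=proxyA
+		cache_peer proxy.example.com parent 8080 0 no-query name=proxyB connect-timeout=15 max-conn=64 proxy-only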
+
DOC_END
NAME: cache_peer_domain cache_host_domain
@@ -6448,11 +6457,11 @@
LOC: Config.chroot_dir
DEFAULT: none
DOC_START
- Use this to have Squid do a chroot() while initializing. This
- also causes Squid to fully drop root privileges after
- initializing. This means, for example, if you use a HTTP
- port less than 1024 and try to reconfigure, you will may get an
- error saying that Squid can not open the port.
+	Specifies a directory where Squid should do a chroot() while
+ initializing. This also causes Squid to fully drop root
+ privileges after initializing. This means, for example, if you
+ use a HTTP port less than 1024 and try to reconfigure, you may
+ get an error saying that Squid can not open the port.
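+
+	For example, assuming this is set with the chroot directive
+	(the path is illustrative):
+
+		chroot /var/squid/rootdir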
DOC_END
NAME: balance_on_multiple_ip
diff -u -r -N squid-3.1.0.13/src/ChunkedCodingParser.cc squid-3.1.0.14/src/ChunkedCodingParser.cc
--- squid-3.1.0.13/src/ChunkedCodingParser.cc 2009-08-05 01:32:12.000000000 +1200
+++ squid-3.1.0.14/src/ChunkedCodingParser.cc 2009-09-27 15:28:28.000000000 +1200
@@ -192,17 +192,15 @@
if (quoted) {
if (c == '\\')
slashed = true;
- else
- if (c == '"')
- quoted = false;
+ else if (c == '"')
+ quoted = false;
continue;
- } else
- if (c == '"') {
- quoted = true;
- crOff = -1;
- continue;
- }
+ } else if (c == '"') {
+ quoted = true;
+ crOff = -1;
+ continue;
+ }
if (crOff < 0) { // looking for the first CR or LF
diff -u -r -N squid-3.1.0.13/src/client_side.cc squid-3.1.0.14/src/client_side.cc
--- squid-3.1.0.13/src/client_side.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/client_side.cc 2009-09-27 15:28:33.000000000 +1200
@@ -450,7 +450,7 @@
assert(request);
assert(aLogEntry);
-#if ICAP_CLIENT
+#if ICAP_CLIENT
Adaptation::Icap::History::Pointer ih = request->icapHistory();
#endif
if (Config.onoff.log_mime_hdrs) {
@@ -1898,13 +1898,14 @@
// Temporary hack helper: determine whether the request is chunked, expensive
static bool
-isChunkedRequest(const HttpParser *hp) {
+isChunkedRequest(const HttpParser *hp)
+{
HttpRequest request;
if (!request.parseHeader(HttpParserHdrBuf(hp), HttpParserHdrSz(hp)))
- return false;
+ return false;
return request.header.has(HDR_TRANSFER_ENCODING) &&
- request.header.hasListMember(HDR_TRANSFER_ENCODING, "chunked", ',');
+ request.header.hasListMember(HDR_TRANSFER_ENCODING, "chunked", ',');
}
@@ -1936,8 +1937,7 @@
if ( hp->bufsiz <= 0) {
debugs(33, 5, "Incomplete request, waiting for end of request line");
return NULL;
- }
- else if ( (size_t)hp->bufsiz >= Config.maxRequestHeaderSize && headersEnd(hp->buf, Config.maxRequestHeaderSize) == 0) {
+ } else if ( (size_t)hp->bufsiz >= Config.maxRequestHeaderSize && headersEnd(hp->buf, Config.maxRequestHeaderSize) == 0) {
debugs(33, 5, "parseHttpRequest: Too large request");
return parseHttpRequestAbort(conn, "error:request-too-large");
}
@@ -1989,6 +1989,15 @@
/* Set method_p */
*method_p = HttpRequestMethod(&hp->buf[hp->m_start], &hp->buf[hp->m_end]+1);
+ /* deny CONNECT via accelerated ports */
+ if (*method_p == METHOD_CONNECT && conn && conn->port && conn->port->accel) {
+ debugs(33, DBG_IMPORTANT, "WARNING: CONNECT method received on " << conn->port->protocol << " Accelerator port " << conn->port->s.GetPort() );
+ /* XXX need a way to say "this many character length string" */
+ debugs(33, DBG_IMPORTANT, "WARNING: for request: " << hp->buf);
+ /* XXX need some way to set 405 status on the error reply */
+ return parseHttpRequestAbort(conn, "error:method-not-allowed");
+ }
+
if (*method_p == METHOD_NONE) {
/* XXX need a way to say "this many character length string" */
debugs(33, 1, "clientParseRequestMethod: Unsupported method in request '" << hp->buf << "'");
@@ -2033,8 +2042,8 @@
// logic to determine when it is valid to do so.
// FUTURE_CODE_TO_SUPPORT_CHUNKED_REQUESTS below will replace this hack.
if (hp->v_min == 1 && hp->v_maj == 1 && // broken client, may send chunks
- Config.maxChunkedRequestBodySize > 0 && // configured to dechunk
- (*method_p == METHOD_PUT || *method_p == METHOD_POST)) {
+ Config.maxChunkedRequestBodySize > 0 && // configured to dechunk
+ (*method_p == METHOD_PUT || *method_p == METHOD_POST)) {
// check only once per request because isChunkedRequest is expensive
if (conn->in.dechunkingState == ConnStateData::chunkUnknown) {
@@ -2361,6 +2370,7 @@
}
request->flags.accelerated = http->flags.accel;
+ request->flags.ignore_cc = conn->port->ignore_cc;
request->flags.no_direct = request->flags.accelerated ? !conn->port->allow_direct : 0;
/** \par
@@ -2743,15 +2753,15 @@
{
assert(bodyPipe != NULL);
- size_t putSize = 0;
+ size_t putSize = 0;
#if FUTURE_CODE_TO_SUPPORT_CHUNKED_REQUESTS
- // The code below works, in principle, but we cannot do dechunking
- // on-the-fly because that would mean sending chunked requests to
- // the next hop. Squid lacks logic to determine which servers can
- // receive chunk requests. Squid v3.0 code cannot even handle chunked
- // responses which we may encourage by sending chunked requests.
- // The error generation code probably needs more work.
+ // The code below works, in principle, but we cannot do dechunking
+ // on-the-fly because that would mean sending chunked requests to
+ // the next hop. Squid lacks logic to determine which servers can
+ // receive chunk requests. Squid v3.0 code cannot even handle chunked
+ // responses which we may encourage by sending chunked requests.
+ // The error generation code probably needs more work.
if (in.bodyParser) { // chunked body
debugs(33,5, HERE << "handling chunked request body for FD " << fd);
bool malformedChunks = false;
@@ -2771,7 +2781,7 @@
} else {
// parser needy state must imply body pipe needy state
if (in.bodyParser->needsMoreData() &&
- !bodyPipe->mayNeedMoreData())
+ !bodyPipe->mayNeedMoreData())
malformedChunks = true;
// XXX: if bodyParser->needsMoreSpace, how can we guarantee it?
}
@@ -2789,14 +2799,14 @@
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
assert (repContext);
repContext->setReplyToError(ERR_INVALID_REQ, HTTP_BAD_REQUEST,
- METHOD_NONE, NULL, &peer.sin_addr,
- NULL, NULL, NULL);
+ METHOD_NONE, NULL, &peer.sin_addr,
+ NULL, NULL, NULL);
context->pullData();
}
flags.readMoreRequests = false;
return; // XXX: is that sufficient to generate an error?
}
- } else // identity encoding
+ } else // identity encoding
#endif
{
debugs(33,5, HERE << "handling plain request body for FD " << fd);
@@ -3612,7 +3622,7 @@
debugs(33, 5, HERE << "finish dechunking; content: " << in.dechunked.contentSize());
assert(in.dechunkingState == chunkReady);
- assert(in.bodyParser);
+ assert(in.bodyParser);
delete in.bodyParser;
in.bodyParser = NULL;
@@ -3620,7 +3630,7 @@
// dechunking cannot make data bigger
assert(headerSize + in.dechunked.contentSize() + in.chunked.contentSize()
- <= static_cast(in.notYetUsed));
+ <= static_cast(in.notYetUsed));
assert(in.notYetUsed <= in.allocatedSize);
// copy dechunked content
@@ -3647,7 +3657,7 @@
ConnStateData::parseRequestChunks(HttpParser *)
{
debugs(33,5, HERE << "parsing chunked request body at " <<
- in.chunkedSeen << " < " << in.notYetUsed);
+ in.chunkedSeen << " < " << in.notYetUsed);
assert(in.bodyParser);
assert(in.dechunkingState == chunkParsing);
@@ -3657,20 +3667,20 @@
// be safe: count some chunked coding metadata towards the total body size
if (fresh + in.dechunked.contentSize() > Config.maxChunkedRequestBodySize) {
debugs(33,3, HERE << "chunked body (" << fresh << " + " <<
- in.dechunked.contentSize() << " may exceed " <<
- "chunked_request_body_max_size=" <<
- Config.maxChunkedRequestBodySize);
+ in.dechunked.contentSize() << " may exceed " <<
+ "chunked_request_body_max_size=" <<
+ Config.maxChunkedRequestBodySize);
in.dechunkingState = chunkError;
return false;
}
-
+
if (fresh > in.chunked.potentialSpaceSize()) {
// should not happen if Config.maxChunkedRequestBodySize is reasonable
debugs(33,1, HERE << "request_body_max_size exceeds chunked buffer " <<
- "size: " << fresh << " + " << in.chunked.contentSize() << " > " <<
- in.chunked.potentialSpaceSize() << " with " <<
- "chunked_request_body_max_size=" <<
- Config.maxChunkedRequestBodySize);
+ "size: " << fresh << " + " << in.chunked.contentSize() << " > " <<
+ in.chunked.potentialSpaceSize() << " with " <<
+ "chunked_request_body_max_size=" <<
+ Config.maxChunkedRequestBodySize);
in.dechunkingState = chunkError;
return false;
}
@@ -3696,8 +3706,8 @@
}
ConnStateData::In::In() : bodyParser(NULL),
- buf (NULL), notYetUsed (0), allocatedSize (0),
- dechunkingState(ConnStateData::chunkUnknown)
+ buf (NULL), notYetUsed (0), allocatedSize (0),
+ dechunkingState(ConnStateData::chunkUnknown)
{}
ConnStateData::In::~In()
diff -u -r -N squid-3.1.0.13/src/client_side.h squid-3.1.0.14/src/client_side.h
--- squid-3.1.0.13/src/client_side.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/client_side.h 2009-09-27 15:28:33.000000000 +1200
@@ -155,14 +155,14 @@
~In();
char *addressToReadInto() const;
- ChunkedCodingParser *bodyParser; ///< parses chunked request body
- MemBuf chunked; ///< contains unparsed raw (chunked) body data
- MemBuf dechunked; ///< accumulates parsed (dechunked) content
+ ChunkedCodingParser *bodyParser; ///< parses chunked request body
+ MemBuf chunked; ///< contains unparsed raw (chunked) body data
+ MemBuf dechunked; ///< accumulates parsed (dechunked) content
char *buf;
size_t notYetUsed;
size_t allocatedSize;
- size_t chunkedSeen; ///< size of processed or ignored raw read data
- DechunkingState dechunkingState; ///< request dechunking state
+ size_t chunkedSeen; ///< size of processed or ignored raw read data
+ DechunkingState dechunkingState; ///< request dechunking state
} in;
int64_t bodySizeLeft();
diff -u -r -N squid-3.1.0.13/src/client_side_reply.cc squid-3.1.0.14/src/client_side_reply.cc
--- squid-3.1.0.13/src/client_side_reply.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/client_side_reply.cc 2009-09-27 15:28:33.000000000 +1200
@@ -368,7 +368,7 @@
// if client sent IMS
- if (http->request->flags.ims) {
+ if (http->request->flags.ims && !old_entry->modifiedSince(http->request)) {
// forward the 304 from origin
debugs(88, 3, "handleIMSReply: origin replied 304, revalidating existing entry and forwarding 304 to client");
sendClientUpstreamResponse();
@@ -652,7 +652,7 @@
/// Deny loops for accelerator and interceptor. TODO: deny in all modes?
if (r->flags.loopdetect &&
- (http->flags.accel || http->flags.intercepted)) {
+ (http->flags.accel || http->flags.intercepted)) {
http->al.http.code = HTTP_FORBIDDEN;
err = clientBuildError(ERR_ACCESS_DENIED, HTTP_FORBIDDEN, NULL, http->getConn()->peer, http->request);
createStoreEntry(r->method, request_flags());
diff -u -r -N squid-3.1.0.13/src/client_side_request.cc squid-3.1.0.14/src/client_side_request.cc
--- squid-3.1.0.13/src/client_side_request.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/client_side_request.cc 2009-09-27 15:28:33.000000000 +1200
@@ -636,16 +636,14 @@
debugs(93,3,HERE << this << " adaptationAclCheckDone called");
assert(http);
-#if ICAP_CLIENT
+#if ICAP_CLIENT
Adaptation::Icap::History::Pointer ih = http->request->icapHistory();
- if(ih != NULL)
- {
- if (http->getConn() != NULL)
- {
+ if (ih != NULL) {
+ if (http->getConn() != NULL) {
ih->rfc931 = http->getConn()->rfc931;
-#if USE_SSL
+#if USE_SSL
ih->ssluser = sslGetUserEmail(fd_table[http->getConn()->fd].ssl);
-#endif
+#endif
}
ih->log_uri = http->log_uri;
ih->req_sz = http->req_sz;
@@ -744,10 +742,7 @@
HttpRequest *request = http->request;
HttpHeader *req_hdr = &request->header;
int no_cache = 0;
-#if !(USE_SQUID_ESI) || defined(USE_USERAGENT_LOG) || defined(USE_REFERER_LOG)
-
const char *str;
-#endif
request->imslen = -1;
request->ims = req_hdr->getTime(HDR_IF_MODIFIED_SINCE);
@@ -755,44 +750,39 @@
if (request->ims > 0)
request->flags.ims = 1;
-#if USE_SQUID_ESI
- /*
- * We ignore Cache-Control as per the Edge Architecture Section 3. See
- * www.esi.org for more information.
- */
-#else
-
- if (req_hdr->has(HDR_PRAGMA)) {
- String s = req_hdr->getList(HDR_PRAGMA);
+ if (!request->flags.ignore_cc) {
+ if (req_hdr->has(HDR_PRAGMA)) {
+ String s = req_hdr->getList(HDR_PRAGMA);
- if (strListIsMember(&s, "no-cache", ','))
- no_cache++;
+ if (strListIsMember(&s, "no-cache", ','))
+ no_cache++;
- s.clean();
- }
+ s.clean();
+ }
- if (request->cache_control)
- if (EBIT_TEST(request->cache_control->mask, CC_NO_CACHE))
- no_cache++;
+ if (request->cache_control)
+ if (EBIT_TEST(request->cache_control->mask, CC_NO_CACHE))
+ no_cache++;
- /*
- * Work around for supporting the Reload button in IE browsers when Squid
- * is used as an accelerator or transparent proxy, by turning accelerated
- * IMS request to no-cache requests. Now knows about IE 5.5 fix (is
- * actually only fixed in SP1, but we can't tell whether we are talking to
- * SP1 or not so all 5.5 versions are treated 'normally').
- */
- if (Config.onoff.ie_refresh) {
- if (http->flags.accel && request->flags.ims) {
- if ((str = req_hdr->getStr(HDR_USER_AGENT))) {
- if (strstr(str, "MSIE 5.01") != NULL)
- no_cache++;
- else if (strstr(str, "MSIE 5.0") != NULL)
- no_cache++;
- else if (strstr(str, "MSIE 4.") != NULL)
- no_cache++;
- else if (strstr(str, "MSIE 3.") != NULL)
- no_cache++;
+ /*
+ * Work around for supporting the Reload button in IE browsers when Squid
+ * is used as an accelerator or transparent proxy, by turning accelerated
+ * IMS request to no-cache requests. Now knows about IE 5.5 fix (is
+ * actually only fixed in SP1, but we can't tell whether we are talking to
+ * SP1 or not so all 5.5 versions are treated 'normally').
+ */
+ if (Config.onoff.ie_refresh) {
+ if (http->flags.accel && request->flags.ims) {
+ if ((str = req_hdr->getStr(HDR_USER_AGENT))) {
+ if (strstr(str, "MSIE 5.01") != NULL)
+ no_cache++;
+ else if (strstr(str, "MSIE 5.0") != NULL)
+ no_cache++;
+ else if (strstr(str, "MSIE 4.") != NULL)
+ no_cache++;
+ else if (strstr(str, "MSIE 3.") != NULL)
+ no_cache++;
+ }
}
}
}
@@ -801,7 +791,6 @@
no_cache++;
}
-#endif
if (no_cache) {
#if HTTP_VIOLATIONS
@@ -1340,9 +1329,9 @@
assert(!virginHeadSource);
assert(!adaptedBodySource);
virginHeadSource = initiateAdaptation(
- new Adaptation::Iterator(this, request, NULL, g));
+ new Adaptation::Iterator(this, request, NULL, g));
- // we could try to guess whether we can bypass this adaptation
+ // we could try to guess whether we can bypass this adaptation
// initiation failure, but it should not really happen
assert(virginHeadSource != NULL); // Must, really
}
@@ -1372,7 +1361,8 @@
// subscribe to receive reply body
if (new_rep->body_pipe != NULL) {
adaptedBodySource = new_rep->body_pipe;
- assert(adaptedBodySource->setConsumerIfNotLate(this));
+ int consumer_ok = adaptedBodySource->setConsumerIfNotLate(this);
+ assert(consumer_ok);
}
clientStreamNode *node = (clientStreamNode *)client_stream.tail->prev->data;
diff -u -r -N squid-3.1.0.13/src/client_side_request.h squid-3.1.0.14/src/client_side_request.h
--- squid-3.1.0.13/src/client_side_request.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/client_side_request.h 2009-09-27 15:28:33.000000000 +1200
@@ -61,8 +61,8 @@
class ClientHttpRequest
#if USE_ADAPTATION
- : public Adaptation::Initiator, // to start adaptation transactions
- public BodyConsumer // to receive reply bodies in request satisf. mode
+ : public Adaptation::Initiator, // to start adaptation transactions
+ public BodyConsumer // to receive reply bodies in request satisf. mode
#endif
{
diff -u -r -N squid-3.1.0.13/src/CommCalls.h squid-3.1.0.14/src/CommCalls.h
--- squid-3.1.0.13/src/CommCalls.h 2009-08-05 01:32:12.000000000 +1200
+++ squid-3.1.0.14/src/CommCalls.h 2009-09-27 15:28:28.000000000 +1200
@@ -172,7 +172,7 @@
// accept (IOACB) dialer
class CommAcceptCbPtrFun: public CallDialer,
- public CommDialerParamsT
+ public CommDialerParamsT
{
public:
typedef CommAcceptCbParams Params;
@@ -188,7 +188,7 @@
// connect (CNCB) dialer
class CommConnectCbPtrFun: public CallDialer,
- public CommDialerParamsT
+ public CommDialerParamsT
{
public:
typedef CommConnectCbParams Params;
@@ -205,7 +205,7 @@
// read/write (IOCB) dialer
class CommIoCbPtrFun: public CallDialer,
- public CommDialerParamsT
+ public CommDialerParamsT
{
public:
typedef CommIoCbParams Params;
@@ -222,7 +222,7 @@
// close (PF) dialer
class CommCloseCbPtrFun: public CallDialer,
- public CommDialerParamsT
+ public CommDialerParamsT
{
public:
typedef CommCloseCbParams Params;
@@ -237,7 +237,7 @@
};
class CommTimeoutCbPtrFun:public CallDialer,
- public CommDialerParamsT
+ public CommDialerParamsT
{
public:
typedef CommTimeoutCbParams Params;
diff -u -r -N squid-3.1.0.13/src/comm.cc squid-3.1.0.14/src/comm.cc
--- squid-3.1.0.13/src/comm.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/comm.cc 2009-09-27 15:28:33.000000000 +1200
@@ -590,8 +590,10 @@
{
statCounter.syscalls.sock.binds++;
- if (bind(s, inaddr.ai_addr, inaddr.ai_addrlen) == 0)
+ if (bind(s, inaddr.ai_addr, inaddr.ai_addrlen) == 0) {
+ debugs(50, 6, "commBind: bind socket FD " << s << " to " << fd_table[s].local_addr);
return COMM_OK;
+ }
debugs(50, 0, "commBind: Cannot bind socket FD " << s << " to " << fd_table[s].local_addr << ": " << xstrerror());
@@ -621,6 +623,9 @@
{
int sock = -1;
+ /* all listener sockets require bind() */
+ flags |= COMM_DOBIND;
+
/* attempt native enabled port. */
sock = comm_openex(sock_type, proto, addr, flags, 0, note);
@@ -783,10 +788,8 @@
if (addr.GetPort() > (u_short) 0) {
#ifdef _SQUID_MSWIN_
-
if (sock_type != SOCK_DGRAM)
#endif
-
commSetNoLinger(new_socket);
if (opt_reuseaddr)
@@ -798,7 +801,12 @@
comm_set_transparent(new_socket);
}
- if (!addr.IsNoAddr()) {
+ if ( (flags & COMM_DOBIND) || addr.GetPort() > 0 || !addr.IsAnyAddr() ) {
+ if ( !(flags & COMM_DOBIND) && addr.IsAnyAddr() )
+ debugs(5,1,"WARNING: Squid is attempting to bind() port " << addr << " without being a listener.");
+ if ( addr.IsNoAddr() )
+ debugs(5,0,"CRITICAL: Squid is attempting to bind() port " << addr << "!!");
+
if (commBind(new_socket, *AI) != COMM_OK) {
comm_close(new_socket);
addr.FreeAddrInfo(AI);
@@ -1851,7 +1859,8 @@
}
void
-commSetCloseOnExec(int fd) {
+commSetCloseOnExec(int fd)
+{
#ifdef FD_CLOEXEC
int flags;
int dummy = 0;
@@ -1871,7 +1880,8 @@
#ifdef TCP_NODELAY
static void
-commSetTcpNoDelay(int fd) {
+commSetTcpNoDelay(int fd)
+{
int on = 1;
if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char *) &on, sizeof(on)) < 0)
@@ -1883,7 +1893,8 @@
#endif
void
-commSetTcpKeepalive(int fd, int idle, int interval, int timeout) {
+commSetTcpKeepalive(int fd, int idle, int interval, int timeout)
+{
int on = 1;
#ifdef TCP_KEEPCNT
if (timeout && interval) {
@@ -1909,7 +1920,8 @@
}
void
-comm_init(void) {
+comm_init(void)
+{
fd_table =(fde *) xcalloc(Squid_MaxFD, sizeof(fde));
fdd_table = (fd_debug_t *)xcalloc(Squid_MaxFD, sizeof(fd_debug_t));
@@ -1939,7 +1951,8 @@
}
void
-comm_exit(void) {
+comm_exit(void)
+{
delete TheHalfClosed;
TheHalfClosed = NULL;
@@ -1954,7 +1967,8 @@
/* Write to FD. */
static void
-commHandleWrite(int fd, void *data) {
+commHandleWrite(int fd, void *data)
+{
comm_io_callback_t *state = (comm_io_callback_t *)data;
int len = 0;
int nleft;
@@ -2022,7 +2036,8 @@
* free_func is used to free the passed buffer when the write has completed.
*/
void
-comm_write(int fd, const char *buf, int size, IOCB * handler, void *handler_data, FREE * free_func) {
+comm_write(int fd, const char *buf, int size, IOCB * handler, void *handler_data, FREE * free_func)
+{
AsyncCall::Pointer call = commCbCall(5,5, "SomeCommWriteHander",
CommIoCbPtrFun(handler, handler_data));
@@ -2030,7 +2045,8 @@
}
void
-comm_write(int fd, const char *buf, int size, AsyncCall::Pointer &callback, FREE * free_func) {
+comm_write(int fd, const char *buf, int size, AsyncCall::Pointer &callback, FREE * free_func)
+{
debugs(5, 5, "comm_write: FD " << fd << ": sz " << size << ": asynCall " << callback);
/* Make sure we are open, not closing, and not writing */
@@ -2048,12 +2064,14 @@
/* a wrapper around comm_write to allow for MemBuf to be comm_written in a snap */
void
-comm_write_mbuf(int fd, MemBuf *mb, IOCB * handler, void *handler_data) {
+comm_write_mbuf(int fd, MemBuf *mb, IOCB * handler, void *handler_data)
+{
comm_write(fd, mb->buf, mb->size, handler, handler_data, mb->freeFunc());
}
void
-comm_write_mbuf(int fd, MemBuf *mb, AsyncCall::Pointer &callback) {
+comm_write_mbuf(int fd, MemBuf *mb, AsyncCall::Pointer &callback)
+{
comm_write(fd, mb->buf, mb->size, callback, mb->freeFunc());
}
@@ -2063,7 +2081,8 @@
* like to use it.
*/
int
-ignoreErrno(int ierrno) {
+ignoreErrno(int ierrno)
+{
switch (ierrno) {
case EINPROGRESS:
@@ -2092,7 +2111,8 @@
}
void
-commCloseAllSockets(void) {
+commCloseAllSockets(void)
+{
int fd;
fde *F = NULL;
@@ -2121,7 +2141,8 @@
}
static bool
-AlreadyTimedOut(fde *F) {
+AlreadyTimedOut(fde *F)
+{
if (!F->flags.open)
return true;
@@ -2135,7 +2156,8 @@
}
void
-checkTimeouts(void) {
+checkTimeouts(void)
+{
int fd;
fde *F = NULL;
AsyncCall::Pointer callback;
@@ -2168,7 +2190,8 @@
* accept()ed.
*/
int
-comm_listen(int sock) {
+comm_listen(int sock)
+{
int x;
if ((x = listen(sock, Squid_MaxFD >> 2)) < 0) {
@@ -2201,7 +2224,8 @@
}
void
-comm_accept(int fd, IOACB *handler, void *handler_data) {
+comm_accept(int fd, IOACB *handler, void *handler_data)
+{
debugs(5, 5, "comm_accept: FD " << fd << " handler: " << (void*)handler);
assert(isOpen(fd));
@@ -2211,7 +2235,8 @@
}
void
-comm_accept(int fd, AsyncCall::Pointer &call) {
+comm_accept(int fd, AsyncCall::Pointer &call)
+{
debugs(5, 5, "comm_accept: FD " << fd << " AsyncCall: " << call);
assert(isOpen(fd));
@@ -2221,7 +2246,8 @@
// Called when somebody wants to be notified when our socket accepts new
// connection. We do not probe the FD until there is such interest.
void
-AcceptFD::subscribe(AsyncCall::Pointer &call) {
+AcceptFD::subscribe(AsyncCall::Pointer &call)
+{
/* make sure we're not pending! */
assert(!theCallback);
theCallback = call;
@@ -2237,7 +2263,8 @@
}
bool
-AcceptFD::acceptOne() {
+AcceptFD::acceptOne()
+{
// If there is no callback and we accept, we will leak the accepted FD.
// When we are running out of FDs, there is often no callback.
if (!theCallback) {
@@ -2286,12 +2313,14 @@
}
void
-AcceptFD::acceptNext() {
+AcceptFD::acceptNext()
+{
mayAcceptMore = acceptOne();
}
void
-AcceptFD::notify(int newfd, comm_err_t errcode, int xerrno, const ConnectionDetail &connDetails) {
+AcceptFD::notify(int newfd, comm_err_t errcode, int xerrno, const ConnectionDetail &connDetails)
+{
if (theCallback != NULL) {
typedef CommAcceptCbParams Params;
        Params &params = GetCommParams<Params>(theCallback);
@@ -2310,12 +2339,14 @@
* to dupe itself and fob off an accept()ed connection
*/
static void
-comm_accept_try(int fd, void *) {
+comm_accept_try(int fd, void *)
+{
assert(isOpen(fd));
fdc_table[fd].acceptNext();
}
-void CommIO::Initialise() {
+void CommIO::Initialise()
+{
/* Initialize done pipe signal */
int DonePipe[2];
if (pipe(DonePipe)) {}
@@ -2329,7 +2360,8 @@
Initialised = true;
}
-void CommIO::NotifyIOClose() {
+void CommIO::NotifyIOClose()
+{
/* Close done pipe signal */
FlushPipe();
close(DoneFD);
@@ -2345,19 +2377,22 @@
int CommIO::DoneReadFD = -1;
void
-CommIO::FlushPipe() {
+CommIO::FlushPipe()
+{
char buf[256];
FD_READ_METHOD(DoneReadFD, buf, sizeof(buf));
}
void
-CommIO::NULLFDHandler(int fd, void *data) {
+CommIO::NULLFDHandler(int fd, void *data)
+{
FlushPipe();
commSetSelect(fd, COMM_SELECT_READ, NULLFDHandler, NULL, 0);
}
void
-CommIO::ResetNotifications() {
+CommIO::ResetNotifications()
+{
if (DoneSignalled) {
FlushPipe();
DoneSignalled = false;
@@ -2366,17 +2401,20 @@
AcceptLimiter AcceptLimiter::Instance_;
-AcceptLimiter &AcceptLimiter::Instance() {
+AcceptLimiter &AcceptLimiter::Instance()
+{
return Instance_;
}
bool
-AcceptLimiter::deferring() const {
+AcceptLimiter::deferring() const
+{
return deferred.size() > 0;
}
void
-AcceptLimiter::defer (int fd, Acceptor::AcceptorFunction *aFunc, void *data) {
+AcceptLimiter::defer (int fd, Acceptor::AcceptorFunction *aFunc, void *data)
+{
debugs(5, 5, "AcceptLimiter::defer: FD " << fd << " handler: " << (void*)aFunc);
Acceptor temp;
temp.theFunction = aFunc;
@@ -2386,7 +2424,8 @@
}
void
-AcceptLimiter::kick() {
+AcceptLimiter::kick()
+{
if (!deferring())
return;
@@ -2402,7 +2441,8 @@
// by scheduling a read callback to a monitoring handler that
// will close the connection on read errors.
void
-commStartHalfClosedMonitor(int fd) {
+commStartHalfClosedMonitor(int fd)
+{
debugs(5, 5, HERE << "adding FD " << fd << " to " << *TheHalfClosed);
assert(isOpen(fd));
assert(!commHasHalfClosedMonitor(fd));
@@ -2412,7 +2452,8 @@
static
void
-commPlanHalfClosedCheck() {
+commPlanHalfClosedCheck()
+{
if (!WillCheckHalfClosed && !TheHalfClosed->empty()) {
eventAdd("commHalfClosedCheck", &commHalfClosedCheck, NULL, 1.0, 1);
WillCheckHalfClosed = true;
@@ -2423,7 +2464,8 @@
/// calls comm_read for those that do; re-schedules the check if needed
static
void
-commHalfClosedCheck(void *) {
+commHalfClosedCheck(void *)
+{
debugs(5, 5, HERE << "checking " << *TheHalfClosed);
typedef DescriptorSet::const_iterator DSCI;
@@ -2445,13 +2487,15 @@
/// checks whether we are waiting for possibly half-closed connection to close
// We are monitoring if the read handler for the fd is the monitoring handler.
bool
-commHasHalfClosedMonitor(int fd) {
+commHasHalfClosedMonitor(int fd)
+{
return TheHalfClosed->has(fd);
}
/// stop waiting for possibly half-closed connection to close
static void
-commStopHalfClosedMonitor(int const fd) {
+commStopHalfClosedMonitor(int const fd)
+{
debugs(5, 5, HERE << "removing FD " << fd << " from " << *TheHalfClosed);
// cancel the read if one was scheduled
@@ -2465,7 +2509,8 @@
/// I/O handler for the possibly half-closed connection monitoring code
static void
-commHalfClosedReader(int fd, char *, size_t size, comm_err_t flag, int, void *) {
+commHalfClosedReader(int fd, char *, size_t size, comm_err_t flag, int, void *)
+{
// there cannot be more data coming in on half-closed connections
assert(size == 0);
assert(commHasHalfClosedMonitor(fd)); // or we would have canceled the read
@@ -2497,7 +2542,8 @@
DeferredRead::DeferredRead (DeferrableRead *aReader, void *data, CommRead const &aRead) : theReader(aReader), theContext (data), theRead(aRead), cancelled(false) {}
-DeferredReadManager::~DeferredReadManager() {
+DeferredReadManager::~DeferredReadManager()
+{
flushReads();
assert (deferredReads.empty());
}
@@ -2509,7 +2555,8 @@
/// \endcond
void
-DeferredReadManager::delayRead(DeferredRead const &aRead) {
+DeferredReadManager::delayRead(DeferredRead const &aRead)
+{
debugs(5, 3, "Adding deferred read on FD " << aRead.theRead.fd);
CbDataList *temp = deferredReads.push_back(aRead);
@@ -2524,7 +2571,8 @@
}
void
-DeferredReadManager::CloseHandler(int fd, void *thecbdata) {
+DeferredReadManager::CloseHandler(int fd, void *thecbdata)
+{
if (!cbdataReferenceValid (thecbdata))
return;
@@ -2535,7 +2583,8 @@
}
DeferredRead
-DeferredReadManager::popHead(CbDataListContainer &deferredReads) {
+DeferredReadManager::popHead(CbDataListContainer &deferredReads)
+{
assert (!deferredReads.empty());
DeferredRead &read = deferredReads.head->element;
@@ -2550,7 +2599,8 @@
}
void
-DeferredReadManager::kickReads(int const count) {
+DeferredReadManager::kickReads(int const count)
+{
/* if we had CbDataList::size() we could consolidate this and flushReads */
if (count < 1) {
@@ -2570,7 +2620,8 @@
}
void
-DeferredReadManager::flushReads() {
+DeferredReadManager::flushReads()
+{
CbDataListContainer reads;
reads = deferredReads;
deferredReads = CbDataListContainer();
@@ -2583,7 +2634,8 @@
}
void
-DeferredReadManager::kickARead(DeferredRead const &aRead) {
+DeferredReadManager::kickARead(DeferredRead const &aRead)
+{
if (aRead.cancelled)
return;
@@ -2596,15 +2648,18 @@
}
void
-DeferredRead::markCancelled() {
+DeferredRead::markCancelled()
+{
cancelled = true;
}
-ConnectionDetail::ConnectionDetail() : me(), peer() {
+ConnectionDetail::ConnectionDetail() : me(), peer()
+{
}
int
-CommSelectEngine::checkEvents(int timeout) {
+CommSelectEngine::checkEvents(int timeout)
+{
static time_t last_timeout = 0;
/* No, this shouldn't be here. But it shouldn't be in each comm handler. -adrian */
diff -u -r -N squid-3.1.0.13/src/comm.h squid-3.1.0.14/src/comm.h
--- squid-3.1.0.13/src/comm.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/comm.h 2009-09-27 15:28:33.000000000 +1200
@@ -25,6 +25,7 @@
#if USE_IPV6
COMM_ERR_PROTOCOL = -11, /* IPv4 or IPv6 cannot be used on the fd socket */
#endif
+ COMM_ERR__END__ = -999999 /* Dummy entry to make syntax valid (comma on line above), do not use. New entries added above */
} comm_err_t;
class DnsLookupDetails;
diff -u -r -N squid-3.1.0.13/src/ConfigParser.cc squid-3.1.0.14/src/ConfigParser.cc
--- squid-3.1.0.13/src/ConfigParser.cc 2009-08-05 01:32:12.000000000 +1200
+++ squid-3.1.0.14/src/ConfigParser.cc 2009-09-27 15:28:28.000000000 +1200
@@ -51,7 +51,7 @@
static FILE *wordFile = NULL;
char *t, *fn;
- LOCAL_ARRAY(char, buf, 256);
+ LOCAL_ARRAY(char, buf, CONFIG_LINE_LIMIT);
do {
@@ -86,7 +86,7 @@
}
/* fromFile */
- if (fgets(buf, 256, wordFile) == NULL) {
+ if (fgets(buf, CONFIG_LINE_LIMIT, wordFile) == NULL) {
/* stop reading from file */
fclose(wordFile);
wordFile = NULL;
diff -u -r -N squid-3.1.0.13/src/ConfigParser.h squid-3.1.0.14/src/ConfigParser.h
--- squid-3.1.0.13/src/ConfigParser.h 2009-08-05 01:32:12.000000000 +1200
+++ squid-3.1.0.14/src/ConfigParser.h 2009-09-27 15:28:28.000000000 +1200
@@ -38,7 +38,17 @@
#include "squid.h"
-/*
+/**
+ * Limit to how long any given config line may be.
+ * This affects squid.conf and all included files.
+ *
+ * Behaviour when setting larger than 2KB is unknown.
+ * The config parser read mechanism can cope, but the other systems
+ * receiving the data from its buffers on such lines may not.
+ */
+#define CONFIG_LINE_LIMIT 2048
+
+/**
* A configuration file Parser. Instances of this class track
* parsing state and perform tokenisation. Syntax is currently
* taken care of outside this class.
@@ -48,7 +58,6 @@
* in all of squid by reference. Instead the tokeniser only is
* brought in.
*/
-
class ConfigParser
{
diff -u -r -N squid-3.1.0.13/src/defines.h squid-3.1.0.14/src/defines.h
--- squid-3.1.0.13/src/defines.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/defines.h 2009-09-27 15:28:33.000000000 +1200
@@ -66,6 +66,7 @@
#define COMM_NOCLOEXEC 0x02
#define COMM_REUSEADDR 0x04
#define COMM_TRANSPARENT 0x08
+#define COMM_DOBIND 0x10
#include "Debug.h"
#define do_debug(SECTION, LEVEL) ((Debug::level = (LEVEL)) > Debug::Levels[SECTION])
diff -u -r -N squid-3.1.0.13/src/DiskIO/AIO/AIODiskFile.h squid-3.1.0.14/src/DiskIO/AIO/AIODiskFile.h
--- squid-3.1.0.13/src/DiskIO/AIO/AIODiskFile.h 2009-08-05 01:32:12.000000000 +1200
+++ squid-3.1.0.14/src/DiskIO/AIO/AIODiskFile.h 2009-09-27 15:28:28.000000000 +1200
@@ -33,6 +33,10 @@
#ifndef SQUID_AIODISKFILE_H
#define SQUID_AIODISKFILE_H
+#include "config.h"
+
+#if USE_DISKIO_AIO
+
#include "DiskIO/DiskFile.h"
#include "async_io.h"
#include "cbdata.h"
@@ -79,4 +83,5 @@
bool error_;
};
+#endif /* USE_DISKIO_AIO */
#endif /* SQUID_AIODISKFILE_H */
diff -u -r -N squid-3.1.0.13/src/DiskIO/AIO/AIODiskIOModule.h squid-3.1.0.14/src/DiskIO/AIO/AIODiskIOModule.h
--- squid-3.1.0.13/src/DiskIO/AIO/AIODiskIOModule.h 2009-08-05 01:32:12.000000000 +1200
+++ squid-3.1.0.14/src/DiskIO/AIO/AIODiskIOModule.h 2009-09-27 15:28:28.000000000 +1200
@@ -1,4 +1,3 @@
-
/*
* $Id$
*
@@ -34,6 +33,10 @@
#ifndef SQUID_AIODISKIOMODULE_H
#define SQUID_AIODISKIOMODULE_H
+#include "config.h"
+
+#if USE_DISKIO_AIO
+
#include "DiskIO/DiskIOModule.h"
class AIODiskIOModule : public DiskIOModule
@@ -51,4 +54,5 @@
static AIODiskIOModule Instance;
};
+#endif /* USE_DISKIO_AIO */
#endif /* SQUID_AIODISKIOMODULE_H */
diff -u -r -N squid-3.1.0.13/src/DiskIO/AIO/AIODiskIOStrategy.h squid-3.1.0.14/src/DiskIO/AIO/AIODiskIOStrategy.h
--- squid-3.1.0.13/src/DiskIO/AIO/AIODiskIOStrategy.h 2009-08-05 01:32:12.000000000 +1200
+++ squid-3.1.0.14/src/DiskIO/AIO/AIODiskIOStrategy.h 2009-09-27 15:28:28.000000000 +1200
@@ -1,4 +1,3 @@
-
/*
* $Id$
*
@@ -34,6 +33,10 @@
#ifndef SQUID_AIODISKIOSTRATEGY_H
#define SQUID_AIODISKIOSTRATEGY_H
+#include "config.h"
+
+#if USE_DISKIO_AIO
+
#include "DiskIO/DiskIOStrategy.h"
#include "async_io.h"
@@ -72,4 +75,5 @@
int findSlot();
};
+#endif /* USE_DISKIO_AIO */
#endif /* SQUID_AIODISKIOSTRATEGY_H */
diff -u -r -N squid-3.1.0.13/src/DiskIO/AIO/aio_win32.h squid-3.1.0.14/src/DiskIO/AIO/aio_win32.h
--- squid-3.1.0.13/src/DiskIO/AIO/aio_win32.h 2009-08-05 01:32:12.000000000 +1200
+++ squid-3.1.0.14/src/DiskIO/AIO/aio_win32.h 2009-09-27 15:28:28.000000000 +1200
@@ -36,6 +36,8 @@
#include "config.h"
+#if USE_DISKIO_AIO
+
#ifdef _SQUID_CYGWIN_
#include "squid_windows.h"
#endif
@@ -107,4 +109,5 @@
void aio_close(int);
#endif /* _SQUID_MSWIN_ */
+#endif /* USE_DISKIO_AIO */
#endif /* __WIN32_AIO_H__ */
diff -u -r -N squid-3.1.0.13/src/DiskIO/AIO/async_io.h squid-3.1.0.14/src/DiskIO/AIO/async_io.h
--- squid-3.1.0.13/src/DiskIO/AIO/async_io.h 2009-08-05 01:32:12.000000000 +1200
+++ squid-3.1.0.14/src/DiskIO/AIO/async_io.h 2009-09-27 15:28:28.000000000 +1200
@@ -3,6 +3,8 @@
#include "config.h"
+#if USE_DISKIO_AIO
+
#ifdef _SQUID_WIN32_
#include "aio_win32.h"
#else
@@ -61,4 +63,5 @@
int aq_numpending; /* Num of pending ops */
};
-#endif
+#endif /* USE_DISKIO_AIO */
+#endif /* __ASYNC_IO_H_ */
diff -u -r -N squid-3.1.0.13/src/dns_internal.cc squid-3.1.0.14/src/dns_internal.cc
--- squid-3.1.0.13/src/dns_internal.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/dns_internal.cc 2009-09-27 15:28:33.000000000 +1200
@@ -346,7 +346,7 @@
if (NULL == t)
continue;
- if (strncmp(t, "ndots:", 6) != 0) {
+ if (strncmp(t, "ndots:", 6) == 0) {
ndots = atoi(t + 6);
if (ndots < 1)
diff -u -r -N squid-3.1.0.13/src/DnsLookupDetails.cc squid-3.1.0.14/src/DnsLookupDetails.cc
--- squid-3.1.0.13/src/DnsLookupDetails.cc 2009-08-05 01:32:12.000000000 +1200
+++ squid-3.1.0.14/src/DnsLookupDetails.cc 2009-09-27 15:28:29.000000000 +1200
@@ -10,7 +10,7 @@
}
DnsLookupDetails::DnsLookupDetails(const String &e, int w):
- error(e), wait(w)
+ error(e), wait(w)
{
}
diff -u -r -N squid-3.1.0.13/src/enums.h squid-3.1.0.14/src/enums.h
--- squid-3.1.0.13/src/enums.h 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/enums.h 2009-09-27 15:28:33.000000000 +1200
@@ -370,18 +370,9 @@
typedef enum {
S_HELPER_UNKNOWN,
S_HELPER_RESERVE,
- S_HELPER_RELEASE,
- S_HELPER_DEFER
+ S_HELPER_RELEASE
} stateful_helper_callback_t;
-/* stateful helper reservation info */
-typedef enum {
- S_HELPER_FREE, /* available for requests */
- S_HELPER_RESERVED, /* in a reserved state - no active request, but state data in the helper shouldn't be disturbed */
- S_HELPER_DEFERRED /* available for requests, and at least one more will come from a previous caller with the server pointer */
-} stateful_helper_reserve_t;
-
-
#if SQUID_SNMP
enum {
SNMP_C_VIEW,
diff -u -r -N squid-3.1.0.13/src/errorpage.cc squid-3.1.0.14/src/errorpage.cc
--- squid-3.1.0.13/src/errorpage.cc 2009-08-05 01:32:16.000000000 +1200
+++ squid-3.1.0.14/src/errorpage.cc 2009-09-27 15:28:33.000000000 +1200
@@ -955,6 +955,9 @@
while ( pos < hdr.size() ) {
+ /* skip any initial whitespace. */
+ while (pos < hdr.size() && xisspace(hdr[pos])) pos++;
+
/*
* Header value format:
* - sequence of whitespace delimited tags
@@ -962,17 +965,43 @@
* - IFF a tag contains only two characters we can wildcard ANY translations matching: '-'? .*
* with preference given to an exact match.
*/
+ bool invalid_byte = false;
while (pos < hdr.size() && hdr[pos] != ';' && hdr[pos] != ',' && !xisspace(hdr[pos]) && dt < (dir+256) ) {
- *dt++ = xtolower(hdr[pos++]);
+ if (!invalid_byte) {
+#if HTTP_VIOLATIONS
+ // if accepting violations we may as well accept some broken browsers
+ // which may send us the right code, wrong ISO formatting.
+ if (hdr[pos] == '_')
+ *dt = '-';
+ else
+#endif
+ *dt = xtolower(hdr[pos]);
+ // valid codes only contain A-Z, hyphen (-) and *
+ if (*dt != '-' && *dt != '*' && (*dt < 'a' || *dt > 'z') )
+ invalid_byte = true;
+ else
+ dt++; // move to next destination byte.
+ }
+ pos++;
}
*dt++ = '\0'; // nul-terminated the filename content string before system use.
- debugs(4, 9, HERE << "STATE: dt='" << dt << "', reset='" << reset << "', reset[1]='" << reset[1] << "', pos=" << pos << ", buf='" << hdr.substr(pos,hdr.size()) << "'");
+ debugs(4, 9, HERE << "STATE: dt='" << dt << "', reset='" << reset << "', pos=" << pos << ", buf='" << ((pos < hdr.size()) ? hdr.substr(pos,hdr.size()) : "") << "'");
/* if we found anything we might use, try it. */
- if (*reset != '\0') {
+ if (*reset != '\0' && !invalid_byte) {
+
+ /* wildcard uses the configured default language */
+ if (reset[0] == '*' && reset[1] == '\0') {
+ debugs(4, 6, HERE << "Found language '" << reset << "'. Using configured default.");
+ m = error_text[page_id];
+ if (!Config.errorDirectory)
+ err_language = Config.errorDefaultLanguage;
+ break;
+ }
debugs(4, 6, HERE << "Found language '" << reset << "', testing for available template in: '" << dir << "'");
+
m = errorTryLoadText( err_type_str[page_id], dir, false);
if (m) {
@@ -994,7 +1023,7 @@
dt = reset; // reset for next tag testing. we replace the failed name instead of cloning.
- // IFF we terminated the tag on ';' we need to skip the 'q=' bit to the next ',' or end.
+ // IFF we terminated the tag on whitespace or ';' we need to skip to the next ',' or end of header.
while (pos < hdr.size() && hdr[pos] != ',') pos++;
if (hdr[pos] == ',') pos++;
}
diff -u -r -N squid-3.1.0.13/src/esi/Makefile.in squid-3.1.0.14/src/esi/Makefile.in
--- squid-3.1.0.13/src/esi/Makefile.in 2009-08-05 01:32:37.000000000 +1200
+++ squid-3.1.0.14/src/esi/Makefile.in 2009-09-27 15:28:56.000000000 +1200
@@ -78,6 +78,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/src/esi/VarState.cc squid-3.1.0.14/src/esi/VarState.cc
--- squid-3.1.0.13/src/esi/VarState.cc 2009-08-05 01:32:17.000000000 +1200
+++ squid-3.1.0.14/src/esi/VarState.cc 2009-09-27 15:28:33.000000000 +1200
@@ -112,9 +112,9 @@
}
struct _query_elem const *
- ESIVariableQuery::queryVector() const {
- return query;
- }
+ESIVariableQuery::queryVector() const {
+ return query;
+}
size_t const &
ESIVariableQuery::queryElements() const
diff -u -r -N squid-3.1.0.13/src/forward.cc squid-3.1.0.14/src/forward.cc
--- squid-3.1.0.13/src/forward.cc 2009-08-05 01:32:17.000000000 +1200
+++ squid-3.1.0.14/src/forward.cc 2009-09-27 15:28:33.000000000 +1200
@@ -774,7 +774,6 @@
IpAddress outgoing;
unsigned short tos;
-
IpAddress client_addr;
assert(fs);
assert(server_fd == -1);
@@ -790,8 +789,11 @@
ctimeout = Config.Timeout.connect;
}
- if (request->flags.spoof_client_ip)
- client_addr = request->client_addr;
+ if (request->flags.spoof_client_ip) {
+ if (!fs->_peer || !fs->_peer->options.no_tproxy)
+ client_addr = request->client_addr;
+ // else no tproxy today ...
+ }
if (ftimeout < 0)
ftimeout = 5;
@@ -868,7 +870,9 @@
int flags = COMM_NONBLOCKING;
if (request->flags.spoof_client_ip) {
- flags |= COMM_TRANSPARENT;
+ if (!fs->_peer || !fs->_peer->options.no_tproxy)
+ flags |= COMM_TRANSPARENT;
+ // else no tproxy today ...
}
fd = comm_openex(SOCK_STREAM, IPPROTO_TCP, outgoing, flags, tos, url);
@@ -1341,8 +1345,11 @@
IpAddress
getOutgoingAddr(HttpRequest * request, struct peer *dst_peer)
{
- if (request && request->flags.spoof_client_ip)
- return request->client_addr;
+ if (request && request->flags.spoof_client_ip) {
+ if (!dst_peer || !dst_peer->options.no_tproxy)
+ return request->client_addr;
+ // else no tproxy today ...
+ }
if (!Config.accessList.outgoing_address) {
return IpAddress(); // anything will do.
diff -u -r -N squid-3.1.0.13/src/forward.h squid-3.1.0.14/src/forward.h
--- squid-3.1.0.13/src/forward.h 2009-08-05 01:32:17.000000000 +1200
+++ squid-3.1.0.14/src/forward.h 2009-09-27 15:28:33.000000000 +1200
@@ -82,7 +82,6 @@
static void abort(void*);
private:
- CBDATA_CLASS2(FwdState);
Pointer self;
ErrorState *err;
int client_fd;
@@ -101,6 +100,9 @@
} flags;
IpAddress src; /* Client address for this connection. Needed for transparent operations. */
+
+ // NP: keep this last. It plays with private/public
+ CBDATA_CLASS2(FwdState);
};
#endif /* SQUID_FORWARD_H */
diff -u -r -N squid-3.1.0.13/src/fqdncache.cc squid-3.1.0.14/src/fqdncache.cc
--- squid-3.1.0.13/src/fqdncache.cc 2009-08-05 01:32:17.000000000 +1200
+++ squid-3.1.0.14/src/fqdncache.cc 2009-09-27 15:28:33.000000000 +1200
@@ -90,7 +90,8 @@
* where structures of type fqdncache_entry whose most
* interesting members are:
*/
-class fqdncache_entry {
+class fqdncache_entry
+{
public:
hash_link hash; /* must be first */
time_t lastref;
diff -u -r -N squid-3.1.0.13/src/fs/Makefile.in squid-3.1.0.14/src/fs/Makefile.in
--- squid-3.1.0.13/src/fs/Makefile.in 2009-08-05 01:32:38.000000000 +1200
+++ squid-3.1.0.14/src/fs/Makefile.in 2009-09-27 15:28:56.000000000 +1200
@@ -93,6 +93,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/src/fs/ufs/store_dir_ufs.cc squid-3.1.0.14/src/fs/ufs/store_dir_ufs.cc
--- squid-3.1.0.13/src/fs/ufs/store_dir_ufs.cc 2009-08-05 01:32:17.000000000 +1200
+++ squid-3.1.0.14/src/fs/ufs/store_dir_ufs.cc 2009-09-27 15:28:33.000000000 +1200
@@ -143,7 +143,7 @@
IO->io = anIO;
/* Change the IO Options */
- if (currentIOOptions->options.size() > 2)
+ if (currentIOOptions && currentIOOptions->options.size() > 2)
delete currentIOOptions->options.pop_back();
/* TODO: factor out these 4 lines */
diff -u -r -N squid-3.1.0.13/src/ftp.cc squid-3.1.0.14/src/ftp.cc
--- squid-3.1.0.13/src/ftp.cc 2009-08-05 01:32:17.000000000 +1200
+++ squid-3.1.0.14/src/ftp.cc 2009-09-27 15:28:33.000000000 +1200
@@ -529,38 +529,56 @@
/**
* Parse a possible login username:password pair.
- * Produces filled member varisbles user, password, password_url if anything found.
+ * Produces filled member variables user, password, password_url if anything found.
*/
void
FtpStateData::loginParser(const char *login, int escaped)
{
- char *s = NULL;
+ const char *u = NULL; // end of the username sub-string
+ int len; // length of the current sub-string to handle.
+
+ int total_len = strlen(login);
+
debugs(9, 4, HERE << ": login='" << login << "', escaped=" << escaped);
debugs(9, 9, HERE << ": IN : login='" << login << "', escaped=" << escaped << ", user=" << user << ", password=" << password);
- if ((s = strchr(login, ':'))) {
- *s = '\0';
+ if ((u = strchr(login, ':'))) {
/* if there was a username part */
- if (s > login) {
- xstrncpy(user, login, MAX_URL);
+ if (u > login) {
+ len = u - login;
+ ++u; // jump off the delimiter.
+ if (len > MAX_URL)
+ len = MAX_URL-1;
+ xstrncpy(user, login, len +1);
+ debugs(9, 9, HERE << ": found user='" << user << "'(" << len <<"), escaped=" << escaped);
if (escaped)
rfc1738_unescape(user);
+ debugs(9, 9, HERE << ": found user='" << user << "'(" << len <<") unescaped.");
}
/* if there was a password part */
- if ( s[1] != '\0' ) {
- xstrncpy(password, s + 1, MAX_URL);
+ len = login + total_len - u;
+ if ( len > 0) {
+ if (len > MAX_URL)
+ len = MAX_URL -1;
+ xstrncpy(password, u, len +1);
+ debugs(9, 9, HERE << ": found password='" << password << "'(" << len <<"), escaped=" << escaped);
if (escaped) {
rfc1738_unescape(password);
password_url = 1;
}
+ debugs(9, 9, HERE << ": found password='" << password << "'(" << len <<") unescaped.");
}
} else if (login[0]) {
/* no password, just username */
- xstrncpy(user, login, MAX_URL);
+ if (total_len > MAX_URL)
+ total_len = MAX_URL -1;
+ xstrncpy(user, login, total_len +1);
+ debugs(9, 9, HERE << ": found user='" << user << "'(" << total_len <<"), escaped=" << escaped);
if (escaped)
rfc1738_unescape(user);
+ debugs(9, 9, HERE << ": found user='" << user << "'(" << total_len <<") unescaped.");
}
debugs(9, 9, HERE << ": OUT: login='" << login << "', escaped=" << escaped << ", user=" << user << ", password=" << password);
@@ -1418,12 +1436,11 @@
if (flags.isdir) {
parseListing();
- } else
- if (const int csize = data.readBuf->contentSize()) {
- writeReplyBody(data.readBuf->content(), csize);
- debugs(9, 5, HERE << "consuming " << csize << " bytes of readBuf");
- data.readBuf->consume(csize);
- }
+ } else if (const int csize = data.readBuf->contentSize()) {
+ writeReplyBody(data.readBuf->content(), csize);
+ debugs(9, 5, HERE << "consuming " << csize << " bytes of readBuf");
+ data.readBuf->consume(csize);
+ }
entry->flush();
@@ -1479,8 +1496,7 @@
xstrncpy(password, Config.Ftp.anon_user, MAX_URL);
flags.tried_auth_anonymous=1;
return 1;
- }
- else if (!flags.tried_auth_nopass) {
+ } else if (!flags.tried_auth_nopass) {
xstrncpy(password, null_string, MAX_URL);
flags.tried_auth_nopass=1;
return 1;
@@ -2447,7 +2463,7 @@
/* server response with list of supported methods */
/* 522 Network protocol not supported, use (1) */
/* 522 Network protocol not supported, use (1,2) */
- /* TODO: handle the (1,2) case. We might get it back after EPSV ALL
+ /* TODO: handle the (1,2) case. We might get it back after EPSV ALL
* which means close data + control without self-destructing and re-open from scratch. */
debugs(9, 5, HERE << "scanning: " << ftpState->ctrl.last_reply);
buf = ftpState->ctrl.last_reply;
@@ -2476,8 +2492,7 @@
ftpState->state = SENT_EPSV_1;
ftpSendPassive(ftpState);
#endif
- }
- else {
+ } else {
/* handle broken server (RFC 2428 says MUST specify supported protocols in 522) */
debugs(9, DBG_IMPORTANT, "WARNING: Server at " << fd_table[ftpState->ctrl.fd].ipaddr << " sent unknown protocol negotiation hint: " << buf);
ftpSendPassive(ftpState);
@@ -2597,72 +2612,90 @@
}
addr = *AI;
-
addr.FreeAddrInfo(AI);
- /** Otherwise, Open data channel with the same local address as control channel (on a new random port!) */
- addr.SetPort(0);
- int fd = comm_open(SOCK_STREAM,
- IPPROTO_TCP,
- addr,
- COMM_NONBLOCKING,
- ftpState->entry->url());
-
- debugs(9, 3, HERE << "Unconnected data socket created on FD " << fd << " to " << addr);
-
- if (fd < 0) {
- ftpFail(ftpState);
- return;
- }
-
- ftpState->data.opened(fd, ftpState->dataCloser());
-
/** \par
* Send EPSV (ALL,2,1) or PASV on the control channel.
*
* - EPSV ALL is used if enabled.
- * - EPSV 2 is used if ALL is disabled and IPv6 is available.
- * - EPSV 1 is used if EPSV 2 (IPv6) fails or is not available.
+ * - EPSV 2 is used if ALL is disabled and IPv6 is available and ctrl channel is IPv6.
+ * - EPSV 1 is used if EPSV 2 (IPv6) fails or is not available or ctrl channel is IPv4.
* - PASV is used if EPSV 1 fails.
*/
switch (ftpState->state) {
- case SENT_EPSV_1: /* EPSV options exhausted. Try PASV now. */
- snprintf(cbuf, 1024, "PASV\r\n");
- ftpState->state = SENT_PASV;
- break;
+    case SENT_EPSV_ALL: /* EPSV ALL resulted in a bad response. Try other EPSV methods. */
+ ftpState->flags.epsv_all_sent = true;
+ if (addr.IsIPv6()) {
+ debugs(9, 5, HERE << "FTP Channel is IPv6 (" << addr << ") attempting EPSV 2 after EPSV ALL has failed.");
+ snprintf(cbuf, 1024, "EPSV 2\r\n");
+ ftpState->state = SENT_EPSV_2;
+ break;
+ }
+ // else fall through to skip EPSV 2
case SENT_EPSV_2: /* EPSV IPv6 failed. Try EPSV IPv4 */
- snprintf(cbuf, 1024, "EPSV 1\r\n");
- ftpState->state = SENT_EPSV_1;
- break;
+ if (addr.IsIPv4()) {
+ debugs(9, 5, HERE << "FTP Channel is IPv4 (" << addr << ") attempting EPSV 1 after EPSV ALL has failed.");
+ snprintf(cbuf, 1024, "EPSV 1\r\n");
+ ftpState->state = SENT_EPSV_1;
+ break;
+ } else if (ftpState->flags.epsv_all_sent) {
+ debugs(9, DBG_IMPORTANT, "FTP does not allow PASV method after 'EPSV ALL' has been sent.");
+ ftpFail(ftpState);
+ return;
+ }
+ // else fall through to skip EPSV 1
- case SENT_EPSV_ALL: /* EPSV ALL resulted in a bad response. Try ther EPSV methods. */
- ftpState->flags.epsv_all_sent = true;
- snprintf(cbuf, 1024, "EPSV 2\r\n");
- ftpState->state = SENT_EPSV_2;
+ case SENT_EPSV_1: /* EPSV options exhausted. Try PASV now. */
+ debugs(9, 5, HERE << "FTP Channel (" << addr << ") rejects EPSV connection attempts. Trying PASV instead.");
+ snprintf(cbuf, 1024, "PASV\r\n");
+ ftpState->state = SENT_PASV;
break;
default:
if (!Config.Ftp.epsv) {
+ debugs(9, 5, HERE << "EPSV support manually disabled. Sending PASV for FTP Channel (" << addr <<")");
snprintf(cbuf, 1024, "PASV\r\n");
ftpState->state = SENT_PASV;
} else if (Config.Ftp.epsv_all) {
+ debugs(9, 5, HERE << "EPSV ALL manually enabled. Attempting with FTP Channel (" << addr <<")");
snprintf(cbuf, 1024, "EPSV ALL\r\n");
ftpState->state = SENT_EPSV_ALL;
/* block other non-EPSV connections being attempted */
ftpState->flags.epsv_all_sent = true;
} else {
#if USE_IPV6
- snprintf(cbuf, 1024, "EPSV 2\r\n");
- ftpState->state = SENT_EPSV_2;
-#else
- snprintf(cbuf, 1024, "EPSV 1\r\n");
- ftpState->state = SENT_EPSV_1;
+ if (addr.IsIPv6()) {
+ debugs(9, 5, HERE << "FTP Channel (" << addr << "). Sending default EPSV 2");
+ snprintf(cbuf, 1024, "EPSV 2\r\n");
+ ftpState->state = SENT_EPSV_2;
+ }
#endif
+ if (addr.IsIPv4()) {
+ debugs(9, 5, HERE << "Channel (" << addr <<"). Sending default EPSV 1");
+ snprintf(cbuf, 1024, "EPSV 1\r\n");
+ ftpState->state = SENT_EPSV_1;
+ }
}
break;
}
+ /** Otherwise, Open data channel with the same local address as control channel (on a new random port!) */
+ addr.SetPort(0);
+ int fd = comm_open(SOCK_STREAM,
+ IPPROTO_TCP,
+ addr,
+ COMM_NONBLOCKING,
+ ftpState->entry->url());
+
+ debugs(9, 3, HERE << "Unconnected data socket created on FD " << fd << " from " << addr);
+
+ if (fd < 0) {
+ ftpFail(ftpState);
+ return;
+ }
+
+ ftpState->data.opened(fd, ftpState->dataCloser());
ftpState->writeCommand(cbuf);
/*
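
The reshuffled ftpSendPassive() above now picks the next passive-mode command from the control channel's address family and only opens the data socket afterwards. A minimal standalone sketch of just that fallback chain follows: EPSV ALL, then EPSV 2 on an IPv6 control channel, then EPSV 1 on an IPv4 one, then PASV, with PASV refused once EPSV ALL has been sent. The state names mirror the patch, but nextPassiveCommand(), its parameters, and the simplified default case (which omits the ftp_epsv / ftp_epsv_all configuration branches) are hypothetical stand-ins rather than Squid code.

#include <cstdio>
#include <string>

// Illustrative stand-ins for the states used by ftpSendPassive().
enum PassiveState { SENT_NONE, SENT_EPSV_ALL, SENT_EPSV_2, SENT_EPSV_1, SENT_PASV, PASV_FAILED };

// Pick the next passive-mode command after a refusal, mirroring the fallback
// order of the patched switch: EPSV ALL -> EPSV 2 (IPv6 control channel only)
// -> EPSV 1 (IPv4 control channel only) -> PASV, never PASV after "EPSV ALL".
static std::string nextPassiveCommand(PassiveState &state, bool ctrlIsIPv6, bool &epsvAllSent)
{
    switch (state) {
    case SENT_EPSV_ALL:            // EPSV ALL was rejected
        epsvAllSent = true;
        if (ctrlIsIPv6) {
            state = SENT_EPSV_2;
            return "EPSV 2\r\n";
        }
        // fall through: skip EPSV 2 on an IPv4 control channel
    case SENT_EPSV_2:              // EPSV 2 (IPv6) failed
        if (!ctrlIsIPv6) {
            state = SENT_EPSV_1;
            return "EPSV 1\r\n";
        } else if (epsvAllSent) {
            state = PASV_FAILED;   // PASV is forbidden after EPSV ALL
            return "";
        }
        // fall through: skip EPSV 1 on an IPv6 control channel
    case SENT_EPSV_1:              // EPSV options exhausted
        state = SENT_PASV;
        return "PASV\r\n";
    default:                       // first attempt on this control channel
        state = ctrlIsIPv6 ? SENT_EPSV_2 : SENT_EPSV_1;
        return ctrlIsIPv6 ? "EPSV 2\r\n" : "EPSV 1\r\n";
    }
}

int main()
{
    PassiveState state = SENT_NONE;
    bool epsvAllSent = false;
    // An IPv4 control channel walks EPSV 1, then PASV.
    while (state != SENT_PASV && state != PASV_FAILED)
        printf("%s", nextPassiveCommand(state, false, epsvAllSent).c_str());
}
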
diff -u -r -N squid-3.1.0.13/src/globals.h squid-3.1.0.14/src/globals.h
--- squid-3.1.0.13/src/globals.h 2009-08-05 01:32:17.000000000 +1200
+++ squid-3.1.0.14/src/globals.h 2009-09-27 15:28:33.000000000 +1200
@@ -46,8 +46,7 @@
#ifdef __cplusplus
-extern "C"
-{
+extern "C" {
#endif
//MOVED:Debug.h extern FILE *debug_log; /* NULL */
diff -u -r -N squid-3.1.0.13/src/gopher.cc squid-3.1.0.14/src/gopher.cc
--- squid-3.1.0.13/src/gopher.cc 2009-08-05 01:32:17.000000000 +1200
+++ squid-3.1.0.14/src/gopher.cc 2009-09-27 15:28:33.000000000 +1200
@@ -37,6 +37,7 @@
#include "errorpage.h"
#include "Store.h"
#include "HttpRequest.h"
+#include "HttpReply.h"
#include "comm.h"
#if DELAY_POOLS
#include "DelayPools.h"
@@ -138,7 +139,6 @@
} GopherStateData;
static PF gopherStateFree;
-static void gopher_mime_content(MemBuf * mb, const char *name, const char *def);
static void gopherMimeCreate(GopherStateData *);
static void gopher_request_parse(const HttpRequest * req,
char *type_id,
@@ -178,26 +178,6 @@
cbdataFree(gopherState);
}
-
-/**
- \ingroup ServerProtocolGopherInternal
- * Figure out content type from file extension
- */
-static void
-gopher_mime_content(MemBuf * mb, const char *name, const char *def_ctype)
-{
- char *ctype = mimeGetContentType(name);
- char *cenc = mimeGetContentEncoding(name);
-
- if (cenc)
- mb->Printf("Content-Encoding: %s\r\n", cenc);
-
- mb->Printf("Content-Type: %s\r\n",
- ctype ? ctype : def_ctype);
-}
-
-
-
/**
\ingroup ServerProtocolGopherInternal
* Create MIME Header for Gopher Data
@@ -205,14 +185,9 @@
static void
gopherMimeCreate(GopherStateData * gopherState)
{
- MemBuf mb;
-
- mb.init();
-
- mb.Printf("HTTP/1.0 200 OK Gatewaying\r\n"
- "Server: Squid/%s\r\n"
- "Date: %s\r\n",
- version_string, mkrfc1123(squid_curtime));
+ StoreEntry *entry = gopherState->entry;
+ const char *mime_type = NULL;
+ const char *mime_enc = NULL;
switch (gopherState->type_id) {
@@ -225,7 +200,7 @@
case GOPHER_WWW:
case GOPHER_CSO:
- mb.Printf("Content-Type: text/html\r\n");
+ mime_type = "text/html";
break;
case GOPHER_GIF:
@@ -233,17 +208,17 @@
case GOPHER_IMAGE:
case GOPHER_PLUS_IMAGE:
- mb.Printf("Content-Type: image/gif\r\n");
+ mime_type = "image/gif";
break;
case GOPHER_SOUND:
case GOPHER_PLUS_SOUND:
- mb.Printf("Content-Type: audio/basic\r\n");
+ mime_type = "audio/basic";
break;
case GOPHER_PLUS_MOVIE:
- mb.Printf("Content-Type: video/mpeg\r\n");
+ mime_type = "video/mpeg";
break;
case GOPHER_MACBINHEX:
@@ -254,20 +229,33 @@
case GOPHER_BIN:
/* Rightnow We have no idea what it is. */
- gopher_mime_content(&mb, gopherState->request, def_gopher_bin);
+ mime_enc = mimeGetContentEncoding(gopherState->request);
+ mime_type = mimeGetContentType(gopherState->request);
+ if (!mime_type)
+ mime_type = def_gopher_bin;
break;
case GOPHER_FILE:
default:
- gopher_mime_content(&mb, gopherState->request, def_gopher_text);
+ mime_enc = mimeGetContentEncoding(gopherState->request);
+ mime_type = mimeGetContentType(gopherState->request);
+ if (!mime_type)
+ mime_type = def_gopher_text;
break;
}
- mb.Printf("\r\n");
- EBIT_CLR(gopherState->entry->flags, ENTRY_FWD_HDR_WAIT);
- gopherState->entry->append(mb.buf, mb.size);
- mb.clean();
+ assert(entry->isEmpty());
+ EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
+
+ HttpReply *reply = new HttpReply;
+ entry->buffer();
+ HttpVersion version(1, 0);
+ reply->setHeaders(version, HTTP_OK, "Gatewaying", mime_type, -1, -1, -2);
+ if (mime_enc)
+ reply->header.putStr(HDR_CONTENT_ENCODING, mime_enc);
+
+ entry->replaceHttpReply(reply);
}
/**
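
gopherMimeCreate() no longer prints a raw HTTP/1.0 header into the store entry; it derives a MIME type (and optional content encoding) per gopher item type and hands both to a real HttpReply through setHeaders() and replaceHttpReply(). HttpReply and StoreEntry are Squid internals, so the runnable sketch below only covers the type-to-content-type mapping; the GopherType enum values, gopherContentType(), and its parameters are hypothetical stand-ins for the case labels in the hunk.

#include <cstdio>

// Hypothetical stand-ins for the gopher item-type constants used by the
// switch in gopherMimeCreate().
enum GopherType { GOPHER_DIRECTORY, GOPHER_WWW, GOPHER_CSO, GOPHER_GIF,
                  GOPHER_IMAGE, GOPHER_SOUND, GOPHER_PLUS_MOVIE,
                  GOPHER_BIN, GOPHER_FILE };

// Map a gopher item type to the Content-Type the reply should carry, falling
// back to a caller-supplied default when a lookup (stubbed out here) cannot
// classify the request.
static const char *
gopherContentType(GopherType type, const char *lookedUp, const char *defBin, const char *defText)
{
    switch (type) {
    case GOPHER_DIRECTORY:
    case GOPHER_WWW:
    case GOPHER_CSO:
        return "text/html";
    case GOPHER_GIF:
    case GOPHER_IMAGE:
        return "image/gif";
    case GOPHER_SOUND:
        return "audio/basic";
    case GOPHER_PLUS_MOVIE:
        return "video/mpeg";
    case GOPHER_BIN:
        return lookedUp ? lookedUp : defBin;    // unknown binary blob
    case GOPHER_FILE:
    default:
        return lookedUp ? lookedUp : defText;   // plain file
    }
}

int main()
{
    printf("%s\n", gopherContentType(GOPHER_CSO, 0, "application/octet-stream", "text/plain"));
    printf("%s\n", gopherContentType(GOPHER_BIN, 0, "application/octet-stream", "text/plain"));
}
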
diff -u -r -N squid-3.1.0.13/src/helper.cc squid-3.1.0.14/src/helper.cc
--- squid-3.1.0.13/src/helper.cc 2009-08-05 01:32:17.000000000 +1200
+++ squid-3.1.0.14/src/helper.cc 2009-09-27 15:28:33.000000000 +1200
@@ -60,12 +60,10 @@
static void helperStatefulDispatch(helper_stateful_server * srv, helper_stateful_request * r);
static void helperKickQueue(helper * hlp);
static void helperStatefulKickQueue(statefulhelper * hlp);
+static void helperStatefulServerDone(helper_stateful_server * srv);
static void helperRequestFree(helper_request * r);
static void helperStatefulRequestFree(helper_stateful_request * r);
static void StatefulEnqueue(statefulhelper * hlp, helper_stateful_request * r);
-static helper_stateful_request *StatefulServerDequeue(helper_stateful_server * srv);
-static void StatefulServerEnqueue(helper_stateful_server * srv, helper_stateful_request * r);
-static void helperStatefulServerKickQueue(helper_stateful_server * srv);
static bool helperStartStats(StoreEntry *sentry, void *hlp, const char *label);
@@ -184,7 +182,7 @@
helperKickQueue(hlp);
}
-/*
+/**
* DPW 2007-05-08
*
* helperStatefulOpenServers: create the stateful child helper processes
@@ -256,10 +254,7 @@
helper_stateful_server *srv = cbdataAlloc(helper_stateful_server);
srv->hIpc = hIpc;
srv->pid = pid;
- srv->flags.reserved = S_HELPER_FREE;
- srv->deferred_requests = 0;
- srv->stats.deferbyfunc = 0;
- srv->stats.deferbycb = 0;
+ srv->flags.reserved = 0;
srv->stats.submits = 0;
srv->stats.releases = 0;
srv->index = k;
@@ -326,9 +321,7 @@
debugs(84, 9, "helperSubmit: " << buf);
}
-/* lastserver = "server last used as part of a deferred or reserved
- * request sequence"
- */
+/// lastserver = "server last used as part of a reserved request sequence"
void
helperStatefulSubmit(statefulhelper * hlp, const char *buf, HLPSCB * callback, void *data, helper_stateful_server * lastserver)
{
@@ -353,28 +346,11 @@
if ((buf != NULL) && lastserver) {
debugs(84, 5, "StatefulSubmit with lastserver " << lastserver);
- /* the queue doesn't count for this assert because queued requests
- * have already gone through here and been tested.
- * It's legal to have deferred_requests == 0 and queue entries
- * and status of S_HELPEER_DEFERRED.
- * BUT: It's not legal to submit a new request w/lastserver in
- * that state.
- */
- assert(!(lastserver->deferred_requests == 0 &&
- lastserver->flags.reserved == S_HELPER_DEFERRED));
-
- if (lastserver->flags.reserved != S_HELPER_RESERVED) {
- lastserver->stats.submits++;
- lastserver->deferred_requests--;
- }
+ assert(lastserver->flags.reserved);
+ assert(!(lastserver->request));
- if (!(lastserver->request)) {
- debugs(84, 5, "StatefulSubmit dispatching");
- helperStatefulDispatch(lastserver, r);
- } else {
- debugs(84, 5, "StatefulSubmit queuing");
- StatefulServerEnqueue(lastserver, r);
- }
+ debugs(84, 5, "StatefulSubmit dispatching");
+ helperStatefulDispatch(lastserver, r);
} else {
helper_stateful_server *srv;
if ((srv = StatefulGetFirstAvailable(hlp))) {
@@ -386,159 +362,38 @@
debugs(84, 9, "helperStatefulSubmit: placeholder: '" << r->placeholder << "', buf '" << buf << "'.");
}
-/*
- * helperStatefulDefer
- *
- * find and add a deferred request to a helper
- */
-helper_stateful_server *
-helperStatefulDefer(statefulhelper * hlp)
-{
- if (hlp == NULL) {
- debugs(84, 3, "helperStatefulDefer: hlp == NULL");
- return NULL;
- }
-
- debugs(84, 5, "helperStatefulDefer: Running servers " << hlp->n_running);
-
- if (hlp->n_running == 0) {
- debugs(84, 1, "helperStatefulDefer: No running servers!. ");
- return NULL;
- }
-
- helper_stateful_server *rv = StatefulGetFirstAvailable(hlp);
-
- if (rv == NULL) {
- /*
- * all currently busy; loop through servers and find server
- * with the shortest queue
- */
-
- for (dlink_node *n = hlp->servers.head; n != NULL; n = n->next) {
- helper_stateful_server *srv = (helper_stateful_server *)n->data;
-
- if (srv->flags.reserved == S_HELPER_RESERVED)
- continue;
-
- if (!srv->flags.shutdown)
- continue;
-
- if ((hlp->IsAvailable != NULL) && (srv->data != NULL) &&
- !(hlp->IsAvailable(srv->data)))
- continue;
-
- if ((rv != NULL) && (rv->deferred_requests < srv->deferred_requests))
- continue;
-
- rv = srv;
- }
- }
-
- if (rv == NULL) {
- debugs(84, 1, "helperStatefulDefer: None available.");
- return NULL;
- }
-
- /* consistency check:
- * when the deferred count is 0,
- * submits + releases == deferbyfunc + deferbycb
- * Or in english, when there are no deferred requests, the amount
- * we have submitted to the queue or cancelled must equal the amount
- * we have said we wanted to be able to submit or cancel
- */
- if (rv->deferred_requests == 0)
- assert(rv->stats.submits + rv->stats.releases ==
- rv->stats.deferbyfunc + rv->stats.deferbycb);
-
- rv->flags.reserved = S_HELPER_DEFERRED;
-
- rv->deferred_requests++;
-
- rv->stats.deferbyfunc++;
-
- return rv;
-}
-
-void
-helperStatefulReset(helper_stateful_server * srv)
-/* puts this helper back in the queue. the calling app is required to
- * manage the state in the helper.
- */
-{
- statefulhelper *hlp = srv->parent;
- helper_stateful_request *r = srv->request;
-
- if (r != NULL) {
- /* reset attempt DURING an outstaning request */
- debugs(84, 1, "helperStatefulReset: RESET During request " << hlp->id_name << " ");
- srv->flags.busy = 0;
- srv->roffset = 0;
- helperStatefulRequestFree(r);
- srv->request = NULL;
- }
-
- srv->flags.busy = 0;
-
- if (srv->queue.head) {
- srv->flags.reserved = S_HELPER_DEFERRED;
- } else {
- srv->flags.reserved = S_HELPER_FREE;
-
- if ((srv->parent->OnEmptyQueue != NULL) && (srv->data))
- srv->parent->OnEmptyQueue(srv->data);
- }
-
- helperStatefulServerKickQueue(srv);
-}
-
-/*
+/**
* DPW 2007-05-08
*
* helperStatefulReleaseServer tells the helper that whoever was
* using it no longer needs its services.
- *
- * If the state is S_HELPER_DEFERRED, decrease the deferred count.
- * If the count goes to zero, then it can become S_HELPER_FREE.
- *
- * If the state is S_HELPER_RESERVED, then it should always
- * become S_HELPER_FREE.
*/
void
helperStatefulReleaseServer(helper_stateful_server * srv)
{
debugs(84, 3, HERE << "srv-" << srv->index << " flags.reserved = " << srv->flags.reserved);
- if (srv->flags.reserved == S_HELPER_FREE)
+ if (!srv->flags.reserved)
return;
srv->stats.releases++;
- if (srv->flags.reserved == S_HELPER_DEFERRED) {
- assert(srv->deferred_requests);
- srv->deferred_requests--;
- if (srv->deferred_requests) {
- debugs(0,0,HERE << "helperStatefulReleaseServer srv->deferred_requests=" << srv->deferred_requests);
- return;
- }
- if (srv->queue.head) {
- debugs(0,0,HERE << "helperStatefulReleaseServer srv->queue.head not NULL");
- return;
- }
- }
-
- srv->flags.reserved = S_HELPER_FREE;
+ srv->flags.reserved = 0;
if (srv->parent->OnEmptyQueue != NULL && srv->data)
srv->parent->OnEmptyQueue(srv->data);
- helperStatefulServerKickQueue(srv);
+ helperStatefulServerDone(srv);
}
+/** return a pointer to the stateful routines data area */
void *
helperStatefulServerGetData(helper_stateful_server * srv)
-/* return a pointer to the stateful routines data area */
{
return srv->data;
}
+/**
+ * Dump some stats about the helper states to a StoreEntry
+ */
void
helperStats(StoreEntry * sentry, helper * hlp, const char *label)
{
@@ -611,12 +466,11 @@
storeAppendPrintf(sentry, "avg service time: %d msec\n",
hlp->stats.avg_svc_time);
storeAppendPrintf(sentry, "\n");
- storeAppendPrintf(sentry, "%7s\t%7s\t%7s\t%11s\t%20s\t%s\t%7s\t%7s\t%7s\n",
+ storeAppendPrintf(sentry, "%7s\t%7s\t%7s\t%11s\t%6s\t%7s\t%7s\t%7s\n",
"#",
"FD",
"PID",
"# Requests",
- "# Deferred Requests",
"Flags",
"Time",
"Offset",
@@ -624,17 +478,15 @@
for (dlink_node *link = hlp->servers.head; link; link = link->next) {
helper_stateful_server *srv = (helper_stateful_server *)link->data;
- double tt = 0.001 * tvSubMsec(srv->dispatch_time,
- srv->flags.busy ? current_time : srv->answer_time);
- storeAppendPrintf(sentry, "%7d\t%7d\t%7d\t%11d\t%20d\t%c%c%c%c%c\t%7.3f\t%7d\t%s\n",
+ double tt = 0.001 * tvSubMsec(srv->dispatch_time, srv->flags.busy ? current_time : srv->answer_time);
+ storeAppendPrintf(sentry, "%7d\t%7d\t%7d\t%11d\t%c%c%c%c%c\t%7.3f\t%7d\t%s\n",
srv->index + 1,
srv->rfd,
srv->pid,
srv->stats.uses,
- (int) srv->deferred_requests,
srv->flags.busy ? 'B' : ' ',
srv->flags.closing ? 'C' : ' ',
- srv->flags.reserved == S_HELPER_RESERVED ? 'R' : (srv->flags.reserved == S_HELPER_DEFERRED ? 'D' : ' '),
+ srv->flags.reserved ? 'R' : ' ',
srv->flags.shutdown ? 'S' : ' ',
srv->request ? (srv->request->placeholder ? 'P' : ' ') : ' ',
tt < 0.0 ? 0.0 : tt,
@@ -645,7 +497,7 @@
storeAppendPrintf(sentry, "\nFlags key:\n\n");
storeAppendPrintf(sentry, " B = BUSY\n");
storeAppendPrintf(sentry, " C = CLOSING\n");
- storeAppendPrintf(sentry, " R = RESERVED or DEFERRED\n");
+ storeAppendPrintf(sentry, " R = RESERVED\n");
storeAppendPrintf(sentry, " S = SHUTDOWN PENDING\n");
storeAppendPrintf(sentry, " P = PLACEHOLDER\n");
}
@@ -753,14 +605,13 @@
continue;
}
- if (srv->flags.reserved != S_HELPER_FREE) {
- debugs(84, 3, "helperStatefulShutdown: " << hlp->id_name << " #" << srv->index + 1 << " is RESERVED.");
- continue;
- }
-
- if (srv->deferred_requests) {
- debugs(84, 3, "helperStatefulShutdown: " << hlp->id_name << " #" << srv->index + 1 << " has DEFERRED requests.");
- continue;
+ if (srv->flags.reserved) {
+ if (shutting_down) {
+ debugs(84, 3, "helperStatefulShutdown: " << hlp->id_name << " #" << srv->index + 1 << " is RESERVED. Closing anyway.");
+ } else {
+ debugs(84, 3, "helperStatefulShutdown: " << hlp->id_name << " #" << srv->index + 1 << " is RESERVED. Not Shutting Down Yet.");
+ continue;
+ }
}
srv->flags.closing = 1;
@@ -1147,6 +998,7 @@
if ((t = strchr(srv->rbuf, '\n'))) {
/* end of reply found */
+ int called = 1;
debugs(84, 3, "helperStatefulHandleRead: end of reply found");
if (t > srv->rbuf && t[-1] == '\r')
@@ -1155,58 +1007,10 @@
*t = '\0';
if (r && cbdataReferenceValid(r->data)) {
- switch ((r->callback(r->data, srv, srv->rbuf))) { /*if non-zero reserve helper */
-
- case S_HELPER_UNKNOWN:
- fatal("helperStatefulHandleRead: either a non-state aware callback was give to the stateful helper routines, or an uninitialised callback response was received.\n");
- break;
-
- case S_HELPER_RELEASE: /* helper finished with */
-
- if (!srv->deferred_requests && !srv->queue.head) {
- srv->flags.reserved = S_HELPER_FREE;
-
- if ((srv->parent->OnEmptyQueue != NULL) && (srv->data))
- srv->parent->OnEmptyQueue(srv->data);
-
- debugs(84, 5, "StatefulHandleRead: releasing " << hlp->id_name << " #" << srv->index + 1);
- } else {
- srv->flags.reserved = S_HELPER_DEFERRED;
- debugs(84, 5, "StatefulHandleRead: outstanding deferred requests on " <<
- hlp->id_name << " #" << srv->index + 1 <<
- ". reserving for deferred requests.");
- }
-
- break;
-
- case S_HELPER_RESERVE: /* 'pin' this helper for the caller */
-
- if (!srv->queue.head) {
- assert(srv->deferred_requests == 0);
- srv->flags.reserved = S_HELPER_RESERVED;
- debugs(84, 5, "StatefulHandleRead: reserving " << hlp->id_name << " #" << srv->index + 1);
- } else {
- fatal("StatefulHandleRead: Callback routine attempted to reserve a stateful helper with deferred requests. This can lead to deadlock.\n");
- }
-
- break;
-
- case S_HELPER_DEFER:
- /* the helper is still needed, but can
- * be used for other requests in the meantime.
- */
- srv->flags.reserved = S_HELPER_DEFERRED;
- srv->deferred_requests++;
- srv->stats.deferbycb++;
- debugs(84, 5, "StatefulHandleRead: reserving " << hlp->id_name << " #" << srv->index + 1 << " for deferred requests.");
- break;
-
- default:
- fatal("helperStatefulHandleRead: unknown stateful helper callback result.\n");
- }
-
+ r->callback(r->data, srv, srv->rbuf);
} else {
debugs(84, 1, "StatefulHandleRead: no callback data registered");
+ called = 0;
}
srv->flags.busy = 0;
@@ -1220,12 +1024,15 @@
tvSubMsec(srv->dispatch_time, current_time),
hlp->stats.replies, REDIRECT_AV_FACTOR);
- helperStatefulServerKickQueue(srv);
+ if (called)
+ helperStatefulServerDone(srv);
+ else
+ helperStatefulReleaseServer(srv);
}
if (srv->rfd != -1)
comm_read(srv->rfd, srv->rbuf + srv->roffset, srv->rbuf_sz - srv->roffset - 1,
- helperStatefulHandleRead, srv);
+ helperStatefulHandleRead, srv);
}
static void
@@ -1285,31 +1092,6 @@
}
-static void
-StatefulServerEnqueue(helper_stateful_server * srv, helper_stateful_request * r)
-{
- dlink_node *link = (dlink_node *)memAllocate(MEM_DLINK_NODE);
- dlinkAddTail(r, link, &srv->queue);
- /* TODO: warning if the queue on this server is more than X
- * We don't check the queue size at the moment, because
- * requests hitting here are deferrable
- */
- /* hlp->stats.queue_size++;
- * if (hlp->stats.queue_size < hlp->n_running)
- * return;
- * if (squid_curtime - hlp->last_queue_warn < 600)
- * return;
- * if (shutting_down || reconfiguring)
- * return;
- * hlp->last_queue_warn = squid_curtime;
- * debugs(84, 0, "WARNING: All " << hlp->id_name << " processes are busy.");
- * debugs(84, 0, "WARNING: " << hlp->stats.queue_size << " pending requests queued");
- * if (hlp->stats.queue_size > hlp->n_running * 2)
- * fatalf("Too many queued %s requests", hlp->id_name);
- * debugs(84, 1, "Consider increasing the number of " << hlp->id_name << " processes in your config file." ); */
-}
-
-
static helper_request *
Dequeue(helper * hlp)
{
@@ -1327,21 +1109,6 @@
}
static helper_stateful_request *
-StatefulServerDequeue(helper_stateful_server * srv)
-{
- dlink_node *link;
- helper_stateful_request *r = NULL;
-
- if ((link = srv->queue.head)) {
- r = (helper_stateful_request *)link->data;
- dlinkDelete(link, &srv->queue);
- memFree(link, MEM_DLINK_NODE);
- }
-
- return r;
-}
-
-static helper_stateful_request *
StatefulDequeue(statefulhelper * hlp)
{
dlink_node *link;
@@ -1414,7 +1181,7 @@
if (srv->flags.busy)
continue;
- if (srv->flags.reserved == S_HELPER_RESERVED)
+ if (srv->flags.reserved)
continue;
if (srv->flags.shutdown)
@@ -1527,6 +1294,7 @@
if (!cbdataReferenceValid(r->data)) {
debugs(84, 1, "helperStatefulDispatch: invalid callback data");
helperStatefulRequestFree(r);
+ helperStatefulReleaseServer(srv);
return;
}
@@ -1534,7 +1302,7 @@
if (r->placeholder == 1) {
/* a callback is needed before this request can _use_ a helper. */
- /* we don't care about releasing/deferring this helper. The request NEVER
+ /* we don't care about releasing this helper. The request NEVER
* gets to the helper. So we throw away the return code */
r->callback(r->data, srv, NULL);
/* throw away the placeholder */
@@ -1543,12 +1311,13 @@
* request to the helper which is why we test for the request*/
if (srv->request == NULL)
- helperStatefulServerKickQueue(srv);
+ helperStatefulServerDone(srv);
return;
}
srv->flags.busy = 1;
+ srv->flags.reserved = 1;
srv->request = r;
srv->dispatch_time = current_time;
comm_write(srv->wfd,
@@ -1586,18 +1355,11 @@
}
static void
-helperStatefulServerKickQueue(helper_stateful_server * srv)
+helperStatefulServerDone(helper_stateful_server * srv)
{
- helper_stateful_request *r;
-
- if ((r = StatefulServerDequeue(srv))) {
- helperStatefulDispatch(srv, r);
- return;
- }
-
if (!srv->flags.shutdown) {
helperStatefulKickQueue(srv->parent);
- } else if (!srv->flags.closing && srv->flags.reserved == S_HELPER_FREE && !srv->flags.busy) {
+ } else if (!srv->flags.closing && !srv->flags.reserved && !srv->flags.busy) {
int wfd = srv->wfd;
srv->wfd = -1;
if (srv->rfd == wfd)
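
The helper.cc rework replaces the S_HELPER_FREE / S_HELPER_DEFERRED / S_HELPER_RESERVED tri-state and the per-server deferred queue with a single reserved bit: helperStatefulDispatch() sets it together with busy, the read handler clears only busy, and helperStatefulReleaseServer() is the one place the reservation ends. Below is a minimal sketch of that lifecycle under exactly that ordering; the StatefulServer struct and the dispatch/handleReply/release helpers are illustrative stand-ins, not the Squid types.

#include <cassert>
#include <cstdio>

// Minimal stand-in for the pared-down helper_stateful_server flags.
struct StatefulServer {
    bool busy = false;
    bool reserved = false;
    int submits = 0;
    int releases = 0;
};

// Dispatching a request both busies and reserves the server
// (mirrors helperStatefulDispatch() setting flags.busy and flags.reserved).
static void dispatch(StatefulServer &srv)
{
    assert(!srv.busy);
    srv.busy = true;
    srv.reserved = true;
    ++srv.submits;
}

// The reply handler clears busy, but the server stays reserved for the caller
// until it explicitly releases it (mirrors helperStatefulHandleRead()).
static void handleReply(StatefulServer &srv)
{
    srv.busy = false;
}

// The caller signals it is finished; only now does the server become
// available again (mirrors helperStatefulReleaseServer()).
static void release(StatefulServer &srv)
{
    if (!srv.reserved)
        return;
    ++srv.releases;
    srv.reserved = false;
}

int main()
{
    StatefulServer srv;
    dispatch(srv);
    handleReply(srv);
    printf("after reply : reserved=%d\n", srv.reserved);   // still reserved
    release(srv);
    printf("after release: reserved=%d\n", srv.reserved);  // free again
}
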
diff -u -r -N squid-3.1.0.13/src/helper.h squid-3.1.0.14/src/helper.h
--- squid-3.1.0.13/src/helper.h 2009-08-05 01:32:17.000000000 +1200
+++ squid-3.1.0.14/src/helper.h 2009-09-27 15:28:33.000000000 +1200
@@ -51,7 +51,7 @@
typedef struct _helper_stateful_flags helper_stateful_flags;
-typedef stateful_helper_callback_t HLPSCB(void *, void *lastserver, char *buf);
+typedef void HLPSCB(void *, void *lastserver, char *buf);
struct _helper {
wordlist *cmdline;
@@ -152,7 +152,6 @@
struct timeval answer_time;
dlink_node link;
- dlink_list queue;
statefulhelper *parent;
helper_stateful_request *request;
@@ -160,17 +159,14 @@
unsigned int busy:1;
unsigned int closing:1;
unsigned int shutdown:1;
- stateful_helper_reserve_t reserved;
+ unsigned int reserved:1;
} flags;
struct {
int uses;
int submits;
int releases;
- int deferbyfunc;
- int deferbycb;
} stats;
- int deferred_requests; /* current number of deferred requests */
void *data; /* State data used by the calling routines */
void *hIpc;
};
@@ -196,7 +192,7 @@
MEMPROXY_CLASS(helper_stateful_request);
char *buf;
HLPSCB *callback;
- int placeholder; /* if 1, this is a dummy request waiting for a stateful helper to become available for deferred requests.*/
+ int placeholder; /* if 1, this is a dummy request waiting for a stateful helper to become available */
void *data;
};
@@ -215,10 +211,8 @@
SQUIDCEXTERN statefulhelper *helperStatefulCreate(const char *);
SQUIDCEXTERN void helperFree(helper *);
SQUIDCEXTERN void helperStatefulFree(statefulhelper *);
-SQUIDCEXTERN void helperStatefulReset(helper_stateful_server * srv);
SQUIDCEXTERN void helperStatefulReleaseServer(helper_stateful_server * srv);
SQUIDCEXTERN void *helperStatefulServerGetData(helper_stateful_server * srv);
-SQUIDCEXTERN helper_stateful_server *helperStatefulDefer(statefulhelper *);
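
With helper.h now declaring HLPSCB as returning void, a callback can no longer steer the old release/reserve/defer decision through its return value; whoever owns the reservation calls helperStatefulReleaseServer() explicitly when finished. A short sketch of a callback written against the new signature follows; only the typedef matches the patched header, while myLookupDone() and releaseServerStub() are hypothetical.

#include <cstdio>

// The new callback shape from helper.h: it returns void.
typedef void HLPSCB(void *data, void *lastserver, char *buf);

// Stand-in for helperStatefulReleaseServer(), called once the caller is done
// with the reserved server.
static void releaseServerStub(void *srv) { printf("released %p\n", srv); }

// Hypothetical consumer-side callback conforming to the new signature.
static void myLookupDone(void *data, void *lastserver, char *buf)
{
    (void)data;
    printf("helper said: %s\n", buf ? buf : "(null)");
    releaseServerStub(lastserver);   // explicit release replaces S_HELPER_RELEASE
}

int main()
{
    HLPSCB *cb = &myLookupDone;
    char answer[] = "OK";
    int dummyData = 0, dummyServer = 0;
    cb(&dummyData, &dummyServer, answer);
}
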
diff -u -r -N squid-3.1.0.13/src/HierarchyLogEntry.h squid-3.1.0.14/src/HierarchyLogEntry.h
--- squid-3.1.0.13/src/HierarchyLogEntry.h 2009-08-05 01:32:12.000000000 +1200
+++ squid-3.1.0.14/src/HierarchyLogEntry.h 2009-09-27 15:28:29.000000000 +1200
@@ -56,7 +56,7 @@
struct timeval peer_select_start;
struct timeval store_complete_stop;
-
+
http_status peer_reply_status; ///< last HTTP status code received
timeval peer_http_request_sent; ///< last peer finished writing req
int64_t peer_response_time; ///< last peer response delay
diff -u -r -N squid-3.1.0.13/src/http.cc squid-3.1.0.14/src/http.cc
--- squid-3.1.0.13/src/http.cc 2009-08-05 01:32:17.000000000 +1200
+++ squid-3.1.0.14/src/http.cc 2009-09-27 15:28:34.000000000 +1200
@@ -76,8 +76,8 @@
HttpHeader * hdr_out, const int we_do_ranges, const http_state_flags);
HttpStateData::HttpStateData(FwdState *theFwdState) : AsyncJob("HttpStateData"), ServerStateData(theFwdState),
- lastChunk(0), header_bytes_read(0), reply_bytes_read(0),
- body_bytes_truncated(0), httpChunkDecoder(NULL)
+ lastChunk(0), header_bytes_read(0), reply_bytes_read(0),
+ body_bytes_truncated(0), httpChunkDecoder(NULL)
{
debugs(11,5,HERE << "HttpStateData " << this << " created");
ignoreCacheControl = false;
@@ -1150,13 +1150,12 @@
const HttpVersion &v = vrep->sline.version;
if (s == HTTP_INVALID_HEADER && v != HttpVersion(0,9)) {
error = ERR_INVALID_RESP;
- } else
- if (s == HTTP_HEADER_TOO_LARGE) {
- fwd->dontRetry(true);
- error = ERR_TOO_BIG;
- } else {
- return true; // done parsing, got reply, and no error
- }
+ } else if (s == HTTP_HEADER_TOO_LARGE) {
+ fwd->dontRetry(true);
+ error = ERR_TOO_BIG;
+ } else {
+ return true; // done parsing, got reply, and no error
+ }
} else {
// parsed headers but got no reply
error = ERR_INVALID_RESP;
@@ -1188,14 +1187,14 @@
return; // no body or a body of unknown size, including chunked
const int64_t body_bytes_read = reply_bytes_read - header_bytes_read;
- if (body_bytes_read - body_bytes_truncated <= clen)
+ if (body_bytes_read - body_bytes_truncated <= clen)
return; // we did not read too much or already took care of the extras
if (const int64_t extras = body_bytes_read - body_bytes_truncated - clen) {
// server sent more that the advertised content length
- debugs(11,5, HERE << "body_bytes_read=" << body_bytes_read <<
- " clen=" << clen << '/' << vrep->content_length <<
- " body_bytes_truncated=" << body_bytes_truncated << '+' << extras);
+ debugs(11,5, HERE << "body_bytes_read=" << body_bytes_read <<
+ " clen=" << clen << '/' << vrep->content_length <<
+ " body_bytes_truncated=" << body_bytes_truncated << '+' << extras);
readBuf->truncate(extras);
body_bytes_truncated += extras;
@@ -1422,7 +1421,7 @@
commSetTimeout(fd, Config.Timeout.read, timeoutCall);
flags.request_sent = 1;
-
+
orig_request->hier.peer_http_request_sent = current_time;
}
@@ -1504,7 +1503,7 @@
}
#if USE_SQUID_ESI
- {
+ if (orig_request->flags.accelerated) {
/* Append Surrogate-Capabilities */
String strSurrogate (hdr_in->getList(HDR_SURROGATE_CAPABILITY));
snprintf(bbuf, BBUF_SZ, "%s=\"Surrogate/1.0 ESI/1.0\"",
diff -u -r -N squid-3.1.0.13/src/HttpHeader.cc squid-3.1.0.14/src/HttpHeader.cc
--- squid-3.1.0.13/src/HttpHeader.cc 2009-08-05 01:32:12.000000000 +1200
+++ squid-3.1.0.14/src/HttpHeader.cc 2009-09-27 15:28:29.000000000 +1200
@@ -321,7 +321,7 @@
httpHeaderCalcMask(&RequestHeadersMask, EntityHeadersArr, countof(EntityHeadersArr));
httpHeaderMaskInit(&HopByHopHeadersMask, 0);
-
+
httpHeaderCalcMask(&HopByHopHeadersMask, HopByHopHeadersArr, countof(HopByHopHeadersArr));
/* init header stats */
diff -u -r -N squid-3.1.0.13/src/HttpHeaderTools.cc squid-3.1.0.14/src/HttpHeaderTools.cc
--- squid-3.1.0.13/src/HttpHeaderTools.cc 2009-08-05 01:32:12.000000000 +1200
+++ squid-3.1.0.14/src/HttpHeaderTools.cc 2009-09-27 15:28:29.000000000 +1200
@@ -229,6 +229,10 @@
strListGetItem(const String * str, char del, const char **item, int *ilen, const char **pos)
{
size_t len;
+ /* ',' is always enabled as a field delimiter, as this is required for
+ * processing merged header values properly, even though Cookie normally
+ * uses ';' as its delimiter.
+ */
static char delim[3][8] = {
"\"?,",
"\"\\",
@@ -247,7 +251,7 @@
return 0;
}
- /* skip leading ws and delimiters */
+ /* skip leading whitespace and delimiters */
*pos += strspn(*pos, delim[2]);
*item = *pos; /* remember item's start */
@@ -255,20 +259,15 @@
/* find next delimiter */
do {
*pos += strcspn(*pos, delim[quoted]);
-
- if (**pos == del)
- break;
-
if (**pos == '"') {
quoted = !quoted;
*pos += 1;
- }
-
- if (quoted && **pos == '\\') {
+ } else if (quoted && **pos == '\\') {
*pos += 1;
-
if (**pos)
*pos += 1;
+ } else {
+ break; /* Delimiter found, marking the end of this value */
}
} while (**pos);
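
The reworked loop lets strListGetItem() scan a merged header value with strcspn(), switching between an unquoted delimiter set (the quote character plus the field delimiter) and a quoted one (quote and backslash), and treating anything else it stops on as the end of the value. Below is a self-contained sketch of the same scanning idea, splitting a list on a delimiter while honouring quotes and backslash escapes; splitList() and its whitespace handling are illustrative, not the Squid function.

#include <cstdio>
#include <cstring>
#include <string>
#include <vector>

// Split a header-style list on 'del', honouring double quotes and backslash
// escapes inside quoted strings (stand-in for the strListGetItem() loop).
static std::vector<std::string> splitList(const char *str, char del)
{
    char unquotedDelims[3] = { '"', del, '\0' };  // stop at quote or delimiter
    const char *quotedDelims = "\"\\";            // stop at quote or escape
    std::vector<std::string> items;
    const char *pos = str;

    while (*pos) {
        while (*pos == del || *pos == ' ')        // skip leading whitespace and delimiters
            ++pos;
        if (!*pos)
            break;
        const char *item = pos;                   // remember item's start
        int quoted = 0;
        for (;;) {
            pos += strcspn(pos, quoted ? quotedDelims : unquotedDelims);
            if (*pos == '"') {
                quoted = !quoted;
                ++pos;
            } else if (quoted && *pos == '\\') {
                ++pos;
                if (*pos)
                    ++pos;
            } else {
                break;  // delimiter or end of string: end of this value
            }
        }
        items.push_back(std::string(item, pos - item));
    }
    return items;
}

int main()
{
    for (const std::string &s : splitList("a=\"1,2\", b=\"say \\\"hi\\\"\", c", ','))
        printf("[%s]\n", s.c_str());
    // prints: [a="1,2"]  [b="say \"hi\""]  [c]
}
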
diff -u -r -N squid-3.1.0.13/src/HttpMsg.h squid-3.1.0.14/src/HttpMsg.h
--- squid-3.1.0.13/src/HttpMsg.h 2009-08-05 01:32:12.000000000 +1200
+++ squid-3.1.0.14/src/HttpMsg.h 2009-09-27 15:28:29.000000000 +1200
@@ -99,13 +99,13 @@
virtual bool inheritProperties(const HttpMsg *aMsg) = 0;
protected:
- /**
- * Validate the message start line is syntactically correct.
- * Set HTTP error status according to problems found.
- *
- * \retval true Status line has no serious problems.
- * \retval false Status line has a serious problem. Correct response is indicated by error.
- */
+ /**
+ * Validate the message start line is syntactically correct.
+ * Set HTTP error status according to problems found.
+ *
+ * \retval true Status line has no serious problems.
+ * \retval false Status line has a serious problem. Correct response is indicated by error.
+ */
virtual bool sanityCheckStartLine(MemBuf *buf, const size_t hdr_len, http_status *error) = 0;
virtual void packFirstLineInto(Packer * p, bool full_uri) const = 0;
@@ -119,7 +119,8 @@
};
/* Temporary parsing state; might turn into the replacement parser later on */
-class HttpParser {
+class HttpParser
+{
public:
char state;
const char *buf;
diff -u -r -N squid-3.1.0.13/src/HttpReply.cc squid-3.1.0.14/src/HttpReply.cc
--- squid-3.1.0.13/src/HttpReply.cc 2009-08-05 01:32:13.000000000 +1200
+++ squid-3.1.0.14/src/HttpReply.cc 2009-09-27 15:28:29.000000000 +1200
@@ -481,7 +481,7 @@
// skip arbitrary number of spaces...
while (pos <= buf->contentSize() && (char)*(buf->content()+pos) == ' ') ++pos;
- if (!xisdigit(*(buf->content()+pos))) {
+ if (pos < buf->contentSize() && !xisdigit(*(buf->content()+pos))) {
debugs(58, 3, "HttpReply::sanityCheckStartLine: missing or invalid status number in '" << buf->content() << "'");
*error = HTTP_INVALID_HEADER;
return false;
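
The one-line fix only dereferences the status-code position while it is still inside the buffered data, so a truncated status line such as "HTTP/1.1 " now means "not enough data yet" instead of an invalid header (the new unit tests in testHttpReply.cc further down exercise exactly these short reads). A small sketch of the guarded probe; looksLikeStatusDigit() and its std::string buffer are stand-ins for the MemBuf-based parser.

#include <cctype>
#include <cstdio>
#include <string>

// Only test the status digit when the offset is still inside the buffered
// data; a short read is treated as "not yet an error".
static bool looksLikeStatusDigit(const std::string &buf, size_t pos)
{
    while (pos < buf.size() && buf[pos] == ' ')
        ++pos;                         // skip arbitrary spaces, as the parser does
    if (pos >= buf.size())
        return true;                   // incomplete line: wait for more data
    return isdigit(static_cast<unsigned char>(buf[pos])) != 0;
}

int main()
{
    printf("%d\n", looksLikeStatusDigit("HTTP/1.1 200 OK", 9)); // 1: digit found
    printf("%d\n", looksLikeStatusDigit("HTTP/1.1 ", 9));       // 1: too short, not an error
    printf("%d\n", looksLikeStatusDigit("HTTP/1.1 abc", 9));    // 0: invalid status
}
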
diff -u -r -N squid-3.1.0.13/src/HttpRequest.cc squid-3.1.0.14/src/HttpRequest.cc
--- squid-3.1.0.13/src/HttpRequest.cc 2009-08-05 01:32:13.000000000 +1200
+++ squid-3.1.0.14/src/HttpRequest.cc 2009-09-27 15:28:29.000000000 +1200
@@ -377,12 +377,12 @@
}
#if ICAP_CLIENT
-Adaptation::Icap::History::Pointer
+Adaptation::Icap::History::Pointer
HttpRequest::icapHistory() const
{
if (!icapHistory_) {
if ((LogfileStatus == LOG_ENABLE && alLogformatHasIcapToken) ||
- IcapLogfileStatus == LOG_ENABLE) {
+ IcapLogfileStatus == LOG_ENABLE) {
icapHistory_ = new Adaptation::Icap::History();
debugs(93,4, HERE << "made " << icapHistory_ << " for " << this);
}
@@ -393,7 +393,7 @@
#endif
#if USE_ADAPTATION
-Adaptation::History::Pointer
+Adaptation::History::Pointer
HttpRequest::adaptHistory(bool createIfNone) const
{
if (!adaptHistory_ && createIfNone) {
@@ -404,11 +404,11 @@
return adaptHistory_;
}
-Adaptation::History::Pointer
+Adaptation::History::Pointer
HttpRequest::adaptLogHistory() const
{
const bool loggingNeedsHistory = (LogfileStatus == LOG_ENABLE) &&
- alLogformatHasAdaptToken; // TODO: make global to remove this method?
+ alLogformatHasAdaptToken; // TODO: make global to remove this method?
return HttpRequest::adaptHistory(loggingNeedsHistory);
}
diff -u -r -N squid-3.1.0.13/src/icmp/Makefile.in squid-3.1.0.14/src/icmp/Makefile.in
--- squid-3.1.0.13/src/icmp/Makefile.in 2009-08-05 01:32:38.000000000 +1200
+++ squid-3.1.0.14/src/icmp/Makefile.in 2009-09-27 15:28:56.000000000 +1200
@@ -105,6 +105,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/src/ident/Ident.cc squid-3.1.0.14/src/ident/Ident.cc
--- squid-3.1.0.13/src/ident/Ident.cc 2009-08-05 01:32:17.000000000 +1200
+++ squid-3.1.0.14/src/ident/Ident.cc 2009-09-27 15:28:34.000000000 +1200
@@ -80,7 +80,7 @@
/**** PRIVATE FUNCTIONS ****/
-static void
+void
Ident::identCallback(IdentStateData * state, char *result)
{
IdentClient *client;
@@ -99,7 +99,7 @@
}
}
-static void
+void
Ident::Close(int fdnotused, void *data)
{
IdentStateData *state = (IdentStateData *)data;
@@ -110,7 +110,7 @@
cbdataFree(state);
}
-static void
+void
Ident::Timeout(int fd, void *data)
{
IdentStateData *state = (IdentStateData *)data;
@@ -119,7 +119,7 @@
comm_close(fd);
}
-static void
+void
Ident::ConnectDone(int fd, const DnsLookupDetails &, comm_err_t status, int xerrno, void *data)
{
IdentStateData *state = (IdentStateData *)data;
@@ -155,7 +155,7 @@
commSetTimeout(fd, Ident::TheConfig.timeout, Ident::Timeout, state);
}
-static void
+void
Ident::ReadReply(int fd, char *buf, size_t len, comm_err_t flag, int xerrno, void *data)
{
IdentStateData *state = (IdentStateData *)data;
@@ -194,7 +194,7 @@
comm_close(fd);
}
-static void
+void
Ident::ClientAdd(IdentStateData * state, IDCB * callback, void *callback_data)
{
IdentClient *c = (IdentClient *)xcalloc(1, sizeof(*c));
diff -u -r -N squid-3.1.0.13/src/ident/Makefile.in squid-3.1.0.14/src/ident/Makefile.in
--- squid-3.1.0.13/src/ident/Makefile.in 2009-08-05 01:32:38.000000000 +1200
+++ squid-3.1.0.14/src/ident/Makefile.in 2009-09-27 15:28:56.000000000 +1200
@@ -76,6 +76,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/src/ip/IpAddress.cc squid-3.1.0.14/src/ip/IpAddress.cc
--- squid-3.1.0.13/src/ip/IpAddress.cc 2009-08-05 01:32:17.000000000 +1200
+++ squid-3.1.0.14/src/ip/IpAddress.cc 2009-09-27 15:28:34.000000000 +1200
@@ -140,7 +140,7 @@
#endif
- for (;shift 0) {
@@ -1050,7 +1051,8 @@
return (p - buf);
}
-char* IpAddress::ToURL(char* buf, unsigned int blen) const {
+char* IpAddress::ToURL(char* buf, unsigned int blen) const
+{
char *p = buf;
// Ensure we have a buffer.
@@ -1072,7 +1074,8 @@
return buf;
}
-void IpAddress::GetSockAddr(struct sockaddr_storage &addr, const int family) const {
+void IpAddress::GetSockAddr(struct sockaddr_storage &addr, const int family) const
+{
struct sockaddr_in *sin = NULL;
if ( family == AF_INET && !IsIPv4()) {
@@ -1097,7 +1100,8 @@
#endif /* USE_IPV6 */
}
-void IpAddress::GetSockAddr(struct sockaddr_in &buf) const {
+void IpAddress::GetSockAddr(struct sockaddr_in &buf) const
+{
#if USE_IPV6
if ( IsIPv4() ) {
@@ -1130,7 +1134,8 @@
#if USE_IPV6
-void IpAddress::GetSockAddr(struct sockaddr_in6 &buf) const {
+void IpAddress::GetSockAddr(struct sockaddr_in6 &buf) const
+{
memcpy(&buf, &m_SocketAddr, sizeof(struct sockaddr_in6));
/* maintain address family. It may have changed inside us. */
buf.sin6_family = AF_INET6;
@@ -1145,7 +1150,8 @@
#if USE_IPV6
-void IpAddress::Map4to6(const struct in_addr &in, struct in6_addr &out) const {
+void IpAddress::Map4to6(const struct in_addr &in, struct in6_addr &out) const
+{
/* check for special cases */
if ( in.s_addr == 0x00000000) {
@@ -1169,7 +1175,8 @@
}
}
-void IpAddress::Map6to4(const struct in6_addr &in, struct in_addr &out) const {
+void IpAddress::Map6to4(const struct in6_addr &in, struct in_addr &out) const
+{
/* ANYADDR */
/* NOADDR */
/* general */
@@ -1181,13 +1188,15 @@
#endif
#if USE_IPV6
-void IpAddress::GetInAddr(in6_addr &buf) const {
+void IpAddress::GetInAddr(in6_addr &buf) const
+{
memcpy(&buf, &m_SocketAddr.sin6_addr, sizeof(struct in6_addr));
}
#endif
-bool IpAddress::GetInAddr(struct in_addr &buf) const {
+bool IpAddress::GetInAddr(struct in_addr &buf) const
+{
#if USE_IPV6
if ( IsIPv4() ) {
diff -u -r -N squid-3.1.0.13/src/ip/IpIntercept.cc squid-3.1.0.14/src/ip/IpIntercept.cc
--- squid-3.1.0.13/src/ip/IpIntercept.cc 2009-08-05 01:32:17.000000000 +1200
+++ squid-3.1.0.14/src/ip/IpIntercept.cc 2009-09-27 15:28:34.000000000 +1200
@@ -369,9 +369,6 @@
client = me;
dst = peer;
- if ( !me.IsIPv4() ) return -1;
- if ( !peer.IsIPv4() ) return -1;
-
#if 0
// Crop interception errors down to one per minute.
int silent = (squid_curtime - last_reported > 60 ? 0 : 1);
@@ -388,6 +385,10 @@
if ( NetfilterTransparent(fd, me, dst, silent) == 0) return 0;
}
+ /* NAT is only available in IPv4 */
+ if ( !me.IsIPv4() ) return -1;
+ if ( !peer.IsIPv4() ) return -1;
+
if (intercept_active) {
/* NAT methods that use sock-opts to return client address */
if ( NetfilterInterception(fd, me, client, silent) == 0) return 0;
@@ -438,3 +439,85 @@
return 0;
}
#endif
+
+bool
+IpIntercept::ProbeForTproxy(IpAddress &test)
+{
+ debugs(3, 3, "Detect TPROXY support on port " << test);
+#if LINUX_TPROXY2
+
+#if USE_IPV6
+ /* TPROXYv2 is not IPv6 capable. Force wildcard sockets to IPv4. Die on IPv6 IPs */
+ debugs(3, DBG_IMPORTANT, "Disabling IPv6 on port " << test << " (TPROXYv2 interception enabled)");
+ if ( test.IsIPv6() && !test.SetIPv4() ) {
+ debugs(3, DBG_CRITICAL, "IPv6 requires TPROXYv4 support. You only have TPROXYv2 for " << test );
+ return false;
+ }
+#endif /* USE_IPV6 */
+ return true;
+
+#else /* not LINUX_TPROXY2 */
+
+#if defined(IP_TRANSPARENT)
+
+ int tos = 1;
+ int tmp_sock = -1;
+
+#if USE_IPV6
+ /* Probe to see if the Kernel TPROXY support is IPv6-enabled */
+ if (test.IsIPv6()) {
+ debugs(3, 3, "...Probing for IPv6 TPROXY support.");
+
+ struct sockaddr_in6 tmp_ip6;
+ IpAddress tmp = "::2";
+ tmp.SetPort(0);
+ tmp.GetSockAddr(tmp_ip6);
+
+ if ( (tmp_sock = socket(PF_INET6, SOCK_STREAM, IPPROTO_TCP)) >= 0 &&
+ setsockopt(tmp_sock, SOL_IP, IP_TRANSPARENT, (char *)&tos, sizeof(int)) == 0 &&
+ bind(tmp_sock, (struct sockaddr*)&tmp_ip6, sizeof(struct sockaddr_in6)) == 0 ) {
+
+ debugs(3, 3, "IPv6 TPROXY support detected. Using.");
+ close(tmp_sock);
+ return true;
+ }
+ if (tmp_sock >= 0) {
+ close(tmp_sock);
+ tmp_sock = -1;
+ }
+ }
+
+ if ( test.IsIPv6() && !test.SetIPv4() ) {
+ debugs(3, DBG_CRITICAL, "TPROXY lacks IPv6 support for " << test );
+ return false;
+ }
+#endif
+
+ /* Probe to see if the Kernel TPROXY support is IPv4-enabled (aka present) */
+ if (test.IsIPv4()) {
+ debugs(3, 3, "...Probing for IPv4 TPROXY support.");
+
+ struct sockaddr_in tmp_ip4;
+ IpAddress tmp = "127.0.0.2";
+ tmp.SetPort(0);
+ tmp.GetSockAddr(tmp_ip4);
+
+ if ( (tmp_sock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP)) >= 0 &&
+ setsockopt(tmp_sock, SOL_IP, IP_TRANSPARENT, (char *)&tos, sizeof(int)) == 0 &&
+ bind(tmp_sock, (struct sockaddr*)&tmp_ip4, sizeof(struct sockaddr_in)) == 0 ) {
+
+ debugs(3, 3, "IPv4 TPROXY support detected. Using.");
+ close(tmp_sock);
+ return true;
+ }
+ if (tmp_sock >= 0) {
+ close(tmp_sock);
+ }
+ }
+
+#else /* undefined IP_TRANSPARENT */
+ debugs(3, 3, "setsockopt(IP_TRANSPARENT) not supported on this platform. Disabling TPROXYv4.");
+#endif
+#endif /* LINUX_TPROXY2 */
+ return false;
+}
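
ProbeForTproxy() verifies, down to an actual bind(), that the running kernel honours IP_TRANSPARENT for the address family the port listens on. The standalone Linux program below performs the same IPv4 probe outside Squid, which can help separate kernel or capability problems from configuration ones; it needs CAP_NET_ADMIN (usually root), and the fallback #define of IP_TRANSPARENT to 19 is an assumption taken from the Linux headers, not from this patch.

#include <cstdio>
#include <cstring>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#ifndef SOL_IP
#define SOL_IP IPPROTO_IP
#endif
#ifndef IP_TRANSPARENT
#define IP_TRANSPARENT 19   // value from <linux/in.h>
#endif

int main()
{
    int on = 1;
    struct sockaddr_in sa;
    memset(&sa, 0, sizeof(sa));
    sa.sin_family = AF_INET;
    sa.sin_port = 0;                          // any port, as in the Squid probe
    inet_pton(AF_INET, "127.0.0.2", &sa.sin_addr);

    int fd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
    if (fd >= 0 &&
            setsockopt(fd, SOL_IP, IP_TRANSPARENT, &on, sizeof(on)) == 0 &&
            bind(fd, (struct sockaddr *)&sa, sizeof(sa)) == 0) {
        printf("IPv4 TPROXY support detected\n");
        close(fd);
        return 0;
    }
    if (fd >= 0)
        close(fd);
    printf("no usable TPROXY support\n");
    return 1;
}
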
diff -u -r -N squid-3.1.0.13/src/ip/IpIntercept.h squid-3.1.0.14/src/ip/IpIntercept.h
--- squid-3.1.0.13/src/ip/IpIntercept.h 2009-08-05 01:32:17.000000000 +1200
+++ squid-3.1.0.14/src/ip/IpIntercept.h 2009-09-27 15:28:34.000000000 +1200
@@ -36,6 +36,17 @@
#endif
/**
+ * Test system networking calls for TPROXY support.
+ * Detects whether the IPv6/IPv4 level of support matches the address being listened on,
+ * and whether the compiled v2/v4 support is usable as far down as a bind().
+ *
+ * \param test Address set on the http(s)_port being checked.
+ * \retval true TPROXY is available.
+ * \retval false TPROXY is not available.
+ */
+ bool ProbeForTproxy(IpAddress &test);
+
+ /**
\retval 0 Full transparency is disabled.
\retval 1 Full transparency is enabled and active.
*/
diff -u -r -N squid-3.1.0.13/src/ip/Makefile.in squid-3.1.0.14/src/ip/Makefile.in
--- squid-3.1.0.13/src/ip/Makefile.in 2009-08-05 01:32:38.000000000 +1200
+++ squid-3.1.0.14/src/ip/Makefile.in 2009-09-27 15:28:57.000000000 +1200
@@ -87,6 +87,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/src/ipcache.cc squid-3.1.0.14/src/ipcache.cc
--- squid-3.1.0.13/src/ipcache.cc 2009-08-05 01:32:17.000000000 +1200
+++ squid-3.1.0.14/src/ipcache.cc 2009-09-27 15:28:34.000000000 +1200
@@ -85,7 +85,8 @@
* where structures of type ipcache_entry whose most
* interesting members are:
*/
-class ipcache_entry {
+class ipcache_entry
+{
public:
hash_link hash; /* must be first */
time_t lastref;
@@ -656,7 +657,7 @@
IpcacheStats.replies++;
const int age = i->age();
statHistCount(&statCounter.dns.svc_time, age);
-
+
#if USE_DNSSERVERS
done = ipcacheParse(i, reply);
diff -u -r -N squid-3.1.0.13/src/main.cc squid-3.1.0.14/src/main.cc
--- squid-3.1.0.13/src/main.cc 2009-08-05 01:32:17.000000000 +1200
+++ squid-3.1.0.14/src/main.cc 2009-09-27 15:28:34.000000000 +1200
@@ -717,7 +717,7 @@
enter_suid(); /* root to read config file */
// we may have disabled the need for PURGE
- if(Config2.onoff.enable_purge)
+ if (Config2.onoff.enable_purge)
Config2.onoff.enable_purge = 2;
parseConfigFile(ConfigFile);
@@ -786,13 +786,12 @@
{
icmpEngine.Close();
#if USE_DNSSERVERS
-
dnsShutdown();
#endif
-
redirectShutdown();
authenticateShutdown();
externalAclShutdown();
+
_db_rotate_log(); /* cache.log */
storeDirWriteCleanLogs(1);
storeLogRotate(); /* store.log */
@@ -803,16 +802,13 @@
icapLogRotate(); /*icap.log*/
#endif
#if WIP_FWD_LOG
-
fwdLogRotate();
#endif
icmpEngine.Open();
#if USE_DNSSERVERS
-
dnsInit();
#endif
-
redirectInit();
authenticateInit(&Config.authConfiguration);
externalAclInit();
@@ -963,7 +959,7 @@
accessLogInit();
-#if ICAP_CLIENT
+#if ICAP_CLIENT
icapLogOpen();
#endif
@@ -1122,7 +1118,7 @@
#if USE_WIN32_SERVICE
/* When USE_WIN32_SERVICE is defined, the main function is placed in win32.cc */
extern "C" void WINAPI
- SquidWinSvcMain(int argc, char **argv)
+SquidWinSvcMain(int argc, char **argv)
#else
int
main(int argc, char **argv)
diff -u -r -N squid-3.1.0.13/src/Makefile.am squid-3.1.0.14/src/Makefile.am
--- squid-3.1.0.13/src/Makefile.am 2009-08-05 01:32:13.000000000 +1200
+++ squid-3.1.0.14/src/Makefile.am 2009-09-27 15:28:29.000000000 +1200
@@ -522,6 +522,7 @@
@DISK_LINKOBJS@ \
@REPL_OBJS@ \
@DISK_LIBS@ \
+ @AIOLIB@ \
@CRYPTLIB@ \
@REGEXLIB@ \
@SNMPLIB@ \
@@ -804,9 +805,9 @@
$(INSTALL_DATA) squid.conf.documented $(DESTDIR)$(DEFAULT_CONFIG_FILE).documented; \
$(mkinstalldirs) $(DESTDIR)$(DEFAULT_LOG_PREFIX)
-uninstall-local:
- @$(SHELL) $(top_srcdir)/scripts/remove-cfg.sh "$(RM)" $(DESTDIR)$(DEFAULT_MIME_TABLE)
- @$(SHELL) $(top_srcdir)/scripts/remove-cfg.sh "$(RM)" $(DESTDIR)$(DEFAULT_CONFIG_FILE)
+uninstall-local: squid.conf.default
+ @$(SHELL) $(top_srcdir)/scripts/remove-cfg.sh "$(RM)" $(DESTDIR)$(DEFAULT_MIME_TABLE) $(srcdir)/mime.conf.default
+ @$(SHELL) $(top_srcdir)/scripts/remove-cfg.sh "$(RM)" $(DESTDIR)$(DEFAULT_CONFIG_FILE) squid.conf.default
CLEANFILES += cf_gen_defines.h cf.data cf_parser.h squid.conf.default squid.conf.documented \
globals.cc string_arrays.c repl_modules.cc DiskIO/DiskIOModules_gen.cc \
@@ -1200,12 +1201,14 @@
tests_testDiskIO_LDADD = \
$(SWAP_TEST_LDADD) \
@DISK_LIBS@ \
+ @AIOLIB@ \
$(COMMON_LIBS) \
SquidConfig.o
tests_testDiskIO_LDFLAGS = $(LIBADD_DL)
tests_testDiskIO_DEPENDENCIES = $(top_builddir)/lib/libmiscutil.a \
@DISK_LIBS@ \
+ @AIOLIB@ \
$(SWAP_TEST_DS) \
@SQUID_CPPUNIT_LA@
@@ -1940,6 +1943,7 @@
$(COMMON_LIBS) \
@REPL_OBJS@ \
@DISK_LIBS@ \
+ @AIOLIB@ \
-L../lib -lmiscutil \
acl/libapi.la \
@SQUID_CPPUNIT_LIBS@
@@ -1947,6 +1951,7 @@
$(top_builddir)/lib/libmiscutil.a \
repl_modules.o \
@DISK_LIBS@ \
+ @AIOLIB@ \
$(COMMON_LIBS) \
@REPL_OBJS@ \
@SQUID_CPPUNIT_LA@
diff -u -r -N squid-3.1.0.13/src/Makefile.in squid-3.1.0.14/src/Makefile.in
--- squid-3.1.0.13/src/Makefile.in 2009-08-05 01:32:36.000000000 +1200
+++ squid-3.1.0.14/src/Makefile.in 2009-09-27 15:28:55.000000000 +1200
@@ -1329,6 +1329,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
@@ -1765,9 +1766,9 @@
squid_LDADD = $(COMMON_LIBS) icmp/libicmp.la icmp/libicmp-core.la \
../compat/libcompat.la -L../lib @XTRA_OBJS@ @DISK_LINKOBJS@ \
- @REPL_OBJS@ @DISK_LIBS@ @CRYPTLIB@ @REGEXLIB@ @SNMPLIB@ \
- ${ADAPTATION_LIBS} ${ESI_LIBS} @SSLLIB@ -lmiscutil @XTRA_LIBS@ \
- @EPOLL_LIBS@ @MINGW_LIBS@ $(am__append_4)
+ @REPL_OBJS@ @DISK_LIBS@ @AIOLIB@ @CRYPTLIB@ @REGEXLIB@ \
+ @SNMPLIB@ ${ADAPTATION_LIBS} ${ESI_LIBS} @SSLLIB@ -lmiscutil \
+ @XTRA_LIBS@ @EPOLL_LIBS@ @MINGW_LIBS@ $(am__append_4)
squid_DEPENDENCIES = $(top_builddir)/lib/libmiscutil.a \
@DISK_LIBS@ \
@DISK_LINKOBJS@ \
@@ -2281,12 +2282,14 @@
tests_testDiskIO_LDADD = \
$(SWAP_TEST_LDADD) \
@DISK_LIBS@ \
+ @AIOLIB@ \
$(COMMON_LIBS) \
SquidConfig.o
tests_testDiskIO_LDFLAGS = $(LIBADD_DL)
tests_testDiskIO_DEPENDENCIES = $(top_builddir)/lib/libmiscutil.a \
@DISK_LIBS@ \
+ @AIOLIB@ \
$(SWAP_TEST_DS) \
@SQUID_CPPUNIT_LA@
@@ -3029,6 +3032,7 @@
$(COMMON_LIBS) \
@REPL_OBJS@ \
@DISK_LIBS@ \
+ @AIOLIB@ \
-L../lib -lmiscutil \
acl/libapi.la \
@SQUID_CPPUNIT_LIBS@
@@ -3037,6 +3041,7 @@
$(top_builddir)/lib/libmiscutil.a \
repl_modules.o \
@DISK_LIBS@ \
+ @AIOLIB@ \
$(COMMON_LIBS) \
@REPL_OBJS@ \
@SQUID_CPPUNIT_LA@
@@ -4527,9 +4532,9 @@
$(INSTALL_DATA) squid.conf.documented $(DESTDIR)$(DEFAULT_CONFIG_FILE).documented; \
$(mkinstalldirs) $(DESTDIR)$(DEFAULT_LOG_PREFIX)
-uninstall-local:
- @$(SHELL) $(top_srcdir)/scripts/remove-cfg.sh "$(RM)" $(DESTDIR)$(DEFAULT_MIME_TABLE)
- @$(SHELL) $(top_srcdir)/scripts/remove-cfg.sh "$(RM)" $(DESTDIR)$(DEFAULT_CONFIG_FILE)
+uninstall-local: squid.conf.default
+ @$(SHELL) $(top_srcdir)/scripts/remove-cfg.sh "$(RM)" $(DESTDIR)$(DEFAULT_MIME_TABLE) $(srcdir)/mime.conf.default
+ @$(SHELL) $(top_srcdir)/scripts/remove-cfg.sh "$(RM)" $(DESTDIR)$(DEFAULT_CONFIG_FILE) squid.conf.default
test_tools.cc: $(top_srcdir)/test-suite/test_tools.cc
cp $(top_srcdir)/test-suite/test_tools.cc .
diff -u -r -N squid-3.1.0.13/src/neighbors.cc squid-3.1.0.14/src/neighbors.cc
--- squid-3.1.0.13/src/neighbors.cc 2009-08-05 01:32:17.000000000 +1200
+++ squid-3.1.0.14/src/neighbors.cc 2009-09-27 15:28:34.000000000 +1200
@@ -1624,6 +1624,9 @@
if (p->options.allow_miss)
storeAppendPrintf(sentry, " allow-miss");
+ if (p->options.no_tproxy)
+ storeAppendPrintf(sentry, " no-tproxy");
+
if (p->max_conn > 0)
storeAppendPrintf(sentry, " max-conn=%d", p->max_conn);
diff -u -r -N squid-3.1.0.13/src/ProtoPort.h squid-3.1.0.14/src/ProtoPort.h
--- squid-3.1.0.13/src/ProtoPort.h 2009-08-05 01:32:13.000000000 +1200
+++ squid-3.1.0.14/src/ProtoPort.h 2009-09-27 15:28:29.000000000 +1200
@@ -24,6 +24,7 @@
unsigned int allow_direct:1; /**< Allow direct forwarding in accelerator mode */
unsigned int vhost:1; /**< uses host header */
unsigned int sslBump:1; /**< intercepts CONNECT requests */
+ unsigned int ignore_cc:1; /**< Ignore request Cache-Control directives */
int vport; /* virtual port support, -1 for dynamic, >0 static*/
bool connection_auth_disabled; /* Don't support connection oriented auth */
diff -u -r -N squid-3.1.0.13/src/protos.h squid-3.1.0.14/src/protos.h
--- squid-3.1.0.13/src/protos.h 2009-08-05 01:32:17.000000000 +1200
+++ squid-3.1.0.14/src/protos.h 2009-09-27 15:28:34.000000000 +1200
@@ -748,23 +748,23 @@
SQUIDCEXTERN int WIN32_pipe(int[2]);
SQUIDCEXTERN int WIN32_getrusage(int, struct rusage *);
- SQUIDCEXTERN void WIN32_ExceptionHandlerInit(void);
+SQUIDCEXTERN void WIN32_ExceptionHandlerInit(void);
- SQUIDCEXTERN int Win32__WSAFDIsSet(int fd, fd_set* set);
- SQUIDCEXTERN DWORD WIN32_IpAddrChangeMonitorInit();
+SQUIDCEXTERN int Win32__WSAFDIsSet(int fd, fd_set* set);
+SQUIDCEXTERN DWORD WIN32_IpAddrChangeMonitorInit();
#endif
- /* external_acl.c */
- class external_acl;
- SQUIDCEXTERN void parse_externalAclHelper(external_acl **);
+/* external_acl.c */
+class external_acl;
+ SQUIDCEXTERN void parse_externalAclHelper(external_acl **);
- SQUIDCEXTERN void dump_externalAclHelper(StoreEntry * sentry, const char *name, const external_acl *);
+ SQUIDCEXTERN void dump_externalAclHelper(StoreEntry * sentry, const char *name, const external_acl *);
- SQUIDCEXTERN void free_externalAclHelper(external_acl **);
+ SQUIDCEXTERN void free_externalAclHelper(external_acl **);
- typedef void EAH(void *data, void *result);
- class ACLChecklist;
+ typedef void EAH(void *data, void *result);
+ class ACLChecklist;
SQUIDCEXTERN void externalAclLookup(ACLChecklist * ch, void *acl_data, EAH * handler, void *data);
SQUIDCEXTERN void externalAclInit(void);
diff -u -r -N squid-3.1.0.13/src/refresh.cc squid-3.1.0.14/src/refresh.cc
--- squid-3.1.0.13/src/refresh.cc 2009-08-05 01:32:17.000000000 +1200
+++ squid-3.1.0.14/src/refresh.cc 2009-09-27 15:28:34.000000000 +1200
@@ -281,7 +281,7 @@
}
/* request-specific checks */
- if (request) {
+ if (request && !request->flags.ignore_cc) {
HttpHdrCc *cc = request->cache_control;
if (request->flags.ims && (R->flags.refresh_ims || Config.onoff.refresh_all_ims)) {
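
The new ignore_cc request flag (set from the matching http_port option added elsewhere in this patch) makes refreshCheck() skip the client's Cache-Control directives entirely. The sketch below is a deliberately simplified illustration of that guard, not the real refresh logic; RequestFlags, CacheControl, and mustRevalidate() are hypothetical stand-ins.

#include <cstdio>

// Request Cache-Control only influences freshness when the port did not set
// ignore_cc (the patched check: request && !request->flags.ignore_cc).
struct RequestFlags { bool ignore_cc = false; };
struct CacheControl { bool has_max_age; int max_age; };

static bool mustRevalidate(const RequestFlags &flags, const CacheControl *cc, int age)
{
    if (flags.ignore_cc || !cc)
        return false;               // client CC ignored; refresh_pattern rules decide
    return cc->has_max_age && age > cc->max_age;
}

int main()
{
    CacheControl cc = { true, 60 };
    RequestFlags normal, ignoring;
    ignoring.ignore_cc = true;
    printf("%d\n", mustRevalidate(normal, &cc, 120));    // 1: max-age=60 exceeded
    printf("%d\n", mustRevalidate(ignoring, &cc, 120));  // 0: client CC ignored
}
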
diff -u -r -N squid-3.1.0.13/src/repl/Makefile.in squid-3.1.0.14/src/repl/Makefile.in
--- squid-3.1.0.13/src/repl/Makefile.in 2009-08-05 01:32:38.000000000 +1200
+++ squid-3.1.0.14/src/repl/Makefile.in 2009-09-27 15:28:57.000000000 +1200
@@ -89,6 +89,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/src/Server.cc squid-3.1.0.14/src/Server.cc
--- squid-3.1.0.13/src/Server.cc 2009-08-05 01:32:13.000000000 +1200
+++ squid-3.1.0.14/src/Server.cc 2009-09-27 15:28:29.000000000 +1200
@@ -176,8 +176,8 @@
HttpRequest *r = originalRequest();
r->hier.total_response_time = r->hier.first_conn_start.tv_sec ?
- tvSubMsec(r->hier.first_conn_start, current_time) : -1;
-
+ tvSubMsec(r->hier.first_conn_start, current_time) : -1;
+
if (requestBodySource != NULL)
stopConsumingFrom(requestBodySource);
@@ -529,7 +529,7 @@
}
adaptedHeadSource = initiateAdaptation(
- new Adaptation::Iterator(this, vrep, cause, group));
+ new Adaptation::Iterator(this, vrep, cause, group));
startedAdaptation = adaptedHeadSource != NULL;
Must(startedAdaptation);
}
diff -u -r -N squid-3.1.0.13/src/Server.h squid-3.1.0.14/src/Server.h
--- squid-3.1.0.13/src/Server.h 2009-08-05 01:32:13.000000000 +1200
+++ squid-3.1.0.14/src/Server.h 2009-09-27 15:28:29.000000000 +1200
@@ -56,10 +56,10 @@
*/
class ServerStateData:
#if USE_ADAPTATION
- public Adaptation::Initiator,
- public BodyProducer,
+ public Adaptation::Initiator,
+ public BodyProducer,
#endif
- public BodyConsumer
+ public BodyConsumer
{
public:
diff -u -r -N squid-3.1.0.13/src/snmp_core.cc squid-3.1.0.14/src/snmp_core.cc
--- squid-3.1.0.13/src/snmp_core.cc 2009-08-05 01:32:18.000000000 +1200
+++ squid-3.1.0.14/src/snmp_core.cc 2009-09-27 15:28:34.000000000 +1200
@@ -577,7 +577,7 @@
static struct snmp_pdu *
- snmpAgentResponse(struct snmp_pdu *PDU) {
+snmpAgentResponse(struct snmp_pdu *PDU) {
struct snmp_pdu *Answer = NULL;
diff -u -r -N squid-3.1.0.13/src/stat.cc squid-3.1.0.14/src/stat.cc
--- squid-3.1.0.13/src/stat.cc 2009-08-05 01:32:18.000000000 +1200
+++ squid-3.1.0.14/src/stat.cc 2009-09-27 15:28:34.000000000 +1200
@@ -1536,7 +1536,7 @@
}
extern double
- statRequestHitRatio(int minutes)
+statRequestHitRatio(int minutes)
{
assert(minutes < N_COUNT_HIST);
return dpercent(CountHist[0].client_http.hits -
@@ -1546,7 +1546,7 @@
}
extern double
- statRequestHitMemoryRatio(int minutes)
+statRequestHitMemoryRatio(int minutes)
{
assert(minutes < N_COUNT_HIST);
return dpercent(CountHist[0].client_http.mem_hits -
@@ -1556,7 +1556,7 @@
}
extern double
- statRequestHitDiskRatio(int minutes)
+statRequestHitDiskRatio(int minutes)
{
assert(minutes < N_COUNT_HIST);
return dpercent(CountHist[0].client_http.disk_hits -
@@ -1566,7 +1566,7 @@
}
extern double
- statByteHitRatio(int minutes)
+statByteHitRatio(int minutes)
{
size_t s;
size_t c;
diff -u -r -N squid-3.1.0.13/src/store_dir.cc squid-3.1.0.14/src/store_dir.cc
--- squid-3.1.0.13/src/store_dir.cc 2009-08-05 01:32:18.000000000 +1200
+++ squid-3.1.0.14/src/store_dir.cc 2009-09-27 15:28:34.000000000 +1200
@@ -189,6 +189,10 @@
int load;
RefCount sd;
+ ssize_t objsize = e->objectLen();
+ if (objsize != -1)
+ objsize += e->mem_obj->swap_hdr_sz;
+
for (i = 0; i <= Config.cacheSwap.n_configured; i++) {
if (++dirn >= Config.cacheSwap.n_configured)
dirn = 0;
@@ -201,7 +205,7 @@
if (sd->cur_size > sd->max_size)
continue;
- if (!sd->objectSizeIsAcceptable(e->objectLen()))
+ if (!sd->objectSizeIsAcceptable(objsize))
continue;
/* check for error or overload condition */
diff -u -r -N squid-3.1.0.13/src/Store.h squid-3.1.0.14/src/Store.h
--- squid-3.1.0.13/src/Store.h 2009-08-05 01:32:15.000000000 +1200
+++ squid-3.1.0.14/src/Store.h 2009-09-27 15:28:32.000000000 +1200
@@ -207,7 +207,7 @@
bool isEmpty () const {return true;}
- virtual size_t bytesWanted(Range const aRange) const { assert (aRange.size());return aRange.end - 1;}
+ virtual size_t bytesWanted(Range const aRange) const { assert (aRange.size()); return aRange.end - 1;}
void operator delete(void *address);
void complete() {}
diff -u -r -N squid-3.1.0.13/src/StoreHashIndex.h squid-3.1.0.14/src/StoreHashIndex.h
--- squid-3.1.0.13/src/StoreHashIndex.h 2009-08-05 01:32:15.000000000 +1200
+++ squid-3.1.0.14/src/StoreHashIndex.h 2009-09-27 15:28:32.000000000 +1200
@@ -108,13 +108,15 @@
RefCount sd;
private:
- CBDATA_CLASS2(StoreSearchHashIndex);
void copyBucket();
void (*callback)(void *cbdata);
void *cbdata;
bool _done;
int bucket;
Vector entries;
+
+ // keep this last. it plays with private/public
+ CBDATA_CLASS2(StoreSearchHashIndex);
};
#endif /* SQUID_STOREHASHINDEX_H */
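
CBDATA_CLASS2() is moved to the end of StoreSearchHashIndex because the macro injects its own access specifiers ("it plays with private/public"), so anything declared after it silently lands in whatever section the macro ends with. The demo below uses a made-up DEMO_CBDATA_CLASS macro, not the real CBDATA_CLASS2 expansion, purely to show why such macros are safest as the last thing in a class.

#include <cstddef>
#include <cstdio>
#include <new>

// Hypothetical macro in the spirit of CBDATA_CLASS2(): it declares members in
// its own public:/private: sections and leaves the class in 'private:'.
#define DEMO_CBDATA_CLASS(type) \
    public: \
        void *operator new(std::size_t sz) { return ::operator new(sz); } \
        void operator delete(void *p) { ::operator delete(p); } \
    private: \
        static int demo_cbdata_##type

class Demo
{
public:
    void hello() const { printf("public members stay public\n"); }

private:
    int counter;

    // keep this last: anything declared after the macro would silently end up
    // in the macro's final 'private:' section.
    DEMO_CBDATA_CLASS(Demo);
};

int Demo::demo_cbdata_Demo = 0;

int main()
{
    Demo *d = new Demo;     // uses the macro-provided operator new
    d->hello();
    delete d;               // and the matching operator delete
}
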
diff -u -r -N squid-3.1.0.13/src/store_swapout.cc squid-3.1.0.14/src/store_swapout.cc
--- squid-3.1.0.13/src/store_swapout.cc 2009-08-05 01:32:18.000000000 +1200
+++ squid-3.1.0.14/src/store_swapout.cc 2009-09-27 15:28:34.000000000 +1200
@@ -63,6 +63,15 @@
/* If we start swapping out objects with OutOfBand Metadata,
* then this code needs changing
*/
+
+ /* TODO: make some sort of data,size refcounted immutable buffer
+ * and stop fooling ourselves with "const char*" buffers.
+ */
+
+ // Create metadata now, possibly in vain: storeCreate needs swap_hdr_sz.
+ const char *buf = e->getSerialisedMetaData ();
+ assert(buf);
+
/* Create the swap file */
generic_cbdata *c = new generic_cbdata(e);
sio = storeCreate(e, storeSwapOutFileNotify, storeSwapOutFileClosed, c);
@@ -70,6 +79,7 @@
if (sio == NULL) {
e->swap_status = SWAPOUT_NONE;
delete c;
+ xfree((char*)buf);
storeLog(STORE_LOG_SWAPOUTFAIL, e);
return;
}
@@ -85,16 +95,6 @@
e->swap_dirn = mem->swapout.sio->swap_dirn;
/* write out the swap metadata */
- /* TODO: make some sort of data,size refcounted immutable buffer
- * for use by this sort of function.
- */
- char const *buf = e->getSerialisedMetaData ();
-
- /* If we start swapping out with out of band metadata, this assert
- * will catch it - this code needs to be adjusted if that happens
- */
- assert (buf);
-
storeIOWrite(mem->swapout.sio, buf, mem->swap_hdr_sz, 0, xfree);
}
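
store_swapout.cc now serialises the swap metadata before calling storeCreate(), because serialisation is what fills in swap_hdr_sz (which the store_dir.cc change above consults when sizing the object), and the buffer is freed again if the swap file cannot be created. The sketch below shows the same produce-the-side-effect-first, free-on-failure shape, with std::unique_ptr standing in for the manual xfree() so the early-return cleanup cannot be forgotten; serialiseMetaData() and createSwapFile() are stand-ins, and the unique_ptr is an alternative expression of the idea, not the patch's code.

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <memory>

// Stand-ins: the "serialiser" reports the header size as a side effect, and
// the "creator" needs that size before any data is written.
static char *serialiseMetaData(size_t &swap_hdr_sz)
{
    const char blob[] = "meta";
    swap_hdr_sz = sizeof(blob);
    char *buf = static_cast<char *>(malloc(swap_hdr_sz));
    memcpy(buf, blob, swap_hdr_sz);
    return buf;
}

static bool createSwapFile(size_t swap_hdr_sz)
{
    return swap_hdr_sz > 0;     // pretend creation can fail
}

int main()
{
    size_t swap_hdr_sz = 0;

    // Serialise first, possibly in vain: createSwapFile() needs swap_hdr_sz.
    std::unique_ptr<char, decltype(&free)> buf(serialiseMetaData(swap_hdr_sz), &free);

    if (!createSwapFile(swap_hdr_sz)) {
        // unique_ptr frees the metadata automatically on this early return,
        // which is what the patch does manually with xfree().
        return 1;
    }

    printf("writing %zu bytes of swap metadata\n", swap_hdr_sz);
    // ownership of the buffer would be handed to the write with buf.release()
    return 0;
}
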
diff -u -r -N squid-3.1.0.13/src/structs.h squid-3.1.0.14/src/structs.h
--- squid-3.1.0.13/src/structs.h 2009-08-05 01:32:18.000000000 +1200
+++ squid-3.1.0.14/src/structs.h 2009-09-27 15:28:34.000000000 +1200
@@ -265,7 +265,7 @@
customlog *accesslogs;
-#if ICAP_CLIENT
+#if ICAP_CLIENT
customlog *icaplogs;
#endif
@@ -879,6 +879,7 @@
unsigned int userhash:1;
unsigned int sourcehash:1;
unsigned int originserver:1;
+ unsigned int no_tproxy:1;
} options;
int weight;
@@ -999,7 +1000,7 @@
struct request_flags {
- request_flags(): range(0),nocache(0),ims(0),auth(0),cachable(0),hierarchical(0),loopdetect(0),proxy_keepalive(0),proxying(0),refresh(0),redirected(0),need_validation(0),accelerated(0),intercepted(0),spoof_client_ip(0),internal(0),internalclient(0),must_keepalive(0),destinationIPLookedUp_(0) {
+ request_flags(): range(0),nocache(0),ims(0),auth(0),cachable(0),hierarchical(0),loopdetect(0),proxy_keepalive(0),proxying(0),refresh(0),redirected(0),need_validation(0),accelerated(0),ignore_cc(0),intercepted(0),spoof_client_ip(0),internal(0),internalclient(0),must_keepalive(0),destinationIPLookedUp_(0) {
#if HTTP_VIOLATIONS
nocache_hack = 0;
#endif
@@ -1025,6 +1026,7 @@
unsigned int nocache_hack:1; /* for changing/ignoring no-cache requests */
#endif
unsigned int accelerated:1;
+ unsigned int ignore_cc:1;
unsigned int intercepted:1; /**< transparently intercepted request */
unsigned int spoof_client_ip:1; /**< spoof client ip if possible */
unsigned int internal:1;
diff -u -r -N squid-3.1.0.13/src/tests/stub_store.cc squid-3.1.0.14/src/tests/stub_store.cc
--- squid-3.1.0.13/src/tests/stub_store.cc 2009-08-05 01:32:18.000000000 +1200
+++ squid-3.1.0.14/src/tests/stub_store.cc 2009-09-27 15:28:34.000000000 +1200
@@ -38,13 +38,13 @@
StorePointer Store::CurrentRoot = NULL;
extern "C" void
- storeAppendPrintf(StoreEntry * e, const char *fmt,...)
+storeAppendPrintf(StoreEntry * e, const char *fmt,...)
{
fatal("storeAppendPrintf: Not implemented");
}
extern "C" void
- storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
+storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
{
fatal("storeAppendVPrintf: Not implemented");
}
diff -u -r -N squid-3.1.0.13/src/tests/testHttpReply.cc squid-3.1.0.14/src/tests/testHttpReply.cc
--- squid-3.1.0.13/src/tests/testHttpReply.cc 2009-08-05 01:32:18.000000000 +1200
+++ squid-3.1.0.14/src/tests/testHttpReply.cc 2009-09-27 15:28:34.000000000 +1200
@@ -129,6 +129,49 @@
input.reset();
error = HTTP_STATUS_NONE;
+ // incomplete (short) status lines... not sane (yet), but no error either.
+ input.append("H", 1);
+ hdr_len = headersEnd(input.content(),input.contentSize());
+ CPPUNIT_ASSERT(!engine.sanityCheckStartLine(&input, hdr_len, &error) );
+ CPPUNIT_ASSERT_EQUAL(error, HTTP_STATUS_NONE);
+ input.reset();
+ error = HTTP_STATUS_NONE;
+
+ input.append("HTTP/", 5);
+ hdr_len = headersEnd(input.content(),input.contentSize());
+ CPPUNIT_ASSERT(!engine.sanityCheckStartLine(&input, hdr_len, &error) );
+ CPPUNIT_ASSERT_EQUAL(error, HTTP_STATUS_NONE);
+ input.reset();
+ error = HTTP_STATUS_NONE;
+
+ input.append("HTTP/1", 6);
+ hdr_len = headersEnd(input.content(),input.contentSize());
+ CPPUNIT_ASSERT(!engine.sanityCheckStartLine(&input, hdr_len, &error) );
+ CPPUNIT_ASSERT_EQUAL(error, HTTP_STATUS_NONE);
+ input.reset();
+ error = HTTP_STATUS_NONE;
+
+ input.append("HTTP/1.1", 8);
+ hdr_len = headersEnd(input.content(),input.contentSize());
+ CPPUNIT_ASSERT(!engine.sanityCheckStartLine(&input, hdr_len, &error) );
+ CPPUNIT_ASSERT_EQUAL(error, HTTP_STATUS_NONE);
+ input.reset();
+ error = HTTP_STATUS_NONE;
+
+ input.append("HTTP/1.1 ", 9); /* real case seen */
+ hdr_len = headersEnd(input.content(),input.contentSize());
+ CPPUNIT_ASSERT(engine.sanityCheckStartLine(&input, hdr_len, &error) );
+ CPPUNIT_ASSERT_EQUAL(error, HTTP_STATUS_NONE);
+ input.reset();
+ error = HTTP_STATUS_NONE;
+
+ input.append("HTTP/1.1 20", 14);
+ hdr_len = headersEnd(input.content(),input.contentSize());
+ CPPUNIT_ASSERT(engine.sanityCheckStartLine(&input, hdr_len, &error) );
+ CPPUNIT_ASSERT_EQUAL(error, HTTP_STATUS_NONE);
+ input.reset();
+ error = HTTP_STATUS_NONE;
+
// status line with no status
input.append("HTTP/1.1 \n\n", 11);
hdr_len = headersEnd(input.content(),input.contentSize());
@@ -176,4 +219,5 @@
CPPUNIT_ASSERT_EQUAL(error, HTTP_INVALID_HEADER);
input.reset();
error = HTTP_STATUS_NONE;
+
}
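
The new test cases assert that a truncated status line is rejected as "not sane yet" without setting an error, so the caller can simply wait for more data. A standalone sketch of that contract with a hypothetical helper (not Squid's actual parser):

#include <cassert>
#include <string>

// Hypothetical stand-in for the behaviour the tests above exercise:
// prefixes shorter than "HTTP/1.1 " are not yet sane, but not an error either.
static bool startLineLooksSane(const std::string &buf, bool &error)
{
    error = false;
    const std::string proto = "HTTP/1.1 ";
    if (buf.size() < proto.size())          // still incomplete
        return false;                       // not sane, not an error
    return buf.compare(0, 5, "HTTP/") == 0; // enough data to judge
}

int main()
{
    bool error = false;
    assert(!startLineLooksSane("H", error) && !error);
    assert(!startLineLooksSane("HTTP/1.1", error) && !error);
    assert(startLineLooksSane("HTTP/1.1 ", error) && !error);
    assert(startLineLooksSane("HTTP/1.1 20", error) && !error);
    return 0;
}
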
diff -u -r -N squid-3.1.0.13/src/TextException.cc squid-3.1.0.14/src/TextException.cc
--- squid-3.1.0.13/src/TextException.cc 2009-08-05 01:32:15.000000000 +1200
+++ squid-3.1.0.14/src/TextException.cc 2009-09-27 15:28:32.000000000 +1200
@@ -3,13 +3,13 @@
TextException::TextException()
{
- message=NULL;
- theFileName=NULL;
- theLineNo=0;
+ message=NULL;
+ theFileName=NULL;
+ theLineNo=0;
}
TextException::TextException(const TextException& right) :
- message((right.message?xstrdup(right.message):NULL)), theFileName(right.theFileName), theLineNo(right.theLineNo)
+ message((right.message?xstrdup(right.message):NULL)), theFileName(right.theFileName), theLineNo(right.theLineNo)
{
}
@@ -19,18 +19,18 @@
TextException::~TextException() throw()
{
- if(message) xfree(message);
+ if (message) xfree(message);
}
TextException& TextException::operator=(const TextException &right)
{
- if(this==&right) return *this;
- if(message) xfree(message);
+ if (this==&right) return *this;
+ if (message) xfree(message);
message=(right.message?xstrdup(right.message):NULL);
theFileName=right.theFileName;
theLineNo=right.theLineNo;
- return *this;
+ return *this;
}
const char *TextException::what() const throw()
diff -u -r -N squid-3.1.0.13/src/TextException.h squid-3.1.0.14/src/TextException.h
--- squid-3.1.0.13/src/TextException.h 2009-08-05 01:32:15.000000000 +1200
+++ squid-3.1.0.14/src/TextException.h 2009-09-27 15:28:32.000000000 +1200
@@ -20,7 +20,7 @@
virtual const char *what() const throw();
- TextException& operator=(const TextException &right);
+ TextException& operator=(const TextException &right);
public:
char *message; // read-only
diff -u -r -N squid-3.1.0.13/src/tools.cc squid-3.1.0.14/src/tools.cc
--- squid-3.1.0.13/src/tools.cc 2009-08-05 01:32:18.000000000 +1200
+++ squid-3.1.0.14/src/tools.cc 2009-09-27 15:28:35.000000000 +1200
@@ -41,17 +41,6 @@
#include "SquidTime.h"
#include "ip/IpIntercept.h"
-#ifdef _SQUID_LINUX_
-#if HAVE_SYS_CAPABILITY_H
-#undef _POSIX_SOURCE
-/* Ugly glue to get around linux header madness colliding with glibc */
-#define _LINUX_TYPES_H
-#define _LINUX_FS_H
-typedef uint32_t __u32;
-#include <sys/capability.h>
-#endif
-#endif
-
#if HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#endif
diff -u -r -N squid-3.1.0.13/src/wccp2.cc squid-3.1.0.14/src/wccp2.cc
--- squid-3.1.0.13/src/wccp2.cc 2009-08-05 01:32:18.000000000 +1200
+++ squid-3.1.0.14/src/wccp2.cc 2009-09-27 15:28:35.000000000 +1200
@@ -551,7 +551,7 @@
}
static struct wccp2_service_list_t *
- wccp2_get_service_by_id(int service, int service_id) {
+wccp2_get_service_by_id(int service, int service_id) {
struct wccp2_service_list_t *p;
diff -u -r -N squid-3.1.0.13/test-suite/Makefile.in squid-3.1.0.14/test-suite/Makefile.in
--- squid-3.1.0.13/test-suite/Makefile.in 2009-08-05 01:32:39.000000000 +1200
+++ squid-3.1.0.14/test-suite/Makefile.in 2009-09-27 15:28:57.000000000 +1200
@@ -165,6 +165,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/test-suite/MemPoolTest.cc squid-3.1.0.14/test-suite/MemPoolTest.cc
--- squid-3.1.0.13/test-suite/MemPoolTest.cc 2009-08-05 01:32:18.000000000 +1200
+++ squid-3.1.0.14/test-suite/MemPoolTest.cc 2009-09-27 15:28:35.000000000 +1200
@@ -33,6 +33,9 @@
*/
#include "squid.h"
+
+#if !DISABLE_POOLS
+
#include "MemPool.h"
#if HAVE_IOSTREAM
@@ -79,11 +82,15 @@
delete Pool;
}
+#endif /* DISABLE_POOLS */
+
int
main (int argc, char **argv)
{
+#if !DISABLE_POOLS
MemPoolTest aTest;
aTest.run();
+#endif
return 0;
}
diff -u -r -N squid-3.1.0.13/test-suite/tcp-banger2.c squid-3.1.0.14/test-suite/tcp-banger2.c
--- squid-3.1.0.13/test-suite/tcp-banger2.c 2009-08-05 01:32:18.000000000 +1200
+++ squid-3.1.0.14/test-suite/tcp-banger2.c 2009-09-27 15:28:35.000000000 +1200
@@ -298,7 +298,7 @@
}
struct _request *
- request(char *urlin) {
+request(char *urlin) {
int s = -1, f = -1;
char buf[4096];
char msg[8192];
diff -u -r -N squid-3.1.0.13/test-suite/testheaders.sh squid-3.1.0.14/test-suite/testheaders.sh
--- squid-3.1.0.13/test-suite/testheaders.sh 2009-08-05 01:32:18.000000000 +1200
+++ squid-3.1.0.14/test-suite/testheaders.sh 2009-09-27 15:28:35.000000000 +1200
@@ -18,27 +18,29 @@
for f in `cd ${dir} && ls -1 *.h 2>/dev/null`; do
echo -n "Testing ${dir}/${f} ..."
- if [[ "${f}" == "cf_gen_defines.h" || "${f}" == "cf_parser.h" ]]; then
+ if [ "${f}" = "cf_gen_defines.h" -o "${f}" = "cf_parser.h" ]; then
echo " IGNORED!"
continue
fi
- if [ ${dir}/${f} -nt ./testHeaderDeps_${f/.h/}.o ]; then
+ hdr=`echo "${f}" | sed s/.h//`
+ if [ ! -e ./testHeaderDeps_${hdr}.o -o ${dir}/${f} -nt ./testHeaderDeps_${hdr}.o ]
+ then
( echo "/* This file is AUTOMATICALLY GENERATED. DO NOT ALTER IT */"
echo "#include \"${dir}/${f}\" "
echo "int main( int argc, char* argv[] ) { return 0; } "
- ) >./testHeaderDeps_${f/.h/}.cc
+ ) >./testHeaderDeps_${hdr}.cc
# run compile test on the new file.
- # DEBUG: echo "TRY: ${cc} -o testHeaderDeps.o ./testHeaderDeps_${f/.h/}.cc"
- ${cc} -o testHeaderDeps_${f/.h/}.o ./testHeaderDeps_${f/.h/}.cc
- rm ./testHeaderDeps_${f/.h/}.cc
+ # DEBUG: echo "TRY: ${cc} -o testHeaderDeps.o ./testHeaderDeps_${hdr}.cc"
+ ${cc} -o testHeaderDeps_${hdr}.o ./testHeaderDeps_${hdr}.cc
+ rm ./testHeaderDeps_${hdr}.cc
fi
- if [ ! -f testHeaderDeps_${f/.h/}.o ]; then
+ if [ ! -f testHeaderDeps_${hdr}.o ]; then
rm testHeaders
exit 1
fi
echo "OK."
# unit-tests require an app to run.
# our most-recent object suits this purpose
- cp ./testHeaderDeps_${f/.h/}.o ./testHeaders
+ cp ./testHeaderDeps_${hdr}.o ./testHeaders
done
diff -u -r -N squid-3.1.0.13/tools/cachemgr.cc squid-3.1.0.14/tools/cachemgr.cc
--- squid-3.1.0.13/tools/cachemgr.cc 2009-08-05 01:32:18.000000000 +1200
+++ squid-3.1.0.14/tools/cachemgr.cc 2009-09-27 15:28:35.000000000 +1200
@@ -123,8 +123,7 @@
#include
#endif
#if HAVE_FNMATCH_H
-extern "C"
-{
+extern "C" {
#include <fnmatch.h>
}
#endif
@@ -880,7 +879,8 @@
}
int
-main(int argc, char *argv[]) {
+main(int argc, char *argv[])
+{
char *s;
cachemgr_request *req;
@@ -912,7 +912,8 @@
}
static char *
-read_post_request(void) {
+read_post_request(void)
+{
char *s;
char *buf;
int len;
@@ -940,7 +941,8 @@
}
static char *
-read_get_request(void) {
+read_get_request(void)
+{
char *s;
if ((s = getenv("QUERY_STRING")) == NULL)
@@ -950,7 +952,8 @@
}
static cachemgr_request *
-read_request(void) {
+read_request(void)
+{
char *buf;
cachemgr_request *req;
@@ -1029,7 +1032,8 @@
* Currently no powerful encryption is used.
*/
static void
-make_pub_auth(cachemgr_request * req) {
+make_pub_auth(cachemgr_request * req)
+{
static char buf[1024];
safe_free(req->pub_auth);
debug(3) fprintf(stderr, "cmgr: encoding for pub...\n");
@@ -1052,7 +1056,8 @@
}
static void
-decode_pub_auth(cachemgr_request * req) {
+decode_pub_auth(cachemgr_request * req)
+{
char *buf;
const char *host_name;
const char *time_str;
@@ -1110,13 +1115,15 @@
}
static void
-reset_auth(cachemgr_request * req) {
+reset_auth(cachemgr_request * req)
+{
safe_free(req->passwd);
safe_free(req->pub_auth);
}
static const char *
-make_auth_header(const cachemgr_request * req) {
+make_auth_header(const cachemgr_request * req)
+{
static char buf[1024];
size_t stringLength = 0;
const char *str64;
@@ -1141,7 +1148,8 @@
}
static int
-check_target_acl(const char *hostname, int port) {
+check_target_acl(const char *hostname, int port)
+{
char config_line[BUFSIZ];
FILE *fp = NULL;
int ret = 0;
diff -u -r -N squid-3.1.0.13/tools/Makefile.in squid-3.1.0.14/tools/Makefile.in
--- squid-3.1.0.13/tools/Makefile.in 2009-08-05 01:32:39.000000000 +1200
+++ squid-3.1.0.14/tools/Makefile.in 2009-09-27 15:28:57.000000000 +1200
@@ -105,6 +105,7 @@
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ADAPTATION_LIBS = @ADAPTATION_LIBS@
+AIOLIB = @AIOLIB@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
AR = @AR@
diff -u -r -N squid-3.1.0.13/tools/squidclient.cc squid-3.1.0.14/tools/squidclient.cc
--- squid-3.1.0.13/tools/squidclient.cc 2009-08-05 01:32:18.000000000 +1200
+++ squid-3.1.0.14/tools/squidclient.cc 2009-09-27 15:28:35.000000000 +1200
@@ -436,16 +436,16 @@
/* HTTP/1.0 may need keep-alive */
if (strcmp(version, "1.0") == 0) {
if (keep_alive) {
- if (strchr(url, ':'))
+ if (strchr(url, ':')) {
snprintf(buf, BUFSIZ, "Proxy-Connection: keep-alive\r\n");
- else
+ strcat(msg, buf);
+ } else
strcat(msg, "Connection: keep-alive\r\n");
}
} else {
if (!keep_alive)
strcat(msg, "Connection: close\r\n");
}
- strcat(msg, buf);
strcat(msg, extra_hdrs);
strcat(msg, "\r\n");
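
The hunk above fixes a keep-alive header bug: the old code appended the scratch buffer to msg unconditionally at the end, even on paths that never (re)formatted it, so stale contents could be attached. The fix appends buf only in the branch that fills it. A small illustration of that class of bug, assuming simplified names (msg, buf, viaProxy) for the sketch:

#include <cstdio>
#include <cstring>

int main()
{
    char msg[256] = "GET / HTTP/1.0\r\n";
    char buf[64] = "";
    bool viaProxy = false;

    if (viaProxy) {
        std::snprintf(buf, sizeof(buf), "Proxy-Connection: keep-alive\r\n");
        std::strcat(msg, buf);          // append only where it is formatted
    } else {
        std::strcat(msg, "Connection: keep-alive\r\n");
    }
    // The pre-patch code did an unconditional strcat(msg, buf) about here,
    // appending whatever happened to be left in the scratch buffer.

    std::strcat(msg, "\r\n");
    std::fputs(msg, stdout);
    return 0;
}
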
@@ -636,7 +636,8 @@
}
static int
-client_comm_bind(int sock, const IpAddress &addr) {
+client_comm_bind(int sock, const IpAddress &addr)
+{
int res;
@@ -654,7 +655,8 @@
}
static int
-client_comm_connect(int sock, const IpAddress &addr, struct timeval *tvp) {
+client_comm_connect(int sock, const IpAddress &addr, struct timeval *tvp)
+{
int res;
static struct addrinfo *AI = NULL;
@@ -673,7 +675,8 @@
}
static int
-Now(struct timeval *tp) {
+Now(struct timeval *tp)
+{
#if GETTIMEOFDAY_NO_TZP
return gettimeofday(tp);
#else
@@ -683,18 +686,21 @@
} /* ARGSUSED */
static void
-catchSignal(int sig) {
+catchSignal(int sig)
+{
interrupted = 1;
fprintf(stderr, "Interrupted.\n");
}
static void
-pipe_handler(int sig) {
+pipe_handler(int sig)
+{
fprintf(stderr, "SIGPIPE received.\n");
}
static void
-set_our_signal(void) {
+set_our_signal(void)
+{
#if HAVE_SIGACTION
struct sigaction sa;
@@ -714,7 +720,8 @@
}
static ssize_t
-myread(int fd, void *buf, size_t len) {
+myread(int fd, void *buf, size_t len)
+{
#ifndef _SQUID_MSWIN_
alarm(io_timeout);
return read(fd, buf, len);
@@ -725,7 +732,8 @@
}
static ssize_t
-mywrite(int fd, void *buf, size_t len) {
+mywrite(int fd, void *buf, size_t len)
+{
#ifndef _SQUID_MSWIN_
alarm(io_timeout);
return write(fd, buf, len);