From 61b7d800bab0acfac733c1d2499d5cce3c772a23 Mon Sep 17 00:00:00 2001 From: hz <572478035@qq.com> Date: Wed, 13 Jul 2022 17:07:41 +0800 Subject: [PATCH] import squid-3.5.20-17.el7_9.7 Signed-off-by: hz <572478035@qq.com> --- README.md | 11 - cache_swap.sh | 16 + download | 2 + perl-requires-squid.sh | 3 + squid-3.0.STABLE1-perlpath.patch | 9 + squid-3.1.0.9-config.patch | 25 + squid-3.1.0.9-location.patch | 42 + squid-3.1.9-ltdl.patch | 65 + squid-3.2.0.9-fpic.patch | 45 + squid-3.3.8-active-ftp-1.patch | 88 ++ squid-3.3.8-active-ftp-2.patch | 66 + squid-3.5.10-ssl-helper.patch | 73 + squid-3.5.20-CVE-2018-1000024.patch | 28 + squid-3.5.20-CVE-2018-1000027.patch | 23 + squid-3.5.20-CVE-2019-12519.patch | 275 ++++ squid-3.5.20-CVE-2019-12525.patch | 30 + squid-3.5.20-CVE-2019-12528.patch | 158 ++ squid-3.5.20-CVE-2019-13345.patch | 91 ++ squid-3.5.20-CVE-2020-11945.patch | 50 + squid-3.5.20-CVE-2020-15049.patch | 531 +++++++ squid-3.5.20-CVE-2020-15810.patch | 52 + squid-3.5.20-CVE-2020-15811.patch | 170 ++ squid-3.5.20-CVE-2020-24606.patch | 20 + squid-3.5.20-CVE-2020-25097.patch | 28 + squid-3.5.20-CVE-2020-8449-and-8450.patch | 49 + squid-3.5.20-CVE-2021-46784.patch | 129 ++ squid-3.5.20-cache-peer-tolower.patch | 32 + squid-3.5.20-cache-siblings-gw.patch | 308 ++++ squid-3.5.20-conf-casecmp.patch | 583 +++++++ squid-3.5.20-empty-cname.patch | 138 ++ squid-3.5.20-https-packet-size.patch | 154 ++ squid-3.5.20-man-see-also.patch | 20 + squid-3.5.20-man-typos.patch | 379 +++++ squid-3.5.20-mem-usage-out-of-fd.patch | 155 ++ squid-3.5.20-tunnel-sigsegv.patch | 14 + squid-CVE-2016-10002.patch | 290 ++++ squid-migrate-conf.py | 287 ++++ squid.init | 180 +++ squid.logrotate | 16 + squid.nm | 15 + squid.pam | 3 + squid.service | 16 + squid.spec | 1708 +++++++++++++++++++++ squid.sysconfig | 9 + 44 files changed, 6375 insertions(+), 11 deletions(-) delete mode 100644 README.md create mode 100644 cache_swap.sh create mode 100644 download create mode 100755 
perl-requires-squid.sh create mode 100644 squid-3.0.STABLE1-perlpath.patch create mode 100644 squid-3.1.0.9-config.patch create mode 100644 squid-3.1.0.9-location.patch create mode 100644 squid-3.1.9-ltdl.patch create mode 100644 squid-3.2.0.9-fpic.patch create mode 100644 squid-3.3.8-active-ftp-1.patch create mode 100644 squid-3.3.8-active-ftp-2.patch create mode 100644 squid-3.5.10-ssl-helper.patch create mode 100644 squid-3.5.20-CVE-2018-1000024.patch create mode 100644 squid-3.5.20-CVE-2018-1000027.patch create mode 100644 squid-3.5.20-CVE-2019-12519.patch create mode 100644 squid-3.5.20-CVE-2019-12525.patch create mode 100644 squid-3.5.20-CVE-2019-12528.patch create mode 100644 squid-3.5.20-CVE-2019-13345.patch create mode 100644 squid-3.5.20-CVE-2020-11945.patch create mode 100644 squid-3.5.20-CVE-2020-15049.patch create mode 100644 squid-3.5.20-CVE-2020-15810.patch create mode 100644 squid-3.5.20-CVE-2020-15811.patch create mode 100644 squid-3.5.20-CVE-2020-24606.patch create mode 100644 squid-3.5.20-CVE-2020-25097.patch create mode 100644 squid-3.5.20-CVE-2020-8449-and-8450.patch create mode 100644 squid-3.5.20-CVE-2021-46784.patch create mode 100644 squid-3.5.20-cache-peer-tolower.patch create mode 100644 squid-3.5.20-cache-siblings-gw.patch create mode 100644 squid-3.5.20-conf-casecmp.patch create mode 100644 squid-3.5.20-empty-cname.patch create mode 100644 squid-3.5.20-https-packet-size.patch create mode 100644 squid-3.5.20-man-see-also.patch create mode 100644 squid-3.5.20-man-typos.patch create mode 100644 squid-3.5.20-mem-usage-out-of-fd.patch create mode 100644 squid-3.5.20-tunnel-sigsegv.patch create mode 100644 squid-CVE-2016-10002.patch create mode 100644 squid-migrate-conf.py create mode 100644 squid.init create mode 100644 squid.logrotate create mode 100755 squid.nm create mode 100644 squid.pam create mode 100644 squid.service create mode 100644 squid.spec create mode 100644 squid.sysconfig diff --git a/README.md b/README.md deleted file mode 
100644 index 7342728..0000000 --- a/README.md +++ /dev/null @@ -1,11 +0,0 @@ -Anolis OS -======================================= -# 代码仓库说明 -## 分支说明 ->进行代码开发工作时,请注意选择当前版本对应的分支 -* aX分支为对应大版本的主分支,如a8分支对应当前最新版本 -* aX.Y分支为对应小版本的维护分支,如a8.2分支对应8.2版本 -## 开发流程 -1. 首先fork目标分支到自己的namespace -2. 在自己的fork分支上做出修改 -3. 向对应的仓库中提交merge request,源分支为fork分支 diff --git a/cache_swap.sh b/cache_swap.sh new file mode 100644 index 0000000..5e94072 --- /dev/null +++ b/cache_swap.sh @@ -0,0 +1,16 @@ +#!/bin/bash +if [ -f /etc/sysconfig/squid ]; then + . /etc/sysconfig/squid +fi + +SQUID_CONF=${SQUID_CONF:-"/etc/squid/squid.conf"} + +CACHE_SWAP=`sed -e 's/#.*//g' $SQUID_CONF | \ + grep cache_dir | awk '{ print $3 }'` + +for adir in $CACHE_SWAP; do + if [ ! -d $adir/00 ]; then + echo -n "init_cache_dir $adir... " + squid -N -z -F -f $SQUID_CONF >> /var/log/squid/squid.out 2>&1 + fi +done diff --git a/download b/download new file mode 100644 index 0000000..f8ba61e --- /dev/null +++ b/download @@ -0,0 +1,2 @@ +48fb18679a30606de98882528beab3a7 squid-3.5.20.tar.xz +455ca06da417675b69118b7a4095373e squid-3.5.20.tar.xz.asc diff --git a/perl-requires-squid.sh b/perl-requires-squid.sh new file mode 100755 index 0000000..029e7b9 --- /dev/null +++ b/perl-requires-squid.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +/usr/lib/rpm/perl.req $* | grep -v "Authen::Smb" diff --git a/squid-3.0.STABLE1-perlpath.patch b/squid-3.0.STABLE1-perlpath.patch new file mode 100644 index 0000000..19b512e --- /dev/null +++ b/squid-3.0.STABLE1-perlpath.patch @@ -0,0 +1,9 @@ +diff -up squid-3.0.STABLE1/contrib/url-normalizer.pl.perlpath squid-3.0.STABLE1/contrib/url-normalizer.pl +--- squid-3.0.STABLE1/contrib/url-normalizer.pl.perlpath 1996-12-06 18:54:31.000000000 +0100 ++++ squid-3.0.STABLE1/contrib/url-normalizer.pl 2008-01-23 12:07:50.000000000 +0100 +@@ -1,4 +1,4 @@ +-#!/usr/local/bin/perl -Tw ++#!/usr/bin/perl -Tw + + # From: Markus Gyger + # diff --git a/squid-3.1.0.9-config.patch b/squid-3.1.0.9-config.patch new file mode 100644 
index 0000000..1f8a762 --- /dev/null +++ b/squid-3.1.0.9-config.patch @@ -0,0 +1,25 @@ +--- ./src/cf.data.pre 2013-04-27 05:34:48.000000000 +0200 ++++ ./src/cf.data.pre 2013-05-03 10:13:16.007067356 +0200 +@@ -3889,7 +3889,7 @@ + + NAME: logfile_rotate + TYPE: int +-DEFAULT: 10 ++DEFAULT: 0 + LOC: Config.Log.rotateNumber + DOC_START + Specifies the number of logfile rotations to make when you +@@ -5313,11 +5313,11 @@ + + NAME: cache_mgr + TYPE: string +-DEFAULT: webmaster ++DEFAULT: root + LOC: Config.adminEmail + DOC_START + Email-address of local cache manager who will receive +- mail if the cache dies. The default is "webmaster". ++ mail if the cache dies. The default is "root". + DOC_END + + NAME: mail_from diff --git a/squid-3.1.0.9-location.patch b/squid-3.1.0.9-location.patch new file mode 100644 index 0000000..0f6369d --- /dev/null +++ b/squid-3.1.0.9-location.patch @@ -0,0 +1,42 @@ +diff --git a/QUICKSTART b/QUICKSTART +index f0294ba..e7b14ea 100644 +--- a/QUICKSTART ++++ b/QUICKSTART +@@ -10,10 +10,9 @@ After you retrieved, compiled and installed the Squid software (see + INSTALL in the same directory), you have to configure the squid.conf + file. This is the list of the values you *need* to change, because no + sensible defaults could be defined. Do not touch the other variables +-for now. We assume you have installed Squid in the default location: +-/usr/local/squid ++for now. + +-Uncomment and edit the following lines in /usr/local/squid/etc/squid.conf: ++Uncomment and edit the following lines in /etc/squid/squid.conf: + + ============================================================================== + +@@ -52,7 +51,7 @@ cache_effective_user + as after startup (typically "nobody" and "nogroup"). Do not use + "root", for security reasons. + +-cache_dir ufs /usr/local/squid/var/cache 100 16 256 ++cache_dir ufs /var/spool/squid 100 16 256 + + Add here (first number, here 100) the amount of hard disk space + (in megabytes) to devote to caching. 
+@@ -80,12 +79,12 @@ After editing squid.conf to your liking, run Squid from the command + line TWICE: + + To create any disk cache_dir configured: +- % /usr/local/squid/sbin/squid -z ++ % /usr/sbin/squid -z + + To start squid: +- % /usr/local/squid/sbin/squid ++ % /usr/sbin/squid + +-Check in the cache.log (/usr/local/squid/var/logs/cache.log) that ++Check in the cache.log (/var/log/squid/cache.log) that + everything is all right. + + Once Squid created all its files (it can take several minutes on some diff --git a/squid-3.1.9-ltdl.patch b/squid-3.1.9-ltdl.patch new file mode 100644 index 0000000..6c0b0e7 --- /dev/null +++ b/squid-3.1.9-ltdl.patch @@ -0,0 +1,65 @@ +diff --git a/Makefile.am b/Makefile.am +index c582a62..18ee5bb 100644 +--- a/Makefile.am ++++ b/Makefile.am +@@ -6,11 +6,8 @@ + ## + + AUTOMAKE_OPTIONS = dist-bzip2 1.5 foreign +-DIST_SUBDIRS = compat lib libltdl scripts icons errors contrib doc helpers src test-suite tools ++DIST_SUBDIRS = compat lib scripts icons errors contrib doc helpers src test-suite tools + SUBDIRS = compat lib +-if ENABLE_LOADABLE_MODULES +-SUBDIRS += libltdl +-endif + SUBDIRS += scripts icons errors doc helpers src tools test-suite + + DISTCLEANFILES = include/stamp-h include/stamp-h[0-9]* +diff --git a/Makefile.in b/Makefile.in +index 09c9304..dad017b 100644 +--- a/Makefile.in ++++ b/Makefile.in +@@ -87,7 +87,7 @@ PRE_UNINSTALL = : + POST_UNINSTALL = : + build_triplet = @build@ + host_triplet = @host@ +-@ENABLE_LOADABLE_MODULES_TRUE@am__append_1 = libltdl ++#@ENABLE_LOADABLE_MODULES_TRUE@am__append_1 = libltdl + subdir = . 
+ ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 + am__aclocal_m4_deps = $(top_srcdir)/acinclude/ax_with_prog.m4 \ +@@ -493,7 +493,7 @@ top_build_prefix = @top_build_prefix@ + top_builddir = @top_builddir@ + top_srcdir = @top_srcdir@ + AUTOMAKE_OPTIONS = dist-bzip2 1.5 foreign +-DIST_SUBDIRS = compat lib libltdl scripts icons errors contrib doc helpers src test-suite tools ++DIST_SUBDIRS = compat lib scripts icons errors contrib doc helpers src test-suite tools + SUBDIRS = compat lib $(am__append_1) scripts icons errors doc helpers \ + src tools test-suite + DISTCLEANFILES = include/stamp-h include/stamp-h[0-9]* +diff --git a/aclocal.m4 b/aclocal.m4 +index 4dd204a..b66d5aa 100644 +--- a/aclocal.m4 ++++ b/aclocal.m4 +@@ -108,7 +108,7 @@ _LT_SET_OPTIONS([$0], [$1]) + LIBTOOL_DEPS=$ltmain + + # Always use our own libtool. +-LIBTOOL='$(SHELL) $(top_builddir)/libtool' ++LIBTOOL='$(SHELL) libtool' + AC_SUBST(LIBTOOL)dnl + + _LT_SETUP +diff --git a/configure b/configure +index ba4a984..6926f03 100755 +--- a/configure ++++ b/configure +@@ -11551,7 +11551,7 @@ esac + LIBTOOL_DEPS=$ltmain + + # Always use our own libtool. 
+-LIBTOOL='$(SHELL) $(top_builddir)/libtool' ++LIBTOOL='$(SHELL) libtool' + + + diff --git a/squid-3.2.0.9-fpic.patch b/squid-3.2.0.9-fpic.patch new file mode 100644 index 0000000..9a048dc --- /dev/null +++ b/squid-3.2.0.9-fpic.patch @@ -0,0 +1,45 @@ +diff --git a/compat/Makefile.in b/compat/Makefile.in +index 55b7ba0..ec58c7d 100644 +--- a/compat/Makefile.in ++++ b/compat/Makefile.in +@@ -698,8 +698,8 @@ target_alias = @target_alias@ + top_build_prefix = @top_build_prefix@ + top_builddir = @top_builddir@ + top_srcdir = @top_srcdir@ +-AM_CFLAGS = $(SQUID_CFLAGS) +-AM_CXXFLAGS = $(SQUID_CXXFLAGS) ++AM_CFLAGS = $(SQUID_CFLAGS) -fPIC ++AM_CXXFLAGS = $(SQUID_CXXFLAGS) -fPIC + CLEANFILES = testHeaders + AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/include \ + -I$(top_srcdir)/lib -I$(top_srcdir)/src \ +diff --git a/lib/snmplib/Makefile.am b/lib/snmplib/Makefile.am +index 52921e8..ad780f6 100644 +--- a/lib/snmplib/Makefile.am ++++ b/lib/snmplib/Makefile.am +@@ -9,8 +9,8 @@ + ## + ## Makefile for libsnmp. 
+ ## +-AM_CFLAGS = $(SQUID_CFLAGS) +-AM_CXXFLAGS = $(SQUID_CXXFLAGS) ++AM_CFLAGS = $(SQUID_CFLAGS) -fPIC ++AM_CXXFLAGS = $(SQUID_CXXFLAGS) -fPIC + AM_CPPFLAGS = \ + -I$(top_srcdir) \ + -I$(top_builddir)/include \ +diff --git a/lib/snmplib/Makefile.in b/lib/snmplib/Makefile.in +index af07cd7..f59f62e 100644 +--- a/lib/snmplib/Makefile.in ++++ b/lib/snmplib/Makefile.in +@@ -454,8 +454,8 @@ target_alias = @target_alias@ + top_build_prefix = @top_build_prefix@ + top_builddir = @top_builddir@ + top_srcdir = @top_srcdir@ +-AM_CFLAGS = $(SQUID_CFLAGS) +-AM_CXXFLAGS = $(SQUID_CXXFLAGS) ++AM_CFLAGS = $(SQUID_CFLAGS) -fPIC ++AM_CXXFLAGS = $(SQUID_CXXFLAGS) -fPIC + AM_CPPFLAGS = \ + -I$(top_srcdir) \ + -I$(top_builddir)/include \ diff --git a/squid-3.3.8-active-ftp-1.patch b/squid-3.3.8-active-ftp-1.patch new file mode 100644 index 0000000..0d4d13e --- /dev/null +++ b/squid-3.3.8-active-ftp-1.patch @@ -0,0 +1,88 @@ +diff --git a/src/clients/FtpGateway.cc b/src/clients/FtpGateway.cc +index 2f09b12..7e13a29 100644 +--- a/src/clients/FtpGateway.cc ++++ b/src/clients/FtpGateway.cc +@@ -136,7 +136,10 @@ public: + + /// create a data channel acceptor and start listening. 
+ void listenForDataChannel(const Comm::ConnectionPointer &conn); +- ++ virtual bool openListenSocket() { ++ ftpOpenListenSocket(this, 0); ++ return Comm::IsConnOpen(data.conn); ++ } + int checkAuth(const HttpHeader * req_hdr); + void checkUrlpath(); + void buildTitleUrl(); +diff --git a/src/clients/FtpGateway.cc b/src/clients/FtpGateway.cc +index 1a7b092..025df84 100644 +--- a/src/clients/FtpGateway.cc ++++ b/src/clients/FtpGateway.cc +@@ -87,6 +87,13 @@ struct GatewayFlags { + class Gateway; + typedef void (StateMethod)(Ftp::Gateway *); + ++} // namespace FTP ++ ++static void ftpOpenListenSocket(Ftp::Gateway * ftpState, int fallback); ++ ++namespace Ftp ++{ ++ + /// FTP Gateway: An FTP client that takes an HTTP request with an ftp:// URI, + /// converts it into one or more FTP commands, and then + /// converts one or more FTP responses into the final HTTP response. +diff --git a/src/clients/FtpClient.cc b/src/clients/FtpClient.cc +index a262eea..0978831 100644 +--- a/src/clients/FtpClient.cc ++++ b/src/clients/FtpClient.cc +@@ -743,7 +743,8 @@ Ftp::Client::connectDataChannel() + bool + Ftp::Client::openListenSocket() + { +- return false; ++ debugs(9, 3, HERE); ++ return false; + } + + /// creates a data channel Comm close callback +diff --git a/src/clients/FtpGateway.cc b/src/clients/FtpGateway.cc +index 025df84..dbc4809 100644 +--- a/src/clients/FtpGateway.cc ++++ b/src/clients/FtpGateway.cc +@@ -144,7 +144,8 @@ public: + /// create a data channel acceptor and start listening. 
+ void listenForDataChannel(const Comm::ConnectionPointer &conn); + virtual bool openListenSocket() { +- ftpOpenListenSocket(this, 0); ++ debugs(9, 3, HERE); ++ ftpOpenListenSocket(this, 0); + return Comm::IsConnOpen(data.conn); + } + int checkAuth(const HttpHeader * req_hdr); +diff --git a/src/clients/FtpClient.h b/src/clients/FtpClient.h +index ef2aa98..10a511e 100644 +--- a/src/clients/FtpClient.h ++++ b/src/clients/FtpClient.h +@@ -115,7 +115,7 @@ public: + bool sendPort(); + bool sendPassive(); + void connectDataChannel(); +- bool openListenSocket(); ++ virtual bool openListenSocket(); + void switchTimeoutToDataChannel(); + + CtrlChannel ctrl; ///< FTP control channel state +diff --git a/src/clients/FtpGateway.cc b/src/clients/FtpGateway.cc +index 8754e62..0ef1171 100644 +--- a/src/clients/FtpGateway.cc ++++ b/src/clients/FtpGateway.cc +@@ -1979,7 +1979,7 @@ ftpReadEPRT(Ftp::Gateway * ftpState) + ftpSendPORT(ftpState); + return; + } +- ++ ftpState->ctrl.message = NULL; + ftpRestOrList(ftpState); + } + diff --git a/squid-3.3.8-active-ftp-2.patch b/squid-3.3.8-active-ftp-2.patch new file mode 100644 index 0000000..deca280 --- /dev/null +++ b/squid-3.3.8-active-ftp-2.patch @@ -0,0 +1,66 @@ +diff --git a/src/clients/FtpGateway.cc b/src/clients/FtpGateway.cc +index 524eebb..2f09b12 100644 +--- a/src/clients/FtpGateway.cc ++++ b/src/clients/FtpGateway.cc +@@ -1834,6 +1834,7 @@ ftpOpenListenSocket(Ftp::Gateway * ftpState, int fallback) + } + + ftpState->listenForDataChannel(temp); ++ ftpState->data.listenConn = temp; + } + + static void +@@ -1869,13 +1870,19 @@ ftpSendPORT(Ftp::Gateway * ftpState) + // pull out the internal IP address bytes to send in PORT command... 
+ // source them from the listen_conn->local + +- struct addrinfo *AI = NULL; ++ struct sockaddr_in addr; ++ socklen_t addrlen = sizeof(addr); ++ getsockname(ftpState->data.listenConn->fd, (struct sockaddr *) &addr, &addrlen); ++ unsigned char port_high = ntohs(addr.sin_port) >> 8; ++ unsigned char port_low = ntohs(addr.sin_port) & 0xff; ++ ++ struct addrinfo *AI = NULL; + ftpState->data.listenConn->local.getAddrInfo(AI, AF_INET); + unsigned char *addrptr = (unsigned char *) &((struct sockaddr_in*)AI->ai_addr)->sin_addr; +- unsigned char *portptr = (unsigned char *) &((struct sockaddr_in*)AI->ai_addr)->sin_port; ++ // unsigned char *portptr = (unsigned char *) &((struct sockaddr_in*)AI->ai_addr)->sin_port; + snprintf(cbuf, CTRL_BUFLEN, "PORT %d,%d,%d,%d,%d,%d\r\n", + addrptr[0], addrptr[1], addrptr[2], addrptr[3], +- portptr[0], portptr[1]); ++ port_high, port_low); + ftpState->writeCommand(cbuf); + ftpState->state = Ftp::Client::SENT_PORT; + +@@ -1923,14 +1930,27 @@ ftpSendEPRT(Ftp::Gateway * ftpState) + return; + } + +- char buf[MAX_IPSTRLEN]; ++ ++ unsigned int port; ++ struct sockaddr_storage addr; ++ socklen_t addrlen = sizeof(addr); ++ getsockname(ftpState->data.listenConn->fd, (struct sockaddr *) &addr, &addrlen); ++ if (addr.ss_family == AF_INET) { ++ struct sockaddr_in *addr4 = (struct sockaddr_in*) &addr; ++ port = ntohs( addr4->sin_port ); ++ } else { ++ struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *) &addr; ++ port = ntohs( addr6->sin6_port ); ++ } ++ ++ char buf[MAX_IPSTRLEN]; + + /* RFC 2428 defines EPRT as IPv6 equivalent to IPv4 PORT command. */ + /* Which can be used by EITHER protocol. */ +- snprintf(cbuf, CTRL_BUFLEN, "EPRT |%d|%s|%d|\r\n", ++ snprintf(cbuf, CTRL_BUFLEN, "EPRT |%d|%s|%u|\r\n", + ( ftpState->data.listenConn->local.isIPv6() ? 
2 : 1 ), + ftpState->data.listenConn->local.toStr(buf,MAX_IPSTRLEN), +- ftpState->data.listenConn->local.port() ); ++ port); + + ftpState->writeCommand(cbuf); + ftpState->state = Ftp::Client::SENT_EPRT; diff --git a/squid-3.5.10-ssl-helper.patch b/squid-3.5.10-ssl-helper.patch new file mode 100644 index 0000000..e20de4c --- /dev/null +++ b/squid-3.5.10-ssl-helper.patch @@ -0,0 +1,73 @@ +diff --git a/configure b/configure +index 752a86d..fa0f0e3 100755 +--- a/configure ++++ b/configure +@@ -40311,7 +40311,7 @@ $as_echo "$as_me: BUILD Tools C++ FLAGS: $BUILDCXXFLAGS" >&6;} + + rm -f core + +-ac_config_files="$ac_config_files Makefile compat/Makefile lib/Makefile lib/ntlmauth/Makefile lib/libTrie/Makefile lib/libTrie/test/Makefile lib/profiler/Makefile lib/rfcnb/Makefile lib/smblib/Makefile lib/snmplib/Makefile scripts/Makefile src/Makefile src/anyp/Makefile src/ftp/Makefile src/base/Makefile src/acl/Makefile src/clients/Makefile src/servers/Makefile src/fs/Makefile src/repl/Makefile src/auth/Makefile src/auth/basic/Makefile src/auth/digest/Makefile src/auth/negotiate/Makefile src/auth/ntlm/Makefile src/adaptation/Makefile src/adaptation/icap/Makefile src/adaptation/ecap/Makefile src/comm/Makefile src/esi/Makefile src/eui/Makefile src/format/Makefile src/helper/Makefile src/http/Makefile src/icmp/Makefile src/ident/Makefile src/ip/Makefile src/log/Makefile src/ipc/Makefile src/ssl/Makefile src/mgr/Makefile src/parser/Makefile src/snmp/Makefile contrib/Makefile icons/Makefile errors/Makefile test-suite/Makefile doc/Makefile doc/manuals/Makefile doc/release-notes/Makefile helpers/Makefile helpers/basic_auth/Makefile helpers/basic_auth/DB/Makefile helpers/basic_auth/fake/Makefile helpers/basic_auth/getpwnam/Makefile helpers/basic_auth/LDAP/Makefile helpers/basic_auth/MSNT-multi-domain/Makefile helpers/basic_auth/NCSA/Makefile helpers/basic_auth/NIS/Makefile helpers/basic_auth/PAM/Makefile helpers/basic_auth/POP3/Makefile helpers/basic_auth/RADIUS/Makefile 
helpers/basic_auth/SASL/Makefile helpers/basic_auth/SMB/Makefile helpers/basic_auth/SMB_LM/Makefile helpers/basic_auth/SSPI/Makefile helpers/digest_auth/Makefile helpers/digest_auth/eDirectory/Makefile helpers/digest_auth/file/Makefile helpers/digest_auth/LDAP/Makefile helpers/ntlm_auth/Makefile helpers/ntlm_auth/fake/Makefile helpers/ntlm_auth/smb_lm/Makefile helpers/ntlm_auth/SSPI/Makefile helpers/negotiate_auth/Makefile helpers/negotiate_auth/kerberos/Makefile helpers/negotiate_auth/SSPI/Makefile helpers/negotiate_auth/wrapper/Makefile helpers/external_acl/Makefile helpers/external_acl/AD_group/Makefile helpers/external_acl/delayer/Makefile helpers/external_acl/eDirectory_userip/Makefile helpers/external_acl/file_userip/Makefile helpers/external_acl/kerberos_ldap_group/Makefile helpers/external_acl/LDAP_group/Makefile helpers/external_acl/LM_group/Makefile helpers/external_acl/session/Makefile helpers/external_acl/SQL_session/Makefile helpers/external_acl/unix_group/Makefile helpers/external_acl/wbinfo_group/Makefile helpers/external_acl/time_quota/Makefile helpers/log_daemon/Makefile helpers/log_daemon/DB/Makefile helpers/log_daemon/file/Makefile helpers/url_rewrite/Makefile helpers/url_rewrite/fake/Makefile helpers/ssl/Makefile helpers/storeid_rewrite/Makefile helpers/storeid_rewrite/file/Makefile tools/Makefile tools/purge/Makefile tools/squidclient/Makefile tools/systemd/Makefile tools/sysvinit/Makefile" ++ac_config_files="$ac_config_files Makefile compat/Makefile lib/Makefile lib/ntlmauth/Makefile lib/libTrie/Makefile lib/libTrie/test/Makefile lib/profiler/Makefile lib/rfcnb/Makefile lib/smblib/Makefile lib/snmplib/Makefile scripts/Makefile src/Makefile src/anyp/Makefile src/ftp/Makefile src/base/Makefile src/acl/Makefile src/clients/Makefile src/servers/Makefile src/fs/Makefile src/repl/Makefile src/auth/Makefile src/auth/basic/Makefile src/auth/digest/Makefile src/auth/negotiate/Makefile src/auth/ntlm/Makefile src/adaptation/Makefile 
src/adaptation/icap/Makefile src/adaptation/ecap/Makefile src/comm/Makefile src/esi/Makefile src/eui/Makefile src/format/Makefile src/helper/Makefile src/http/Makefile src/icmp/Makefile src/ident/Makefile src/ip/Makefile src/log/Makefile src/ipc/Makefile src/ssl/Makefile src/mgr/Makefile src/parser/Makefile src/snmp/Makefile contrib/Makefile icons/Makefile errors/Makefile test-suite/Makefile doc/Makefile doc/manuals/Makefile doc/release-notes/Makefile helpers/Makefile helpers/basic_auth/Makefile helpers/basic_auth/DB/Makefile helpers/basic_auth/fake/Makefile helpers/basic_auth/getpwnam/Makefile helpers/basic_auth/LDAP/Makefile helpers/basic_auth/MSNT-multi-domain/Makefile helpers/basic_auth/NCSA/Makefile helpers/basic_auth/NIS/Makefile helpers/basic_auth/PAM/Makefile helpers/basic_auth/POP3/Makefile helpers/basic_auth/RADIUS/Makefile helpers/basic_auth/SASL/Makefile helpers/basic_auth/SMB/Makefile helpers/basic_auth/SMB_LM/Makefile helpers/basic_auth/SSPI/Makefile helpers/digest_auth/Makefile helpers/digest_auth/eDirectory/Makefile helpers/digest_auth/file/Makefile helpers/digest_auth/LDAP/Makefile helpers/ntlm_auth/Makefile helpers/ntlm_auth/fake/Makefile helpers/ntlm_auth/smb_lm/Makefile helpers/ntlm_auth/SSPI/Makefile helpers/negotiate_auth/Makefile helpers/negotiate_auth/kerberos/Makefile helpers/negotiate_auth/SSPI/Makefile helpers/negotiate_auth/wrapper/Makefile helpers/external_acl/Makefile helpers/external_acl/AD_group/Makefile helpers/external_acl/delayer/Makefile helpers/external_acl/eDirectory_userip/Makefile helpers/external_acl/file_userip/Makefile helpers/external_acl/kerberos_ldap_group/Makefile helpers/external_acl/LDAP_group/Makefile helpers/external_acl/LM_group/Makefile helpers/external_acl/session/Makefile helpers/external_acl/SQL_session/Makefile helpers/external_acl/unix_group/Makefile helpers/external_acl/wbinfo_group/Makefile helpers/external_acl/time_quota/Makefile helpers/log_daemon/Makefile helpers/log_daemon/DB/Makefile 
helpers/log_daemon/file/Makefile helpers/url_rewrite/Makefile helpers/url_rewrite/fake/Makefile helpers/storeid_rewrite/Makefile helpers/storeid_rewrite/file/Makefile tools/Makefile tools/purge/Makefile tools/squidclient/Makefile tools/systemd/Makefile tools/sysvinit/Makefile" + + + # must configure libltdl subdir unconditionally for "make distcheck" to work +@@ -41710,7 +41710,6 @@ do + "helpers/log_daemon/file/Makefile") CONFIG_FILES="$CONFIG_FILES helpers/log_daemon/file/Makefile" ;; + "helpers/url_rewrite/Makefile") CONFIG_FILES="$CONFIG_FILES helpers/url_rewrite/Makefile" ;; + "helpers/url_rewrite/fake/Makefile") CONFIG_FILES="$CONFIG_FILES helpers/url_rewrite/fake/Makefile" ;; +- "helpers/ssl/Makefile") CONFIG_FILES="$CONFIG_FILES helpers/ssl/Makefile" ;; + "helpers/storeid_rewrite/Makefile") CONFIG_FILES="$CONFIG_FILES helpers/storeid_rewrite/Makefile" ;; + "helpers/storeid_rewrite/file/Makefile") CONFIG_FILES="$CONFIG_FILES helpers/storeid_rewrite/file/Makefile" ;; + "tools/Makefile") CONFIG_FILES="$CONFIG_FILES tools/Makefile" ;; +diff --git a/configure.ac b/configure.ac +index 6054369..0728672 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -3885,7 +3885,6 @@ AC_CONFIG_FILES([ + helpers/log_daemon/file/Makefile + helpers/url_rewrite/Makefile + helpers/url_rewrite/fake/Makefile +- helpers/ssl/Makefile + helpers/storeid_rewrite/Makefile + helpers/storeid_rewrite/file/Makefile + tools/Makefile +diff --git a/helpers/Makefile.am b/helpers/Makefile.am +index 043ec10..d835535 100644 +--- a/helpers/Makefile.am ++++ b/helpers/Makefile.am +@@ -15,7 +15,6 @@ DIST_SUBDIRS = \ + negotiate_auth \ + ntlm_auth \ + url_rewrite \ +- ssl \ + storeid_rewrite + + SUBDIRS = \ +@@ -31,7 +30,4 @@ if ENABLE_AUTH_NTLM + SUBDIRS += ntlm_auth + endif + +-if ENABLE_SSL +-SUBDIRS += ssl +-endif + +diff --git a/helpers/Makefile.in b/helpers/Makefile.in +index cf47dd2..9c53427 100644 +--- a/helpers/Makefile.in ++++ b/helpers/Makefile.in +@@ -88,7 +88,6 @@ POST_UNINSTALL = : + 
build_triplet = @build@ + host_triplet = @host@ + @ENABLE_AUTH_NTLM_TRUE@am__append_1 = ntlm_auth +-@ENABLE_SSL_TRUE@am__append_2 = ssl + subdir = helpers + ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 + am__aclocal_m4_deps = $(top_srcdir)/acinclude/ax_with_prog.m4 \ +@@ -473,7 +472,6 @@ DIST_SUBDIRS = \ + negotiate_auth \ + ntlm_auth \ + url_rewrite \ +- ssl \ + storeid_rewrite + + SUBDIRS = basic_auth digest_auth external_acl log_daemon \ diff --git a/squid-3.5.20-CVE-2018-1000024.patch b/squid-3.5.20-CVE-2018-1000024.patch new file mode 100644 index 0000000..9392219 --- /dev/null +++ b/squid-3.5.20-CVE-2018-1000024.patch @@ -0,0 +1,28 @@ +commit eb2db98a676321b814fc4a51c4fb7928a8bb45d9 (refs/remotes/origin/v3.5) +Author: Amos Jeffries +Date: 2018-01-19 13:54:14 +1300 + + ESI: make sure endofName never exceeds tagEnd (#130) + +diff --git a/src/esi/CustomParser.cc b/src/esi/CustomParser.cc +index d86d2d3..db634d9 100644 +--- a/src/esi/CustomParser.cc ++++ b/src/esi/CustomParser.cc +@@ -121,7 +121,7 @@ ESICustomParser::parse(char const *dataToParse, size_t const lengthOfData, bool + + char * endofName = strpbrk(const_cast(tag), w_space); + +- if (endofName > tagEnd) ++ if (!endofName || endofName > tagEnd) + endofName = const_cast(tagEnd); + + *endofName = '\0'; +@@ -214,7 +214,7 @@ ESICustomParser::parse(char const *dataToParse, size_t const lengthOfData, bool + + char * endofName = strpbrk(const_cast(tag), w_space); + +- if (endofName > tagEnd) ++ if (!endofName || endofName > tagEnd) + endofName = const_cast(tagEnd); + + *endofName = '\0'; diff --git a/squid-3.5.20-CVE-2018-1000027.patch b/squid-3.5.20-CVE-2018-1000027.patch new file mode 100644 index 0000000..9ecd8a5 --- /dev/null +++ b/squid-3.5.20-CVE-2018-1000027.patch @@ -0,0 +1,23 @@ +commit 8232b83d3fa47a1399f155cb829db829369fbae9 (refs/remotes/origin/v3.5) +Author: squidadm +Date: 2018-01-21 08:07:08 +1300 + + Fix indirect IP logging for transactions without a client connection (#129) (#136) + +diff --git 
a/src/client_side_request.cc b/src/client_side_request.cc +index be124f3..203f89d 100644 +--- a/src/client_side_request.cc ++++ b/src/client_side_request.cc +@@ -488,9 +488,9 @@ clientFollowXForwardedForCheck(allow_t answer, void *data) + * Ensure that the access log shows the indirect client + * instead of the direct client. + */ +- ConnStateData *conn = http->getConn(); +- conn->log_addr = request->indirect_client_addr; +- http->al->cache.caddr = conn->log_addr; ++ http->al->cache.caddr = request->indirect_client_addr; ++ if (ConnStateData *conn = http->getConn()) ++ conn->log_addr = request->indirect_client_addr; + } + request->x_forwarded_for_iterator.clean(); + request->flags.done_follow_x_forwarded_for = true; diff --git a/squid-3.5.20-CVE-2019-12519.patch b/squid-3.5.20-CVE-2019-12519.patch new file mode 100644 index 0000000..568b277 --- /dev/null +++ b/squid-3.5.20-CVE-2019-12519.patch @@ -0,0 +1,275 @@ +diff --git a/src/esi/Context.h b/src/esi/Context.h +index be49742..34b1fd0 100644 +--- a/src/esi/Context.h ++++ b/src/esi/Context.h +@@ -12,6 +12,7 @@ + #include "clientStream.h" + #include "err_type.h" + #include "esi/Element.h" ++#include "esi/Esi.h" + #include "esi/Parser.h" + #include "http/StatusCode.h" + #include "HttpReply.h" +@@ -112,7 +113,7 @@ public: + { + + public: +- ESIElement::Pointer stack[10]; /* a stack of esi elements that are open */ ++ ESIElement::Pointer stack[ESI_STACK_DEPTH_LIMIT]; /* a stack of esi elements that are open */ + int stackdepth; /* self explanatory */ + ESIParser::Pointer theParser; + ESIElement::Pointer top(); +diff --git a/src/esi/Esi.cc b/src/esi/Esi.cc +index 1816c76..674bae2 100644 +--- a/src/esi/Esi.cc ++++ b/src/esi/Esi.cc +@@ -29,6 +29,7 @@ + #include "esi/Expression.h" + #include "esi/Segment.h" + #include "esi/VarState.h" ++#include "FadingCounter.h" + #include "HttpHdrSc.h" + #include "HttpHdrScTarget.h" + #include "HttpReply.h" +@@ -943,13 +944,18 @@ void + ESIContext::addStackElement (ESIElement::Pointer 
element) + { + /* Put on the stack to allow skipping of 'invalid' markup */ +- assert (parserState.stackdepth <11); ++ ++ // throw an error if the stack location would be invalid ++ if (parserState.stackdepth >= ESI_STACK_DEPTH_LIMIT) ++ throw Esi::Error("ESI Too many nested elements"); ++ if (parserState.stackdepth < 0) ++ throw Esi::Error("ESI elements stack error, probable error in ESI template"); ++ + assert (!failed()); + debugs(86, 5, "ESIContext::addStackElement: About to add ESI Node " << element.getRaw()); + + if (!parserState.top()->addElement(element)) { +- debugs(86, DBG_IMPORTANT, "ESIContext::addStackElement: failed to add esi node, probable error in ESI template"); +- flags.error = 1; ++ throw Esi::Error("ESIContext::addStackElement failed, probable error in ESI template"); + } else { + /* added ok, push onto the stack */ + parserState.stack[parserState.stackdepth] = element; +@@ -1201,13 +1207,10 @@ ESIContext::addLiteral (const char *s, int len) + assert (len); + debugs(86, 5, "literal length is " << len); + /* give a literal to the current element */ +- assert (parserState.stackdepth <11); + ESIElement::Pointer element (new esiLiteral (this, s, len)); + +- if (!parserState.top()->addElement(element)) { +- debugs(86, DBG_IMPORTANT, "ESIContext::addLiteral: failed to add esi node, probable error in ESI template"); +- flags.error = 1; +- } ++ if (!parserState.top()->addElement(element)) ++ throw Esi::Error("ESIContext::addLiteral failed, probable error in ESI template"); + } + + void +@@ -1269,8 +1272,24 @@ ESIContext::parse() + + PROF_start(esiParsing); + +- while (buffered.getRaw() && !flags.error) +- parseOneBuffer(); ++ try { ++ while (buffered.getRaw() && !flags.error) ++ parseOneBuffer(); ++ ++ } catch (Esi::ErrorDetail &errMsg) { // FIXME: non-const for c_str() ++ // level-2: these are protocol/syntax errors from upstream ++ debugs(86, 2, "WARNING: ESI syntax error: " << errMsg); ++ setError(); ++ setErrorMessage(errMsg.c_str()); ++ ++ } catch 
(...) { ++ // DBG_IMPORTANT because these are local issues the admin needs to fix ++ static FadingCounter logEntries; // TODO: set horizon less than infinity ++ if (logEntries.count(1) < 100) ++ debugs(86, DBG_IMPORTANT, "ERROR: ESI parser: unhandled exception"); ++ setError(); ++ setErrorMessage("ESI parser error"); ++ } + + PROF_stop(esiParsing); + +diff --git a/src/esi/Esi.h b/src/esi/Esi.h +index bbdb566..85f80f7 100644 +--- a/src/esi/Esi.h ++++ b/src/esi/Esi.h +@@ -10,6 +10,11 @@ + #define SQUID_ESI_H + + #include "clientStream.h" ++#include "SBuf.h" ++ ++#if !defined(ESI_STACK_DEPTH_LIMIT) ++#define ESI_STACK_DEPTH_LIMIT 20 ++#endif + + /* ESI.c */ + extern CSR esiStreamRead; +@@ -18,5 +23,14 @@ extern CSD esiStreamDetach; + extern CSS esiStreamStatus; + int esiEnableProcessing (HttpReply *); + ++namespace Esi ++{ ++ ++typedef SBuf ErrorDetail; ++/// prepare an Esi::ErrorDetail for throw on ESI parser internal errors ++inline Esi::ErrorDetail Error(const char *msg) { return ErrorDetail(msg); } ++ ++} // namespace Esi ++ + #endif /* SQUID_ESI_H */ + +diff --git a/src/esi/Expression.cc b/src/esi/Expression.cc +index 8a1d3e9..a65edfb 100644 +--- a/src/esi/Expression.cc ++++ b/src/esi/Expression.cc +@@ -10,6 +10,7 @@ + + #include "squid.h" + #include "Debug.h" ++#include "esi/Esi.h" + #include "esi/Expression.h" + #include "profiler/Profiler.h" + +@@ -97,6 +98,17 @@ stackpop(stackmember * s, int *depth) + cleanmember(&s[*depth]); + } + ++static void ++stackpush(stackmember *stack, stackmember &item, int *depth) ++{ ++ if (*depth < 0) ++ throw Esi::Error("ESIExpression stack has negative size"); ++ if (*depth >= ESI_STACK_DEPTH_LIMIT) ++ throw Esi::Error("ESIExpression stack is full, cannot push"); ++ ++ stack[(*depth)++] = item; ++} ++ + static evaluate evalnegate; + static evaluate evalliteral; + static evaluate evalor; +@@ -208,6 +220,11 @@ evalnegate(stackmember * stack, int *depth, int whereAmI, stackmember * candidat + /* invalid stack */ + return 1; + ++ if 
(whereAmI < 0) ++ throw Esi::Error("negate expression location too small"); ++ if (*depth >= ESI_STACK_DEPTH_LIMIT) ++ throw Esi::Error("negate expression too complex"); ++ + if (stack[whereAmI + 1].valuetype != ESI_EXPR_EXPR) + /* invalid operand */ + return 1; +@@ -280,7 +297,7 @@ evalor(stackmember * stack, int *depth, int whereAmI, stackmember * candidate) + + srv.precedence = 1; + +- stack[(*depth)++] = srv; ++ stackpush(stack, srv, depth); + + /* we're out of way, try adding now */ + if (!addmember(stack, depth, candidate)) +@@ -327,7 +344,7 @@ evaland(stackmember * stack, int *depth, int whereAmI, stackmember * candidate) + + srv.precedence = 1; + +- stack[(*depth)++] = srv; ++ stackpush(stack, srv, depth); + + /* we're out of way, try adding now */ + if (!addmember(stack, depth, candidate)) +@@ -373,7 +390,7 @@ evallesseq(stackmember * stack, int *depth, int whereAmI, stackmember * candidat + + srv.precedence = 1; + +- stack[(*depth)++] = srv; ++ stackpush(stack, srv, depth); + + /* we're out of way, try adding now */ + if (!addmember(stack, depth, candidate)) +@@ -421,7 +438,7 @@ evallessthan(stackmember * stack, int *depth, int whereAmI, stackmember * candid + + srv.precedence = 1; + +- stack[(*depth)++] = srv; ++ stackpush(stack, srv, depth); + + /* we're out of way, try adding now */ + if (!addmember(stack, depth, candidate)) +@@ -469,7 +486,7 @@ evalmoreeq(stackmember * stack, int *depth, int whereAmI, stackmember * candidat + + srv.precedence = 1; + +- stack[(*depth)++] = srv; ++ stackpush(stack, srv, depth); + + /* we're out of way, try adding now */ + if (!addmember(stack, depth, candidate)) +@@ -517,7 +534,7 @@ evalmorethan(stackmember * stack, int *depth, int whereAmI, stackmember * candid + + srv.precedence = 1; + +- stack[(*depth)++] = srv; ++ stackpush(stack, srv, depth); + + /* we're out of way, try adding now */ + if (!addmember(stack, depth, candidate)) +@@ -566,7 +583,7 @@ evalequals(stackmember * stack, int *depth, int whereAmI, + + 
srv.precedence = 1; + +- stack[(*depth)++] = srv; ++ stackpush(stack, srv, depth); + + /* we're out of way, try adding now */ + if (!addmember(stack, depth, candidate)) +@@ -613,7 +630,7 @@ evalnotequals(stackmember * stack, int *depth, int whereAmI, stackmember * candi + + srv.precedence = 1; + +- stack[(*depth)++] = srv; ++ stackpush(stack, srv, depth); + + /* we're out of way, try adding now */ + if (!addmember(stack, depth, candidate)) +@@ -953,6 +970,9 @@ addmember(stackmember * stack, int *stackdepth, stackmember * candidate) + /* !(!(a==b))) is why thats safe */ + /* strictly less than until we unwind */ + ++ if (*stackdepth >= ESI_STACK_DEPTH_LIMIT) ++ throw Esi::Error("ESI expression too complex to add member"); ++ + if (candidate->precedence < stack[*stackdepth - 1].precedence || + candidate->precedence < stack[*stackdepth - 2].precedence) { + /* must be an operator */ +@@ -968,10 +988,10 @@ addmember(stackmember * stack, int *stackdepth, stackmember * candidate) + return 0; + } + } else { +- stack[(*stackdepth)++] = *candidate; ++ stackpush(stack, *candidate, stackdepth); + } + } else if (candidate->valuetype != ESI_EXPR_INVALID) +- stack[(*stackdepth)++] = *candidate; ++ stackpush(stack, *candidate, stackdepth); + + return 1; + } +@@ -979,7 +999,7 @@ addmember(stackmember * stack, int *stackdepth, stackmember * candidate) + int + ESIExpression::Evaluate(char const *s) + { +- stackmember stack[20]; ++ stackmember stack[ESI_STACK_DEPTH_LIMIT]; + int stackdepth = 0; + char const *end; + PROF_start(esiExpressionEval); diff --git a/squid-3.5.20-CVE-2019-12525.patch b/squid-3.5.20-CVE-2019-12525.patch new file mode 100644 index 0000000..6bfe4e3 --- /dev/null +++ b/squid-3.5.20-CVE-2019-12525.patch @@ -0,0 +1,30 @@ +commit ec0d0f39cf28da14eead0ba5e777e95855bc2f67 +Author: Amos Jeffries +Date: 2019-06-08 21:09:23 +0000 + + Fix Digest auth parameter parsing (#415) + + Only remove quoting if the domain=, uri= or qop= parameter + value is surrounded by 
double-quotes. + +diff --git a/src/auth/digest/Config.cc b/src/auth/digest/Config.cc +index 674dd93..d2cd2e9 100644 +--- a/src/auth/digest/Config.cc ++++ b/src/auth/digest/Config.cc +@@ -781,14 +781,14 @@ Auth::Digest::Config::decode(char const *proxy_auth, const char *aRequestRealm) + if (keyName == SBuf("domain",6) || keyName == SBuf("uri",3)) { + // domain is Special. Not a quoted-string, must not be de-quoted. But is wrapped in '"' + // BUG 3077: uri= can also be sent to us in a mangled (invalid!) form like domain +- if (*p == '"' && *(p + vlen -1) == '"') { ++ if (vlen > 1 && *p == '"' && *(p + vlen -1) == '"') { + value.limitInit(p+1, vlen-2); + } + } else if (keyName == SBuf("qop",3)) { + // qop is more special. + // On request this must not be quoted-string de-quoted. But is several values wrapped in '"' + // On response this is a single un-quoted token. +- if (*p == '"' && *(p + vlen -1) == '"') { ++ if (vlen > 1 && *p == '"' && *(p + vlen -1) == '"') { + value.limitInit(p+1, vlen-2); + } else { + value.limitInit(p, vlen); diff --git a/squid-3.5.20-CVE-2019-12528.patch b/squid-3.5.20-CVE-2019-12528.patch new file mode 100644 index 0000000..b8470af --- /dev/null +++ b/squid-3.5.20-CVE-2019-12528.patch @@ -0,0 +1,158 @@ +diff --git a/src/clients/FtpGateway.cc b/src/clients/FtpGateway.cc +index 4f8319a..3a35784 100644 +--- a/src/clients/FtpGateway.cc ++++ b/src/clients/FtpGateway.cc +@@ -543,8 +543,10 @@ ftpListParseParts(const char *buf, struct Ftp::GatewayFlags flags) + { + ftpListParts *p = NULL; + char *t = NULL; +- const char *ct = NULL; +- char *tokens[MAX_TOKENS]; ++ struct FtpLineToken { ++ char *token = NULL; ///< token image copied from the received line ++ size_t pos = 0; ///< token offset on the received line ++ } tokens[MAX_TOKENS]; + int i; + int n_tokens; + static char tbuf[128]; +@@ -585,7 +587,8 @@ ftpListParseParts(const char *buf, struct Ftp::GatewayFlags flags) + } + + for (t = strtok(xbuf, w_space); t && n_tokens < MAX_TOKENS; t = 
strtok(NULL, w_space)) { +- tokens[n_tokens] = xstrdup(t); ++ tokens[n_tokens].token = xstrdup(t); ++ tokens[n_tokens].pos = t - xbuf; + ++n_tokens; + } + +@@ -593,10 +596,10 @@ ftpListParseParts(const char *buf, struct Ftp::GatewayFlags flags) + + /* locate the Month field */ + for (i = 3; i < n_tokens - 2; ++i) { +- char *size = tokens[i - 1]; +- char *month = tokens[i]; +- char *day = tokens[i + 1]; +- char *year = tokens[i + 2]; ++ const char *size = tokens[i - 1].token; ++ char *month = tokens[i].token; ++ char *day = tokens[i + 1].token; ++ char *year = tokens[i + 2].token; + + if (!is_month(month)) + continue; +@@ -610,30 +613,35 @@ ftpListParseParts(const char *buf, struct Ftp::GatewayFlags flags) + if (regexec(&scan_ftp_time, year, 0, NULL, 0) != 0) /* Yr | hh:mm */ + continue; + +- snprintf(tbuf, 128, "%s %2s %5s", +- month, day, year); ++ const char *copyFrom = buf + tokens[i].pos; + +- if (!strstr(buf, tbuf)) +- snprintf(tbuf, 128, "%s %2s %-5s", +- month, day, year); ++ // "MMM DD [ YYYY|hh:mm]" with at most two spaces between DD and YYYY ++ int dateSize = snprintf(tbuf, sizeof(tbuf), "%s %2s %5s", month, day, year); ++ bool isTypeA = (dateSize == 12) && (strncmp(copyFrom, tbuf, dateSize) == 0); + +- char const *copyFrom = NULL; ++ // "MMM DD [YYYY|hh:mm]" with one space between DD and YYYY ++ dateSize = snprintf(tbuf, sizeof(tbuf), "%s %2s %-5s", month, day, year); ++ bool isTypeB = (dateSize == 12 || dateSize == 11) && (strncmp(copyFrom, tbuf, dateSize) == 0); + +- if ((copyFrom = strstr(buf, tbuf))) { +- p->type = *tokens[0]; ++ // TODO: replace isTypeA and isTypeB with a regex. 
++ if (isTypeA || isTypeB) { ++ p->type = *tokens[0].token; + p->size = strtoll(size, NULL, 10); ++ const int finalDateSize = snprintf(tbuf, sizeof(tbuf), "%s %2s %5s", month, day, year); ++ assert(finalDateSize >= 0); + p->date = xstrdup(tbuf); + ++ // point after tokens[i+2] : ++ copyFrom = buf + tokens[i + 2].pos + strlen(tokens[i + 2].token); + if (flags.skip_whitespace) { +- copyFrom += strlen(tbuf); +- + while (strchr(w_space, *copyFrom)) + ++copyFrom; + } else { + /* XXX assumes a single space between date and filename + * suggested by: Nathan.Bailey@cc.monash.edu.au and + * Mike Battersby */ +- copyFrom += strlen(tbuf) + 1; ++ if (strchr(w_space, *copyFrom)) ++ ++copyFrom; + } + + p->name = xstrdup(copyFrom); +@@ -651,45 +659,36 @@ ftpListParseParts(const char *buf, struct Ftp::GatewayFlags flags) + + /* try it as a DOS listing, 04-05-70 09:33PM ... */ + if (n_tokens > 3 && +- regexec(&scan_ftp_dosdate, tokens[0], 0, NULL, 0) == 0 && +- regexec(&scan_ftp_dostime, tokens[1], 0, NULL, 0) == 0) { +- if (!strcasecmp(tokens[2], "")) { ++ regexec(&scan_ftp_dosdate, tokens[0].token, 0, NULL, 0) == 0 && ++ regexec(&scan_ftp_dostime, tokens[1].token, 0, NULL, 0) == 0) { ++ if (!strcasecmp(tokens[2].token, "")) { + p->type = 'd'; + } else { + p->type = '-'; +- p->size = strtoll(tokens[2], NULL, 10); ++ p->size = strtoll(tokens[2].token, NULL, 10); + } + +- snprintf(tbuf, 128, "%s %s", tokens[0], tokens[1]); ++ snprintf(tbuf, sizeof(tbuf), "%s %s", tokens[0].token, tokens[1].token); + p->date = xstrdup(tbuf); + + if (p->type == 'd') { +- /* Directory.. name begins with first printable after */ +- ct = strstr(buf, tokens[2]); +- ct += strlen(tokens[2]); +- +- while (xisspace(*ct)) +- ++ct; +- +- if (!*ct) +- ct = NULL; ++ // Directory.. name begins with first printable after ++ // Because of the "n_tokens > 3", the next printable after ++ // is stored at token[3]. No need for more checks here. + } else { +- /* A file. 
Name begins after size, with a space in between */ +- snprintf(tbuf, 128, " %s %s", tokens[2], tokens[3]); +- ct = strstr(buf, tbuf); +- +- if (ct) { +- ct += strlen(tokens[2]) + 2; +- } ++ // A file. Name begins after size, with a space in between. ++ // Also a space should exist before size. ++ // But there is not needed to be very strict with spaces. ++ // The name is stored at token[3], take it from here. + } + +- p->name = xstrdup(ct ? ct : tokens[3]); ++ p->name = xstrdup(tokens[3].token); + goto found; + } + + /* Try EPLF format; carson@lehman.com */ + if (buf[0] == '+') { +- ct = buf + 1; ++ const char *ct = buf + 1; + p->type = 0; + + while (ct && *ct) { +@@ -760,7 +759,7 @@ blank: + found: + + for (i = 0; i < n_tokens; ++i) +- xfree(tokens[i]); ++ xfree(tokens[i].token); + + if (!p->name) + ftpListPartsFree(&p); /* cleanup */ diff --git a/squid-3.5.20-CVE-2019-13345.patch b/squid-3.5.20-CVE-2019-13345.patch new file mode 100644 index 0000000..9e4782c --- /dev/null +++ b/squid-3.5.20-CVE-2019-13345.patch @@ -0,0 +1,91 @@ +diff --git a/tools/cachemgr.cc b/tools/cachemgr.cc +index 1ec4e15..fb5146e 100644 +--- a/tools/cachemgr.cc ++++ b/tools/cachemgr.cc +@@ -354,7 +354,7 @@ auth_html(const char *host, int port, const char *user_name) + + printf("Manager name:\n", user_name); ++ printf("size=\"30\" VALUE=\"%s\">\n", rfc1738_escape(user_name)); + + printf("Password:hostname, + req->port, +- safe_str(req->user_name), ++ rfc1738_escape(safe_str(req->user_name)), + action, + safe_str(req->pub_auth)); + return url; +@@ -1073,8 +1073,8 @@ make_pub_auth(cachemgr_request * req) + const int bufLen = snprintf(buf, sizeof(buf), "%s|%d|%s|%s", + req->hostname, + (int) now, +- req->user_name ? 
req->user_name : "", +- req->passwd); ++ rfc1738_escape(safe_str(req->user_name)), ++ rfc1738_escape(req->passwd)); + debug("cmgr: pre-encoded for pub: %s\n", buf); + + const int encodedLen = base64_encode_len(bufLen); +@@ -1089,8 +1089,6 @@ decode_pub_auth(cachemgr_request * req) + char *buf; + const char *host_name; + const char *time_str; +- const char *user_name; +- const char *passwd; + + debug("cmgr: decoding pub: '%s'\n", safe_str(req->pub_auth)); + safe_free(req->passwd); +@@ -1119,17 +1117,21 @@ decode_pub_auth(cachemgr_request * req) + + debug("cmgr: decoded time: '%s' (now: %d)\n", time_str, (int) now); + ++ char *user_name; + if ((user_name = strtok(NULL, "|")) == NULL) { + xfree(buf); + return; + } ++ rfc1738_unescape(user_name); + + debug("cmgr: decoded uname: '%s'\n", user_name); + ++ char *passwd; + if ((passwd = strtok(NULL, "|")) == NULL) { + xfree(buf); + return; + } ++ rfc1738_unescape(passwd); + + debug("cmgr: decoded passwd: '%s'\n", passwd); + +diff --git a/tools/cachemgr.cc b/tools/cachemgr.cc +index fb5146e..388c87b 100644 +--- a/tools/cachemgr.cc ++++ b/tools/cachemgr.cc +@@ -1069,14 +1069,20 @@ make_pub_auth(cachemgr_request * req) + if (!req->passwd || !strlen(req->passwd)) + return; + ++ auto *rfc1738_username = xstrdup(rfc1738_escape(safe_str(req->user_name))); ++ auto *rfc1738_passwd = xstrdup(rfc1738_escape(req->passwd)); ++ + /* host | time | user | passwd */ + const int bufLen = snprintf(buf, sizeof(buf), "%s|%d|%s|%s", + req->hostname, + (int) now, +- rfc1738_escape(safe_str(req->user_name)), +- rfc1738_escape(req->passwd)); ++ rfc1738_username, ++ rfc1738_passwd); + debug("cmgr: pre-encoded for pub: %s\n", buf); + ++ safe_free(rfc1738_username); ++ safe_free(rfc1738_passwd); ++ + const int encodedLen = base64_encode_len(bufLen); + req->pub_auth = (char *) xmalloc(encodedLen); + base64_encode_str(req->pub_auth, encodedLen, buf, bufLen); diff --git a/squid-3.5.20-CVE-2020-11945.patch b/squid-3.5.20-CVE-2020-11945.patch new file 
mode 100644 index 0000000..281b5cb --- /dev/null +++ b/squid-3.5.20-CVE-2020-11945.patch @@ -0,0 +1,50 @@ +diff --git a/src/auth/digest/Config.cc b/src/auth/digest/Config.cc +index 4a9e762..1008ca6 100644 +--- a/src/auth/digest/Config.cc ++++ b/src/auth/digest/Config.cc +@@ -85,9 +85,6 @@ static void authenticateDigestNonceDelete(digest_nonce_h * nonce); + static void authenticateDigestNonceSetup(void); + static void authDigestNonceEncode(digest_nonce_h * nonce); + static void authDigestNonceLink(digest_nonce_h * nonce); +-#if NOT_USED +-static int authDigestNonceLinks(digest_nonce_h * nonce); +-#endif + static void authDigestNonceUserUnlink(digest_nonce_h * nonce); + + static void +@@ -276,21 +273,10 @@ authDigestNonceLink(digest_nonce_h * nonce) + { + assert(nonce != NULL); + ++nonce->references; ++ assert(nonce->references != 0); // no overflows + debugs(29, 9, "nonce '" << nonce << "' now at '" << nonce->references << "'."); + } + +-#if NOT_USED +-static int +-authDigestNonceLinks(digest_nonce_h * nonce) +-{ +- if (!nonce) +- return -1; +- +- return nonce->references; +-} +- +-#endif +- + void + authDigestNonceUnlink(digest_nonce_h * nonce) + { +diff --git a/src/auth/digest/Config.h b/src/auth/digest/Config.h +index 8baeb31..c7b353d 100644 +--- a/src/auth/digest/Config.h ++++ b/src/auth/digest/Config.h +@@ -42,7 +42,7 @@ struct _digest_nonce_h : public hash_link { + /* number of uses we've seen of this nonce */ + unsigned long nc; + /* reference count */ +- short references; ++ uint64_t references; + /* the auth_user this nonce has been tied to */ + Auth::Digest::User *user; + /* has this nonce been invalidated ? 
*/ diff --git a/squid-3.5.20-CVE-2020-15049.patch b/squid-3.5.20-CVE-2020-15049.patch new file mode 100644 index 0000000..707b315 --- /dev/null +++ b/squid-3.5.20-CVE-2020-15049.patch @@ -0,0 +1,531 @@ +From abdf3942a848b3de8c4fcdbccf15139b1ed0d9c2 Mon Sep 17 00:00:00 2001 +From: Lubos Uhliarik +Date: Mon, 3 Aug 2020 16:48:15 +0200 +Subject: [PATCH] Fix for CVE-2020-15049 + +--- + src/HttpHeader.cc | 85 ++++++------ + src/HttpHeaderTools.cc | 27 ++++ + src/HttpHeaderTools.h | 8 +- + src/http/ContentLengthInterpreter.cc | 190 +++++++++++++++++++++++++++ + src/http/ContentLengthInterpreter.h | 66 ++++++++++ + src/http/Makefile.am | 2 + + src/http/Makefile.in | 4 +- + 7 files changed, 337 insertions(+), 45 deletions(-) + create mode 100644 src/http/ContentLengthInterpreter.cc + create mode 100644 src/http/ContentLengthInterpreter.h + +diff --git a/src/HttpHeader.cc b/src/HttpHeader.cc +index 7e8c77e..ef60c02 100644 +--- a/src/HttpHeader.cc ++++ b/src/HttpHeader.cc +@@ -11,6 +11,7 @@ + #include "squid.h" + #include "base64.h" + #include "globals.h" ++#include "http/ContentLengthInterpreter.h" + #include "HttpHdrCc.h" + #include "HttpHdrContRange.h" + #include "HttpHdrSc.h" +@@ -588,7 +589,6 @@ int + HttpHeader::parse(const char *header_start, const char *header_end) + { + const char *field_ptr = header_start; +- HttpHeaderEntry *e, *e2; + int warnOnError = (Config.onoff.relaxed_header_parser <= 0 ? DBG_IMPORTANT : 2); + + PROF_start(HttpHeaderParse); +@@ -605,6 +605,7 @@ HttpHeader::parse(const char *header_start, const char *header_end) + return reset(); + } + ++ Http::ContentLengthInterpreter clen(warnOnError); + /* common format headers are ":[ws]" lines delimited by . 
+ * continuation lines start with a (single) space or tab */ + while (field_ptr < header_end) { +@@ -681,6 +682,7 @@ HttpHeader::parse(const char *header_start, const char *header_end) + break; /* terminating blank line */ + } + ++ HttpHeaderEntry *e; + if ((e = HttpHeaderEntry::parse(field_start, field_end)) == NULL) { + debugs(55, warnOnError, "WARNING: unparseable HTTP header field {" << + getStringPrefix(field_start, field_end) << "}"); +@@ -693,45 +695,19 @@ HttpHeader::parse(const char *header_start, const char *header_end) + return reset(); + } + +- // XXX: RFC 7230 Section 3.3.3 item #4 requires sending a 502 error in +- // several cases that we do not yet cover. TODO: Rewrite to cover more. +- if (e->id == HDR_CONTENT_LENGTH && (e2 = findEntry(e->id)) != NULL) { +- if (e->value != e2->value) { +- int64_t l1, l2; +- debugs(55, warnOnError, "WARNING: found two conflicting content-length headers in {" << +- getStringPrefix(header_start, header_end) << "}"); +- +- if (!Config.onoff.relaxed_header_parser) { +- delete e; +- PROF_stop(HttpHeaderParse); +- return reset(); +- } + +- if (!httpHeaderParseOffset(e->value.termedBuf(), &l1)) { +- debugs(55, DBG_IMPORTANT, "WARNING: Unparseable content-length '" << e->value << "'"); +- delete e; +- continue; +- } else if (!httpHeaderParseOffset(e2->value.termedBuf(), &l2)) { +- debugs(55, DBG_IMPORTANT, "WARNING: Unparseable content-length '" << e2->value << "'"); +- delById(e2->id); +- } else { +- if (l1 != l2) +- conflictingContentLength_ = true; +- delete e; +- continue; +- } +- } else { +- debugs(55, warnOnError, "NOTICE: found double content-length header"); +- delete e; ++ if (e->id == HDR_CONTENT_LENGTH && !clen.checkField(e->value)) { ++ delete e; + +- if (Config.onoff.relaxed_header_parser) +- continue; ++ if (Config.onoff.relaxed_header_parser) ++ continue; // clen has printed any necessary warnings + +- PROF_stop(HttpHeaderParse); +- return reset(); +- } ++ PROF_stop(HttpHeaderParse); ++ clean(); ++ return 0; 
+ } + ++ + if (e->id == HDR_OTHER && stringHasWhitespace(e->name.termedBuf())) { + debugs(55, warnOnError, "WARNING: found whitespace in HTTP header name {" << + getStringPrefix(field_start, field_end) << "}"); +@@ -746,6 +722,32 @@ HttpHeader::parse(const char *header_start, const char *header_end) + addEntry(e); + } + ++ if (clen.headerWideProblem) { ++ debugs(55, warnOnError, "WARNING: " << clen.headerWideProblem << ++ " Content-Length field values in" << ++ Raw("header", header_start, (size_t)(header_end - header_start))); ++ } ++ ++ if (chunked()) { ++ // RFC 2616 section 4.4: ignore Content-Length with Transfer-Encoding ++ // RFC 7230 section 3.3.3 #3: Transfer-Encoding overwrites Content-Length ++ delById(HDR_CONTENT_LENGTH); ++ ++ // and clen state becomes irrelevant ++ } else if (clen.sawBad) { ++ // ensure our callers do not accidentally see bad Content-Length values ++ delById(HDR_CONTENT_LENGTH); ++ conflictingContentLength_ = true; // TODO: Rename to badContentLength_. ++ } else if (clen.needsSanitizing) { ++ // RFC 7230 section 3.3.2: MUST either reject or ... 
[sanitize]; ++ // ensure our callers see a clean Content-Length value or none at all ++ delById(HDR_CONTENT_LENGTH); ++ if (clen.sawGood) { ++ putInt64(HDR_CONTENT_LENGTH, clen.value); ++ debugs(55, 5, "sanitized Content-Length to be " << clen.value); ++ } ++ } ++ + if (chunked()) { + // RFC 2616 section 4.4: ignore Content-Length with Transfer-Encoding + delById(HDR_CONTENT_LENGTH); +@@ -1722,6 +1724,7 @@ HttpHeaderEntry::getInt() const + assert_eid (id); + assert (Headers[id].type == ftInt); + int val = -1; ++ + int ok = httpHeaderParseInt(value.termedBuf(), &val); + httpHeaderNoteParsedEntry(id, value, !ok); + /* XXX: Should we check ok - ie +@@ -1733,15 +1736,11 @@ HttpHeaderEntry::getInt() const + int64_t + HttpHeaderEntry::getInt64() const + { +- assert_eid (id); +- assert (Headers[id].type == ftInt64); + int64_t val = -1; +- int ok = httpHeaderParseOffset(value.termedBuf(), &val); +- httpHeaderNoteParsedEntry(id, value, !ok); +- /* XXX: Should we check ok - ie +- * return ok ? -1 : value; +- */ +- return val; ++ ++ const bool ok = httpHeaderParseOffset(value.termedBuf(), &val); ++ httpHeaderNoteParsedEntry(id, value, ok); ++ return val; // remains -1 if !ok (XXX: bad method API) + } + + static void +diff --git a/src/HttpHeaderTools.cc b/src/HttpHeaderTools.cc +index d8c29d8..02087cd 100644 +--- a/src/HttpHeaderTools.cc ++++ b/src/HttpHeaderTools.cc +@@ -188,6 +188,33 @@ httpHeaderParseInt(const char *start, int *value) + return 1; + } + ++bool ++httpHeaderParseOffset(const char *start, int64_t *value, char **endPtr) ++{ ++ char *end = nullptr; ++ errno = 0; ++ ++ const int64_t res = strtoll(start, &end, 10); ++ if (errno && !res) { ++ debugs(66, 7, "failed to parse malformed offset in " << start); ++ return false; ++ } ++ if (errno == ERANGE && (res == LLONG_MIN || res == LLONG_MAX)) { // no overflow ++ debugs(66, 7, "failed to parse huge offset in " << start); ++ return false; ++ } ++ if (start == end) { ++ debugs(66, 7, "failed to parse empty offset"); ++ 
return false; ++ } ++ *value = res; ++ if (endPtr) ++ *endPtr = end; ++ debugs(66, 7, "offset " << start << " parsed as " << res); ++ return true; ++} ++ ++ + int + httpHeaderParseOffset(const char *start, int64_t * value) + { +diff --git a/src/HttpHeaderTools.h b/src/HttpHeaderTools.h +index 509d940..2d97ad4 100644 +--- a/src/HttpHeaderTools.h ++++ b/src/HttpHeaderTools.h +@@ -113,7 +113,13 @@ public: + bool quoted; + }; + +-int httpHeaderParseOffset(const char *start, int64_t * off); ++/// A strtoll(10) wrapper that checks for strtoll() failures and other problems. ++/// XXX: This function is not fully compatible with some HTTP syntax rules. ++/// Just like strtoll(), allows whitespace prefix, a sign, and _any_ suffix. ++/// Requires at least one digit to be present. ++/// Sets "off" and "end" arguments if and only if no problems were found. ++/// \return true if and only if no problems were found. ++bool httpHeaderParseOffset(const char *start, int64_t *offPtr, char **endPtr = nullptr); + + HttpHeaderFieldInfo *httpHeaderBuildFieldsInfo(const HttpHeaderFieldAttrs * attrs, int count); + void httpHeaderDestroyFieldsInfo(HttpHeaderFieldInfo * info, int count); +diff --git a/src/http/ContentLengthInterpreter.cc b/src/http/ContentLengthInterpreter.cc +new file mode 100644 +index 0000000..1d40f4a +--- /dev/null ++++ b/src/http/ContentLengthInterpreter.cc +@@ -0,0 +1,190 @@ ++/* ++ * Copyright (C) 1996-2016 The Squid Software Foundation and contributors ++ * ++ * Squid software is distributed under GPLv2+ license and includes ++ * contributions from numerous individuals and organizations. ++ * Please see the COPYING and CONTRIBUTORS files for details. 
++ */ ++ ++/* DEBUG: section 55 HTTP Header */ ++ ++#include "squid.h" ++#include "base/CharacterSet.h" ++#include "Debug.h" ++#include "http/ContentLengthInterpreter.h" ++#include "HttpHeaderTools.h" ++#include "SquidConfig.h" ++#include "SquidString.h" ++#include "StrList.h" ++ ++Http::ContentLengthInterpreter::ContentLengthInterpreter(const int aDebugLevel): ++ value(-1), ++ headerWideProblem(nullptr), ++ debugLevel(aDebugLevel), ++ sawBad(false), ++ needsSanitizing(false), ++ sawGood(false) ++{ ++} ++ ++/// characters HTTP permits tolerant parsers to accept as delimiters ++static const CharacterSet & ++RelaxedDelimiterCharacters() ++{ ++ // RFC 7230 section 3.5 ++ // tolerant parser MAY accept any of SP, HTAB, VT (%x0B), FF (%x0C), ++ // or bare CR as whitespace between request-line fields ++ static const CharacterSet RelaxedDels = ++ (CharacterSet::SP + ++ CharacterSet::HTAB + ++ CharacterSet("VT,FF","\x0B\x0C") + ++ CharacterSet::CR).rename("relaxed-WSP"); ++ ++ return RelaxedDels; ++} ++ ++const CharacterSet & ++Http::ContentLengthInterpreter::WhitespaceCharacters() ++{ ++ return Config.onoff.relaxed_header_parser ? ++ RelaxedDelimiterCharacters() : CharacterSet::WSP; ++} ++ ++const CharacterSet & ++Http::ContentLengthInterpreter::DelimiterCharacters() ++{ ++ return Config.onoff.relaxed_header_parser ? 
++ RelaxedDelimiterCharacters() : CharacterSet::SP; ++} ++ ++/// checks whether all characters before the Content-Length number are allowed ++/// \returns the start of the digit sequence (or nil on errors) ++const char * ++Http::ContentLengthInterpreter::findDigits(const char *prefix, const char * const valueEnd) const ++{ ++ // skip leading OWS in RFC 7230's `OWS field-value OWS` ++ const CharacterSet &whitespace = WhitespaceCharacters(); ++ while (prefix < valueEnd) { ++ const auto ch = *prefix; ++ if (CharacterSet::DIGIT[ch]) ++ return prefix; // common case: a pre-trimmed field value ++ if (!whitespace[ch]) ++ return nullptr; // (trimmed) length does not start with a digit ++ ++prefix; ++ } ++ return nullptr; // empty or whitespace-only value ++} ++ ++/// checks whether all characters after the Content-Length are allowed ++bool ++Http::ContentLengthInterpreter::goodSuffix(const char *suffix, const char * const end) const ++{ ++ // optimize for the common case that does not need delimiters ++ if (suffix == end) ++ return true; ++ ++ for (const CharacterSet &delimiters = DelimiterCharacters(); ++ suffix < end; ++suffix) { ++ if (!delimiters[*suffix]) ++ return false; ++ } ++ // needsSanitizing = true; // TODO: Always remove trailing whitespace? 
++ return true; // including empty suffix ++} ++ ++/// handles a single-token Content-Length value ++/// rawValue null-termination requirements are those of httpHeaderParseOffset() ++bool ++Http::ContentLengthInterpreter::checkValue(const char *rawValue, const int valueSize) ++{ ++ Must(!sawBad); ++ ++ const auto valueEnd = rawValue + valueSize; ++ ++ const auto digits = findDigits(rawValue, valueEnd); ++ if (!digits) { ++ debugs(55, debugLevel, "WARNING: Leading garbage or empty value in" << Raw("Content-Length", rawValue, valueSize)); ++ sawBad = true; ++ return false; ++ } ++ ++ int64_t latestValue = -1; ++ char *suffix = nullptr; ++ ++ if (!httpHeaderParseOffset(digits, &latestValue, &suffix)) { ++ debugs(55, DBG_IMPORTANT, "WARNING: Malformed" << Raw("Content-Length", rawValue, valueSize)); ++ sawBad = true; ++ return false; ++ } ++ ++ if (latestValue < 0) { ++ debugs(55, debugLevel, "WARNING: Negative" << Raw("Content-Length", rawValue, valueSize)); ++ sawBad = true; ++ return false; ++ } ++ ++ // check for garbage after the number ++ if (!goodSuffix(suffix, valueEnd)) { ++ debugs(55, debugLevel, "WARNING: Trailing garbage in" << Raw("Content-Length", rawValue, valueSize)); ++ sawBad = true; ++ return false; ++ } ++ ++ if (sawGood) { ++ /* we have found at least two, possibly identical values */ ++ ++ needsSanitizing = true; // replace identical values with a single value ++ ++ const bool conflicting = value != latestValue; ++ if (conflicting) ++ headerWideProblem = "Conflicting"; // overwrite any lesser problem ++ else if (!headerWideProblem) // preserve a possibly worse problem ++ headerWideProblem = "Duplicate"; ++ ++ // with relaxed_header_parser, identical values are permitted ++ sawBad = !Config.onoff.relaxed_header_parser || conflicting; ++ return false; // conflicting or duplicate ++ } ++ ++ sawGood = true; ++ value = latestValue; ++ return true; ++} ++ ++/// handles Content-Length: a, b, c ++bool ++Http::ContentLengthInterpreter::checkList(const 
String &list) ++{ ++ Must(!sawBad); ++ ++ if (!Config.onoff.relaxed_header_parser) { ++ debugs(55, debugLevel, "WARNING: List-like" << Raw("Content-Length", list.rawBuf(), list.size())); ++ sawBad = true; ++ return false; ++ } ++ ++ needsSanitizing = true; // remove extra commas (at least) ++ ++ const char *pos = nullptr; ++ const char *item = nullptr;; ++ int ilen = -1; ++ while (strListGetItem(&list, ',', &item, &ilen, &pos)) { ++ if (!checkValue(item, ilen) && sawBad) ++ break; ++ // keep going after a duplicate value to find conflicting ones ++ } ++ return false; // no need to keep this list field; it will be sanitized away ++} ++ ++bool ++Http::ContentLengthInterpreter::checkField(const String &rawValue) ++{ ++ if (sawBad) ++ return false; // one rotten apple is enough to spoil all of them ++ ++ // TODO: Optimize by always parsing the first integer first. ++ return rawValue.pos(',') ? ++ checkList(rawValue) : ++ checkValue(rawValue.rawBuf(), rawValue.size()); ++} ++ +diff --git a/src/http/ContentLengthInterpreter.h b/src/http/ContentLengthInterpreter.h +new file mode 100644 +index 0000000..ba7080c +--- /dev/null ++++ b/src/http/ContentLengthInterpreter.h +@@ -0,0 +1,66 @@ ++/* ++ * Copyright (C) 1996-2016 The Squid Software Foundation and contributors ++ * ++ * Squid software is distributed under GPLv2+ license and includes ++ * contributions from numerous individuals and organizations. ++ * Please see the COPYING and CONTRIBUTORS files for details. ++ */ ++ ++#ifndef SQUID_SRC_HTTP_CONTENTLENGTH_INTERPRETER_H ++#define SQUID_SRC_HTTP_CONTENTLENGTH_INTERPRETER_H ++ ++class String; ++ ++namespace Http ++{ ++ ++/// Finds the intended Content-Length value while parsing message-header fields. ++/// Deals with complications such as value lists and/or repeated fields. 
++class ContentLengthInterpreter ++{ ++public: ++ explicit ContentLengthInterpreter(const int aDebugLevel); ++ ++ /// updates history based on the given message-header field ++ /// \return true iff the field should be added/remembered for future use ++ bool checkField(const String &field); ++ ++ /// intended Content-Length value if sawGood is set and sawBad is not set ++ /// meaningless otherwise ++ int64_t value; ++ ++ /* for debugging (declared here to minimize padding) */ ++ const char *headerWideProblem; ///< worst header-wide problem found (or nil) ++ const int debugLevel; ///< debugging level for certain warnings ++ ++ /// whether a malformed Content-Length value was present ++ bool sawBad; ++ ++ /// whether all remembered fields should be removed ++ /// removed fields ought to be replaced with the intended value (if known) ++ /// irrelevant if sawBad is set ++ bool needsSanitizing; ++ ++ /// whether a valid field value was present, possibly among problematic ones ++ /// irrelevant if sawBad is set ++ bool sawGood; ++ ++ /// Whitespace between protocol elements in restricted contexts like ++ /// request line, status line, asctime-date, and credentials ++ /// Seen in RFCs as SP but may be "relaxed" by us. ++ /// See also: WhitespaceCharacters(). ++ /// XXX: Misnamed and overused. 
++ static const CharacterSet &DelimiterCharacters(); ++ ++ static const CharacterSet &WhitespaceCharacters(); ++protected: ++ const char *findDigits(const char *prefix, const char *valueEnd) const; ++ bool goodSuffix(const char *suffix, const char * const end) const; ++ bool checkValue(const char *start, const int size); ++ bool checkList(const String &list); ++}; ++ ++} // namespace Http ++ ++#endif /* SQUID_SRC_HTTP_CONTENTLENGTH_INTERPRETER_H */ ++ +diff --git a/src/http/Makefile.am b/src/http/Makefile.am +index 7887ef0..78b503e 100644 +--- a/src/http/Makefile.am ++++ b/src/http/Makefile.am +@@ -11,6 +11,8 @@ include $(top_srcdir)/src/TestHeaders.am + noinst_LTLIBRARIES = libsquid-http.la + + libsquid_http_la_SOURCES = \ ++ ContentLengthInterpreter.cc \ ++ ContentLengthInterpreter.h \ + MethodType.cc \ + MethodType.h \ + ProtocolVersion.h \ +diff --git a/src/http/Makefile.in b/src/http/Makefile.in +index f5b62fb..c7891ae 100644 +--- a/src/http/Makefile.in ++++ b/src/http/Makefile.in +@@ -160,7 +160,7 @@ CONFIG_CLEAN_VPATH_FILES = + LTLIBRARIES = $(noinst_LTLIBRARIES) + libsquid_http_la_LIBADD = + am_libsquid_http_la_OBJECTS = MethodType.lo StatusCode.lo \ +- StatusLine.lo ++ StatusLine.lo ContentLengthInterpreter.lo + libsquid_http_la_OBJECTS = $(am_libsquid_http_la_OBJECTS) + AM_V_lt = $(am__v_lt_@AM_V@) + am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) +@@ -694,6 +694,8 @@ COMPAT_LIB = $(top_builddir)/compat/libcompat-squid.la $(LIBPROFILER) + subst_perlshell = sed -e 's,[@]PERL[@],$(PERL),g' <$(srcdir)/$@.pl.in >$@ || ($(RM) -f $@ ; exit 1) + noinst_LTLIBRARIES = libsquid-http.la + libsquid_http_la_SOURCES = \ ++ ContentLengthInterpreter.cc \ ++ ContentLengthInterpreter.h \ + MethodType.cc \ + MethodType.h \ + ProtocolVersion.h \ +-- +2.21.0 + diff --git a/squid-3.5.20-CVE-2020-15810.patch b/squid-3.5.20-CVE-2020-15810.patch new file mode 100644 index 0000000..b80108f --- /dev/null +++ b/squid-3.5.20-CVE-2020-15810.patch @@ -0,0 +1,52 @@ +diff --git 
a/src/HttpHeader.cc b/src/HttpHeader.cc +index ef60c02..ce55a6f 100644 +--- a/src/HttpHeader.cc ++++ b/src/HttpHeader.cc +@@ -12,6 +12,7 @@ + #include "base64.h" + #include "globals.h" + #include "http/ContentLengthInterpreter.h" ++#include "base/CharacterSet.h" + #include "HttpHdrCc.h" + #include "HttpHdrContRange.h" + #include "HttpHdrSc.h" +@@ -707,18 +708,6 @@ HttpHeader::parse(const char *header_start, const char *header_end) + return 0; + } + +- +- if (e->id == HDR_OTHER && stringHasWhitespace(e->name.termedBuf())) { +- debugs(55, warnOnError, "WARNING: found whitespace in HTTP header name {" << +- getStringPrefix(field_start, field_end) << "}"); +- +- if (!Config.onoff.relaxed_header_parser) { +- delete e; +- PROF_stop(HttpHeaderParse); +- return reset(); +- } +- } +- + addEntry(e); + } + +@@ -1653,6 +1642,20 @@ HttpHeaderEntry::parse(const char *field_start, const char *field_end) + return NULL; + } + ++ /* RFC 7230 section 3.2: ++ * ++ * header-field = field-name ":" OWS field-value OWS ++ * field-name = token ++ * token = 1*TCHAR ++ */ ++ for (const char *pos = field_start; pos < (field_start+name_len); ++pos) { ++ if (!CharacterSet::TCHAR[*pos]) { ++ debugs(55, 2, "found header with invalid characters in " << ++ Raw("field-name", field_start, min(name_len,100)) << "..."); ++ return nullptr; ++ } ++ } ++ + /* now we know we can parse it */ + + debugs(55, 9, "parsing HttpHeaderEntry: near '" << getStringPrefix(field_start, field_end) << "'"); diff --git a/squid-3.5.20-CVE-2020-15811.patch b/squid-3.5.20-CVE-2020-15811.patch new file mode 100644 index 0000000..445bebc --- /dev/null +++ b/squid-3.5.20-CVE-2020-15811.patch @@ -0,0 +1,170 @@ +diff --git a/src/HttpHeader.cc b/src/HttpHeader.cc +index ce55a6f..6ce06f2 100644 +--- a/src/HttpHeader.cc ++++ b/src/HttpHeader.cc +@@ -470,6 +470,7 @@ HttpHeader::operator =(const HttpHeader &other) + update(&other, NULL); // will update the mask as well + len = other.len; + conflictingContentLength_ = 
other.conflictingContentLength_; ++ teUnsupported_ = other.teUnsupported_; + } + return *this; + } +@@ -519,6 +520,7 @@ HttpHeader::clean() + httpHeaderMaskInit(&mask, 0); + len = 0; + conflictingContentLength_ = false; ++ teUnsupported_ = false; + PROF_stop(HttpHeaderClean); + } + +@@ -717,12 +719,24 @@ HttpHeader::parse(const char *header_start, const char *header_end) + Raw("header", header_start, (size_t)(header_end - header_start))); + } + +- if (chunked()) { ++ ++ ++ String rawTe; ++ if (getByIdIfPresent(HDR_TRANSFER_ENCODING, &rawTe)) { + // RFC 2616 section 4.4: ignore Content-Length with Transfer-Encoding + // RFC 7230 section 3.3.3 #3: Transfer-Encoding overwrites Content-Length + delById(HDR_CONTENT_LENGTH); +- + // and clen state becomes irrelevant ++ ++ if (rawTe == "chunked") { ++ ; // leave header present for chunked() method ++ } else if (rawTe == "identity") { // deprecated. no coding ++ delById(HDR_TRANSFER_ENCODING); ++ } else { ++ // This also rejects multiple encodings until we support them properly. ++ debugs(55, warnOnError, "WARNING: unsupported Transfer-Encoding used by client: " << rawTe); ++ teUnsupported_ = true; ++ } + } else if (clen.sawBad) { + // ensure our callers do not accidentally see bad Content-Length values + delById(HDR_CONTENT_LENGTH); +@@ -1084,6 +1098,18 @@ HttpHeader::getStrOrList(http_hdr_type id) const + return String(); + } + ++bool ++HttpHeader::getByIdIfPresent(http_hdr_type id, String *result) const ++{ ++ if (id == HDR_BAD_HDR) ++ return false; ++ if (!has(id)) ++ return false; ++ if (result) ++ *result = getStrOrList(id); ++ return true; ++} ++ + /* + * Returns the value of the specified header and/or an undefined String. 
+ */ +diff --git a/src/HttpHeader.h b/src/HttpHeader.h +index 836a26f..c49b105 100644 +--- a/src/HttpHeader.h ++++ b/src/HttpHeader.h +@@ -239,6 +239,9 @@ public: + bool getByNameIfPresent(const char *name, String &value) const; + String getByNameListMember(const char *name, const char *member, const char separator) const; + String getListMember(http_hdr_type id, const char *member, const char separator) const; ++ /// returns true iff a [possibly empty] field identified by id is there ++ /// when returning true, also sets the `result` parameter (if it is not nil) ++ bool getByIdIfPresent(http_hdr_type id, String *result) const; + int has(http_hdr_type id) const; + void putInt(http_hdr_type id, int number); + void putInt64(http_hdr_type id, int64_t number); +@@ -267,7 +270,13 @@ public: + int hasListMember(http_hdr_type id, const char *member, const char separator) const; + int hasByNameListMember(const char *name, const char *member, const char separator) const; + void removeHopByHopEntries(); +- inline bool chunked() const; ///< whether message uses chunked Transfer-Encoding ++ ++ /// whether the message uses chunked Transfer-Encoding ++ /// optimized implementation relies on us rejecting/removing other codings ++ bool chunked() const { return has(HDR_TRANSFER_ENCODING); } ++ ++ /// whether message used an unsupported and/or invalid Transfer-Encoding ++ bool unsupportedTe() const { return teUnsupported_; } + + /* protected, do not use these, use interface functions instead */ + std::vector entries; /**< parsed fields in raw format */ +@@ -282,6 +291,9 @@ protected: + private: + HttpHeaderEntry *findLastEntry(http_hdr_type id) const; + bool conflictingContentLength_; ///< found different Content-Length fields ++ /// unsupported encoding, unnecessary syntax characters, and/or ++ /// invalid field-value found in Transfer-Encoding header ++ bool teUnsupported_ = false; + }; + + int httpHeaderParseQuotedString(const char *start, const int len, String *val); +@@ -293,13 
+305,6 @@ int httpHeaderHasByNameListMember(const HttpHeader * hdr, const char *name, cons + void httpHeaderUpdate(HttpHeader * old, const HttpHeader * fresh, const HttpHeaderMask * denied_mask); + void httpHeaderCalcMask(HttpHeaderMask * mask, http_hdr_type http_hdr_type_enums[], size_t count); + +-inline bool +-HttpHeader::chunked() const +-{ +- return has(HDR_TRANSFER_ENCODING) && +- hasListMember(HDR_TRANSFER_ENCODING, "chunked", ','); +-} +- + void httpHeaderInitModule(void); + void httpHeaderCleanModule(void); + +diff --git a/src/client_side.cc b/src/client_side.cc +index 261abdf..6858eb4 100644 +--- a/src/client_side.cc ++++ b/src/client_side.cc +@@ -2581,9 +2581,7 @@ clientProcessRequest(ConnStateData *conn, HttpParser *hp, ClientSocketContext *c + ClientHttpRequest *http = context->http; + HttpRequest::Pointer request; + bool notedUseOfBuffer = false; +- bool chunked = false; + bool mustReplyToOptions = false; +- bool unsupportedTe = false; + bool expectBody = false; + + // temporary hack to avoid splitting this huge function with sensitive code +@@ -2767,13 +2765,7 @@ clientProcessRequest(ConnStateData *conn, HttpParser *hp, ClientSocketContext *c + // TODO: this effectively obsoletes a lot of conn->FOO copying. That needs cleaning up later. 
+ request->clientConnectionManager = conn; + +- if (request->header.chunked()) { +- chunked = true; +- } else if (request->header.has(HDR_TRANSFER_ENCODING)) { +- const String te = request->header.getList(HDR_TRANSFER_ENCODING); +- // HTTP/1.1 requires chunking to be the last encoding if there is one +- unsupportedTe = te.size() && te != "identity"; +- } // else implied identity coding ++ const auto unsupportedTe = request->header.unsupportedTe(); + + mustReplyToOptions = (method == Http::METHOD_OPTIONS) && + (request->header.getInt64(HDR_MAX_FORWARDS) == 0); +@@ -2791,6 +2783,7 @@ clientProcessRequest(ConnStateData *conn, HttpParser *hp, ClientSocketContext *c + return; + } + ++ const auto chunked = request->header.chunked(); + if (!chunked && !clientIsContentLengthValid(request.getRaw())) { + clientStreamNode *node = context->getClientReplyContext(); + clientReplyContext *repContext = dynamic_cast(node->data.getRaw()); +diff --git a/src/http.cc b/src/http.cc +index 08531dc..f0fe648 100644 +--- a/src/http.cc ++++ b/src/http.cc +@@ -1296,6 +1296,9 @@ HttpStateData::continueAfterParsingHeader() + } else if (vrep->header.conflictingContentLength()) { + fwd->dontRetry(true); + error = ERR_INVALID_RESP; ++ } else if (vrep->header.unsupportedTe()) { ++ fwd->dontRetry(true); ++ error = ERR_INVALID_RESP; + } else { + return true; // done parsing, got reply, and no error + } diff --git a/squid-3.5.20-CVE-2020-24606.patch b/squid-3.5.20-CVE-2020-24606.patch new file mode 100644 index 0000000..ad74097 --- /dev/null +++ b/squid-3.5.20-CVE-2020-24606.patch @@ -0,0 +1,20 @@ +diff --git a/src/peer_digest.cc b/src/peer_digest.cc +index 1b81fe7..25a18e0 100644 +--- a/src/peer_digest.cc ++++ b/src/peer_digest.cc +@@ -469,6 +469,15 @@ peerDigestHandleReply(void *data, StoreIOBuffer receivedData) + + } while (cbdataReferenceValid(fetch) && prevstate != fetch->state && fetch->bufofs > 0); + ++ // Check for EOF here, thus giving the parser one extra run. 
We could avoid this overhead by ++ // checking at the beginning of this function. However, in this case, we would have to require ++ // that the parser does not regard EOF as a special condition (it is true now but may change ++ // in the future). ++ if (!receivedData.length) { // EOF ++ peerDigestFetchAbort(fetch, fetch->buf, "premature end of digest reply"); ++ return; ++ } ++ + /* Update the copy offset */ + fetch->offset += receivedData.length; + diff --git a/squid-3.5.20-CVE-2020-25097.patch b/squid-3.5.20-CVE-2020-25097.patch new file mode 100644 index 0000000..dec4382 --- /dev/null +++ b/squid-3.5.20-CVE-2020-25097.patch @@ -0,0 +1,28 @@ +diff --git a/src/url.cc b/src/url.cc +index ebdecbf..44255c2 100644 +--- a/src/url.cc ++++ b/src/url.cc +@@ -256,8 +256,9 @@ urlParse(const HttpRequestMethod& method, char *url, HttpRequest *request) + return NULL; + *dst = '\0'; + +- // bug 3074: received 'path' starting with '?', '#', or '\0' implies '/' +- if (*src == '?' || *src == '#' || *src == '\0') { ++ // We are looking at path-abempty. 
++ if (*src != '/') { ++ // path-empty, including the end of the `src` c-string cases + urlpath[0] = '/'; + dst = &urlpath[1]; + } else { +@@ -271,11 +272,6 @@ urlParse(const HttpRequestMethod& method, char *url, HttpRequest *request) + /* We -could- be at the end of the buffer here */ + if (i > l) + return NULL; +- /* If the URL path is empty we set it to be "/" */ +- if (dst == urlpath) { +- *dst = '/'; +- ++dst; +- } + *dst = '\0'; + + protocol = urlParseProtocol(proto); diff --git a/squid-3.5.20-CVE-2020-8449-and-8450.patch b/squid-3.5.20-CVE-2020-8449-and-8450.patch new file mode 100644 index 0000000..645931c --- /dev/null +++ b/squid-3.5.20-CVE-2020-8449-and-8450.patch @@ -0,0 +1,49 @@ +diff --git a/src/client_side.cc b/src/client_side.cc +index 01760f3..261abdf 100644 +--- a/src/client_side.cc ++++ b/src/client_side.cc +@@ -2018,6 +2018,23 @@ setLogUri(ClientHttpRequest * http, char const *uri, bool cleanUrl) + } + } + ++static char * ++getHostHeader(const char *req_hdr) ++{ ++ char *host = mime_get_header(req_hdr, "Host"); ++ if (!host) ++ return NULL; ++ ++ // check the header contents are valid ++ for(const char *c = host; *c != '\0'; ++c) { ++ // currently only used for pre-parse Host header, ensure valid domain[:port] or ip[:port] ++ static const CharacterSet hostChars = CharacterSet("host",":[].-_") + CharacterSet::ALPHA + CharacterSet::DIGIT; ++ if (!hostChars[*c]) ++ return NULL; // error. 
line contains character not accepted in Host header ++ } ++ return host; ++} ++ + static void + prepareAcceleratedURL(ConnStateData * conn, ClientHttpRequest *http, char *url, const char *req_hdr) + { +@@ -2060,9 +2077,9 @@ prepareAcceleratedURL(ConnStateData * conn, ClientHttpRequest *http, char *url, + + const bool switchedToHttps = conn->switchedToHttps(); + const bool tryHostHeader = vhost || switchedToHttps; +- if (tryHostHeader && (host = mime_get_header(req_hdr, "Host")) != NULL) { ++ if (tryHostHeader && (host = getHostHeader(req_hdr)) != NULL && strlen(host) <= SQUIDHOSTNAMELEN) { + debugs(33, 5, "ACCEL VHOST REWRITE: vhost=" << host << " + vport=" << vport); +- char thost[256]; ++ char thost[SQUIDHOSTNAMELEN + 6 /* ':' vport */]; + if (vport > 0) { + thost[0] = '\0'; + char *t = NULL; +@@ -2119,7 +2136,7 @@ prepareTransparentURL(ConnStateData * conn, ClientHttpRequest *http, char *url, + + /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */ + +- if ((host = mime_get_header(req_hdr, "Host")) != NULL) { ++ if ((host = getHostHeader(req_hdr)) != NULL) { + int url_sz = strlen(url) + 32 + Config.appendDomainLen + + strlen(host); + http->uri = (char *)xcalloc(url_sz, 1); diff --git a/squid-3.5.20-CVE-2021-46784.patch b/squid-3.5.20-CVE-2021-46784.patch new file mode 100644 index 0000000..2389414 --- /dev/null +++ b/squid-3.5.20-CVE-2021-46784.patch @@ -0,0 +1,129 @@ +From 780c4ea1b4c9d2fb41f6962aa6ed73ae57f74b2b Mon Sep 17 00:00:00 2001 +From: Joshua Rogers +Date: Mon, 18 Apr 2022 13:42:36 +0000 +Subject: [PATCH] Improve handling of Gopher responses (#1022) + +--- + src/gopher.cc | 45 ++++++++++++++++++++------------------------- + 1 file changed, 20 insertions(+), 25 deletions(-) + +diff --git a/src/gopher.cc b/src/gopher.cc +index 6d4ab1e..07acdba 100644 +--- a/src/gopher.cc ++++ b/src/gopher.cc +@@ -365,7 +365,6 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len) + char *lpos = NULL; + char *tline = NULL; + LOCAL_ARRAY(char, line, 
TEMP_BUF_SIZE); +- LOCAL_ARRAY(char, tmpbuf, TEMP_BUF_SIZE); + char *name = NULL; + char *selector = NULL; + char *host = NULL; +@@ -375,7 +374,6 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len) + char gtype; + StoreEntry *entry = NULL; + +- memset(tmpbuf, '\0', TEMP_BUF_SIZE); + memset(line, '\0', TEMP_BUF_SIZE); + + entry = gopherState->entry; +@@ -410,7 +408,7 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len) + return; + } + +- String outbuf; ++ SBuf outbuf; + + if (!gopherState->HTML_header_added) { + if (gopherState->conversion == gopher_ds::HTML_CSO_RESULT) +@@ -577,34 +575,34 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len) + break; + } + +- memset(tmpbuf, '\0', TEMP_BUF_SIZE); +- + if ((gtype == GOPHER_TELNET) || (gtype == GOPHER_3270)) { + if (strlen(escaped_selector) != 0) +- snprintf(tmpbuf, TEMP_BUF_SIZE, " %s\n", +- icon_url, escaped_selector, rfc1738_escape_part(host), +- *port ? ":" : "", port, html_quote(name)); ++ outbuf.appendf(" %s\n", ++ icon_url, escaped_selector, rfc1738_escape_part(host), ++ *port ? ":" : "", port, html_quote(name)); + else +- snprintf(tmpbuf, TEMP_BUF_SIZE, " %s\n", +- icon_url, rfc1738_escape_part(host), *port ? ":" : "", +- port, html_quote(name)); ++ outbuf.appendf(" %s\n", ++ icon_url, rfc1738_escape_part(host), *port ? 
":" : "", ++ port, html_quote(name)); + + } else if (gtype == GOPHER_INFO) { +- snprintf(tmpbuf, TEMP_BUF_SIZE, "\t%s\n", html_quote(name)); ++ outbuf.appendf("\t%s\n", html_quote(name)); + } else { + if (strncmp(selector, "GET /", 5) == 0) { + /* WWW link */ +- snprintf(tmpbuf, TEMP_BUF_SIZE, " %s\n", +- icon_url, host, rfc1738_escape_unescaped(selector + 5), html_quote(name)); ++ outbuf.appendf(" %s\n", ++ icon_url, host, rfc1738_escape_unescaped(selector + 5), html_quote(name)); ++ } else if (gtype == GOPHER_WWW) { ++ outbuf.appendf(" %s\n", ++ icon_url, rfc1738_escape_unescaped(selector), html_quote(name)); + } else { + /* Standard link */ +- snprintf(tmpbuf, TEMP_BUF_SIZE, " %s\n", +- icon_url, host, gtype, escaped_selector, html_quote(name)); ++ outbuf.appendf(" %s\n", ++ icon_url, host, gtype, escaped_selector, html_quote(name)); + } + } + + safe_free(escaped_selector); +- outbuf.append(tmpbuf); + } else { + memset(line, '\0', TEMP_BUF_SIZE); + continue; +@@ -637,13 +635,12 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len) + break; + + if (gopherState->cso_recno != recno) { +- snprintf(tmpbuf, TEMP_BUF_SIZE, "

Record# %d
%s

\n
", recno, html_quote(result));
++                    outbuf.appendf("

Record# %d
%s

\n
", recno, html_quote(result));
+                     gopherState->cso_recno = recno;
+                 } else {
+-                    snprintf(tmpbuf, TEMP_BUF_SIZE, "%s\n", html_quote(result));
++                    outbuf.appendf("%s\n", html_quote(result));
+                 }
+ 
+-                outbuf.append(tmpbuf);
+                 break;
+             } else {
+                 int code;
+@@ -671,8 +668,7 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)
+ 
+                 case 502: { /* Too Many Matches */
+                     /* Print the message the server returns */
+-                    snprintf(tmpbuf, TEMP_BUF_SIZE, "

%s

\n
", html_quote(result));
+-                    outbuf.append(tmpbuf);
++                    outbuf.appendf("

%s

\n
", html_quote(result));
+                     break;
+                 }
+ 
+@@ -688,13 +684,12 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)
+ 
+     }               /* while loop */
+ 
+-    if (outbuf.size() > 0) {
+-        entry->append(outbuf.rawBuf(), outbuf.size());
++    if (outbuf.length() > 0) {
++        entry->append(outbuf.rawContent(), outbuf.length());
+         /* now let start sending stuff to client */
+         entry->flush();
+     }
+ 
+-    outbuf.clean();
+     return;
+ }
+ 
diff --git a/squid-3.5.20-cache-peer-tolower.patch b/squid-3.5.20-cache-peer-tolower.patch
new file mode 100644
index 0000000..71b395d
--- /dev/null
+++ b/squid-3.5.20-cache-peer-tolower.patch
@@ -0,0 +1,32 @@
+From d3527ec67a9ddad9c189ae360d4f6181d5413bfa Mon Sep 17 00:00:00 2001
+From: uhliarik 
+Date: Mon, 15 Jul 2019 03:42:39 +0000
+Subject: [PATCH] Bug 4966: Lower cache_peer hostname (#420)
+
+When parsing entries from /etc/hosts file, they are all lowered
+(see bug 3040). If cache_peer hostname is uppercase, it will
+lead to DNS resolution failure. Lowering cache_peer host fixes
+this issue.
+
+This change may expose broken Squid configurations that
+incorrectly relied on non-lowercase peer host names to
+bypass Squid's "is this cache_peer different from me?"
+check. Though such configurations should encounter
+forwarding loop errors later anyway.
+---
+ src/cache_cf.cc | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/src/cache_cf.cc b/src/cache_cf.cc
+index 8886b68..e6b231c 100644
+--- a/src/cache_cf.cc
++++ b/src/cache_cf.cc
+@@ -2075,7 +2075,7 @@ parse_peer(CachePeer ** head)
+         self_destruct();
+ 
+     p->host = xstrdup(token);
+-
++    Tolower(p->host);
+     p->name = xstrdup(token);
+ 
+     if ((token = ConfigParser::NextToken()) == NULL)
diff --git a/squid-3.5.20-cache-siblings-gw.patch b/squid-3.5.20-cache-siblings-gw.patch
new file mode 100644
index 0000000..95b0edf
--- /dev/null
+++ b/squid-3.5.20-cache-siblings-gw.patch
@@ -0,0 +1,308 @@
+diff --git a/src/FwdState.cc b/src/FwdState.cc
+index f16acd0..c1d8a0f 100644
+--- a/src/FwdState.cc
++++ b/src/FwdState.cc
+@@ -139,7 +139,6 @@ FwdState::FwdState(const Comm::ConnectionPointer &client, StoreEntry * e, HttpRe
+     start_t = squid_curtime;
+     serverDestinations.reserve(Config.forward_max_tries);
+     e->lock("FwdState");
+-    EBIT_SET(e->flags, ENTRY_FWD_HDR_WAIT);
+ }
+ 
+ // Called once, right after object creation, when it is safe to set self
+@@ -250,7 +249,6 @@ FwdState::completed()
+             }
+ #endif
+         } else {
+-            EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
+             entry->complete();
+             entry->releaseRequest();
+         }
+@@ -495,7 +493,6 @@ FwdState::complete()
+             debugs(17, 3, HERE << "server FD " << serverConnection()->fd << " not re-forwarding status " << entry->getReply()->sline.status());
+         else
+             debugs(17, 3, HERE << "server (FD closed) not re-forwarding status " << entry->getReply()->sline.status());
+-        EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
+         entry->complete();
+ 
+         if (!Comm::IsConnOpen(serverConn))
+diff --git a/src/MemStore.cc b/src/MemStore.cc
+index 86b6024..405b644 100644
+--- a/src/MemStore.cc
++++ b/src/MemStore.cc
+@@ -402,7 +402,6 @@ MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf, bool eof)
+         const int result = rep->httpMsgParseStep(mb.buf, buf.length, eof);
+         if (result > 0) {
+             assert(rep->pstate == psParsed);
+-            EBIT_CLR(e.flags, ENTRY_FWD_HDR_WAIT);
+         } else if (result < 0) {
+             debugs(20, DBG_IMPORTANT, "Corrupted mem-cached headers: " << e);
+             return false;
+@@ -508,15 +507,9 @@ MemStore::startCaching(StoreEntry &e)
+ void
+ MemStore::copyToShm(StoreEntry &e)
+ {
+-    // prevents remote readers from getting ENTRY_FWD_HDR_WAIT entries and
+-    // not knowing when the wait is over
+-    if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) {
+-        debugs(20, 5, "postponing copying " << e << " for ENTRY_FWD_HDR_WAIT");
+-        return;
+-    }
+-
+     assert(map);
+     assert(e.mem_obj);
++    Must(!EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT));
+ 
+     const int32_t index = e.mem_obj->memCache.index;
+     assert(index >= 0);
+diff --git a/src/client_side_request.cc b/src/client_side_request.cc
+index a824b08..5debc29 100644
+--- a/src/client_side_request.cc
++++ b/src/client_side_request.cc
+@@ -1919,7 +1919,6 @@ ClientHttpRequest::handleAdaptedHeader(HttpMsg *msg)
+         assert(repContext);
+         repContext->createStoreEntry(request->method, request->flags);
+ 
+-        EBIT_CLR(storeEntry()->flags, ENTRY_FWD_HDR_WAIT);
+         request_satisfaction_mode = true;
+         request_satisfaction_offset = 0;
+         storeEntry()->replaceHttpReply(new_rep);
+diff --git a/src/clients/FtpGateway.cc b/src/clients/FtpGateway.cc
+index 9c78bbb..4f8319a 100644
+--- a/src/clients/FtpGateway.cc
++++ b/src/clients/FtpGateway.cc
+@@ -2309,7 +2309,6 @@ Ftp::Gateway::completedListing()
+     ferr.ftp.server_msg = ctrl.message;
+     ctrl.message = NULL;
+     entry->replaceHttpReply( ferr.BuildHttpReply() );
+-    EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
+     entry->flush();
+     entry->unlock("Ftp::Gateway");
+ }
+@@ -2588,8 +2587,6 @@ Ftp::Gateway::appendSuccessHeader()
+ 
+     assert(entry->isEmpty());
+ 
+-    EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
+-
+     entry->buffer();    /* released when done processing current data payload */
+ 
+     filename = (t = urlpath.rpos('/')) ? t + 1 : urlpath.termedBuf();
+diff --git a/src/clients/FtpRelay.cc b/src/clients/FtpRelay.cc
+index ed498b4..f1d4e9e 100644
+--- a/src/clients/FtpRelay.cc
++++ b/src/clients/FtpRelay.cc
+@@ -290,7 +290,6 @@ Ftp::Relay::failedErrorMessage(err_type error, int xerrno)
+     const Http::StatusCode httpStatus = failedHttpStatus(error);
+     HttpReply *const reply = createHttpReply(httpStatus);
+     entry->replaceHttpReply(reply);
+-    EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
+     fwd->request->detailError(error, xerrno);
+ }
+ 
+@@ -373,7 +372,6 @@ void
+ Ftp::Relay::forwardReply()
+ {
+     assert(entry->isEmpty());
+-    EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
+ 
+     HttpReply *const reply = createHttpReply(Http::scNoContent);
+ 
+@@ -448,7 +446,6 @@ Ftp::Relay::startDataDownload()
+            " (" << data.conn->local << ")");
+ 
+     HttpReply *const reply = createHttpReply(Http::scOkay, -1);
+-    EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
+     setVirginReply(reply);
+     adaptOrFinalizeReply();
+ 
+diff --git a/src/enums.h b/src/enums.h
+index 4d04805..50199da 100644
+--- a/src/enums.h
++++ b/src/enums.h
+@@ -96,12 +96,31 @@ typedef enum {
+ enum {
+     ENTRY_SPECIAL,
+     ENTRY_REVALIDATE,
++
++    /// Tiny Store writes are likely. The writes should be aggregated together
++    /// before Squid announces the new content availability to the store
++    /// clients. For example, forming a cached HTTP response header may result
++    /// in dozens of StoreEntry::write() calls, many of which adding as little
++    /// as two bytes. Sharing those small writes with the store clients
++    /// increases overhead, especially because the client code can do nothing
++    /// useful with the written content until the whole response header is
++    /// stored. Might be combined with ENTRY_FWD_HDR_WAIT. TODO: Rename to
++    /// ENTRY_DELAY_WHILE_COALESCING to emphasize the difference from and
++    /// similarity with ENTRY_FWD_HDR_WAIT.
+     DELAY_SENDING,
+     RELEASE_REQUEST,
+     REFRESH_REQUEST,
+     ENTRY_CACHABLE_RESERVED_FOR_FUTURE_USE,
+     ENTRY_DISPATCHED,
+     KEY_PRIVATE,
++
++    /// The current entry response may change. The contents of an entry in this
++    /// state must not be shared with its store clients. For example, Squid
++    /// receives (and buffers) an HTTP/504 response but may decide to retry that
++    /// transaction to receive a successful response from another server
++    /// instead. Might be combined with DELAY_SENDING. TODO: Rename to
++    /// ENTRY_DELAY_WHILE_WOBBLING to emphasize the difference from and
++    /// similarity with DELAY_SENDING.
+     ENTRY_FWD_HDR_WAIT,
+     ENTRY_NEGCACHED,
+     ENTRY_VALIDATED,
+diff --git a/src/gopher.cc b/src/gopher.cc
+index d373e8a..6d4ab1e 100644
+--- a/src/gopher.cc
++++ b/src/gopher.cc
+@@ -233,7 +233,6 @@ gopherMimeCreate(GopherStateData * gopherState)
+     }
+ 
+     assert(entry->isEmpty());
+-    EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
+ 
+     HttpReply *reply = new HttpReply;
+     entry->buffer();
+diff --git a/src/http.cc b/src/http.cc
+index 1dd1e6d..08531dc 100644
+--- a/src/http.cc
++++ b/src/http.cc
+@@ -932,8 +932,8 @@ HttpStateData::haveParsedReplyHeaders()
+ 
+         if (vary.isEmpty()) {
+             entry->makePrivate();
+-            if (!fwd->reforwardableStatus(rep->sline.status()))
+-                EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
++            if (fwd->reforwardableStatus(rep->sline.status()))
++                EBIT_SET(entry->flags, ENTRY_FWD_HDR_WAIT);
+             varyFailure = true;
+         } else {
+             entry->mem_obj->vary_headers = vary;
+@@ -945,8 +945,8 @@ HttpStateData::haveParsedReplyHeaders()
+          * If its not a reply that we will re-forward, then
+          * allow the client to get it.
+          */
+-        if (!fwd->reforwardableStatus(rep->sline.status()))
+-            EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
++        if (fwd->reforwardableStatus(rep->sline.status()))
++            EBIT_SET(entry->flags, ENTRY_FWD_HDR_WAIT);
+ 
+         switch (cacheableReply()) {
+ 
+diff --git a/src/ipc/Forwarder.cc b/src/ipc/Forwarder.cc
+index bf3c428..116d6f0 100644
+--- a/src/ipc/Forwarder.cc
++++ b/src/ipc/Forwarder.cc
+@@ -94,8 +94,10 @@ Ipc::Forwarder::handleRemoteAck()
+ {
+     debugs(54, 3, HERE);
+     request->requestId = 0;
+-    // Do not clear ENTRY_FWD_HDR_WAIT or do entry->complete() because
+-    // it will trigger our client side processing. Let job cleanup close.
++    // Do not do entry->complete() because it will trigger our client side
++    // processing when we no longer own the client-Squid connection.
++    // Let job cleanup close the client-Squid connection that Coordinator
++    // now owns.
+ }
+ 
+ /// Ipc::Forwarder::requestTimedOut wrapper
+diff --git a/src/mgr/Forwarder.cc b/src/mgr/Forwarder.cc
+index 3c4e4f3..7d33a9b 100644
+--- a/src/mgr/Forwarder.cc
++++ b/src/mgr/Forwarder.cc
+@@ -37,7 +37,6 @@ Mgr::Forwarder::Forwarder(const Comm::ConnectionPointer &aConn, const ActionPara
+ 
+     HTTPMSGLOCK(httpRequest);
+     entry->lock("Mgr::Forwarder");
+-    EBIT_SET(entry->flags, ENTRY_FWD_HDR_WAIT);
+ 
+     closer = asyncCall(16, 5, "Mgr::Forwarder::noteCommClosed",
+                        CommCbMemFunT(this, &Forwarder::noteCommClosed));
+@@ -122,7 +121,6 @@ Mgr::Forwarder::sendError(ErrorState *error)
+     Must(entry != NULL);
+     Must(httpRequest != NULL);
+ 
+-    EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
+     entry->buffer();
+     entry->replaceHttpReply(error->BuildHttpReply());
+     entry->expires = squid_curtime;
+diff --git a/src/store.cc b/src/store.cc
+index cbb2676..c5ae817 100644
+--- a/src/store.cc
++++ b/src/store.cc
+@@ -831,8 +831,12 @@ StoreEntry::write (StoreIOBuffer writeBuffer)
+     storeGetMemSpace(writeBuffer.length);
+     mem_obj->write(writeBuffer);
+ 
+-    if (!EBIT_TEST(flags, DELAY_SENDING))
+-        invokeHandlers();
++    if (EBIT_TEST(flags, ENTRY_FWD_HDR_WAIT) && !mem_obj->readAheadPolicyCanRead()) {
++        debugs(20, 3, "allow Store clients to get entry content after buffering too much for " << *this);
++        EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);
++    }
++
++    invokeHandlers();
+ }
+ 
+ /* Append incoming data from a primary server to an entry. */
+@@ -1047,6 +1051,9 @@ StoreEntry::complete()
+ {
+     debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");
+ 
++    // To preserve forwarding retries, call FwdState::complete() instead.
++    EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);
++
+     if (store_status != STORE_PENDING) {
+         /*
+          * if we're not STORE_PENDING, then probably we got aborted
+@@ -1103,6 +1110,9 @@ StoreEntry::abort()
+ 
+     EBIT_SET(flags, ENTRY_ABORTED);
+ 
++    // allow the Store clients to be told about the problem
++    EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);
++
+     setMemStatus(NOT_IN_MEMORY);
+ 
+     store_status = STORE_OK;
+@@ -1890,7 +1900,6 @@ StoreEntry::startWriting()
+ 
+     rep->packHeadersInto(&p);
+     mem_obj->markEndOfReplyHeaders();
+-    EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);
+ 
+     rep->body.packInto(&p);
+ 
+diff --git a/src/store_client.cc b/src/store_client.cc
+index 07a05d4..7ee1b10 100644
+--- a/src/store_client.cc
++++ b/src/store_client.cc
+@@ -282,11 +282,6 @@ storeClientCopy2(StoreEntry * e, store_client * sc)
+         return;
+     }
+ 
+-    if (EBIT_TEST(e->flags, ENTRY_FWD_HDR_WAIT)) {
+-        debugs(90, 5, "storeClientCopy2: returning because ENTRY_FWD_HDR_WAIT set");
+-        return;
+-    }
+-
+     if (sc->flags.store_copying) {
+         sc->flags.copy_event_pending = true;
+         debugs(90, 3, "storeClientCopy2: Queueing storeClientCopyEvent()");
+@@ -720,6 +715,15 @@ storeUnregister(store_client * sc, StoreEntry * e, void *data)
+ void
+ StoreEntry::invokeHandlers()
+ {
++    if (EBIT_TEST(flags, DELAY_SENDING)) {
++        debugs(90, 3, "DELAY_SENDING is on, exiting " << *this);
++        return;
++    }
++    if (EBIT_TEST(flags, ENTRY_FWD_HDR_WAIT)) {
++        debugs(90, 3, "ENTRY_FWD_HDR_WAIT is on, exiting " << *this);
++        return;
++    }
++
+     /* Commit what we can to disk, if appropriate */
+     swapOut();
+     int i = 0;
diff --git a/squid-3.5.20-conf-casecmp.patch b/squid-3.5.20-conf-casecmp.patch
new file mode 100644
index 0000000..42d6c31
--- /dev/null
+++ b/squid-3.5.20-conf-casecmp.patch
@@ -0,0 +1,583 @@
+diff --git a/src/acl/Ip.cc b/src/acl/Ip.cc
+index 4aa2c90..99b0bf9 100644
+--- a/src/acl/Ip.cc
++++ b/src/acl/Ip.cc
+@@ -221,7 +221,7 @@ acl_ip_data::FactoryParse(const char *t)
+     debugs(28, 5, "aclIpParseIpData: " << t);
+ 
+     /* Special ACL RHS "all" matches entire Internet */
+-    if (strcmp(t, "all") == 0) {
++    if (strcasecmp(t, "all") == 0) {
+         debugs(28, 9, "aclIpParseIpData: magic 'all' found.");
+         q->addr1.setAnyAddr();
+         q->addr2.setEmpty();
+@@ -231,8 +231,8 @@ acl_ip_data::FactoryParse(const char *t)
+ 
+     /* Detect some old broken strings equivalent to 'all'.
+      * treat them nicely. But be loud until its fixed.  */
+-    if (strcmp(t, "0/0") == 0 || strcmp(t, "0.0.0.0/0") == 0 || strcmp(t, "0.0.0.0/0.0.0.0") == 0 ||
+-            strcmp(t, "0.0.0.0-255.255.255.255") == 0 || strcmp(t, "0.0.0.0-0.0.0.0/0") == 0) {
++    if (strcasecmp(t, "0/0") == 0 || strcasecmp(t, "0.0.0.0/0") == 0 || strcasecmp(t, "0.0.0.0/0.0.0.0") == 0 ||
++            strcasecmp(t, "0.0.0.0-255.255.255.255") == 0 || strcasecmp(t, "0.0.0.0-0.0.0.0/0") == 0) {
+ 
+         debugs(28,DBG_CRITICAL, "ERROR: '" << t << "' needs to be replaced by the term 'all'.");
+         debugs(28,DBG_CRITICAL, "SECURITY NOTICE: Overriding config setting. Using 'all' instead.");
+@@ -245,14 +245,14 @@ acl_ip_data::FactoryParse(const char *t)
+     /* Special ACL RHS "ipv4" matches IPv4 Internet
+      * A nod to IANA; we include the entire class space in case
+      * they manage to find a way to recover and use it */
+-    if (strcmp(t, "ipv4") == 0) {
++    if (strcasecmp(t, "ipv4") == 0) {
+         q->mask.setNoAddr();
+         q->mask.applyMask(0, AF_INET);
+         return q;
+     }
+ 
+     /* Special ACL RHS "ipv6" matches IPv6-Unicast Internet */
+-    if (strcmp(t, "ipv6") == 0) {
++    if (strcasecmp(t, "ipv6") == 0) {
+         debugs(28, 9, "aclIpParseIpData: magic 'ipv6' found.");
+         r = q; // save head of the list for result.
+ 
+diff --git a/src/adaptation/ServiceConfig.cc b/src/adaptation/ServiceConfig.cc
+index cbae4d4..127b591 100644
+--- a/src/adaptation/ServiceConfig.cc
++++ b/src/adaptation/ServiceConfig.cc
+@@ -55,10 +55,10 @@ Adaptation::ServiceConfig::parseVectPoint(const char *service_configConfig) cons
+     if (q)
+         t = q + 1;
+ 
+-    if (!strcmp(t, "precache"))
++    if (!strcasecmp(t, "precache"))
+         return Adaptation::pointPreCache;
+ 
+-    if (!strcmp(t, "postcache"))
++    if (!strcasecmp(t, "postcache"))
+         return Adaptation::pointPostCache;
+ 
+     return Adaptation::pointNone;
+diff --git a/src/auth/Config.cc b/src/auth/Config.cc
+index d8129c7..a02ccac 100644
+--- a/src/auth/Config.cc
++++ b/src/auth/Config.cc
+@@ -73,7 +73,7 @@ Auth::Config::registerWithCacheManager(void)
+ void
+ Auth::Config::parse(Auth::Config * scheme, int n_configured, char *param_str)
+ {
+-    if (strcmp(param_str, "program") == 0) {
++    if (strcasecmp(param_str, "program") == 0) {
+         if (authenticateProgram)
+             wordlistDestroy(&authenticateProgram);
+ 
+@@ -81,7 +81,7 @@ Auth::Config::parse(Auth::Config * scheme, int n_configured, char *param_str)
+ 
+         requirePathnameExists("Authentication helper program", authenticateProgram->key);
+ 
+-    } else if (strcmp(param_str, "realm") == 0) {
++    } else if (strcasecmp(param_str, "realm") == 0) {
+         realm.clear();
+ 
+         char *token = ConfigParser::NextQuotedOrToEol();
+@@ -97,10 +97,10 @@ Auth::Config::parse(Auth::Config * scheme, int n_configured, char *param_str)
+ 
+         realm = token;
+ 
+-    } else if (strcmp(param_str, "children") == 0) {
++    } else if (strcasecmp(param_str, "children") == 0) {
+         authenticateChildren.parseConfig();
+ 
+-    } else if (strcmp(param_str, "key_extras") == 0) {
++    } else if (strcasecmp(param_str, "key_extras") == 0) {
+         keyExtrasLine = ConfigParser::NextQuotedToken();
+         Format::Format *nlf =  new ::Format::Format(scheme->type());
+         if (!nlf->parse(keyExtrasLine.termedBuf())) {
+diff --git a/src/auth/basic/Config.cc b/src/auth/basic/Config.cc
+index ae84bed..fb800d3 100644
+--- a/src/auth/basic/Config.cc
++++ b/src/auth/basic/Config.cc
+@@ -133,11 +133,11 @@ Auth::Basic::Config::Config() :
+ void
+ Auth::Basic::Config::parse(Auth::Config * scheme, int n_configured, char *param_str)
+ {
+-    if (strcmp(param_str, "credentialsttl") == 0) {
++    if (strcasecmp(param_str, "credentialsttl") == 0) {
+         parse_time_t(&credentialsTTL);
+-    } else if (strcmp(param_str, "casesensitive") == 0) {
++    } else if (strcasecmp(param_str, "casesensitive") == 0) {
+         parse_onoff(&casesensitive);
+-    } else if (strcmp(param_str, "utf8") == 0) {
++    } else if (strcasecmp(param_str, "utf8") == 0) {
+         parse_onoff(&utf8);
+     } else
+         Auth::Config::parse(scheme, n_configured, param_str);
+diff --git a/src/auth/digest/Config.cc b/src/auth/digest/Config.cc
+index 2d24969..a91225b 100644
+--- a/src/auth/digest/Config.cc
++++ b/src/auth/digest/Config.cc
+@@ -602,26 +602,26 @@ Auth::Digest::Config::Config() :
+ void
+ Auth::Digest::Config::parse(Auth::Config * scheme, int n_configured, char *param_str)
+ {
+-    if (strcmp(param_str, "program") == 0) {
++    if (strcasecmp(param_str, "program") == 0) {
+         if (authenticateProgram)
+             wordlistDestroy(&authenticateProgram);
+ 
+         parse_wordlist(&authenticateProgram);
+ 
+         requirePathnameExists("auth_param digest program", authenticateProgram->key);
+-    } else if (strcmp(param_str, "nonce_garbage_interval") == 0) {
++    } else if (strcasecmp(param_str, "nonce_garbage_interval") == 0) {
+         parse_time_t(&nonceGCInterval);
+-    } else if (strcmp(param_str, "nonce_max_duration") == 0) {
++    } else if (strcasecmp(param_str, "nonce_max_duration") == 0) {
+         parse_time_t(&noncemaxduration);
+-    } else if (strcmp(param_str, "nonce_max_count") == 0) {
++    } else if (strcasecmp(param_str, "nonce_max_count") == 0) {
+         parse_int((int *) &noncemaxuses);
+-    } else if (strcmp(param_str, "nonce_strictness") == 0) {
++    } else if (strcasecmp(param_str, "nonce_strictness") == 0) {
+         parse_onoff(&NonceStrictness);
+-    } else if (strcmp(param_str, "check_nonce_count") == 0) {
++    } else if (strcasecmp(param_str, "check_nonce_count") == 0) {
+         parse_onoff(&CheckNonceCount);
+-    } else if (strcmp(param_str, "post_workaround") == 0) {
++    } else if (strcasecmp(param_str, "post_workaround") == 0) {
+         parse_onoff(&PostWorkaround);
+-    } else if (strcmp(param_str, "utf8") == 0) {
++    } else if (strcasecmp(param_str, "utf8") == 0) {
+         parse_onoff(&utf8);
+     } else
+         Auth::Config::parse(scheme, n_configured, param_str);
+diff --git a/src/auth/negotiate/Config.cc b/src/auth/negotiate/Config.cc
+index 0f5b462..e46b98f 100644
+--- a/src/auth/negotiate/Config.cc
++++ b/src/auth/negotiate/Config.cc
+@@ -97,14 +97,14 @@ Auth::Negotiate::Config::Config() : keep_alive(1)
+ void
+ Auth::Negotiate::Config::parse(Auth::Config * scheme, int n_configured, char *param_str)
+ {
+-    if (strcmp(param_str, "program") == 0) {
++    if (strcasecmp(param_str, "program") == 0) {
+         if (authenticateProgram)
+             wordlistDestroy(&authenticateProgram);
+ 
+         parse_wordlist(&authenticateProgram);
+ 
+         requirePathnameExists("auth_param negotiate program", authenticateProgram->key);
+-    } else if (strcmp(param_str, "keep_alive") == 0) {
++    } else if (strcasecmp(param_str, "keep_alive") == 0) {
+         parse_onoff(&keep_alive);
+     } else
+         Auth::Config::parse(scheme, n_configured, param_str);
+diff --git a/src/auth/ntlm/Config.cc b/src/auth/ntlm/Config.cc
+index 135e927..27d7904 100644
+--- a/src/auth/ntlm/Config.cc
++++ b/src/auth/ntlm/Config.cc
+@@ -89,14 +89,14 @@ Auth::Ntlm::Config::Config() : keep_alive(1)
+ void
+ Auth::Ntlm::Config::parse(Auth::Config * scheme, int n_configured, char *param_str)
+ {
+-    if (strcmp(param_str, "program") == 0) {
++    if (strcasecmp(param_str, "program") == 0) {
+         if (authenticateProgram)
+             wordlistDestroy(&authenticateProgram);
+ 
+         parse_wordlist(&authenticateProgram);
+ 
+         requirePathnameExists("auth_param ntlm program", authenticateProgram->key);
+-    } else if (strcmp(param_str, "keep_alive") == 0) {
++    } else if (strcasecmp(param_str, "keep_alive") == 0) {
+         parse_onoff(&keep_alive);
+     } else
+         Auth::Config::parse(scheme, n_configured, param_str);
+diff --git a/src/cache_cf.cc b/src/cache_cf.cc
+index fedabc0..8886b68 100644
+--- a/src/cache_cf.cc
++++ b/src/cache_cf.cc
+@@ -2097,27 +2097,27 @@ parse_peer(CachePeer ** head)
+     p->connection_auth = 2;    /* auto */
+ 
+     while ((token = ConfigParser::NextToken())) {
+-        if (!strcmp(token, "proxy-only")) {
++        if (!strcasecmp(token, "proxy-only")) {
+             p->options.proxy_only = true;
+-        } else if (!strcmp(token, "no-query")) {
++        } else if (!strcasecmp(token, "no-query")) {
+             p->options.no_query = true;
+-        } else if (!strcmp(token, "background-ping")) {
++        } else if (!strcasecmp(token, "background-ping")) {
+             p->options.background_ping = true;
+-        } else if (!strcmp(token, "no-digest")) {
++        } else if (!strcasecmp(token, "no-digest")) {
+             p->options.no_digest = true;
+-        } else if (!strcmp(token, "no-tproxy")) {
++        } else if (!strcasecmp(token, "no-tproxy")) {
+             p->options.no_tproxy = true;
+-        } else if (!strcmp(token, "multicast-responder")) {
++        } else if (!strcasecmp(token, "multicast-responder")) {
+             p->options.mcast_responder = true;
+ #if PEER_MULTICAST_SIBLINGS
+-        } else if (!strcmp(token, "multicast-siblings")) {
++        } else if (!strcasecmp(token, "multicast-siblings")) {
+             p->options.mcast_siblings = true;
+ #endif
+-        } else if (!strncmp(token, "weight=", 7)) {
++        } else if (!strncasecmp(token, "weight=", 7)) {
+             p->weight = xatoi(token + 7);
+-        } else if (!strncmp(token, "basetime=", 9)) {
++        } else if (!strncasecmp(token, "basetime=", 9)) {
+             p->basetime = xatoi(token + 9);
+-        } else if (!strcmp(token, "closest-only")) {
++        } else if (!strcasecmp(token, "closest-only")) {
+             p->options.closest_only = true;
+         } else if (!strncmp(token, "ttl=", 4)) {
+             p->mcast.ttl = xatoi(token + 4);
+@@ -2127,16 +2127,16 @@ parse_peer(CachePeer ** head)
+ 
+             if (p->mcast.ttl > 128)
+                 p->mcast.ttl = 128;
+-        } else if (!strcmp(token, "default")) {
++        } else if (!strcasecmp(token, "default")) {
+             p->options.default_parent = true;
+-        } else if (!strcmp(token, "round-robin")) {
++        } else if (!strcasecmp(token, "round-robin")) {
+             p->options.roundrobin = true;
+-        } else if (!strcmp(token, "weighted-round-robin")) {
++        } else if (!strcasecmp(token, "weighted-round-robin")) {
+             p->options.weighted_roundrobin = true;
+ #if USE_HTCP
+-        } else if (!strcmp(token, "htcp")) {
++        } else if (!strcasecmp(token, "htcp")) {
+             p->options.htcp = true;
+-        } else if (!strncmp(token, "htcp=", 5) || !strncmp(token, "htcp-", 5)) {
++        } else if (!strncasecmp(token, "htcp=", 5) || !strncmp(token, "htcp-", 5)) {
+             /* Note: The htcp- form is deprecated, replaced by htcp= */
+             p->options.htcp = true;
+             char *tmp = xstrdup(token+5);
+@@ -2147,19 +2147,19 @@ parse_peer(CachePeer ** head)
+                     *nextmode = '\0';
+                     ++nextmode;
+                 }
+-                if (!strcmp(mode, "no-clr")) {
++                if (!strcasecmp(mode, "no-clr")) {
+                     if (p->options.htcp_only_clr)
+                         fatalf("parse_peer: can't set htcp-no-clr and htcp-only-clr simultaneously");
+                     p->options.htcp_no_clr = true;
+-                } else if (!strcmp(mode, "no-purge-clr")) {
++                } else if (!strcasecmp(mode, "no-purge-clr")) {
+                     p->options.htcp_no_purge_clr = true;
+-                } else if (!strcmp(mode, "only-clr")) {
++                } else if (!strcasecmp(mode, "only-clr")) {
+                     if (p->options.htcp_no_clr)
+                         fatalf("parse_peer: can't set htcp no-clr and only-clr simultaneously");
+                     p->options.htcp_only_clr = true;
+-                } else if (!strcmp(mode, "forward-clr")) {
++                } else if (!strcasecmp(mode, "forward-clr")) {
+                     p->options.htcp_forward_clr = true;
+-                } else if (!strcmp(mode, "oldsquid")) {
++                } else if (!strcasecmp(mode, "oldsquid")) {
+                     p->options.htcp_oldsquid = true;
+                 } else {
+                     fatalf("invalid HTCP mode '%s'", mode);
+@@ -2167,15 +2167,15 @@ parse_peer(CachePeer ** head)
+             }
+             safe_free(tmp);
+ #endif
+-        } else if (!strcmp(token, "no-netdb-exchange")) {
++        } else if (!strcasecmp(token, "no-netdb-exchange")) {
+             p->options.no_netdb_exchange = true;
+ 
+-        } else if (!strcmp(token, "carp")) {
++        } else if (!strcasecmp(token, "carp")) {
+             if (p->type != PEER_PARENT)
+                 fatalf("parse_peer: non-parent carp peer %s/%d\n", p->host, p->http_port);
+ 
+             p->options.carp = true;
+-        } else if (!strncmp(token, "carp-key=", 9)) {
++        } else if (!strncasecmp(token, "carp-key=", 9)) {
+             if (p->options.carp != true)
+                 fatalf("parse_peer: carp-key specified on non-carp peer %s/%d\n", p->host, p->http_port);
+             p->options.carp_key.set = true;
+@@ -2183,21 +2183,21 @@ parse_peer(CachePeer ** head)
+             for (; key; key = nextkey) {
+                 nextkey=strchr(key,',');
+                 if (nextkey) ++nextkey; // skip the comma, any
+-                if (0==strncmp(key,"scheme",6)) {
++                if (0==strncasecmp(key,"scheme",6)) {
+                     p->options.carp_key.scheme = true;
+-                } else if (0==strncmp(key,"host",4)) {
++                } else if (0==strncasecmp(key,"host",4)) {
+                     p->options.carp_key.host = true;
+-                } else if (0==strncmp(key,"port",4)) {
++                } else if (0==strncasecmp(key,"port",4)) {
+                     p->options.carp_key.port = true;
+-                } else if (0==strncmp(key,"path",4)) {
++                } else if (0==strncasecmp(key,"path",4)) {
+                     p->options.carp_key.path = true;
+-                } else if (0==strncmp(key,"params",6)) {
++                } else if (0==strncasecmp(key,"params",6)) {
+                     p->options.carp_key.params = true;
+                 } else {
+                     fatalf("invalid carp-key '%s'",key);
+                 }
+             }
+-        } else if (!strcmp(token, "userhash")) {
++        } else if (!strcasecmp(token, "userhash")) {
+ #if USE_AUTH
+             if (p->type != PEER_PARENT)
+                 fatalf("parse_peer: non-parent userhash peer %s/%d\n", p->host, p->http_port);
+@@ -2206,44 +2206,44 @@ parse_peer(CachePeer ** head)
+ #else
+             fatalf("parse_peer: userhash requires authentication. peer %s/%d\n", p->host, p->http_port);
+ #endif
+-        } else if (!strcmp(token, "sourcehash")) {
++        } else if (!strcasecmp(token, "sourcehash")) {
+             if (p->type != PEER_PARENT)
+                 fatalf("parse_peer: non-parent sourcehash peer %s/%d\n", p->host, p->http_port);
+ 
+             p->options.sourcehash = true;
+ 
+-        } else if (!strcmp(token, "no-delay")) {
++        } else if (!strcasecmp(token, "no-delay")) {
+ #if USE_DELAY_POOLS
+             p->options.no_delay = true;
+ #else
+             debugs(0, DBG_CRITICAL, "WARNING: cache_peer option 'no-delay' requires --enable-delay-pools");
+ #endif
+-        } else if (!strncmp(token, "login=", 6)) {
++        } else if (!strncasecmp(token, "login=", 6)) {
+             p->login = xstrdup(token + 6);
+             rfc1738_unescape(p->login);
+-        } else if (!strncmp(token, "connect-timeout=", 16)) {
++        } else if (!strncasecmp(token, "connect-timeout=", 16)) {
+             p->connect_timeout = xatoi(token + 16);
+-        } else if (!strncmp(token, "connect-fail-limit=", 19)) {
++        } else if (!strncasecmp(token, "connect-fail-limit=", 19)) {
+             p->connect_fail_limit = xatoi(token + 19);
+ #if USE_CACHE_DIGESTS
+-        } else if (!strncmp(token, "digest-url=", 11)) {
++        } else if (!strncasecmp(token, "digest-url=", 11)) {
+             p->digest_url = xstrdup(token + 11);
+ #endif
+ 
+-        } else if (!strcmp(token, "allow-miss")) {
++        } else if (!strcasecmp(token, "allow-miss")) {
+             p->options.allow_miss = true;
+-        } else if (!strncmp(token, "max-conn=", 9)) {
++        } else if (!strncasecmp(token, "max-conn=", 9)) {
+             p->max_conn = xatoi(token + 9);
+-        } else if (!strncmp(token, "standby=", 8)) {
++        } else if (!strncasecmp(token, "standby=", 8)) {
+             p->standby.limit = xatoi(token + 8);
+-        } else if (!strcmp(token, "originserver")) {
++        } else if (!strcasecmp(token, "originserver")) {
+             p->options.originserver = true;
+-        } else if (!strncmp(token, "name=", 5)) {
++        } else if (!strncasecmp(token, "name=", 5)) {
+             safe_free(p->name);
+ 
+             if (token[5])
+                 p->name = xstrdup(token + 5);
+-        } else if (!strncmp(token, "forceddomain=", 13)) {
++        } else if (!strncasecmp(token, "forceddomain=", 13)) {
+             safe_free(p->domain);
+ 
+             if (token[13])
+@@ -2601,14 +2601,14 @@ parse_onoff(int *var)
+     if (token == NULL)
+         self_destruct();
+ 
+-    if (!strcmp(token, "on")) {
++    if (!strcasecmp(token, "on")) {
+         *var = 1;
+-    } else if (!strcmp(token, "enable")) {
++    } else if (!strcasecmp(token, "enable")) {
+         debugs(0, DBG_PARSE_NOTE(DBG_IMPORTANT), "WARNING: 'enable' is deprecated. Please update to use 'on'.");
+         *var = 1;
+-    } else if (!strcmp(token, "off")) {
++    } else if (!strcasecmp(token, "off")) {
+         *var = 0;
+-    } else if (!strcmp(token, "disable")) {
++    } else if (!strcasecmp(token, "disable")) {
+         debugs(0, DBG_PARSE_NOTE(DBG_IMPORTANT), "WARNING: 'disable' is deprecated. Please update to use 'off'.");
+         *var = 0;
+     } else {
+@@ -2642,16 +2642,16 @@ parse_tristate(int *var)
+     if (token == NULL)
+         self_destruct();
+ 
+-    if (!strcmp(token, "on")) {
++    if (!strcasecmp(token, "on")) {
+         *var = 1;
+-    } else if (!strcmp(token, "enable")) {
++    } else if (!strcasecmp(token, "enable")) {
+         debugs(0, DBG_PARSE_NOTE(DBG_IMPORTANT), "WARNING: 'enable' is deprecated. Please update to use value 'on'.");
+         *var = 1;
+-    } else if (!strcmp(token, "warn")) {
++    } else if (!strcasecmp(token, "warn")) {
+         *var = -1;
+-    } else if (!strcmp(token, "off")) {
++    } else if (!strcasecmp(token, "off")) {
+         *var = 0;
+-    } else if (!strcmp(token, "disable")) {
++    } else if (!strcasecmp(token, "disable")) {
+         debugs(0, DBG_PARSE_NOTE(DBG_IMPORTANT), "WARNING: 'disable' is deprecated. Please update to use value 'off'.");
+         *var = 0;
+     } else {
+@@ -3249,15 +3249,15 @@ parse_uri_whitespace(int *var)
+     if (token == NULL)
+         self_destruct();
+ 
+-    if (!strcmp(token, "strip"))
++    if (!strcasecmp(token, "strip"))
+         *var = URI_WHITESPACE_STRIP;
+-    else if (!strcmp(token, "deny"))
++    else if (!strcasecmp(token, "deny"))
+         *var = URI_WHITESPACE_DENY;
+-    else if (!strcmp(token, "allow"))
++    else if (!strcasecmp(token, "allow"))
+         *var = URI_WHITESPACE_ALLOW;
+-    else if (!strcmp(token, "encode"))
++    else if (!strcasecmp(token, "encode"))
+         *var = URI_WHITESPACE_ENCODE;
+-    else if (!strcmp(token, "chop"))
++    else if (!strcasecmp(token, "chop"))
+         *var = URI_WHITESPACE_CHOP;
+     else {
+         debugs(0, DBG_PARSE_NOTE(2), "ERROR: Invalid option '" << token << "': 'uri_whitespace' accepts 'strip', 'deny', 'allow', 'encode', and 'chop'.");
+@@ -3399,19 +3399,19 @@ dump_memcachemode(StoreEntry * entry, const char *name, SquidConfig &config)
+ peer_t
+ parseNeighborType(const char *s)
+ {
+-    if (!strcmp(s, "parent"))
++    if (!strcasecmp(s, "parent"))
+         return PEER_PARENT;
+ 
+-    if (!strcmp(s, "neighbor"))
++    if (!strcasecmp(s, "neighbor"))
+         return PEER_SIBLING;
+ 
+-    if (!strcmp(s, "neighbour"))
++    if (!strcasecmp(s, "neighbour"))
+         return PEER_SIBLING;
+ 
+-    if (!strcmp(s, "sibling"))
++    if (!strcasecmp(s, "sibling"))
+         return PEER_SIBLING;
+ 
+-    if (!strcmp(s, "multicast"))
++    if (!strcasecmp(s, "multicast"))
+         return PEER_MULTICAST;
+ 
+     debugs(15, DBG_CRITICAL, "WARNING: Unknown neighbor type: " << s);
+@@ -3689,11 +3689,11 @@ parse_port_option(AnyP::PortCfgPointer &s, char *token)
+     } else if (strcmp(token, "connection-auth=on") == 0) {
+         s->connection_auth_disabled = false;
+     } else if (strncmp(token, "disable-pmtu-discovery=", 23) == 0) {
+-        if (!strcmp(token + 23, "off"))
++        if (!strcasecmp(token + 23, "off"))
+             s->disable_pmtu_discovery = DISABLE_PMTU_OFF;
+-        else if (!strcmp(token + 23, "transparent"))
++        else if (!strcasecmp(token + 23, "transparent"))
+             s->disable_pmtu_discovery = DISABLE_PMTU_TRANSPARENT;
+-        else if (!strcmp(token + 23, "always"))
++        else if (!strcasecmp(token + 23, "always"))
+             s->disable_pmtu_discovery = DISABLE_PMTU_ALWAYS;
+         else
+             self_destruct();
+@@ -3719,7 +3719,7 @@ parse_port_option(AnyP::PortCfgPointer &s, char *token)
+             s->tcp_keepalive.timeout = xatoui(t);
+         }
+ #if USE_OPENSSL
+-    } else if (strcmp(token, "sslBump") == 0) {
++    } else if (strcasecmp(token, "sslBump") == 0) {
+         debugs(3, DBG_CRITICAL, "WARNING: '" << token << "' is deprecated " <<
+                "in " << cfg_directive << ". Use 'ssl-bump' instead.");
+         s->flags.tunnelSslBumping = true;
+diff --git a/src/dns_internal.cc b/src/dns_internal.cc
+index 699301e..ef0644d 100644
+--- a/src/dns_internal.cc
++++ b/src/dns_internal.cc
+@@ -396,7 +396,7 @@ idnsParseResolvConf(void)
+ 
+         if (NULL == t) {
+             continue;
+-        } else if (strcmp(t, "nameserver") == 0) {
++        } else if (strcasecmp(t, "nameserver") == 0) {
+             t = strtok(NULL, w_space);
+ 
+             if (NULL == t)
+@@ -406,7 +406,7 @@ idnsParseResolvConf(void)
+ 
+             idnsAddNameserver(t);
+             result = true;
+-        } else if (strcmp(t, "domain") == 0) {
++        } else if (strcasecmp(t, "domain") == 0) {
+             idnsFreeSearchpath();
+             t = strtok(NULL, w_space);
+ 
+@@ -416,7 +416,7 @@ idnsParseResolvConf(void)
+             debugs(78, DBG_IMPORTANT, "Adding domain " << t << " from " << _PATH_RESCONF);
+ 
+             idnsAddPathComponent(t);
+-        } else if (strcmp(t, "search") == 0) {
++        } else if (strcasecmp(t, "search") == 0) {
+             idnsFreeSearchpath();
+             while (NULL != t) {
+                 t = strtok(NULL, w_space);
+@@ -428,7 +428,7 @@ idnsParseResolvConf(void)
+ 
+                 idnsAddPathComponent(t);
+             }
+-        } else if (strcmp(t, "options") == 0) {
++        } else if (strcasecmp(t, "options") == 0) {
+             while (NULL != t) {
+                 t = strtok(NULL, w_space);
+ 
+diff --git a/tools/cachemgr.cc b/tools/cachemgr.cc
+index 8c7729e..1ec4e15 100644
+--- a/tools/cachemgr.cc
++++ b/tools/cachemgr.cc
+@@ -1018,23 +1018,23 @@ read_request(void)
+ 
+         rfc1738_unescape(q);
+ 
+-        if (0 == strcmp(t, "server") && strlen(q))
++        if (0 == strcasecmp(t, "server") && strlen(q))
+             req->server = xstrdup(q);
+-        else if (0 == strcmp(t, "host") && strlen(q))
++        else if (0 == strcasecmp(t, "host") && strlen(q))
+             req->hostname = xstrdup(q);
+-        else if (0 == strcmp(t, "port") && strlen(q))
++        else if (0 == strcasecmp(t, "port") && strlen(q))
+             req->port = atoi(q);
+-        else if (0 == strcmp(t, "user_name") && strlen(q))
++        else if (0 == strcasecmp(t, "user_name") && strlen(q))
+             req->user_name = xstrdup(q);
+-        else if (0 == strcmp(t, "passwd") && strlen(q))
++        else if (0 == strcasecmp(t, "passwd") && strlen(q))
+             req->passwd = xstrdup(q);
+-        else if (0 == strcmp(t, "auth") && strlen(q))
++        else if (0 == strcasecmp(t, "auth") && strlen(q))
+             req->pub_auth = xstrdup(q), decode_pub_auth(req);
+-        else if (0 == strcmp(t, "operation"))
++        else if (0 == strcasecmp(t, "operation"))
+             req->action = xstrdup(q);
+-        else if (0 == strcmp(t, "workers") && strlen(q))
++        else if (0 == strcasecmp(t, "workers") && strlen(q))
+             req->workers = xstrdup(q);
+-        else if (0 == strcmp(t, "processes") && strlen(q))
++        else if (0 == strcasecmp(t, "processes") && strlen(q))
+             req->processes = xstrdup(q);
+     }
+     safe_free(t);
+@@ -1254,7 +1254,7 @@ check_target_acl(const char *hostname, int port)
+             if (strcmp(token, "*") == 0)
+ 
+                 ;   /* Wildcard port specification */
+-            else if (strcmp(token, "any") == 0)
++            else if (strcasecmp(token, "any") == 0)
+ 
+                 ;   /* Wildcard port specification */
+             else if (sscanf(token, "%d", &i) != 1)
diff --git a/squid-3.5.20-empty-cname.patch b/squid-3.5.20-empty-cname.patch
new file mode 100644
index 0000000..bfe30c9
--- /dev/null
+++ b/squid-3.5.20-empty-cname.patch
@@ -0,0 +1,138 @@
+From f6eee0760ca32e8ce090d73135ffbeb483f2bc5a Mon Sep 17 00:00:00 2001
+From: Stephen Baynes 
+Date: Thu, 1 Dec 2016 12:20:39 +1300
+Subject: [PATCH] Bug 4007: Hang on DNS query with dead-end CNAME
+
+DNS lookup recursion no longer occurs. ipcacheParse() return values are no
+longer useful.
+
+Also, cleanup the debugging output.
+---
+ src/ipcache.cc | 40 ++++++++++++++++------------------------
+ 1 file changed, 16 insertions(+), 24 deletions(-)
+
+diff --git a/src/ipcache.cc b/src/ipcache.cc
+index 0eaab11241..d2a883e29b 100644
+--- a/src/ipcache.cc
++++ b/src/ipcache.cc
+@@ -123,7 +123,6 @@ static void stat_ipcache_get(StoreEntry *);
+ static FREE ipcacheFreeEntry;
+ static IDNSCB ipcacheHandleReply;
+ static int ipcacheExpiredEntry(ipcache_entry *);
+-static int ipcacheParse(ipcache_entry *, const rfc1035_rr *, int, const char *error);
+ static ipcache_entry *ipcache_get(const char *);
+ static void ipcacheLockEntry(ipcache_entry *);
+ static void ipcacheStatPrint(ipcache_entry *, StoreEntry *);
+@@ -328,8 +327,7 @@ ipcacheCallback(ipcache_entry *i, int wait)
+     ipcacheUnlockEntry(i);
+ }
+ 
+-/// \ingroup IPCacheAPI
+-static int
++static void
+ ipcacheParse(ipcache_entry *i, const rfc1035_rr * answers, int nr, const char *error_message)
+ {
+     int k;
+@@ -350,25 +348,25 @@ ipcacheParse(ipcache_entry *i, const rfc1035_rr * answers, int nr, const char *e
+     i->addrs.count = 0;
+ 
+     if (nr < 0) {
+-        debugs(14, 3, "ipcacheParse: Lookup failed '" << error_message << "' for '" << (const char *)i->hash.key << "'");
++        debugs(14, 3, "Lookup failed '" << error_message << "' for '" << (const char *)i->hash.key << "'");
+         i->error_message = xstrdup(error_message);
+-        return -1;
++        return;
+     }
+ 
+     if (nr == 0) {
+-        debugs(14, 3, "ipcacheParse: No DNS records in response to '" << name << "'");
++        debugs(14, 3, "No DNS records in response to '" << name << "'");
+         i->error_message = xstrdup("No DNS records");
+-        return -1;
++        return;
+     }
+ 
+-    debugs(14, 3, "ipcacheParse: " << nr << " answers for '" << name << "'");
++    debugs(14, 3, nr << " answers for '" << name << "'");
+     assert(answers);
+ 
+     for (k = 0; k < nr; ++k) {
+ 
+         if (Ip::EnableIpv6 && answers[k].type == RFC1035_TYPE_AAAA) {
+             if (answers[k].rdlength != sizeof(struct in6_addr)) {
+-                debugs(14, DBG_IMPORTANT, "ipcacheParse: Invalid IPv6 address in response to '" << name << "'");
++                debugs(14, DBG_IMPORTANT, MYNAME << "Invalid IPv6 address in response to '" << name << "'");
+                 continue;
+             }
+             ++na;
+@@ -378,7 +376,7 @@ ipcacheParse(ipcache_entry *i, const rfc1035_rr * answers, int nr, const char *e
+ 
+         if (answers[k].type == RFC1035_TYPE_A) {
+             if (answers[k].rdlength != sizeof(struct in_addr)) {
+-                debugs(14, DBG_IMPORTANT, "ipcacheParse: Invalid IPv4 address in response to '" << name << "'");
++                debugs(14, DBG_IMPORTANT, MYNAME << "Invalid IPv4 address in response to '" << name << "'");
+                 continue;
+             }
+             ++na;
+@@ -394,14 +392,14 @@ ipcacheParse(ipcache_entry *i, const rfc1035_rr * answers, int nr, const char *e
+         }
+ 
+         // otherwise its an unknown RR. debug at level 9 since we usually want to ignore these and they are common.
+-        debugs(14, 9, HERE << "Unknown RR type received: type=" << answers[k].type << " starting at " << &(answers[k]) );
++        debugs(14, 9, "Unknown RR type received: type=" << answers[k].type << " starting at " << &(answers[k]) );
+     }
+     if (na == 0) {
+-        debugs(14, DBG_IMPORTANT, "ipcacheParse: No Address records in response to '" << name << "'");
++        debugs(14, DBG_IMPORTANT, MYNAME << "No Address records in response to '" << name << "'");
+         i->error_message = xstrdup("No Address records");
+         if (cname_found)
+             ++IpcacheStats.cname_only;
+-        return 0;
++        return;
+     }
+ 
+     i->addrs.in_addrs = static_cast(xcalloc(na, sizeof(Ip::Address)));
+@@ -419,7 +417,7 @@ ipcacheParse(ipcache_entry *i, const rfc1035_rr * answers, int nr, const char *e
+             memcpy(&temp, answers[k].rdata, sizeof(struct in_addr));
+             i->addrs.in_addrs[j] = temp;
+ 
+-            debugs(14, 3, "ipcacheParse: " << name << " #" << j << " " << i->addrs.in_addrs[j]);
++            debugs(14, 3, name << " #" << j << " " << i->addrs.in_addrs[j]);
+             ++j;
+ 
+         } else if (Ip::EnableIpv6 && answers[k].type == RFC1035_TYPE_AAAA) {
+@@ -430,7 +428,7 @@ ipcacheParse(ipcache_entry *i, const rfc1035_rr * answers, int nr, const char *e
+             memcpy(&temp, answers[k].rdata, sizeof(struct in6_addr));
+             i->addrs.in_addrs[j] = temp;
+ 
+-            debugs(14, 3, "ipcacheParse: " << name << " #" << j << " " << i->addrs.in_addrs[j] );
++            debugs(14, 3, name << " #" << j << " " << i->addrs.in_addrs[j] );
+             ++j;
+         }
+         if (ttl == 0 || (int) answers[k].ttl < ttl)
+@@ -453,8 +451,6 @@ ipcacheParse(ipcache_entry *i, const rfc1035_rr * answers, int nr, const char *e
+     i->expires = squid_curtime + ttl;
+ 
+     i->flags.negcached = false;
+-
+-    return i->addrs.count;
+ }
+ 
+ /// \ingroup IPCacheInternal
+@@ -467,13 +463,9 @@ ipcacheHandleReply(void *data, const rfc1035_rr * answers, int na, const char *e
+     const int age = i->age();
+     statCounter.dns.svcTime.count(age);
+ 
+-    int done = ipcacheParse(i, answers, na, error_message);
+-
+-    /* If we have not produced either IPs or Error immediately, wait for recursion to finish. */
+-    if (done != 0 || error_message != NULL) {
+-        ipcacheAddEntry(i);
+-        ipcacheCallback(i, age);
+-    }
++    ipcacheParse(i, answers, na, error_message);
++    ipcacheAddEntry(i);
++    ipcacheCallback(i, age);
+ }
+ 
+ /**
diff --git a/squid-3.5.20-https-packet-size.patch b/squid-3.5.20-https-packet-size.patch
new file mode 100644
index 0000000..1222bb7
--- /dev/null
+++ b/squid-3.5.20-https-packet-size.patch
@@ -0,0 +1,154 @@
+diff --git a/src/client_side.cc b/src/client_side.cc
+index 79b1b4b..01760f3 100644
+--- a/src/client_side.cc
++++ b/src/client_side.cc
+@@ -4369,7 +4369,7 @@ void httpsSslBumpStep2AccessCheckDone(allow_t answer, void *data)
+             connState->in.buf.append(rbuf.content(), rbuf.contentSize());
+             ClientSocketContext::Pointer context = connState->getCurrentContext();
+             ClientHttpRequest *http = context->http;
+-            tunnelStart(http, &http->out.size, &http->al->http.code, http->al);
++            tunnelStart(http);
+         }
+     }
+ }
+diff --git a/src/client_side_reply.cc b/src/client_side_reply.cc
+index d17cfec..84447d1 100644
+--- a/src/client_side_reply.cc
++++ b/src/client_side_reply.cc
+@@ -1114,7 +1114,7 @@ clientReplyContext::storeNotOKTransferDone() const
+     if (curReply->content_length < 0)
+         return 0;
+ 
+-    int64_t expectedLength = curReply->content_length + http->out.headers_sz;
++    uint64_t expectedLength = curReply->content_length + http->out.headers_sz;
+ 
+     if (http->out.size < expectedLength)
+         return 0;
+diff --git a/src/client_side_request.cc b/src/client_side_request.cc
+index 05de6da..a824b08 100644
+--- a/src/client_side_request.cc
++++ b/src/client_side_request.cc
+@@ -1501,7 +1501,7 @@ ClientHttpRequest::processRequest()
+         }
+ #endif
+         getConn()->stopReading(); // tunnels read for themselves
+-        tunnelStart(this, &out.size, &al->http.code, al);
++        tunnelStart(this);
+         return;
+     }
+ 
+diff --git a/src/client_side_request.h b/src/client_side_request.h
+index 442d362..51a6d75 100644
+--- a/src/client_side_request.h
++++ b/src/client_side_request.h
+@@ -73,7 +73,7 @@ public:
+ 
+     struct {
+         int64_t offset;
+-        int64_t size;
++        uint64_t size;
+         size_t headers_sz;
+     } out;
+ 
+@@ -180,7 +180,7 @@ int clientHttpRequestStatus(int fd, ClientHttpRequest const *http);
+ void clientAccessCheck(ClientHttpRequest *);
+ 
+ /* ones that should be elsewhere */
+-void tunnelStart(ClientHttpRequest *, int64_t *, int *, const AccessLogEntry::Pointer &al);
++void tunnelStart(ClientHttpRequest *);
+ 
+ #if _USE_INLINE_
+ #include "client_side_request.cci"
+diff --git a/src/tests/stub_tunnel.cc b/src/tests/stub_tunnel.cc
+index 822b8c8..6a57dfb 100644
+--- a/src/tests/stub_tunnel.cc
++++ b/src/tests/stub_tunnel.cc
+@@ -14,7 +14,7 @@
+ #include "FwdState.h"
+ class ClientHttpRequest;
+ 
+-void tunnelStart(ClientHttpRequest *, int64_t *, int *, const AccessLogEntryPointer &al) STUB
++void tunnelStart(ClientHttpRequest *) STUB
+ 
+ void switchToTunnel(HttpRequest *request, Comm::ConnectionPointer &clientConn, Comm::ConnectionPointer &srvConn) STUB
+ 
+diff --git a/src/tunnel.cc b/src/tunnel.cc
+index 1d4bed7..d595150 100644
+--- a/src/tunnel.cc
++++ b/src/tunnel.cc
+@@ -139,7 +139,7 @@ public:
+         int len;
+         char *buf;
+         AsyncCall::Pointer writer; ///< pending Comm::Write callback
+-        int64_t *size_ptr;      /* pointer to size in an ConnStateData for logging */
++        uint64_t *size_ptr;      /* pointer to size in an ConnStateData for logging */
+ 
+         Comm::ConnectionPointer conn;    ///< The currently connected connection.
+         uint8_t delayedLoops; ///< how many times a read on this connection has been postponed.
+@@ -849,6 +849,11 @@ tunnelConnectedWriteDone(const Comm::ConnectionPointer &conn, char *buf, size_t
+         return;
+     }
+ 
++    if (ClientHttpRequest *http = tunnelState->http.get()) {
++        http->out.headers_sz += size;
++        http->out.size += size;
++    }
++
+     tunnelStartShoveling(tunnelState);
+ }
+ 
+@@ -996,7 +1001,7 @@ tunnelConnectDone(const Comm::ConnectionPointer &conn, Comm::Flag status, int xe
+ }
+ 
+ void
+-tunnelStart(ClientHttpRequest * http, int64_t * size_ptr, int *status_ptr, const AccessLogEntryPointer &al)
++tunnelStart(ClientHttpRequest * http)
+ {
+     debugs(26, 3, HERE);
+     /* Create state structure. */
+@@ -1022,7 +1027,7 @@ tunnelStart(ClientHttpRequest * http, int64_t * size_ptr, int *status_ptr, const
+         if (ch.fastCheck() == ACCESS_DENIED) {
+             debugs(26, 4, HERE << "MISS access forbidden.");
+             err = new ErrorState(ERR_FORWARDING_DENIED, Http::scForbidden, request);
+-            *status_ptr = Http::scForbidden;
++            http->al->http.code = Http::scForbidden;
+             errorSend(http->getConn()->clientConnection, err);
+             return;
+         }
+@@ -1038,12 +1043,13 @@ tunnelStart(ClientHttpRequest * http, int64_t * size_ptr, int *status_ptr, const
+ #endif
+     tunnelState->url = xstrdup(url);
+     tunnelState->request = request;
+-    tunnelState->server.size_ptr = size_ptr;
+-    tunnelState->status_ptr = status_ptr;
++    tunnelState->server.size_ptr = &http->out.size;
++    tunnelState->client.size_ptr = &http->al->http.clientRequestSz.payloadData;
++    tunnelState->status_ptr = &http->al->http.code;
+     tunnelState->logTag_ptr = &http->logType;
+     tunnelState->client.conn = http->getConn()->clientConnection;
+     tunnelState->http = http;
+-    tunnelState->al = al;
++    tunnelState->al = http->al ;
+     tunnelState->started = squid_curtime;
+ 
+     comm_add_close_handler(tunnelState->client.conn->fd,
+@@ -1054,7 +1060,7 @@ tunnelStart(ClientHttpRequest * http, int64_t * size_ptr, int *status_ptr, const
+                                      CommTimeoutCbPtrFun(tunnelTimeout, tunnelState));
+     commSetConnTimeout(tunnelState->client.conn, Config.Timeout.lifetime, timeoutCall);
+ 
+-    peerSelect(&(tunnelState->serverDestinations), request, al,
++    peerSelect(&(tunnelState->serverDestinations), request, tunnelState->al,
+                NULL,
+                tunnelPeerSelectComplete,
+                tunnelState);
+@@ -1227,6 +1233,10 @@ switchToTunnel(HttpRequest *request, Comm::ConnectionPointer &clientConn, Comm::
+         if (context != NULL && context->http != NULL) {
+             tunnelState->logTag_ptr = &context->http->logType;
+             tunnelState->server.size_ptr = &context->http->out.size;
++            if (context->http->al != NULL) {
++                tunnelState->al = context->http->al;
++                tunnelState->client.size_ptr = &context->http->al->http.clientRequestSz.payloadData;
++            }
+ 
+ #if USE_DELAY_POOLS
+             /* no point using the delayIsNoDelay stuff since tunnel is nice and simple */
diff --git a/squid-3.5.20-man-see-also.patch b/squid-3.5.20-man-see-also.patch
new file mode 100644
index 0000000..71a9cd4
--- /dev/null
+++ b/squid-3.5.20-man-see-also.patch
@@ -0,0 +1,20 @@
+diff --git a/src/squid.8.in b/src/squid.8.in
+index 3882481..f0ff2c3 100644
+--- a/src/squid.8.in
++++ b/src/squid.8.in
+@@ -265,11 +265,11 @@ Report ideas for new improvements to the
+ .SH SEE ALSO
+ .if !'po4a'hide' .B cachemgr.cgi "(8), "
+ .if !'po4a'hide' .B squidclient "(1), "
+-.if !'po4a'hide' .B pam_auth "(8), "
+-.if !'po4a'hide' .B squid_ldap_auth "(8), "
+-.if !'po4a'hide' .B squid_ldap_group "(8), "
++.if !'po4a'hide' .B basic_pam_auth "(8), "
++.if !'po4a'hide' .B basic_ldap_auth "(8), "
++.if !'po4a'hide' .B ext_ldap_group_acl "(8), "
+ .if !'po4a'hide' .B ext_session_acl "(8), "
+-.if !'po4a'hide' .B squid_unix_group "(8), "
++.if !'po4a'hide' .B ext_unix_group_acl "(8), "
+ .br
+ The Squid FAQ wiki
+ .if !'po4a'hide' http://wiki.squid-cache.org/SquidFaq
diff --git a/squid-3.5.20-man-typos.patch b/squid-3.5.20-man-typos.patch
new file mode 100644
index 0000000..5802976
--- /dev/null
+++ b/squid-3.5.20-man-typos.patch
@@ -0,0 +1,379 @@
+diff --git a/compat/compat.h b/compat/compat.h
+index 00714ed..92f245a 100644
+--- a/compat/compat.h
++++ b/compat/compat.h
+@@ -11,7 +11,7 @@
+ 
+ /*
+  * From discussions it was chosen to push compat code as far down as possible.
+- * That means we can have a seperate compat for most
++ * That means we can have a separate compat for most
+  *  compatability and portability hacks and resolutions.
+  *
+  * This file is meant to collate all those hacks files together and
+diff --git a/helpers/basic_auth/DB/basic_db_auth.8 b/helpers/basic_auth/DB/basic_db_auth.8
+index 1aebcee..77e683c 100644
+--- a/helpers/basic_auth/DB/basic_db_auth.8
++++ b/helpers/basic_auth/DB/basic_db_auth.8
+@@ -147,8 +147,8 @@
+ .Vb 1
+ \& basic_db_auth [options]
+ .Ve
+-.SH "DESCRIPTOIN"
+-.IX Header "DESCRIPTOIN"
++.SH "DESCRIPTION"
++.IX Header "DESCRIPTION"
+ This program verifies username & password to a database
+ .SH "OPTIONS"
+ .IX Header "OPTIONS"
+@@ -213,7 +213,7 @@ This manual was written by \fIHenrik Nordstrom 
+ Copyright (C) 2007 Henrik Nordstrom 
+ Copyright (C) 2010 Luis Daniel Lucio Quiroz  (Joomla support)
+ This program is free software. You may redistribute copies of it under the
+-terms of the \s-1GNU\s0 General Public License version 2, or (at youropinion) any
++terms of the \s-1GNU\s0 General Public License version 2, or (at your opinion) any
+ later version.
+ .SH "QUESTIONS"
+ .IX Header "QUESTIONS"
+diff --git a/helpers/basic_auth/DB/basic_db_auth.pl.in b/helpers/basic_auth/DB/basic_db_auth.pl.in
+index 8dc7f00..3672488 100644
+--- a/helpers/basic_auth/DB/basic_db_auth.pl.in
++++ b/helpers/basic_auth/DB/basic_db_auth.pl.in
+@@ -14,7 +14,7 @@ use Getopt::Long;
+ 
+  basic_db_auth [options]
+ 
+-=head1 DESCRIPTOIN
++=head1 DESCRIPTION
+ 
+ This program verifies username & password to a database
+ 
+@@ -97,7 +97,7 @@ This manual was written by I>
+ Copyright (C) 2007 Henrik Nordstrom 
+ Copyright (C) 2010 Luis Daniel Lucio Quiroz  (Joomla support)
+ This program is free software. You may redistribute copies of it under the
+-terms of the GNU General Public License version 2, or (at youropinion) any
++terms of the GNU General Public License version 2, or (at your opinion) any
+ later version.
+ 
+ =head1 QUESTIONS
+diff --git a/helpers/basic_auth/LDAP/basic_ldap_auth.8 b/helpers/basic_auth/LDAP/basic_ldap_auth.8
+index 3893514..cb972cd 100644
+--- a/helpers/basic_auth/LDAP/basic_ldap_auth.8
++++ b/helpers/basic_auth/LDAP/basic_ldap_auth.8
+@@ -98,7 +98,7 @@ option). Defaults to
+ .B Note:
+ This can only be done if all your users are located directly under
+ the same position in the LDAP tree and the login name is used for naming
+-each user object. If your LDAP tree does not match these criterias or if
++each user object. If your LDAP tree does not match these criteria or if
+ you want to filter who are valid users then you need to use a search filter
+ to search for your users DN (
+ .B \-f
+@@ -187,14 +187,14 @@ when to dereference aliases. Defaults to
+ dereference aliases (default),
+ .B always
+ dereference aliases, only while
+-.B search ing
++.B searching
+ or only to
+ .B find
+ the base object.
+ .
+ .if !'po4a'hide' .TP
+ .if !'po4a'hide' .B "\-H ldap_uri
+-Specity the LDAP server to connect to by LDAP URI (requires OpenLDAP libraries).
++Specify the LDAP server to connect to by LDAP URI (requires OpenLDAP libraries).
+ Servers can also be specified last on the command line.
+ .
+ .if !'po4a'hide' .TP
+diff --git a/helpers/digest_auth/LDAP/digest_pw_auth.cc b/helpers/digest_auth/LDAP/digest_pw_auth.cc
+index 50ce8fb..866cf9c 100644
+--- a/helpers/digest_auth/LDAP/digest_pw_auth.cc
++++ b/helpers/digest_auth/LDAP/digest_pw_auth.cc
+@@ -30,7 +30,7 @@
+  * the file format.  However storing such a triple does little to
+  * improve security: If compromised the username:realm:HA1 combination
+  * is "plaintext equivalent" - for the purposes of digest authentication
+- * they allow the user access. Password syncronisation is not tackled
++ * they allow the user access. Password synchronization is not tackled
+  * by digest - just preventing on the wire compromise.
+  *
+  * Copyright (c) 2003  Robert Collins  
+diff --git a/helpers/digest_auth/eDirectory/digest_pw_auth.cc b/helpers/digest_auth/eDirectory/digest_pw_auth.cc
+index 5db1ce4..aaeb86d 100644
+--- a/helpers/digest_auth/eDirectory/digest_pw_auth.cc
++++ b/helpers/digest_auth/eDirectory/digest_pw_auth.cc
+@@ -30,7 +30,7 @@
+  * the file format.  However storing such a triple does little to
+  * improve security: If compromised the username:realm:HA1 combination
+  * is "plaintext equivalent" - for the purposes of digest authentication
+- * they allow the user access. Password syncronisation is not tackled
++ * they allow the user access. Password synchronization is not tackled
+  * by digest - just preventing on the wire compromise.
+  *
+  * Copyright (c) 2003  Robert Collins  
+diff --git a/helpers/digest_auth/file/digest_file_auth.8 b/helpers/digest_auth/file/digest_file_auth.8
+index 008d53c..66254d9 100644
+--- a/helpers/digest_auth/file/digest_file_auth.8
++++ b/helpers/digest_auth/file/digest_file_auth.8
+@@ -15,7 +15,7 @@ file
+ is an installed binary authentication program for Squid. It handles digest 
+ authentication protocol and authenticates against a text file backend.
+ .
+-This program will automatically detect the existence of a concurrecy channel-ID and adjust appropriately.
++This program will automatically detect the existence of a concurrency channel-ID and adjust appropriately.
+ It may be used with any value 0 or above for the auth_param children concurrency= parameter.
+ .
+ .SH OPTIONS
+@@ -54,7 +54,7 @@ the file format.  However storing such a triple does little to
+ improve security: If compromised the
+ .B username:realm:HA1 
+ combination is "plaintext equivalent" - for the purposes of digest authentication
+-they allow the user access. Password syncronisation is not tackled
++they allow the user access. Password synchronization is not tackled
+ by digest - just preventing on the wire compromise.
+ .
+ .SH AUTHOR
+diff --git a/helpers/digest_auth/file/digest_file_auth.cc b/helpers/digest_auth/file/digest_file_auth.cc
+index cd17a54..5d36563 100644
+--- a/helpers/digest_auth/file/digest_file_auth.cc
++++ b/helpers/digest_auth/file/digest_file_auth.cc
+@@ -33,7 +33,7 @@
+  * the file format.  However storing such a triple does little to
+  * improve security: If compromised the username:realm:HA1 combination
+  * is "plaintext equivalent" - for the purposes of digest authentication
+- * they allow the user access. Password syncronisation is not tackled
++ * they allow the user access. Password synchronization is not tackled
+  * by digest - just preventing on the wire compromise.
+  *
+  * Copyright (c) 2003  Robert Collins  
+diff --git a/helpers/digest_auth/file/text_backend.cc b/helpers/digest_auth/file/text_backend.cc
+index 0b58670..5f4e882 100644
+--- a/helpers/digest_auth/file/text_backend.cc
++++ b/helpers/digest_auth/file/text_backend.cc
+@@ -29,7 +29,7 @@
+  * the file format.  However storing such a triple does little to
+  * improve security: If compromised the username:realm:HA1 combination
+  * is "plaintext equivalent" - for the purposes of digest authentication
+- * they allow the user access. Password syncronisation is not tackled
++ * they allow the user access. Password synchronization is not tackled
+  * by digest - just preventing on the wire compromise.
+  *
+  * Copyright (c) 2003  Robert Collins  
+diff --git a/helpers/external_acl/LDAP_group/ext_ldap_group_acl.8 b/helpers/external_acl/LDAP_group/ext_ldap_group_acl.8
+index 1345f6a..d165f57 100644
+--- a/helpers/external_acl/LDAP_group/ext_ldap_group_acl.8
++++ b/helpers/external_acl/LDAP_group/ext_ldap_group_acl.8
+@@ -53,7 +53,7 @@ When to dereference aliases. Defaults to 'never'
+ dereference aliases (default),
+ .BI always
+ dereference aliases, only while
+-.BR search ing
++.BR searching
+ or only to
+ .B find
+ the base object
+@@ -143,7 +143,7 @@ Specify the LDAP server to connect to
+ .
+ .if !'po4a'hide' .TP
+ .if !'po4a'hide' .BI \-H " ldapuri"
+-Specity the LDAP server to connect to by a LDAP URI (requires OpenLDAP libraries)
++Specify the LDAP server to connect to by a LDAP URI (requires OpenLDAP libraries)
+ .
+ .if !'po4a'hide' .TP
+ .if !'po4a'hide' .BI \-K
+diff --git a/helpers/external_acl/kerberos_ldap_group/README b/helpers/external_acl/kerberos_ldap_group/README
+index 4d80409..78e8a67 100644
+--- a/helpers/external_acl/kerberos_ldap_group/README
++++ b/helpers/external_acl/kerberos_ldap_group/README
+@@ -65,7 +65,7 @@ KRB5_KTNAME=/etc/squid/HTTP.keytab
+ export KRB5_KTNAME
+ 
+ If you use a different Kerberos domain than the machine itself is in you can point squid to 
+-the seperate Kerberos config file by setting the following environmnet variable in the startup 
++the separate Kerberos config file by setting the following environment variable in the startup 
+ script.
+ 
+ KRB5_CONFIG=/etc/krb5-squid.conf
+diff --git a/helpers/external_acl/kerberos_ldap_group/ext_kerberos_ldap_group_acl.8 b/helpers/external_acl/kerberos_ldap_group/ext_kerberos_ldap_group_acl.8
+index 6972104..90b2cdd 100644
+--- a/helpers/external_acl/kerberos_ldap_group/ext_kerberos_ldap_group_acl.8
++++ b/helpers/external_acl/kerberos_ldap_group/ext_kerberos_ldap_group_acl.8
+@@ -163,7 +163,7 @@ the proxy name set in IE or firefox. You can not use an IP address.
+ .if !'po4a'hide' .ft
+ .
+ If you use a different Kerberos domain than the machine itself is in you can point squid to
+-the seperate Kerberos config file by setting the following environmnet variable in the startup
++the separate Kerberos config file by setting the following environment variable in the startup
+ script.
+ .if !'po4a'hide' .P
+ .if !'po4a'hide' .ft CR
+diff --git a/helpers/external_acl/session/ext_session_acl.8 b/helpers/external_acl/session/ext_session_acl.8
+index 6a2ec7b..a631fb2 100644
+--- a/helpers/external_acl/session/ext_session_acl.8
++++ b/helpers/external_acl/session/ext_session_acl.8
+@@ -21,7 +21,7 @@ and timing out sessions. The timeout is based either on idle use (
+ ) or a fixed period of time (
+ .B \-T
+ ). The former is suitable for displaying terms and conditions to a user; the
+-latter is suitable for the display of advertisments or other notices (both as a
++latter is suitable for the display of advertisements or other notices (both as a
+ splash page \- see config examples in the wiki online). The session helper can also be used
+ to force users to re\-authenticate if the 
+ .B %LOGIN 
+@@ -55,7 +55,7 @@ used as the database. If a path is specified, a Berkeley DB database
+ environment is created within the directory. The advantage of the latter
+ is better database support between multiple instances of the session
+ helper. Using multiple instances of the session helper with a single
+-database file will cause synchronisation problems between processes.
++database file will cause synchronization problems between processes.
+ If this option is not specified the session details will be kept in
+ memory only and all sessions will reset each time Squid restarts its
+ helpers (Squid restart or rotation of logs).
+diff --git a/helpers/log_daemon/DB/log_db_daemon.8 b/helpers/log_daemon/DB/log_db_daemon.8
+index abb4407..d260feb 100644
+--- a/helpers/log_daemon/DB/log_db_daemon.8
++++ b/helpers/log_daemon/DB/log_db_daemon.8
+@@ -143,8 +143,8 @@ log_db_daemon \- Database logging daemon for Squid
+ .SH "SYNOPSIS"
+ .IX Header "SYNOPSIS"
+ log_db_daemon \s-1DSN\s0 [options]
+-.SH "DESCRIPTOIN"
+-.IX Header "DESCRIPTOIN"
++.SH "DESCRIPTION"
++.IX Header "DESCRIPTION"
+ This program writes Squid access.log entries to a database.
+ Presently only accepts the \fBsquid\fR native format
+ .IP "\fB\s-1DSN\s0\fR" 8
+diff --git a/helpers/log_daemon/DB/log_db_daemon.pl.in b/helpers/log_daemon/DB/log_db_daemon.pl.in
+index 66b863f..24e0256 100755
+--- a/helpers/log_daemon/DB/log_db_daemon.pl.in
++++ b/helpers/log_daemon/DB/log_db_daemon.pl.in
+@@ -18,7 +18,7 @@ log_db_daemon - Database logging daemon for Squid
+ 
+ log_db_daemon DSN [options]
+ 
+-=head1 DESCRIPTOIN
++=head1 DESCRIPTION
+ 
+ This program writes Squid access.log entries to a database.
+ Presently only accepts the B native format
+@@ -373,7 +373,7 @@ To distinguish only between HITs and MISSes:
+     WHERE squid_request_status LIKE '%MISS%')
+     /
+     (SELECT COUNT(*) FROM access_log)*100
+-    AS pecentage;
++    AS percentage;
+ 
+ =item Response time ranges
+ 
+@@ -433,7 +433,7 @@ Indexes should be created according to the queries that are more frequently run.
+ 
+ This script currently implements only the C (i.e. "append a line to the log") command, therefore the log lines are never purged from the table. This approach has an obvious scalability problem.
+ 
+-One solution would be to implement e.g. the "rotate log" command in a way that would calculate some summary values, put them in a "summary table" and then delete the lines used to caluclate those values.
++One solution would be to implement e.g. the "rotate log" command in a way that would calculate some summary values, put them in a "summary table" and then delete the lines used to calculate those values.
+ 
+ Similar cleanup code could be implemented in an external script and run periodically independently from squid log commands.
+ 
+diff --git a/helpers/negotiate_auth/kerberos/README b/helpers/negotiate_auth/kerberos/README
+index 69c2a6c..d49af11 100644
+--- a/helpers/negotiate_auth/kerberos/README
++++ b/helpers/negotiate_auth/kerberos/README
+@@ -53,7 +53,7 @@ KRB5_KTNAME=/etc/squid/HTTP.keytab
+ export KRB5_KTNAME
+ 
+ If you use a different Kerberos domain than the machine itself is in you can point squid to 
+-the seperate Kerberos config file by setting the following environmnet variable in the startup 
++the separate Kerberos config file by setting the following environment variable in the startup 
+ script.
+ 
+ KRB5_CONFIG=/etc/krb-squid5.conf
+diff --git a/helpers/negotiate_auth/kerberos/negotiate_kerberos_auth.8 b/helpers/negotiate_auth/kerberos/negotiate_kerberos_auth.8
+index b31b046..52a86a2 100644
+--- a/helpers/negotiate_auth/kerberos/negotiate_kerberos_auth.8
++++ b/helpers/negotiate_auth/kerberos/negotiate_kerberos_auth.8
+@@ -69,7 +69,7 @@ KRB5_KTNAME=/etc/squid/HTTP.keytab
+ export KRB5_KTNAME
+ 
+ If you use a different Kerberos domain than the machine itself is in you can point squid to
+-the seperate Kerberos config file by setting the following environmnet variable in the startup
++the separate Kerberos config file by setting the following environment variable in the startup
+ script.
+ 
+ KRB5_CONFIG=/etc/krb5\-squid.conf
+diff --git a/helpers/storeid_rewrite/file/storeid_file_rewrite.8 b/helpers/storeid_rewrite/file/storeid_file_rewrite.8
+index c314387..aafe7d5 100644
+--- a/helpers/storeid_rewrite/file/storeid_file_rewrite.8
++++ b/helpers/storeid_rewrite/file/storeid_file_rewrite.8
+@@ -162,7 +162,7 @@ Eg:
+ Rewrite rules are matched in the same order as they appear in the rules file.
+ So for best performance, sort it in order of frequency of occurrence.
+ .PP
+-This program will automatically detect the existence of a concurrecy channel-ID and adjust appropriately.
++This program will automatically detect the existence of a concurrency channel-ID and adjust appropriately.
+ It may be used with any value 0 or above for the store_id_children concurrency= parameter.
+ .SH "OPTIONS"
+ .IX Header "OPTIONS"
+diff --git a/helpers/storeid_rewrite/file/storeid_file_rewrite.pl.in b/helpers/storeid_rewrite/file/storeid_file_rewrite.pl.in
+index dccd164..12e0d95 100644
+--- a/helpers/storeid_rewrite/file/storeid_file_rewrite.pl.in
++++ b/helpers/storeid_rewrite/file/storeid_file_rewrite.pl.in
+@@ -29,7 +29,7 @@ Eg:
+ Rewrite rules are matched in the same order as they appear in the rules file.
+ So for best performance, sort it in order of frequency of occurrence.
+ 
+-This program will automatically detect the existence of a concurrecy channel-ID and adjust appropriately.
++This program will automatically detect the existence of a concurrency channel-ID and adjust appropriately.
+ It may be used with any value 0 or above for the store_id_children concurrency= parameter.
+ 
+ =head1 OPTIONS
+diff --git a/src/StoreFileSystem.h b/src/StoreFileSystem.h
+index d9a33c5..53ae98f 100644
+--- a/src/StoreFileSystem.h
++++ b/src/StoreFileSystem.h
+@@ -47,7 +47,7 @@
+  \par
+  * configure will take a list of storage types through the
+  * --enable-store-io parameter. This parameter takes a list of
+- * space seperated storage types. For example,
++ * space separated storage types. For example,
+  * --enable-store-io="ufs aufs" .
+  *
+  \par
+diff --git a/src/ipcache.cc b/src/ipcache.cc
+index 0eaab11..f350ccd 100644
+--- a/src/ipcache.cc
++++ b/src/ipcache.cc
+@@ -50,7 +50,7 @@
+  \defgroup IPCacheInternal IP Cache Internals
+  \ingroup IPCacheAPI
+  \todo  when IP cache is provided as a class. These sub-groups will be obsolete
+- *  for now they are used to seperate the public and private functions.
++ *  for now they are used to separate the public and private functions.
+  *  with the private ones all being in IPCachInternal and public in IPCacheAPI
+  *
+  \section InternalOperation Internal Operation
+diff --git a/src/ssl/ssl_crtd.8 b/src/ssl/ssl_crtd.8
+index 9931e7e..ef39ebe 100644
+--- a/src/ssl/ssl_crtd.8
++++ b/src/ssl/ssl_crtd.8
+@@ -33,7 +33,7 @@ is an installed binary.
+ Because the generation and signing of SSL certificates takes time
+ Squid must use external process to handle the work.
+ .
+-This process generates new SSL certificates and uses a disk cache of certificatess
++This process generates new SSL certificates and uses a disk cache of certificates
+ to improve response times on repeated requests.
+ Communication occurs via TCP sockets bound to the loopback interface.
+ .
+@@ -122,7 +122,7 @@ After any change to the signing CA in squid.conf be sure to erase and re-initial
+ .
+ .PP
+ For simple configuration the helper defaults can be used.
+-Only HTTP listening port options are required to enable generation and set the signign CA certificate.
++Only HTTP listening port options are required to enable generation and set the signing CA certificate.
+ For Example:
+ .if !'po4a'hide' .RS
+ .if !'po4a'hide' .B http_port 3128 ssl-bump generate-host-certificates=on dynamic_cert_mem_cache_size=4MB cert=/usr/local/squid/ssl_cert/www.sample.com.pem
diff --git a/squid-3.5.20-mem-usage-out-of-fd.patch b/squid-3.5.20-mem-usage-out-of-fd.patch
new file mode 100644
index 0000000..c09f2b1
--- /dev/null
+++ b/squid-3.5.20-mem-usage-out-of-fd.patch
@@ -0,0 +1,155 @@
+diff -up squid-3.5.20/src/comm/AcceptLimiter.cc.02396660 squid-3.5.20/src/comm/AcceptLimiter.cc
+--- squid-3.5.20/src/comm/AcceptLimiter.cc.02396660	2019-06-05 13:18:11.000000000 +0200
++++ squid-3.5.20/src/comm/AcceptLimiter.cc	2019-06-05 13:21:29.000000000 +0200
+@@ -24,42 +24,33 @@ Comm::AcceptLimiter::Instance()
+ void
+ Comm::AcceptLimiter::defer(const Comm::TcpAcceptor::Pointer &afd)
+ {
+-    ++ (afd->isLimited);
+-    debugs(5, 5, afd->conn << " x" << afd->isLimited);
++    debugs(5, 5, afd->conn << "; already queued: " << deferred_.size());
+     deferred_.push_back(afd);
+ }
+ 
+ void
+ Comm::AcceptLimiter::removeDead(const Comm::TcpAcceptor::Pointer &afd)
+ {
+-    uint64_t abandonedClients = 0;
+-    for (unsigned int i = 0; i < deferred_.size() && afd->isLimited > 0; ++i) {
+-        if (deferred_[i] == afd) {
+-            -- deferred_[i]->isLimited;
+-            deferred_[i] = NULL; // fast. kick() will skip empty entries later.
+-            debugs(5, 5, afd->conn << " x" << afd->isLimited);
+-            ++abandonedClients;
++    for (auto it = deferred_.begin(); it != deferred_.end(); ++it) {
++        if (*it == afd) {
++            *it = nullptr; // fast. kick() will skip empty entries later.
++            debugs(5,4, "Abandoned client TCP SYN by closing socket: " << afd->conn);
++            return;
+         }
+     }
+-    debugs(5,4, "Abandoned " << abandonedClients << " client TCP SYN by closing socket: " << afd->conn);
++    debugs(5,4, "Not found " << afd->conn << " in queue, size: " << deferred_.size());
+ }
+ 
+ void
+ Comm::AcceptLimiter::kick()
+ {
+-    // TODO: this could be optimized further with an iterator to search
+-    //       looking for first non-NULL, followed by dumping the first N
+-    //       with only one shift()/pop_front operation
+-    //  OR, by reimplementing as a list instead of Vector.
+-
+     debugs(5, 5, "size=" << deferred_.size());
+-    while (deferred_.size() > 0 && fdNFree() >= RESERVED_FD) {
++    while (deferred_.size() > 0 && Comm::TcpAcceptor::okToAccept()) {
+         /* NP: shift() is equivalent to pop_front(). Giving us a FIFO queue. */
+         TcpAcceptor::Pointer temp = deferred_.front();
+         deferred_.erase(deferred_.begin());
+         if (temp.valid()) {
+             debugs(5, 5, "doing one.");
+-            -- temp->isLimited;
+             temp->acceptNext();
+             break;
+         }
+diff -up squid-3.5.20/src/comm/AcceptLimiter.h.02396660 squid-3.5.20/src/comm/AcceptLimiter.h
+--- squid-3.5.20/src/comm/AcceptLimiter.h.02396660	2019-06-05 13:18:27.000000000 +0200
++++ squid-3.5.20/src/comm/AcceptLimiter.h	2019-06-05 13:22:09.000000000 +0200
+@@ -11,7 +11,7 @@
+ 
+ #include "comm/TcpAcceptor.h"
+ 
+-#include 
++#include 
+ 
+ namespace Comm
+ {
+@@ -26,16 +26,6 @@ namespace Comm
+  * removeDead - used only by Comm layer ConnAcceptor to remove themselves when dying.
+  * kick - used by Comm layer when FD are closed.
+  */
+-/* TODO this algorithm can be optimized further:
+- *
+- * 1) reduce overheads by only pushing one entry per port to the list?
+- * use TcpAcceptor::isLimited as a flag whether to re-list when kick()'ing
+- * or to NULL an entry while scanning the list for empty spaces.
+- * Side effect: TcpAcceptor->kick() becomes allowed to pull off multiple accept()'s in bunches
+- *
+- * 2) re-implement as a std::queue instead of std::vector
+- * storing head/tail pointers for fast push/pop and avoiding the whole shift() overhead
+- */
+ class AcceptLimiter
+ {
+ 
+@@ -56,7 +46,7 @@ private:
+     static AcceptLimiter Instance_;
+ 
+     /** FIFO queue */
+-    std::vector deferred_;
++    std::deque deferred_;
+ };
+ 
+ }; // namepace Comm
+diff -up squid-3.5.20/src/comm/TcpAcceptor.cc.02396660 squid-3.5.20/src/comm/TcpAcceptor.cc
+--- squid-3.5.20/src/comm/TcpAcceptor.cc.02396660	2019-06-05 13:18:49.000000000 +0200
++++ squid-3.5.20/src/comm/TcpAcceptor.cc	2019-06-05 13:23:49.000000000 +0200
+@@ -41,7 +41,6 @@ CBDATA_NAMESPACED_CLASS_INIT(Comm, TcpAc
+ Comm::TcpAcceptor::TcpAcceptor(const Comm::ConnectionPointer &newConn, const char *note, const Subscription::Pointer &aSub) :
+     AsyncJob("Comm::TcpAcceptor"),
+     errcode(0),
+-    isLimited(0),
+     theCallSub(aSub),
+     conn(newConn),
+     listenPort_()
+@@ -50,7 +49,6 @@ Comm::TcpAcceptor::TcpAcceptor(const Com
+ Comm::TcpAcceptor::TcpAcceptor(const AnyP::PortCfgPointer &p, const char *note, const Subscription::Pointer &aSub) :
+     AsyncJob("Comm::TcpAcceptor"),
+     errcode(0),
+-    isLimited(0),
+     theCallSub(aSub),
+     conn(p->listenConn),
+     listenPort_(p)
+@@ -227,7 +225,6 @@ Comm::TcpAcceptor::doAccept(int fd, void
+         } else {
+             afd->acceptNext();
+         }
+-        SetSelect(fd, COMM_SELECT_READ, Comm::TcpAcceptor::doAccept, afd, 0);
+ 
+     } catch (const std::exception &e) {
+         fatalf("FATAL: error while accepting new client connection: %s\n", e.what());
+@@ -286,6 +283,7 @@ Comm::TcpAcceptor::acceptOne()
+            " accepted new connection " << newConnDetails <<
+            " handler Subscription: " << theCallSub);
+     notify(flag, newConnDetails);
++    SetSelect(conn->fd, COMM_SELECT_READ, doAccept, this, 0);
+ }
+ 
+ void
+diff -up squid-3.5.20/src/comm/TcpAcceptor.h.02396660 squid-3.5.20/src/comm/TcpAcceptor.h
+--- squid-3.5.20/src/comm/TcpAcceptor.h.02396660	2019-06-05 13:18:57.000000000 +0200
++++ squid-3.5.20/src/comm/TcpAcceptor.h	2019-06-05 13:25:05.000000000 +0200
+@@ -74,9 +74,12 @@ public:
+     /// errno code of the last accept() or listen() action if one occurred.
+     int errcode;
+ 
++    /// Method to test if there are enough file descriptors to open a new client connection
++    /// if not the accept() will be postponed
++    static bool okToAccept();
++
+ protected:
+     friend class AcceptLimiter;
+-    int32_t isLimited;                   ///< whether this socket is delayed and on the AcceptLimiter queue.
+ 
+ private:
+     Subscription::Pointer theCallSub;    ///< used to generate AsyncCalls handling our events.
+@@ -91,10 +94,6 @@ private:
+     /// listen socket closure handler
+     AsyncCall::Pointer closer_;
+ 
+-    /// Method to test if there are enough file descriptors to open a new client connection
+-    /// if not the accept() will be postponed
+-    static bool okToAccept();
+-
+     /// Method callback for whenever an FD is ready to accept a client connection.
+     static void doAccept(int fd, void *data);
+ 
diff --git a/squid-3.5.20-tunnel-sigsegv.patch b/squid-3.5.20-tunnel-sigsegv.patch
new file mode 100644
index 0000000..198ec77
--- /dev/null
+++ b/squid-3.5.20-tunnel-sigsegv.patch
@@ -0,0 +1,14 @@
+diff -up ./src/tunnel.cc.orig ./src/tunnel.cc
+--- ./src/tunnel.cc.orig	2016-07-01 13:37:50.000000000 +0200
++++ ./src/tunnel.cc	2016-07-15 16:25:07.132823875 +0200
+@@ -475,7 +475,9 @@ TunnelStateData::handleConnectResponse(c
+     *status_ptr = rep.sline.status();
+ 
+     // we need to relay the 401/407 responses when login=PASS(THRU)
+-    const char *pwd = server.conn->getPeer()->login;
++    CachePeer *peer = server.conn->getPeer();
++    const char *pwd = (peer) ? peer->login : NULL;
++
+     const bool relay = pwd && (strcmp(pwd, "PASS") != 0 || strcmp(pwd, "PASSTHRU") != 0) &&
+                        (*status_ptr == Http::scProxyAuthenticationRequired ||
+                         *status_ptr == Http::scUnauthorized);
diff --git a/squid-CVE-2016-10002.patch b/squid-CVE-2016-10002.patch
new file mode 100644
index 0000000..ce76f4f
--- /dev/null
+++ b/squid-CVE-2016-10002.patch
@@ -0,0 +1,290 @@
+------------------------------------------------------------
+revno: 14109
+revision-id: squid3@treenet.co.nz-20161111060325-yh8chavvnzuvfh3h
+parent: squid3@treenet.co.nz-20161101112231-k77st4up2sekl5zx
+fixes bug: http://bugs.squid-cache.org/show_bug.cgi?id=3379
+author: Garri Djavadyan , Amos Jeffries 
+committer: Amos Jeffries 
+branch nick: 3.5
+timestamp: Fri 2016-11-11 19:03:25 +1300
+message:
+  Bug 3379: Combination of If-Match and a Cache Hit result in TCP Connection Failure
+------------------------------------------------------------
+# Bazaar merge directive format 2 (Bazaar 0.90)
+# revision_id: squid3@treenet.co.nz-20161111060325-yh8chavvnzuvfh3h
+# target_branch: http://bzr.squid-cache.org/bzr/squid3/3.5
+# testament_sha1: 50d66878a765925d9a64569b3c226bebdee1f736
+# timestamp: 2016-11-11 06:10:37 +0000
+# source_branch: http://bzr.squid-cache.org/bzr/squid3/3.5
+# base_revision_id: squid3@treenet.co.nz-20161101112231-\
+#   k77st4up2sekl5zx
+# 
+# Begin patch
+=== modified file 'src/client_side_reply.cc'
+--- src/client_side_reply.cc  2016-10-09 19:47:26 +0000
++++ src/client_side_reply.cc  2016-11-11 06:03:25 +0000
+@@ -589,6 +589,7 @@
+         debugs(88, 5, "negative-HIT");
+         http->logType = LOG_TCP_NEGATIVE_HIT;
+         sendMoreData(result);
++        return;
+     } else if (blockedHit()) {
+         debugs(88, 5, "send_hit forces a MISS");
+         http->logType = LOG_TCP_MISS;
+@@ -641,27 +642,29 @@
+             http->logType = LOG_TCP_MISS;
+             processMiss();
+         }
++        return;
+     } else if (r->conditional()) {
+         debugs(88, 5, "conditional HIT");
+-        processConditional(result);
+-    } else {
+-        /*
+-         * plain ol' cache hit
+-         */
+-        debugs(88, 5, "plain old HIT");
++        if (processConditional(result))
++            return;
++    }
++
++    /*
++     * plain ol' cache hit
++     */
++    debugs(88, 5, "plain old HIT");
+ 
+ #if USE_DELAY_POOLS
+-        if (e->store_status != STORE_OK)
+-            http->logType = LOG_TCP_MISS;
+-        else
++    if (e->store_status != STORE_OK)
++        http->logType = LOG_TCP_MISS;
++    else
+ #endif
+-            if (e->mem_status == IN_MEMORY)
+-                http->logType = LOG_TCP_MEM_HIT;
+-            else if (Config.onoff.offline)
+-                http->logType = LOG_TCP_OFFLINE_HIT;
++        if (e->mem_status == IN_MEMORY)
++            http->logType = LOG_TCP_MEM_HIT;
++        else if (Config.onoff.offline)
++            http->logType = LOG_TCP_OFFLINE_HIT;
+ 
+-        sendMoreData(result);
+-    }
++    sendMoreData(result);
+ }
+ 
+ /**
+@@ -755,17 +758,16 @@
+ }
+ 
+ /// process conditional request from client
+-void
++bool
+ clientReplyContext::processConditional(StoreIOBuffer &result)
+ {
+     StoreEntry *const e = http->storeEntry();
+ 
+     if (e->getReply()->sline.status() != Http::scOkay) {
+-        debugs(88, 4, "clientReplyContext::processConditional: Reply code " <<
+-               e->getReply()->sline.status() << " != 200");
++        debugs(88, 4, "Reply code " << e->getReply()->sline.status() << " != 200");
+         http->logType = LOG_TCP_MISS;
+         processMiss();
+-        return;
++        return true;
+     }
+ 
+     HttpRequest &r = *http->request;
+@@ -773,7 +775,7 @@
+     if (r.header.has(HDR_IF_MATCH) && !e->hasIfMatchEtag(r)) {
+         // RFC 2616: reply with 412 Precondition Failed if If-Match did not match
+         sendPreconditionFailedError();
+-        return;
++        return true;
+     }
+ 
+     bool matchedIfNoneMatch = false;
+@@ -786,14 +788,14 @@
+             r.header.delById(HDR_IF_MODIFIED_SINCE);
+             http->logType = LOG_TCP_MISS;
+             sendMoreData(result);
+-            return;
++            return true;
+         }
+ 
+         if (!r.flags.ims) {
+             // RFC 2616: if If-None-Match matched and there is no IMS,
+             // reply with 304 Not Modified or 412 Precondition Failed
+             sendNotModifiedOrPreconditionFailedError();
+-            return;
++            return true;
+         }
+ 
+         // otherwise check IMS below to decide if we reply with 304 or 412
+@@ -805,19 +807,20 @@
+         if (e->modifiedSince(&r)) {
+             http->logType = LOG_TCP_IMS_HIT;
+             sendMoreData(result);
+-            return;
+-        }
+ 
+-        if (matchedIfNoneMatch) {
++        } else if (matchedIfNoneMatch) {
+             // If-None-Match matched, reply with 304 Not Modified or
+             // 412 Precondition Failed
+             sendNotModifiedOrPreconditionFailedError();
+-            return;
++
++        } else {
++            // otherwise reply with 304 Not Modified
++            sendNotModified();
+         }
+-
+-        // otherwise reply with 304 Not Modified
+-        sendNotModified();
++        return true;
+     }
++
++    return false;
+ }
+ 
+ /// whether squid.conf send_hit prevents us from serving this hit
+
+=== modified file 'src/client_side_reply.h'
+--- src/client_side_reply.h 2016-09-23 15:28:42 +0000
++++ src/client_side_reply.h 2016-11-11 06:03:25 +0000
+@@ -114,7 +114,7 @@
+     bool alwaysAllowResponse(Http::StatusCode sline) const;
+     int checkTransferDone();
+     void processOnlyIfCachedMiss();
+-    void processConditional(StoreIOBuffer &result);
++    bool processConditional(StoreIOBuffer &result);
+     void cacheHit(StoreIOBuffer result);
+     void handleIMSReply(StoreIOBuffer result);
+     void sendMoreData(StoreIOBuffer result);
+
+------------------------------------------------------------
+revno: 14126
+revision-id: squid3@treenet.co.nz-20161215103357-827wow3k1y3k9yql
+parent: squid3@treenet.co.nz-20161215093634-ykbs6tv8pdusz7cj
+fixes bug: http://bugs.squid-cache.org/show_bug.cgi?id=4169
+author: Garri Djavadyan 
+committer: Amos Jeffries 
+branch nick: 3.5
+timestamp: Thu 2016-12-15 23:33:57 +1300
+message:
+  Bug 4169: HIT marked as MISS when If-None-Match does not match
+------------------------------------------------------------
+# Bazaar merge directive format 2 (Bazaar 0.90)
+# revision_id: squid3@treenet.co.nz-20161215103357-827wow3k1y3k9yql
+# target_branch: http://bzr.squid-cache.org/bzr/squid3/3.5
+# testament_sha1: 258cd3e400bcb137a7bcdf6e7e0240287ea581a3
+# timestamp: 2016-12-15 10:34:30 +0000
+# source_branch: http://bzr.squid-cache.org/bzr/squid3/3.5
+# base_revision_id: squid3@treenet.co.nz-20161215093634-\
+#   ykbs6tv8pdusz7cj
+# 
+# Begin patch
+=== modified file 'src/LogTags.h'
+--- src/LogTags.h 2016-10-09 19:47:26 +0000
++++ src/LogTags.h 2016-12-15 10:33:57 +0000
+@@ -28,6 +28,7 @@
+     LOG_TCP_REFRESH_MODIFIED,   // refresh from origin replaced existing entry
+     LOG_TCP_CLIENT_REFRESH_MISS,
+     LOG_TCP_IMS_HIT,
++    LOG_TCP_INM_HIT,
+     LOG_TCP_SWAPFAIL_MISS,
+     LOG_TCP_NEGATIVE_HIT,
+     LOG_TCP_MEM_HIT,
+@@ -54,6 +55,7 @@
+     return
+         (code == LOG_TCP_HIT) ||
+         (code == LOG_TCP_IMS_HIT) ||
++        (code == LOG_TCP_INM_HIT) ||
+         (code == LOG_TCP_REFRESH_FAIL_OLD) ||
+         (code == LOG_TCP_REFRESH_UNMODIFIED) ||
+         (code == LOG_TCP_NEGATIVE_HIT) ||
+
+=== modified file 'src/client_side.cc'
+--- src/client_side.cc  2016-12-09 01:58:33 +0000
++++ src/client_side.cc  2016-12-15 10:33:57 +0000
+@@ -429,6 +429,7 @@
+         statCounter.client_http.nearHitSvcTime.count(svc_time);
+         break;
+ 
++    case LOG_TCP_INM_HIT:
+     case LOG_TCP_IMS_HIT:
+         statCounter.client_http.nearMissSvcTime.count(svc_time);
+         break;
+
+=== modified file 'src/client_side_reply.cc'
+--- src/client_side_reply.cc  2016-12-15 09:36:34 +0000
++++ src/client_side_reply.cc  2016-12-15 10:33:57 +0000
+@@ -778,40 +778,27 @@
+         return true;
+     }
+ 
+-    bool matchedIfNoneMatch = false;
+     if (r.header.has(HDR_IF_NONE_MATCH)) {
+-        if (!e->hasIfNoneMatchEtag(r)) {
+-            // RFC 2616: ignore IMS if If-None-Match did not match
+-            r.flags.ims = false;
+-            r.ims = -1;
+-            r.imslen = 0;
+-            r.header.delById(HDR_IF_MODIFIED_SINCE);
+-            http->logType = LOG_TCP_MISS;
+-            sendMoreData(result);
+-            return true;
+-        }
++        // RFC 7232: If-None-Match recipient MUST ignore IMS
++        r.flags.ims = false;
++        r.ims = -1;
++        r.imslen = 0;
++        r.header.delById(HDR_IF_MODIFIED_SINCE);
+ 
+-        if (!r.flags.ims) {
+-            // RFC 2616: if If-None-Match matched and there is no IMS,
+-            // reply with 304 Not Modified or 412 Precondition Failed
++        if (e->hasIfNoneMatchEtag(r)) {
+             sendNotModifiedOrPreconditionFailedError();
+             return true;
+         }
+ 
+-        // otherwise check IMS below to decide if we reply with 304 or 412
+-        matchedIfNoneMatch = true;
++        // None-Match is true (no ETag matched); treat as an unconditional hit
++        return false;
+     }
+ 
+     if (r.flags.ims) {
+         // handle If-Modified-Since requests from the client
+         if (e->modifiedSince(&r)) {
+-            http->logType = LOG_TCP_IMS_HIT;
+-            sendMoreData(result);
+-
+-        } else if (matchedIfNoneMatch) {
+-            // If-None-Match matched, reply with 304 Not Modified or
+-            // 412 Precondition Failed
+-            sendNotModifiedOrPreconditionFailedError();
++            // Modified-Since is true; treat as an unconditional hit
++            return false;
+ 
+         } else {
+             // otherwise reply with 304 Not Modified
+@@ -1974,7 +1961,12 @@
+     StoreEntry *e = http->storeEntry();
+     const time_t timestamp = e->timestamp;
+     HttpReply *const temprep = e->getReply()->make304();
+-    http->logType = LOG_TCP_IMS_HIT;
++    // log as TCP_INM_HIT if code 304 generated for
++    // If-None-Match request
++    if (!http->request->flags.ims)
++        http->logType = LOG_TCP_INM_HIT;
++    else
++        http->logType = LOG_TCP_IMS_HIT;
+     removeClientStoreReference(&sc, http);
+     createStoreEntry(http->request->method, RequestFlags());
+     e = http->storeEntry();
+
diff --git a/squid-migrate-conf.py b/squid-migrate-conf.py
new file mode 100644
index 0000000..13e1c22
--- /dev/null
+++ b/squid-migrate-conf.py
@@ -0,0 +1,287 @@
+#!/usr/bin/python -tt
+# -*- coding: utf-8 -*-
+#
+# This script will help you with migrating squid-3.3 conf files to squid-3.5 conf files
+# Copyright (C) 2016 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Authors: Lubos Uhliarik 
+
+import sys
+import os
+import re
+import shutil
+import traceback
+import argparse
+import glob
+
+class ConfMigration:
+    RE_LOG_ACCESS="log_access\s+(\w+)\s+"
+    RE_LOG_ACCESS_DENY_REP="access_log none "
+    RE_LOG_ACCESS_ALLOW_REP="access_log daemon:/var/log/squid/access.log squid "
+    RE_LOG_ACCESS_TEXT="log_access"
+
+    RE_LOG_ICAP="log_icap\s+"
+    RE_LOG_ICAP_REP="icap_log daemon:/var/log/squid/icap.log "
+    RE_LOG_ICAP_TEXT="log_icap"
+
+    RE_HIER_STOPLIST="hierarchy_stoplist\s+(.*)"
+    RE_HIER_STOPLIST_REP="acl %s url_regex %s\nalways_direct allow %s"
+    RE_HIER_STOPLIST_TEXT="hierarchy_stoplist"
+
+    HIER_ACL_NAME="migrated_hs_%d_%d"
+
+    RE_INCLUDE_CHECK="\s*include\s+(.*)"
+
+    COMMENT_FMT="# migrated automatically by squid-migrate-conf, the original configuration was: %s\n%s"
+
+    DEFAULT_SQUID_CONF="/etc/squid/squid.conf"
+    DEFAULT_BACKUP_EXT=".bak"
+    DEFAULT_LEVEL_INDENT=3
+
+    MAX_NESTED_INCLUDES=16
+
+    def __init__(self, args, level=0, squid_conf='', conf_seq=0):
+        self.args = args
+
+        if squid_conf:
+            self.squid_conf = squid_conf
+        else:
+            self.squid_conf = args.squid_conf
+        self.write_changes = args.write_changes
+        self.debug = args.debug
+
+        self.conf_seq = conf_seq
+        self.acl_seq = 0
+
+        self.line_num = 0
+        self.level = level
+        if (not os.path.isfile(self.squid_conf)):
+            sys.stderr.write("%sError: the config file %s does not exist\n" % (self.get_prefix_str(), self.squid_conf))
+            sys.exit(1)
+
+        self.squid_bak_conf = self.get_backup_name()
+
+        self.migrated_squid_conf_data = []
+        self.squid_conf_data = None
+
+
+        print ("Migrating: " + self.squid_conf)
+
+    def print_info(self, text=''):
+        if (self.debug):
+            print "%s%s" % (self.get_prefix_str(), text)
+
+    def get_backup_name(self):
+        file_idx = 1
+        tmp_fn = self.squid_conf + self.DEFAULT_BACKUP_EXT
+
+        while (os.path.isfile(tmp_fn)):
+            tmp_fn = self.squid_conf + self.DEFAULT_BACKUP_EXT + str(file_idx)
+            file_idx = file_idx + 1
+
+        return tmp_fn
+
+    #
+    #  From squid config documentation:
+    #
+    #  Configuration options can be included using the "include" directive.
+    #  Include takes a list of files to include. Quoting and wildcards are
+    #  supported.
+    #
+    #  For example,
+    #
+    #  include /path/to/included/file/squid.acl.config
+    #
+    #  Includes can be nested up to a hard-coded depth of 16 levels.
+    #  This arbitrary restriction is to prevent recursive include references
+    #  from causing Squid entering an infinite loop whilst trying to load
+    #  configuration files.
+    #
+    def check_include(self, line=''):
+        m = re.match(self.RE_INCLUDE_CHECK, line)
+        include_list = ""
+        if not (m is None):
+             include_list = re.split('\s+', m.group(1))
+             for include_file_re in include_list:
+                 # included file can be written in regexp syntax
+                 for include_file in glob.glob(include_file_re):
+                     self.print_info("A config file %s was found and it will be included" % (include_file))
+                     if os.path.isfile(include_file):
+                         self.print_info("Migrating the included config file %s" % (include_file))
+                         conf = ConfMigration(self.args, self.level+1, include_file, self.conf_seq+1)
+                         conf.migrate()
+
+                 # check, if included file exists
+                 if (len(glob.glob(include_file_re)) == 0 and not (os.path.isfile(include_file_re))):
+                     self.print_info("The config file %s does not exist." % (include_file_re))
+
+    def print_sub_text(self, text, new_str):
+        if self.write_changes:
+            print "File: '%s', line: %d - the directive %s was replaced by %s" % (self.squid_conf, self.line_num, text, new_str)
+        else:
+            print "File: '%s', line: %d - the directive %s could be replaced by %s" % (self.squid_conf, self.line_num, text, new_str)
+
+    def add_conf_comment(self, old_line, line):
+        return self.COMMENT_FMT % (old_line, line)
+
+    def sub_line_ad(self, line, line_re, allow_sub, deny_sub, text):
+        new_line = line
+        m = re.match(line_re, line)
+        if not (m is None):
+            # check, if allow or deny was used and select corresponding sub
+            sub_text = allow_sub
+            if (re.match('allow', m.group(1), re.IGNORECASE)):
+                new_line = re.sub(line_re, sub_text, line)
+            elif (re.match('deny', m.group(1), re.IGNORECASE)):
+                sub_text = deny_sub
+                new_line = re.sub(line_re, sub_text, line)
+
+            # print out, if there was any change and add comment to conf line, if so
+            if not (new_line is line):
+                self.print_sub_text(text + " " +  m.group(1), sub_text)
+                new_line = self.add_conf_comment(line, new_line)
+
+        return new_line
+
+    def sub_line(self, line, line_re, sub, text):
+        new_line = line
+        m = re.match(line_re, line)
+        if not (m is None):
+            new_line = re.sub(line_re, sub, line)
+
+            # print out, if there was any change and add comment to conf line, if so
+            if not (new_line is line):
+                self.print_sub_text(text, sub)
+                new_line = self.add_conf_comment(line, new_line)
+
+        return new_line
+
+    def rep_hier_stoplist(self, line, sub, words):
+        wordlist = words.split(' ')
+
+        esc_wordlist = []
+        for w in wordlist:
+            esc_wordlist.append(re.escape(w))
+
+        # unique acl name for hierarchy_stoplist acl
+        acl_name = self.HIER_ACL_NAME % (self.conf_seq, self.acl_seq)
+        return sub % (acl_name, ' '.join(esc_wordlist), acl_name)
+
+    def sub_hier_stoplist(self, line, line_re, sub, text):
+        new_line = line
+        m = re.match(line_re, line)
+        if (not (m is None)):
+            new_line = self.rep_hier_stoplist(line, sub, m.group(1))
+
+        # print out, if there was any change and add comment to conf line, if so
+        if not (new_line is line):
+            self.print_sub_text(text, sub)
+            new_line = self.add_conf_comment(line, new_line)
+
+        return new_line
+
+    def process_conf_lines(self):
+        for line in self.squid_conf_data.split(os.linesep):
+
+            # do not migrate comments
+            if not line.strip().startswith('#'):
+               self.check_include(line)
+               line = self.sub_line_ad(line, self.RE_LOG_ACCESS, self.RE_LOG_ACCESS_ALLOW_REP, self.RE_LOG_ACCESS_DENY_REP, self.RE_LOG_ACCESS_TEXT)
+               line = self.sub_line(line, self.RE_LOG_ICAP, self.RE_LOG_ICAP_REP, self.RE_LOG_ICAP_TEXT)
+               line = self.sub_hier_stoplist(line, self.RE_HIER_STOPLIST, self.RE_HIER_STOPLIST_REP, self.RE_HIER_STOPLIST_TEXT)
+
+            self.migrated_squid_conf_data.append(line)
+
+            self.line_num = self.line_num + 1
+
+    def migrate(self):
+        # prevent infinite loop
+        if (self.level > ConfMigration.MAX_NESTED_INCLUDES):
+            sys.stderr.write("WARNING: the maximum number of nested includes was reached\n")
+            return
+
+        self.read_conf()
+        self.process_conf_lines()
+        if self.write_changes:
+            if (not (set(self.migrated_squid_conf_data) == set(self.squid_conf_data.split(os.linesep)))):
+                self.write_conf()
+
+        self.print_info("The migration finished successfully")
+
+    def get_prefix_str(self):
+        return (("    " * int(self.level)) + "["+  self.squid_conf + "@%d]: " % (self.line_num))
+
+    def read_conf(self):
+        self.print_info("Reading squid conf: " + self.squid_conf)
+        try:
+           self.in_file = open(self.squid_conf, 'r')
+           self.squid_conf_data = self.in_file.read()
+           self.in_file.close()
+        except Exception as e:
+           sys.stderr.write("%sError: %s\n" % (self.get_prefix_str(), e))
+           sys.exit(1)
+
+    def write_conf(self):
+        self.print_info("Creating backup conf: %s" % (self.squid_bak_conf))
+        self.print_info("Writing changes to: %s" % (self.squid_conf))
+        try:
+           shutil.copyfile(self.squid_conf, self.squid_bak_conf)
+           self.out_file = open(self.squid_conf, "w")
+           self.out_file.write(os.linesep.join(self.migrated_squid_conf_data))
+           self.out_file.close()
+        except Exception as e:
+           sys.stderr.write("%s Error: %s\n" % (self.get_prefix_str(), e))
+           sys.exit(1)
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='The script migrates the squid 3.3 configuration files to configuration files which are compatible with squid 3.5.')
+    parser.add_argument('--conf', dest='squid_conf', action='store',
+                        default=ConfMigration.DEFAULT_SQUID_CONF,
+                        help='specify filename of squid configuration (default: %s)' % (ConfMigration.DEFAULT_SQUID_CONF))
+    parser.add_argument('--write-changes', dest='write_changes', action='store_true',
+                        default=False,
+                        help='The changes are written to corresponding configuration files')
+    parser.add_argument('--debug', dest="debug", action='store_true', default=False, help='print debug messages to stderr')
+    return parser.parse_args()
+
+if __name__ == '__main__':
+    # parse args from command line
+    args = parse_args()
+
+    # check if config file exists
+    if (not os.path.exists(args.squid_conf)):
+        sys.stderr.write("Error: the file %s does not exist\n" % (args.squid_conf))
+        sys.exit(1)
+
+    # change working directory
+    script_dir = os.getcwd()
+    if (os.path.dirname(args.squid_conf)):
+        os.chdir(os.path.dirname(args.squid_conf))
+
+    # start migration
+    try:
+        conf = ConfMigration(args, 0)
+        conf.migrate()
+    finally:
+        print ""
+
+        if not args.write_changes:
+            print "The changes have NOT been written to config files.\nUse the --write-changes option to write the changes"
+        else:
+            print "The changes have been written to config files!"
+
+        os.chdir(script_dir)
diff --git a/squid.init b/squid.init
new file mode 100644
index 0000000..045f082
--- /dev/null
+++ b/squid.init
@@ -0,0 +1,180 @@
+#!/bin/bash
+# chkconfig: - 90 25
+# pidfile: /var/run/squid.pid
+# config: /etc/squid/squid.conf
+#
+### BEGIN INIT INFO
+# Provides: squid
+# Short-Description: starting and stopping Squid Internet Object Cache
+# Description: Squid - Internet Object Cache. Internet object caching is \
+#       a way to store requested Internet objects (i.e., data available \
+#       via the HTTP, FTP, and gopher protocols) on a system closer to the \
+#       requesting site than to the source. Web browsers can then use the \
+#       local Squid cache as a proxy HTTP server, reducing access time as \
+#       well as bandwidth consumption.
+### END INIT INFO
+
+
+PATH=/usr/bin:/sbin:/bin:/usr/sbin
+export PATH
+
+# Source function library.
+. /etc/rc.d/init.d/functions
+
+# Source networking configuration.
+. /etc/sysconfig/network
+
+if [ -f /etc/sysconfig/squid ]; then
+	. /etc/sysconfig/squid
+fi
+
+# don't raise an error if the config file is incomplete
+# set defaults instead:
+SQUID_OPTS=${SQUID_OPTS:-""}
+SQUID_PIDFILE_TIMEOUT=${SQUID_PIDFILE_TIMEOUT:-20}
+SQUID_SHUTDOWN_TIMEOUT=${SQUID_SHUTDOWN_TIMEOUT:-100}
+SQUID_CONF=${SQUID_CONF:-"/etc/squid/squid.conf"}
+
+# determine the name of the squid binary
+[ -f /usr/sbin/squid ] && SQUID=squid
+
+prog="$SQUID"
+
+# determine which one is the cache_swap directory
+CACHE_SWAP=`sed -e 's/#.*//g' $SQUID_CONF | \
+	grep cache_dir | awk '{ print $3 }'`
+
+RETVAL=0
+
+probe() {
+	# Check that networking is up.
+	[ ${NETWORKING} = "no" ] && exit 1
+
+	[ `id -u` -ne 0 ] && exit 4
+
+	# check if the squid conf file is present
+	[ -f $SQUID_CONF ] || exit 6
+}
+
+start() {
+	probe
+
+	parse=`$SQUID -k parse -f $SQUID_CONF 2>&1`
+	RETVAL=$?
+	if [ $RETVAL -ne 0 ]; then
+		echo -n $"Starting $prog: "
+		echo_failure
+		echo
+		echo "$parse"
+		return 1
+	fi
+	for adir in $CACHE_SWAP; do
+		if [ ! -d $adir/00 ]; then
+			echo -n "init_cache_dir $adir... "
+			$SQUID -z -F -f $SQUID_CONF >> /var/log/squid/squid.out 2>&1
+		fi
+	done
+	echo -n $"Starting $prog: "
+	$SQUID $SQUID_OPTS -f $SQUID_CONF >> /var/log/squid/squid.out 2>&1
+	RETVAL=$?
+	if [ $RETVAL -eq 0 ]; then
+		timeout=0;
+		while : ; do
+			[ ! -f /var/run/squid.pid ] || break
+			if [ $timeout -ge $SQUID_PIDFILE_TIMEOUT ]; then
+				RETVAL=1
+				break
+			fi
+			sleep 1 && echo -n "."
+			timeout=$((timeout+1))
+		done
+	fi
+	[ $RETVAL -eq 0 ] && touch /var/lock/subsys/$SQUID
+	[ $RETVAL -eq 0 ] && echo_success
+	[ $RETVAL -ne 0 ] && echo_failure
+	echo
+	return $RETVAL
+}
+
+stop() {
+	echo -n $"Stopping $prog: "
+	$SQUID -k check -f $SQUID_CONF >> /var/log/squid/squid.out 2>&1
+	RETVAL=$?
+	if [ $RETVAL -eq 0 ] ; then
+		$SQUID -k shutdown -f $SQUID_CONF &
+		rm -f /var/lock/subsys/$SQUID
+		timeout=0
+		while : ; do
+			[ -f /var/run/squid.pid ] || break
+			if [ $timeout -ge $SQUID_SHUTDOWN_TIMEOUT ]; then
+				echo
+				return 1
+			fi
+			sleep 2 && echo -n "."
+			timeout=$((timeout+2))
+		done
+		echo_success
+		echo
+	else
+		echo_failure
+		if [ ! -e /var/lock/subsys/$SQUID ]; then
+			RETVAL=0
+		fi
+		echo
+	fi
+	return $RETVAL
+}
+
+reload() {
+	$SQUID $SQUID_OPTS -k reconfigure -f $SQUID_CONF
+}
+
+restart() {
+	stop
+	start
+}
+
+condrestart() {
+	[ -e /var/lock/subsys/squid ] && restart || :
+}
+
+rhstatus() {
+	status $SQUID && $SQUID -k check -f $SQUID_CONF
+}
+
+
+case "$1" in
+start)
+	start
+	;;
+
+stop)
+	stop
+	;;
+
+reload|force-reload)
+	reload
+	;;
+
+restart)
+	restart
+	;;
+
+condrestart|try-restart)
+	condrestart
+	;;
+
+status)
+	rhstatus
+	;;
+
+probe)
+	probe
+	;;
+
+*)
+	echo $"Usage: $0 {start|stop|status|reload|force-reload|restart|try-restart|probe}"
+	exit 2
+esac
+
+exit $?
diff --git a/squid.logrotate b/squid.logrotate
new file mode 100644
index 0000000..4a0406f
--- /dev/null
+++ b/squid.logrotate
@@ -0,0 +1,16 @@
+/var/log/squid/*.log {
+    weekly
+    rotate 5
+    compress
+    notifempty
+    missingok
+    nocreate
+    sharedscripts
+    postrotate
+      # Asks squid to reopen its logs. (logfile_rotate 0 is set in squid.conf)
+      # errors redirected to make it silent if squid is not running
+      /usr/sbin/squid -k rotate 2>/dev/null
+      # Wait a little to allow Squid to catch up before the logs are compressed
+      sleep 1
+    endscript
+}
diff --git a/squid.nm b/squid.nm
new file mode 100755
index 0000000..552816f
--- /dev/null
+++ b/squid.nm
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+case "$2" in
+        up|down|vpn-up|vpn-down)
+                n=20
+                while /usr/sbin/squid -k check >/dev/null 2>&1 && [ ! -f /var/run/squid.pid ] && [ $n -gt 0 ]; do
+                    sleep 1
+                    n=`expr $n - 1`
+                done
+
+                if [ -f /var/run/squid.pid ]; then
+                        /bin/systemctl reload squid.service || :
+                fi
+                ;;
+esac
diff --git a/squid.pam b/squid.pam
new file mode 100644
index 0000000..1d78594
--- /dev/null
+++ b/squid.pam
@@ -0,0 +1,3 @@
+#%PAM-1.0
+auth		include		password-auth
+account		include		password-auth
diff --git a/squid.service b/squid.service
new file mode 100644
index 0000000..85faf2d
--- /dev/null
+++ b/squid.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=Squid caching proxy
+After=syslog.target network.target nss-lookup.target
+
+[Service]
+Type=forking
+LimitNOFILE=16384
+EnvironmentFile=/etc/sysconfig/squid
+ExecStartPre=/usr/libexec/squid/cache_swap.sh
+ExecStart=/usr/sbin/squid $SQUID_OPTS -f $SQUID_CONF
+ExecReload=/usr/sbin/squid $SQUID_OPTS -k reconfigure -f $SQUID_CONF
+ExecStop=/usr/sbin/squid -k shutdown -f $SQUID_CONF
+TimeoutSec=0
+
+[Install]
+WantedBy=multi-user.target
diff --git a/squid.spec b/squid.spec
new file mode 100644
index 0000000..8cfc397
--- /dev/null
+++ b/squid.spec
@@ -0,0 +1,1708 @@
+## % define _use_internal_dependency_generator 0
+%define __perl_requires %{SOURCE98}
+## % define __find_requires %{SOURCE99}
+
+Name:     squid
+Version:  3.5.20
+Release:  17%{?dist}.7
+Summary:  The Squid proxy caching server
+Epoch:    7
+# See CREDITS for breakdown of non GPLv2+ code
+License:  GPLv2+ and (LGPLv2+ and MIT and BSD and Public Domain)
+Group:    System Environment/Daemons
+URL:      http://www.squid-cache.org
+Source0:  http://www.squid-cache.org/Versions/v3/3.5/squid-%{version}.tar.xz
+Source1:  http://www.squid-cache.org/Versions/v3/3.5/squid-%{version}.tar.xz.asc
+Source2:  squid.init
+Source3:  squid.logrotate
+Source4:  squid.sysconfig
+Source5:  squid.pam
+Source6:  squid.nm
+Source7:  squid.service
+Source8:  cache_swap.sh
+Source98: perl-requires-squid.sh
+Source99: squid-migrate-conf.py
+
+# Local patches
+# Applying upstream patches first makes it less likely that local patches
+# will break upstream ones.
+Patch201: squid-3.1.0.9-config.patch
+Patch202: squid-3.1.0.9-location.patch
+Patch203: squid-3.0.STABLE1-perlpath.patch
+Patch204: squid-3.2.0.9-fpic.patch
+Patch205: squid-3.1.9-ltdl.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=980511
+Patch206: squid-3.3.8-active-ftp-1.patch
+Patch207: squid-3.3.8-active-ftp-2.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=1265328#c23
+Patch208: squid-3.5.10-ssl-helper.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=1378025
+# http://bazaar.launchpad.net/~squid/squid/3.4/revision/12713
+Patch209: squid-3.5.20-conf-casecmp.patch
+# http://www.squid-cache.org/Versions/v3/3.5/changesets/SQUID-2016_11.patch
+Patch210: squid-CVE-2016-10002.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=1404817
+Patch211: squid-3.5.20-tunnel-sigsegv.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=1414853
+Patch212: squid-3.5.20-man-typos.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=1290404
+Patch213: squid-3.5.20-man-see-also.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=1620546
+Patch214: squid-3.5.20-empty-cname.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=1690551
+Patch215: squid-3.5.20-cache-peer-tolower.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=1680022
+Patch216: squid-3.5.20-https-packet-size.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=1717430
+Patch217: squid-3.5.20-mem-usage-out-of-fd.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=1676420
+Patch218: squid-3.5.20-cache-siblings-gw.patch
+
+
+# Security Fixes:
+
+# https://bugzilla.redhat.com/show_bug.cgi?id=1727744
+# Regression caused by original patch fixed -
+# https://bugzilla.redhat.com/show_bug.cgi?id=1890581
+Patch500: squid-3.5.20-CVE-2019-13345.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=1582301
+Patch501: squid-3.5.20-CVE-2018-1000024.patch
+Patch502: squid-3.5.20-CVE-2018-1000027.patch
+Patch503: squid-3.5.20-CVE-2019-12525.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=1828361
+Patch504: squid-3.5.20-CVE-2020-11945.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=1828362
+Patch505: squid-3.5.20-CVE-2019-12519.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=1798540
+# https://bugzilla.redhat.com/show_bug.cgi?id=1798552
+Patch506: squid-3.5.20-CVE-2020-8449-and-8450.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=1852550
+Patch507: squid-3.5.20-CVE-2020-15049.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=1802517
+Patch508: squid-3.5.20-CVE-2019-12528.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=1871705
+Patch509: squid-3.5.20-CVE-2020-24606.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=1871700
+Patch510: squid-3.5.20-CVE-2020-15810.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=1871702
+Patch511: squid-3.5.20-CVE-2020-15811.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=1939925
+Patch512: squid-3.5.20-CVE-2020-25097.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=2100721
+Patch513: squid-3.5.20-CVE-2021-46784.patch
+
+Buildroot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
+Requires: bash >= 2.0
+Requires: squid-migration-script
+Requires(pre): shadow-utils
+Requires(post): /sbin/chkconfig
+Requires(preun): /sbin/chkconfig
+Requires(post): systemd
+Requires(preun): systemd
+Requires(postun): systemd
+# squid_ldap_auth and other LDAP helpers require OpenLDAP
+BuildRequires: openldap-devel
+# squid_pam_auth requires PAM development libs
+BuildRequires: pam-devel
+# SSL support requires OpenSSL
+BuildRequires: openssl-devel
+# squid_kerb_aut requires Kerberos development libs
+BuildRequires: krb5-devel
+# ESI support requires Expat & libxml2
+BuildRequires: expat-devel libxml2-devel
+# TPROXY requires libcap, and also increases security somewhat
+BuildRequires: libcap-devel
+# eCAP support
+BuildRequires: libecap-devel >= 1.0.0
+# 
+BuildRequires: libtool libtool-ltdl-devel
+# For test suite
+BuildRequires: cppunit-devel
+# DB helper requires
+BuildRequires: perl-podlators libdb-devel
+# c++ source files
+BuildRequires: gcc-c++
+
+%description
+Squid is a high-performance proxy caching server for Web clients,
+supporting FTP, gopher, and HTTP data objects. Unlike traditional
+caching software, Squid handles all requests in a single,
+non-blocking, I/O-driven process. Squid keeps meta data and especially
+hot objects cached in RAM, caches DNS lookups, supports non-blocking
+DNS lookups, and implements negative caching of failed requests.
+
+Squid consists of a main server program squid, a Domain Name System
+lookup program (dnsserver), a program for retrieving FTP data
+(ftpget), and some management and client tools.
+
+%package sysvinit
+Group: System Environment/Daemons
+Summary: SysV initscript for squid caching proxy
+Requires: %{name} = %{epoch}:%{version}-%{release}
+Requires(preun): /sbin/service
+Requires(postun): /sbin/service
+
+%description sysvinit
+The squid-sysvinit package contains SysV initscripts support.
+
+%package migration-script
+Group: System Environment/Daemons
+Summary: Migration script for squid caching proxy
+
+%description migration-script
+The squid-migration-script contains scripts for squid configuration
+migration and script which prepares squid for downgrade operation.
+
+%prep
+%setup -q
+
+# Local patches
+%patch201 -p1 -b .config
+%patch202 -p1 -b .location
+%patch203 -p1 -b .perlpath
+%patch204 -p1 -b .fpic
+%patch205 -p1 -b .ltdl
+%patch206 -p1 -b .active-ftp-1
+%patch207 -p1 -b .active-ftp-2
+%patch208 -p1 -b .ssl-helper
+%patch209 -p1 -b .conf-casecmp
+%patch210 -p0 -b .CVE-2016-10002
+%patch211 -p1 -b .tunnel-sigsegv
+%patch212 -p1 -b .man-typos
+%patch213 -p1 -b .man-see-also
+%patch214 -p1 -b .empty-cname
+%patch215 -p1 -b .cache-peer-tolower
+%patch216 -p1 -b .https-packet-size
+%patch217 -p1 -b .mem-usage-out-of-fd
+%patch218 -p1 -b .cache-siblings-gw
+
+# security fixes
+%patch500 -p1 -b .CVE-2019-13345
+%patch501 -p1 -b .CVE-2018-1000024
+%patch502 -p1 -b .CVE-2018-1000027
+%patch503 -p1 -b .CVE-2019-12525
+%patch504 -p1 -b .CVE-2020-11945
+%patch505 -p1 -b .CVE-2019-12519
+%patch506 -p1 -b .CVE-2020-8449-and-8450
+%patch507 -p1 -b .CVE-2020-15049
+%patch508 -p1 -b .CVE-2019-12528
+%patch509 -p1 -b .CVE-2020-24606
+%patch510 -p1 -b .CVE-2020-15810
+%patch511 -p1 -b .CVE-2020-15811
+%patch512 -p1 -b .CVE-2020-25097
+%patch513 -p1 -b .CVE-2021-46784
+
+# https://bugzilla.redhat.com/show_bug.cgi?id=1471140
+# Patch in the vendor documentation and used different location for documentation
+sed -i 's|@SYSCONFDIR@/squid.conf.documented|%{_docdir}/squid-%{version}/squid.conf.documented|' src/squid.8.in
+
+%build
+%ifarch sparcv9 sparc64 s390 s390x
+   CXXFLAGS="$RPM_OPT_FLAGS -fPIE" \
+   CFLAGS="$RPM_OPT_FLAGS -fPIE" \
+%else
+   CXXFLAGS="$RPM_OPT_FLAGS -fpie" \
+   CFLAGS="$RPM_OPT_FLAGS -fpie" \
+%endif
+LDFLAGS="$RPM_LD_FLAGS -pie -Wl,-z,relro -Wl,-z,now"
+
+%configure \
+   --disable-strict-error-checking \
+   --exec_prefix=/usr \
+   --libexecdir=%{_libdir}/squid \
+   --localstatedir=%{_var} \
+   --datadir=%{_datadir}/squid \
+   --sysconfdir=%{_sysconfdir}/squid \
+   --with-logdir='$(localstatedir)/log/squid' \
+   --with-pidfile='$(localstatedir)/run/squid.pid' \
+   --disable-dependency-tracking \
+   --enable-eui \
+   --enable-follow-x-forwarded-for \
+   --enable-auth \
+   --enable-auth-basic="DB,LDAP,MSNT-multi-domain,NCSA,NIS,PAM,POP3,RADIUS,SASL,SMB,SMB_LM,getpwnam" \
+   --enable-auth-ntlm="smb_lm,fake" \
+   --enable-auth-digest="file,LDAP,eDirectory" \
+   --enable-auth-negotiate="kerberos" \
+   --enable-external-acl-helpers="file_userip,LDAP_group,time_quota,session,unix_group,wbinfo_group,kerberos_ldap_group" \
+   --enable-cache-digests \
+   --enable-cachemgr-hostname=localhost \
+   --enable-delay-pools \
+   --enable-epoll \
+   --enable-ident-lookups \
+   %ifnarch ppc64 ia64 x86_64 s390x aarch64
+   --with-large-files \
+   %endif
+   --enable-linux-netfilter \
+   --enable-removal-policies="heap,lru" \
+   --enable-snmp \
+   --enable-ssl-crtd \
+   --enable-storeio="aufs,diskd,rock,ufs" \
+   --enable-wccpv2 \
+   --enable-esi \
+   --enable-ecap \
+   --with-aio \
+   --with-default-user="squid" \
+   --with-dl \
+   --with-openssl \
+   --with-pthreads \
+   --disable-arch-native
+
+make \
+	DEFAULT_SWAP_DIR='$(localstatedir)/spool/squid' \
+	%{?_smp_mflags}
+
+%check
+make check
+
+%install
+rm -rf $RPM_BUILD_ROOT
+make \
+	DESTDIR=$RPM_BUILD_ROOT \
+	install
+echo "
+#
+# This is %{_sysconfdir}/httpd/conf.d/squid.conf
+#
+
+ScriptAlias /Squid/cgi-bin/cachemgr.cgi %{_libdir}/squid/cachemgr.cgi
+
+# Only allow access from localhost by default
+<Location \"/Squid/cgi-bin/cachemgr.cgi\">
+ Require local
+ # Add additional allowed hosts as needed
+ # Require host example.com
+</Location>
+" > $RPM_BUILD_ROOT/squid.httpd.tmp
+
+
+mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/rc.d/init.d
+mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d
+mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/sysconfig
+mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/pam.d
+mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf.d/
+mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/NetworkManager/dispatcher.d
+mkdir -p $RPM_BUILD_ROOT%{_unitdir}
+mkdir -p $RPM_BUILD_ROOT%{_libexecdir}/squid
+mkdir -p $RPM_BUILD_ROOT%{_prefix}/lib/firewalld/services
+
+install -m 755 %{SOURCE2} $RPM_BUILD_ROOT%{_sysconfdir}/rc.d/init.d/squid
+install -m 644 %{SOURCE3} $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/squid
+install -m 644 %{SOURCE4} $RPM_BUILD_ROOT%{_sysconfdir}/sysconfig/squid
+install -m 644 %{SOURCE5} $RPM_BUILD_ROOT%{_sysconfdir}/pam.d/squid
+install -m 644 %{SOURCE7} $RPM_BUILD_ROOT%{_unitdir}
+install -m 755 %{SOURCE8} $RPM_BUILD_ROOT%{_libexecdir}/squid
+install -m 644 $RPM_BUILD_ROOT/squid.httpd.tmp $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf.d/squid.conf
+install -m 644 %{SOURCE6} $RPM_BUILD_ROOT%{_sysconfdir}/NetworkManager/dispatcher.d/20-squid
+mkdir -p $RPM_BUILD_ROOT%{_var}/log/squid
+mkdir -p $RPM_BUILD_ROOT%{_var}/spool/squid
+mkdir -p $RPM_BUILD_ROOT%{_var}/run/squid
+chmod 644 contrib/url-normalizer.pl contrib/rredir.* contrib/user-agents.pl
+iconv -f ISO88591 -t UTF8 ChangeLog -o ChangeLog.tmp
+mv -f ChangeLog.tmp ChangeLog
+
+# install /usr/lib/tmpfiles.d/squid.conf
+mkdir -p ${RPM_BUILD_ROOT}%{_tmpfilesdir}
+cat > ${RPM_BUILD_ROOT}%{_tmpfilesdir}/squid.conf <<EOF
+d /run/squid 0755 squid squid
+EOF
+
+%pre
+if ! getent group squid >/dev/null 2>&1 ; then
+  /usr/sbin/groupadd -g 23 squid
+fi
+
+if ! getent passwd squid >/dev/null 2>&1 ; then
+  /usr/sbin/useradd -g 23 -u 23 -d /var/spool/squid -r -s /sbin/nologin squid >/dev/null 2>&1 || exit 1 
+fi
+
+for i in /var/log/squid /var/spool/squid ; do
+        if [ -d $i ] ; then
+                for adir in `find $i -maxdepth 0 \! -user squid`; do
+                        chown -R squid:squid $adir
+                done
+        fi
+done
+
+exit 0
+
+%post
+/usr/bin/squid-migrate-conf.py --write-changes --conf %{_sysconfdir}/squid/squid.conf &>/dev/null
+%systemd_post squid.service
+
+%preun
+%systemd_preun squid.service
+
+%postun
+%systemd_postun_with_restart squid.service
+
+%triggerin -- samba-common
+if ! getent group wbpriv >/dev/null 2>&1 ; then
+  /usr/sbin/groupadd -g 88 wbpriv >/dev/null 2>&1 || :
+fi
+/usr/sbin/usermod -a -G wbpriv squid >/dev/null 2>&1 || \
+    chgrp squid /var/cache/samba/winbindd_privileged >/dev/null 2>&1 || :
+
+%changelog
+* Tue Jun 28 2022 Luboš Uhliarik  - 7:3.5.20-17.7
+- Resolves: #2100778 - CVE-2021-46784 squid: DoS when processing gopher server
+  responses
+
+* Wed Mar 31 2021 Lubos Uhliarik  - 7:3.5.20-17.6
+- Resolves: #1944256 - CVE-2020-25097 squid: improper input validation may allow
+  a trusted client to perform HTTP Request Smuggling
+
+* Mon Oct 26 2020 Lubos Uhliarik  - 7:3.5.20-17.5
+- Resolves: #1890581 - Fix for CVE 2019-13345 breaks authentication in
+  cachemgr.cgi
+
+* Fri Aug 28 2020 Lubos Uhliarik  - 7:3.5.20-17.4
+- Resolves: #1872349 - CVE-2020-24606 squid: Improper Input Validation could
+  result in a DoS
+- Resolves: #1872327 - CVE-2020-15810 squid: HTTP Request Smuggling could
+  result in cache poisoning
+- Resolves: #1872342 - CVE-2020-15811 squid: HTTP Request Splitting could
+  result in cache poisoning
+
+* Fri Jul 31 2020 Lubos Uhliarik  - 7:3.5.20-17.2
+- Resolves: #1802516 - CVE-2020-8449 squid: Improper input validation issues
+  in HTTP Request processing
+- Resolves: #1802515 - CVE-2020-8450 squid: Buffer overflow in a Squid acting
+  as reverse-proxy
+- Resolves: #1853129 - CVE-2020-15049 squid: request smuggling and poisoning
+  attack against the HTTP cache
+- Resolves: #1802517 - CVE-2019-12528 squid: Information Disclosure issue in
+  FTP Gateway
+
+* Tue Apr 28 2020 Lubos Uhliarik  - 7:3.5.20-17
+- Resolves: #1828361 - CVE-2020-11945 squid: improper access restriction upon
+  Digest Authentication nonce replay could lead to remote code execution
+- Resolves: #1828362 - CVE-2019-12519 squid: improper check for new member in
+  ESIExpression::Evaluate allows for stack buffer overflow [rhel
+
+* Fri Mar 27 2020 Lubos Uhliarik  - 7:3.5.20-16
+- Resolves: #1738582 - CVE-2019-12525 squid: parsing of header 
+  Proxy-Authentication leads to memory corruption
+
+* Thu Jul 25 2019 Lubos Uhliarik  - 7:3.5.20-15
+- Resolves: #1690551 - Squid cache_peer DNS lookup failed when not all lower
+  case
+- Resolves: #1680022 - squid can't display download/upload packet size for HTTPS
+  sites
+- Resolves: #1717430 - Excessive memory usage when running out of descriptors
+- Resolves: #1676420 - Cache siblings return wrongly cached gateway timeouts
+- Resolves: #1729435 - CVE-2019-13345 squid: XSS via user_name or auth parameter
+  in cachemgr.cgi
+- Resolves: #1582301 - CVE-2018-1000024 CVE-2018-1000027 squid: various flaws
+
+* Thu Dec 06 2018 Luboš Uhliarik  - 7:3.5.20-13
+- Resolves: #1620546 - migration of upstream squid
+
+* Mon Oct 02 2017 Luboš Uhliarik  - 7:3.5.20-12
+- Resolves: #1471140 - Missing detailed configuration file
+
+* Mon Oct 02 2017 Luboš Uhliarik  - 7:3.5.20-11
+- Resolves: #1452200 - Include kerberos_ldap_group helper in squid
+
+* Tue Apr 25 2017 Luboš Uhliarik  - 7:3.5.20-10
+- Resolves: #1445219 - [RFE] Add rock cache directive to squid
+
+* Thu Mar 23 2017 Luboš Uhliarik  - 7:3.5.20-9
+- Resolves: #1290404 - wrong names of components in man page, section SEE ALSO
+
+* Thu Mar 23 2017 Luboš Uhliarik  - 7:3.5.20-8
+- Resolves: #1414853 - typo error(s) in man page(s)
+
+* Mon Mar 20 2017 Luboš Uhliarik  - 7:3.5.20-7
+- Related: #1347096 - squid: ERROR: No running copy
+
+* Mon Mar 20 2017 Luboš Uhliarik  - 7:3.5.20-6
+- Resolves: #1347096 - squid: ERROR: No running copy
+
+* Thu Mar 02 2017 Luboš Uhliarik  - 7:3.5.20-5
+- Resolves: #1404817 - SIGSEV in TunnelStateData::handleConnectResponse()
+  during squid reconfigure and restart
+
+* Fri Jan 13 2017 Luboš Uhliarik  - 7:3.5.20-4
+- Resolves: #1412736 - CVE-2016-10002 squid: Information disclosure in HTTP
+  request processing
+
+* Thu Dec 15 2016 Luboš Uhliarik  - 7:3.5.20-3
+- Resolves: #1404894 - icap support has been disabled on squid 3.5.20-2.el7
+
+* Wed Sep 21 2016 Luboš Uhliarik  - 7:3.5.20-2
+- Resolves: #1378025 - host_verify_strict only accepts lowercase arguments
+
+* Tue Aug 09 2016 Luboš Uhliarik  - 7:3.5.20-1
+- Resolves: #1273942 - Rebase squid to latest mature 3.5 version (3.5.20)
+
+* Mon Aug 08 2016 Luboš Uhliarik  - 7:3.5.10-9
+- Related: #1349775 - Provide migration tools needed due to rebase
+  to squid 3.5 as a separate sub-package
+
+* Mon Aug 01 2016 Luboš Uhliarik  - 7:3.5.10-8
+- Related: #1349775 - Provide migration tools needed due to rebase
+  to squid 3.5 as a separate sub-package
+
+* Mon Aug 01 2016 Luboš Uhliarik  - 7:3.5.10-7
+- Related: #1349775 - Provide migration tools needed due to rebase
+  to squid 3.5 as a separate sub-package
+
+* Wed Jul 27 2016 Luboš Uhliarik  - 7:3.5.10-6
+- Related: #1349775 - Provide migration tools needed due to rebase
+  to squid 3.5 as a separate sub-package
+
+* Tue Jul 26 2016 Luboš Uhliarik  - 7:3.5.10-5
+- Related: #1349775 - Provide migration tools needed due to rebase
+  to squid 3.5 as a separate sub-package
+
+* Tue Jul 19 2016 Luboš Uhliarik  - 7:3.5.10-4
+- Resolves: #1349775 - Provide migration tools needed due to rebase
+  to squid 3.5 as a separate sub-package
+
+* Tue Jun 14 2016 Luboš Uhliarik  - 7:3.5.10-3
+- Resolves: #1330186 - digest doesn't properly work with squid 3.3 on CentOS 7
+
+* Tue Jun 14 2016 Luboš Uhliarik  - 7:3.5.10-2
+- Resolves: #1336387 - Squid send wrong respond for GET-request following
+  Range-GET request
+
+* Wed Jun 08 2016 Luboš Uhliarik  - 7:3.5.10-1
+- Resolves: #1273942 - Rebase squid to latest mature 3.5 version (3.5.10)
+- Resolves: #1322770 - CVE-2016-2569 CVE-2016-2570 CVE-2016-2571 CVE-2016-2572
+  CVE-2016-3948 squid: various flaws
+- Resolves: #1254016 - IPv4 fallback is not working when connecting
+  to a dualstack host with non-functional IPv6
+- Resolves: #1254018 - should BuildRequire: g++
+- Resolves: #1262456 - Squid delays on FQDNs that don't contains AAAA record
+- Resolves: #1336940 - Disable squid systemd unit start/stop timeouts
+- Resolves: #1344197 - /usr/lib/firewalld/services/squid.xml conflicts between
+  attempted installs of squid-7:3.3.8-31.el7.x86_64 and
+  firewalld-0.4.2-1.el7.noarch
+- Resolves: #1299972 - squid file descriptor limit hardcoded to 16384 via 
+  compile option in spec file
+
+* Wed Jun 08 2016 Luboš Uhliarik  - 7:3.3.8-31
+- Resolves: #1283078 - max_filedescriptors in squid.conf is ignored
+
+* Mon May 09 2016 Luboš Uhliarik  - 7:3.3.8-30
+- Related: #1334509 - CVE-2016-4553 squid: Cache poisoning issue in
+  HTTP Request handling
+- Related: #1334492 - CVE-2016-4554 CVE-2016-4555 CVE-2016-4556 
+  squid: various flaws
+
+* Tue May 03 2016 Luboš Uhliarik  - 7:3.3.8-29
+- Related: #1330577 - CVE-2016-4052 CVE-2016-4053 CVE-2016-4054 squid: multiple
+  issues in ESI processing
+
+* Thu Apr 28 2016 Luboš Uhliarik  - 7:3.3.8-28
+- Related: #1330577 - CVE-2016-4052 CVE-2016-4053 CVE-2016-4054 squid: multiple
+  issues in ESI processing
+
+* Thu Apr 28 2016 Luboš Uhliarik  - 7:3.3.8-27
+- Resolves: #1330577 - CVE-2016-4051 squid: buffer overflow in cachemgr.cgi
+
+* Wed Oct 14 2015 Luboš Uhliarik  - 7:3.3.8-26
+- Related: #1186768 - removing patch, because of missing tests and 
+  incorrect patch
+
+* Tue Oct 13 2015 Luboš Uhliarik  - 7:3.3.8-25
+- Related: #1102842 - squid rpm package misses /var/run/squid needed for
+  smp mode. Squid needs write access to /var/run/squid.
+
+* Fri Oct 09 2015 Luboš Uhliarik  - 7:3.3.8-24
+- Related: #1102842 - squid rpm package misses /var/run/squid needed for
+  smp mode. Creation of /var/run/squid was also needed to be in SPEC file.
+
+* Tue Oct 06 2015 Luboš Uhliarik  - 7:3.3.8-23
+- Related: #1102842 - squid rpm package misses /var/run/squid needed for
+  smp mode. Creation of this directory was moved to tmpfiles.d conf file.
+
+* Fri Oct 02 2015 Luboš Uhliarik  - 7:3.3.8-22
+- Related: #1102842 - squid rpm package misses /var/run/squid needed for
+  smp mode. Creation of this directory was moved to service file.
+
+* Tue Sep 22 2015 Luboš Uhliarik  - 7:3.3.8-21
+- Resolves: #1263338 - squid with digest auth on big endian systems 
+  start looping
+
+* Mon Aug 10 2015 Luboš Uhliarik  - 7:3.3.8-20
+- Resolves: #1186768 - security issue: Nonce replay vulnerability 
+  in Digest authentication
+
+* Tue Jul 14 2015 Luboš Uhliarik  - 7:3.3.8-19
+- Resolves: #1225640 - squid crashes by segfault when it reboots
+
+* Thu Jun 25 2015 Luboš Uhliarik  - 7:3.3.8-18
+- Resolves: #1102842 - squid rpm package misses /var/run/squid needed for 
+  smp mode
+
+* Wed Jun 24 2015 Luboš Uhliarik  - 7:3.3.8-17
+- Resolves: #1233265 - CVE-2015-3455 squid: incorrect X509 server
+  certificate validation
+
+* Fri Jun 19 2015 Luboš Uhliarik  - 7:3.3.8-16
+- Resolves: #1080042 - Supply a firewalld service file with squid
+
+* Wed Jun 17 2015 Luboš Uhliarik  - 7:3.3.8-15
+- Resolves: #1161600 - Squid does not serve cached responses 
+  with Vary headers
+
+* Wed Jun 17 2015 Luboš Uhliarik  - 7:3.3.8-14
+- Resolves: #1198778 - Filedescriptor leaks on snmp
+
+* Wed Jun 17 2015 Luboš Uhliarik  - 7:3.3.8-13
+- Resolves: #1204375 - squid sends incorrect ssl chain breaking newer gnutls 
+  using applications
+
+* Fri Aug 29 2014 Michal Luscon  - 7:3.3.8-12
+- Resolves: #1134934 - CVE-2014-3609 assertion failure in header processing
+
+* Mon Mar 17 2014 Pavel Šimerda  - 7:3.3.8-11
+- Resolves: #1074873 - CVE-2014-0128 squid: denial of service when using
+  SSL-Bump
+
+* Wed Mar 05 2014 Pavel Šimerda  - 7:3.3.8-10
+- Resolves: #1072973 - don't depend on libdb4
+
+* Tue Feb 11 2014 Pavel Šimerda  - 7:3.3.8-9
+- revert: Resolves: #1038160 - avoid running squid's own supervisor process
+
+* Tue Feb 11 2014 Pavel Šimerda  - 7:3.3.8-8
+- Resolves: #1063248 - missing helpers
+
+* Fri Jan 24 2014 Daniel Mach  - 7:3.3.8-7
+- Mass rebuild 2014-01-24
+
+* Thu Jan 02 2014 Pavel Šimerda  - 7:3.3.8-6
+- Resolves: #980511 - squid doesn't work with active FTP
+
+* Fri Dec 27 2013 Daniel Mach  - 7:3.3.8-5
+- Mass rebuild 2013-12-27
+
+* Tue Dec 10 2013 Pavel Šimerda  - 7:3.3.8-4
+- Resolves: #1038160 - avoid running squid's own supervisor process
+
+* Thu Nov 21 2013 Pavel Šimerda  - 7:3.3.8-3
+- Resolves: #1028588 - fix build on aarch64
+
+* Tue Aug 27 2013 Michal Luscon  - 7:3.3.8-2
+- Fixed: source code url
+
+* Thu Jul 25 2013 Michal Luscon  - 7:3.3.8-1
+- Update to latest upstream version 3.3.8
+- Fixed: active ftp crashing
+- Fix basic auth and log daemon DB helper builds.
+- Use xz compressed tarball, fix source URLs.
+- Fix bogus dates in %%changelog.
+
+* Fri May 3 2013 Michal Luscon  - 7:3.2.11-1
+- Update to latest upstream version 3.2.11
+
+* Tue Apr 23 2013 Michal Luscon  - 7:3.2.9-3
+- Option '-k' is not stated in squidclient man
+- Remove pid from service file(#913262)
+
+* Fri Apr 19 2013 Michal Luscon  - 7:3.2.9-2
+- Enable full RELRO (-Wl,-z,relro -Wl,-z,now)
+
+* Tue Mar 19 2013 Michal Luscon  - 7:3.2.9-1
+- Update to latest upstream version 3.2.9
+- Fixed: CVE-2013-1839
+- Removed: makefile-patch (+make check)
+
+* Mon Mar 11 2013 Michal Luscon  - 7:3.2.8-3
+- Resolved: /usr move - squid service file
+
+* Sat Mar 09 2013 Michal Luscon  - 7:3.2.8-2
+- Resolved: #896127 - basic_ncsa_auth does not work
+
+* Fri Mar 08 2013 Michal Luscon  - 7:3.2.8-1
+- Update to latest upstream version 3.2.8
+- Fixed rawhide build issues (-make check)
+
+* Thu Feb 07 2013 Michal Luscon  - 7:3.2.7-1
+- Update to latest upstream version 3.2.7
+
+* Thu Jan 24 2013 Michal Luscon  - 7:3.2.5-2
+- CVE-2013-0189: Incomplete fix for the CVE-2012-5643
+
+* Mon Dec 17 2012 Michal Luscon  - 7:3.2.5-1
+- Update to latest upstream version 3.2.5
+
+* Mon Nov 05 2012 Michal Luscon  - 7:3.2.3-3
+- Resolved: #71483 - httpd 2.4 requires new configuration directives
+
+* Fri Oct 26 2012 Michal Luscon  - 7:3.2.3-2
+- Resolved: #854356 - squid.service use PIDFile
+- Resolved: #859393 - Improve cache_swap script
+- Resolved: #791129 - disk space warning
+- Resolved: #862252 - reload on VPN or network up/down
+- Resolved: #867531 - run test suite during build
+- Resolved: #832684 - missing after dependency nss-lookup.target
+- Removed obsolete configure options
+
+* Mon Oct 22 2012 Tomas Hozza  - 7:3.2.3-1
+- Update to latest upstream version 3.2.3
+
+* Tue Oct 16 2012 Tomas Hozza  - 7:3.2.2-1
+- Update to latest upstream version 3.2.2
+
+* Fri Oct 05 2012 Tomas Hozza  - 7:3.2.1-2
+- Introduced new systemd-rpm macros in squid spec file. (#850326)
+
+* Wed Aug 29 2012 Michal Luscon  - 7:3.2.1-1
+- Update to latest upstream 3.2.1
+
+* Sat Jul 21 2012 Fedora Release Engineering  - 7:3.2.0.16-3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild
+
+* Mon Apr 02 2012 Henrik Nordstrom  - 7:3.2.0.16-2
+- Enable SSL CRTD for ssl bump
+
+* Wed Mar 07 2012 Henrik Nordstrom  - 7:3.2.0.16-1
+- Upstream 3.2.0.16 bugfix release
+
+* Tue Feb 28 2012 Fedora Release Engineering  - 7:3.2.0.15-2
+- Rebuilt for c++ ABI breakage
+
+* Mon Feb 06 2012 Henrik Nordstrom  - 7:3.2.0.15-1
+- Upstream 3.2.0.15 bugfix release
+
+* Wed Feb 01 2012 Henrik Nordstrom  - 7:3.2.0.14-7
+- update with upstreamed patch versions
+
+* Tue Jan 17 2012 Henrik Nordstrom  - 7:3.2.0.14-6
+- upstream gcc-4.7 patch
+- fix for bug #772483 running out of memory, mem_node growing out of bounds
+
+* Mon Jan 16 2012 Jiri Skala  - 7:3.2.0.14-5
+- fixes FTBFS due to gcc-4.7
+
+* Fri Jan 13 2012 Jiri Skala  - 7:3.2.0.14-4
+- fixes #772481 - Low number of open files for squid process
+- fixes FTBFS due to gcc4.7
+
+* Thu Jan 05 2012 Henrik Nordstrom  - 3.2.0.14-3
+- rebuild for gcc-4.7.0
+
+* Mon Dec 19 2011 Jiri Skala  - 7:3.2.0.14-2
+- fixes #768586 - Please enable eCAP support again
+
+* Wed Dec 14 2011 Jiri Skala  - 7:3.2.0.14-1
+- update to latest upstream 3.2.0.14
+
+* Mon Nov 07 2011 Jiri Skala  - 7:3.2.0.13-5
+- fixes #751679 - host_strict_verify setting inverted in squid.conf
+
+* Thu Nov 03 2011 Jiri Skala  - 7:3.2.0.13-4
+- fixes #750550 - Squid might depend on named
+
+* Wed Oct 26 2011 Jiri Skala  - 7:3.2.0.13-3
+- added upstream fix for #747125
+
+* Wed Oct 26 2011 Jiri Skala  - 7:3.2.0.13-2
+- fixes #747103 - squid does not start if /var/spool/squid is empty
+- fixes #747110 - squid does not start adding "memory_pools off"
+
+* Mon Oct 17 2011 Jiri Skala  - 7:3.2.0.13-1
+- update to latest upstream 3.2.0.13
+
+* Tue Sep 20 2011 Jiri Skala  - 7:3.2.0.12-1
+- update to latest upstream 3.2.0.12
+
+* Mon Aug 29 2011 Henrik Nordstrom  - 7:3.2.0.11-3
+- update to latest upstream 3.2.0.11
+
+* Sat Aug 27 2011 Henrik Nordstrom  - 7:3.2.0.10-3
+- Fix for SQUID-2011:3 Gopher vulnerability
+
+* Thu Aug 18 2011 Jiri Skala  - 7:3.2.0.10-2
+- rebuild for rpm
+
+* Mon Aug 01 2011 Jiri Skala  - 7:3.2.0.10-1
+- update to latest upstream 3.2.0.10
+
+* Mon Aug 01 2011 Jiri Skala  - 7:3.2.0.9-2
+- rebuild for libcap
+
+* Tue Jun 07 2011 Jiri Skala  - 7:3.2.0.9-1
+- upgrade to squid-3.2
+- fixes #720445 - Provide native systemd unit file
+- SysV initscript moved to subpackage
+- temporarily disabled eCAP
+
+* Wed May 18 2011 Jiri Skala  - 7:3.1.12-3
+- enabled eCAP support
+
+* Wed May 04 2011 Jiri Skala  - 7:3.1.12-2
+- applied corrections of unused patch (Ismail Dönmez)
+
+* Fri Apr 15 2011 Jiri Skala  - 7:3.1.12-1
+- Update to 3.1.12 upstream release
+
+* Thu Feb 10 2011 Jiri Skala  - 7:3.1.11-1
+- Update to 3.1.11 upstream release
+- fixes issue with unused variable after mass rebuild (gcc-4.6)
+
+* Wed Feb 09 2011 Fedora Release Engineering  - 7:3.1.10-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild
+
+* Thu Jan 06 2011 Jiri Skala  - 7:3.1.10-1
+- Update to 3.1.10 upstream release
+
+* Fri Nov 05 2010 Jiri Skala  - 7:3.1.9-5
+- rebuild for libxml2
+
+* Mon Nov 01 2010 Jiri Skala  - 7:3.1.9-4
+- fixes #647967 - build with -fPIE option back and dropped proper libltdl usage
+
+* Sat Oct 30 2010 Henrik Nordstrom  - 7:3.1.9-3
+- Bug #647967 - License clarification & spec-file cleanup
+
+* Mon Oct 25 2010 Henrik Nordstrom  7:3.1.9-2
+- Upstream 3.1.9 bugfix release
+
+* Wed Oct 13 2010 Jiri Skala  - 7:3.1.8-2
+- fixes #584161 - squid userid not added to wbpriv group
+
+* Sun Sep 05 2010 Henrik Nordstrom  - 7:3.1.8-1
+- Bug #630445: SQUID-2010:3 Denial of service issue
+
+* Tue Aug 24 2010 Henrik Nordstrom  - 7:3.1.7-1
+- Upstream 3.1.7 bugfix release
+
+* Fri Aug 20 2010 Henrik Nordstrom  - 7:3.1.6-1
+- Upstream 3.1.6 bugfix release
+- Build with system libtool-ltdl
+
+* Thu Jul 15 2010 Henrik Nordstrom  - 7:3.1.5-2
+- Upstream 3.1.5 bugfix release
+- Upstream patch for Bug #614665: Squid crashes with  ident auth
+- Upstream patches for various memory leaks
+
+* Mon May 31 2010 Henrik Nordstrom  - 7:3.1.4-2
+- Correct case-insensitiveness in HTTP list header parsing
+
+* Sun May 30 2010 Henrik Nordstrom  - 7:3.1.4-1
+- Upstream 3.1.4 bugfix release, issues relating to IPv6, TPROXY, Memory
+  management, follow_x_forwarded_for, and stability fixes
+
+* Fri May 14 2010 Henrik Nordstrom  - 7:3.1.3-2
+- Fully fix #548903 - "comm_open: socket failure: (97) Address family not supported by protocol" if IPv6 disabled
+- Various IPv6 related issues fixed, making tcp_outgoing_address behave
+  as expected and no commResetFD warnings when using tproxy setups.
+
+* Sun May 02 2010 Henrik Nordstrom  - 7:3.1.3-1
+- Update to 3.1.3 Upstream bugfix release, fixing WCCPv1
+
+* Mon Apr 19 2010 Henrik Nordstrom  - 7:3.1.1-4
+- Bug #583489: Adjust logrotate script to changes in logrotate package.
+
+* Mon Apr 19 2010 Jiri Skala 
+- fixes #548903 - "comm_open: socket failure: (97) Address family not supported by protocol" if IPv6 disabled
+
+* Tue Mar 30 2010 Henrik Nordstrom  - 7:3.1.1-2
+- Update to 3.1.1 Squid bug #2827 crash with assertion failed:
+  FilledChecklist.cc:90: "conn() != NULL" under high load.
+
+* Mon Mar 15 2010 Henrik Nordstrom  - 7:3.1.0.18-1
+- Upgrade to 3.1.0.18 fixing Digest authentication and improved HTTP/1.1 support
+
+* Sun Feb 28 2010 Henrik Nordstrom  -  7:3.1.0.17-3
+- Bug 569120, fails to open unbound ipv4 listening sockets
+
+* Thu Feb 25 2010 Henrik Nordstrom  - 7:3.1.0.17-2
+- Upgrade to 3.1.0.17
+
+* Thu Feb 18 2010 Henrik Nordstrom  - 7:3.1.0.16-7
+- Workaround for Fedora-13 build failure
+
+* Sun Feb 14 2010 Henrik Nordstrom  - 7:3.1.0.16-6
+- Patch for Squid security advisory SQUID-2010:2, denial of service
+  issue in HTCP processing (CVE-2010-0639)
+
+* Sun Feb 07 2010 Henrik Nordstrom  - 7:3.1.0.16-5
+- Rebuild 3.1.0.16 with corrected upstream release.
+
+* Wed Feb 03 2010 Jiri Skala  - 7:3.1.0.16-4
+- spec file modified to be fedora packaging guidline compliant
+- little shifting lines in init script header due to rpmlint complaint
+- fixes assertion during start up
+
+* Mon Feb 01 2010 Henrik Nordstrom  7:3.1.0.16-3
+- Upgrade to 3.1.0.16 for DNS related DoS fix (Squid-2010:1)
+
+* Sat Jan 09 2010 Henrik Nordstrom  - 7:3.1.0.15-3
+- fixed #551302 PROXY needs libcap. Also increases security a little.
+- merged relevant upstream bugfixes waiting for next 3.1 release
+
+* Mon Nov 23 2009 Henrik Nordstrom  - 7:3.1.0.15-2
+- Update to 3.1.0.15 with a number of bugfixes and a workaround for
+  ICEcast/SHOUTcast streams.
+
+* Mon Nov 23 2009 Jiri Skala  7:3.1.0.14-2
+- fixed #532930 Syntactic error in /etc/init.d/squid
+- fixed #528453 cannot initialize cache_dir with user specified config file
+
+* Sun Sep 27 2009 Henrik Nordstrom  - 7:3.1.0.14-1
+- Update to 3.1.0.14
+
+* Sat Sep 26 2009 Henrik Nordstrom  - 7:3.1.0.13-7
+- Include upstream patches fixing important operational issues
+- Enable ESI support now that it does not conflict with normal operation
+
+* Fri Sep 18 2009 Henrik Nordstrom  - 7:3.1.0.13-6
+- Rotate store.log if enabled
+
+* Wed Sep 16 2009 Tomas Mraz  - 7:3.1.0.13-5
+- Use password-auth common PAM configuration instead of system-auth
+
+* Tue Sep 15 2009 Jiri Skala  - 7:3.1.0.13-4
+- fixed #521596 - wrong return code of init script
+
+* Tue Sep 08 2009 Henrik Nordstrom  - 7:3.1.0.13-3
+- Enable squid_kerb_auth
+
+* Mon Sep 07 2009 Henrik Nordstrom  - 7:3.1.0.13-2
+- Cleaned up packaging to ease future maintenance
+
+* Fri Sep 04 2009 Henrik Nordstrom  - 7:3.1.0.13-1
+- Upgrade to next upstream release 3.1.0.13 with many new features
+  * IPv6 support
+  * NTLM-passthru
+  * Kerberos/Negotiate authentication scheme support
+  * Localized error pages based on browser language preferences
+  * Follow X-Forwarded-For capability
+  * and more..
+
+* Mon Aug 31 2009 Henrik Nordstrom  - 3.0.STABLE18-3
+- Bug #520445 silence logrotate when Squid is not running
+
+* Fri Aug 21 2009 Tomas Mraz  - 7:3.0.STABLE18-2
+- rebuilt with new openssl
+
+* Tue Aug 04 2009 Henrik Nordstrom  - 7:3.0.STABLE18-1
+- Update to 3.0.STABLE18
+
+* Sat Aug 01 2009 Henrik Nordstrom  - 7:3.0.STABLE17-3
+- Squid Bug #2728: regression: assertion failed: http.cc:705: "!eof"
+
+* Mon Jul 27 2009 Henrik Nordstrom  - 7:3.0.STABLE17-2
+- Bug #514014, update to 3.0.STABLE17 fixing the denial of service issues
+  mentioned in Squid security advisory SQUID-2009_2.
+
+* Sun Jul 26 2009 Fedora Release Engineering  - 7:3.0.STABLE16-3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_12_Mass_Rebuild
+
+* Wed Jul 01 2009 Jiri Skala  7:3.0.STABLE16-2
+- fixed patch parameter of bXXX patches
+
+* Mon Jun 29 2009 Henrik Nordstrom  - 7:3.0.STABLE16-1
+- Upgrade to 3.0.STABLE16
+
+* Sat May 23 2009 Henrik Nordstrom  - 7:3.0.STABLE15-2
+- Bug #453304 - Squid requires restart after Network Manager connection setup
+
+* Sat May 09 2009 Henrik Nordstrom  - 7:3.0.STABLE15-1
+- Upgrade to 3.0.STABLE15
+
+* Tue Apr 28 2009 Jiri Skala  - 7:3.0.STABLE14-3
+- fixed ambiguous condition in the init script (exit 4)
+
+* Mon Apr 20 2009 Henrik Nordstrom  - 7:3.0.STABLE14-2
+- Squid bug #2635: assertion failed: HttpHeader.cc:1196: "Headers[id].type == ftInt64"
+
+* Sun Apr 19 2009 Henrik Nordstrom  - 7:3.0.STABLE14-1
+- Upgrade to 3.0.STABLE14
+
+* Fri Mar 06 2009 Henrik Nordstrom  - 7:3.0.STABLE13-2
+- backported logfile.cc syslog parameters patch from 3.1 (b9443.patch)
+- GCC-4.4 workaround in src/wccp2.cc
+
+* Wed Feb 25 2009 Fedora Release Engineering  - 7:3.0.STABLE13-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_11_Mass_Rebuild
+
+* Thu Feb 5 2009 Jonathan Steffan  - 7:3.0.STABLE13-1
+- upgrade to latest upstream
+
+* Tue Jan 27 2009 Henrik Nordstrom  - 7:3.0.STABLE12-1
+- upgrade to latest upstream
+
+* Sun Jan 18 2009 Tomas Mraz  - 7:3.0.STABLE10-4
+- rebuild with new openssl
+
+* Fri Dec 19 2008 Henrik Nordstrom  - 7:3.0.STABLE10-3
+- actually include the upstream bugfixes in the build
+
+* Fri Dec 19 2008 Henrik Nordstrom  - 7:3.0.STABLE10-2
+- upstream bugfixes for cache corruption and access.log response size errors
+
+* Fri Oct 24 2008 Henrik Nordstrom  - 7:3.0.STABLE10-1
+- upgrade to latest upstream
+
+* Sun Oct 19 2008 Henrik Nordstrom  - 7:3.0.STABLE9-2
+- disable coss support, not officially supported in 3.0
+
+* Sun Oct 19 2008 Henrik Nordstrom  - 7:3.0.STABLE9-1
+- update to latest upstream
+
+* Thu Oct 09 2008 Henrik Nordstrom  - 7:3.0.STABLE7-4
+- change logrotate to move instead of copytruncate
+
+* Wed Oct 08 2008 Jiri Skala  - 7:3.0.STABLE7-3
+- fix #465052 -  FTBFS squid-3.0.STABLE7-1.fc10
+
+* Thu Aug 14 2008 Jiri Skala  - 7:3.0.STABLE7-2
+- used ncsa_auth.8 from man-pages. there will be this file removed due to conflict
+- fix #458593 noisy initscript
+- fix #463129 init script tests wrong conf file
+- fix #450352 - build.patch patches only generated files
+
+* Wed Jul 02 2008 Jiri Skala  - 7:3.0.STABLE7-1
+- update to latest upstream
+- fix #453214
+
+* Mon May 26 2008 Martin Nagy  - 7:3.0.STABLE6-2
+- fix bad allocation
+
+* Wed May 21 2008 Martin Nagy  - 7:3.0.STABLE6-1
+- upgrade to latest upstream
+- fix bad allocation
+
+* Fri May 09 2008 Martin Nagy  - 7:3.0.STABLE5-2
+- fix configure detection of netfilter kernel headers (#435499),
+  patch by aoliva@redhat.com
+- add support for negotiate authentication (#445337)
+
+* Fri May 02 2008 Martin Nagy  - 7:3.0.STABLE5-1
+- upgrade to latest upstream
+
+* Tue Apr 08 2008 Martin Nagy  - 7:3.0.STABLE4-1
+- upgrade to latest upstream
+
+* Thu Apr 03 2008 Martin Nagy  - 7:3.0.STABLE2-2
+- add %%{optflags} to make
+- remove warnings about unused return values
+
+* Thu Mar 13 2008 Martin Nagy  - 7:3.0.STABLE2-1
+- upgrade to latest upstream 3.0.STABLE2
+- check config file before starting (#428998)
+- whitespace unification of init script
+- some minor path changes in the QUICKSTART file
+- configure with the --with-filedescriptors=16384 option
+
+* Tue Feb 26 2008 Martin Nagy  - 7:3.0.STABLE1-3
+- change the cache_effective_group default back to none
+
+* Mon Feb 11 2008 Martin Nagy  - 7:3.0.STABLE1-2
+- rebuild for 4.3
+
+* Wed Jan 23 2008 Martin Nagy  - 7:3.0.STABLE1-1
+- upgrade to latest upstream 3.0.STABLE1
+
+* Tue Dec 04 2007 Martin Bacovsky  - 2.6.STABLE17-1
+- upgrade to latest upstream 2.6.STABLE17
+
+* Wed Oct 31 2007 Martin Bacovsky  - 7:2.6.STABLE16-3
+- arp-acl was enabled
+
+* Tue Sep 25 2007 Martin Bacovsky  - 7:2.6.STABLE16-2
+- our fd_config patch was replaced by upstream's version 
+- Source1 (FAQ.sgml) points to local source (upstream's moved to wiki)
+
+* Fri Sep 14 2007 Martin Bacovsky  - 7:2.6.STABLE16-1
+- upgrade to latest upstream 2.6.STABLE16
+
+* Wed Aug 29 2007 Fedora Release Engineering  - 7:2.6.STABLE14-2
+- Rebuild for selinux ppc32 issue.
+
+* Thu Jul 19 2007 Martin Bacovsky  - 7:2.6.STABLE14-1
+- update to latest upstream 2.6.STABLE14
+- resolves: #247064: Initscript Review
+
+* Tue Mar 27 2007 Martin Bacovsky  - 7:2.6.STABLE12-1
+- update to latest upstream 2.6.STABLE12
+- Resolves: #233913: squid: unowned directory
+
+* Mon Feb 19 2007 Martin Bacovsky  - 7:2.6.STABLE9-2
+- Resolves: #226431: Merge Review: squid
+
+* Mon Jan 29 2007 Martin Bacovsky  - 7:2.6.STABLE9-1
+- update to the latest upstream
+
+* Sun Jan 14 2007 Martin Stransky  - 7:2.6.STABLE7-1
+- update to the latest upstream
+
+* Tue Dec 12 2006 Martin Stransky  - 7:2.6.STABLE6-1
+- update to the latest upstream
+
+* Mon Nov  6 2006 Martin Stransky  - 7:2.6.STABLE5-1
+- update to the latest upstream
+
+* Thu Oct 26 2006 Martin Stransky  - 7:2.6.STABLE4-4
+- added fix for #205568 - marked cachemgr.conf as world readable
+
+* Wed Oct 25 2006 Martin Stransky  - 7:2.6.STABLE4-3
+- added fix for #183869 - squid can abort when getting status
+- added upstream fixes:
+    * Bug #1796: Assertion error HttpHeader.c:914: "str"
+    * Bug #1779: Delay pools fairness, correction to first patch
+    * Bug #1802: Crash on exit in certain conditions where cache.log is not writeable
+    * Bug #1779: Delay pools fairness when multiple connections compete for bandwidth
+    * Clarify the select/poll/kqueue/epoll configure --enable/disable options
+- reworked fd patch for STABLE4
+
+* Tue Oct 17 2006 Martin Stransky  - 7:2.6.STABLE4-2
+- upstream fixes:
+  * Accept 00:00-24:00 as a valid time specification (upstream BZ #1794)
+  * aioDone() could be called twice
+  * Squid reconfiguration (upstream BZ #1800)
+
+* Mon Oct 2 2006 Martin Stransky  - 7:2.6.STABLE4-1
+- new upstream
+- fixes from upstream bugzilla, items #1782,#1780,#1785,#1719,#1784,#1776
+
+* Tue Sep 5 2006 Martin Stransky  - 7:2.6.STABLE3-2
+- added upstream patches for ACL
+
+* Mon Aug 21 2006 Martin Stransky  - 7:2.6.STABLE3-1
+- the latest stable upstream
+
+* Thu Aug 10 2006 Karsten Hopp  7:2.6.STABLE2-3
+- added some requirements for pre/post install scripts
+
+* Fri Aug 04 2006 Martin Stransky  - 7:2.6.STABLE2-2
+- added patch for #198253 - squid: don't chgrp another pkg's
+  files/directory
+
+* Mon Jul 31 2006 Martin Stransky  - 7:2.6.STABLE2-1
+- the latest stable upstream
+- reworked fd config patch
+
+* Tue Jul 25 2006 Martin Stransky  - 7:2.6.STABLE1-3
+- the latest CVS upstream snapshot
+
+* Wed Jul 19 2006 Martin Stransky  - 7:2.6.STABLE1-2
+- the latest CVS snapshot
+
+* Tue Jul 18 2006 Martin Stransky  - 7:2.6.STABLE1-1
+- new upstream + the latest CVS snapshot from 2006/07/18
+- updated fd config patch
+- enabled epoll
+- fixed release format (#197405)
+- enabled WCCPv2 support (#198642)
+
+* Wed Jul 12 2006 Jesse Keating  - 7:2.5.STABLE14-2.1
+- rebuild
+
+* Thu Jun 8 2006 Martin Stransky  - 7:2.5.STABLE14-2
+- fix for squid BZ#1511 - assertion failed: HttpReply.c:105: "rep"
+
+* Tue May 30 2006 Martin Stransky  - 7:2.5.STABLE14-1
+- update to new upstream
+
+* Sun May 28 2006 Martin Stransky  - 7:2.5.STABLE13-5
+- fixed libbind patch (#193298)
+
+* Wed May 3  2006 Martin Stransky  - 7:2.5.STABLE13-4
+- added extra group check (#190544)
+
+* Wed Mar 29 2006 Martin Stransky  - 7:2.5.STABLE13-3
+- improved pre script (#187217) - added group switch
+
+* Thu Mar 23 2006 Martin Stransky  - 7:2.5.STABLE13-2
+- removed "--with-large-files" on 64bit arches
+
+* Mon Mar 13 2006 Martin Stransky  - 7:2.5.STABLE13-1
+- update to new upstream
+
+* Fri Feb 10 2006 Jesse Keating  - 7:2.5.STABLE12-5.1
+- bump again for double-long bug on ppc(64)
+
+* Tue Feb 07 2006 Martin Stransky  - 7:2.5.STABLE12-5
+- new upstream patches
+
+* Tue Feb 07 2006 Jesse Keating  - 7:2.5.STABLE12-4.1
+- rebuilt for new gcc4.1 snapshot and glibc changes
+
+* Wed Dec 28 2005  Martin Stransky  7:2.5.STABLE12-4
+- added follow-xff patch (#176055)
+- samba path fix (#176659)
+
+* Mon Dec 19 2005  Martin Stransky  7:2.5.STABLE12-3
+- fd-config.patch clean-up
+- SMB_BadFetch patch from upstream
+
+* Fri Dec 09 2005 Jesse Keating 
+- rebuilt
+
+* Mon Nov 28 2005  Martin Stransky  7:2.5.STABLE12-2
+- rewritten patch squid-2.5.STABLE10-64bit.patch, it works with
+  "--with-large-files" option now
+- fix for #72896 - squid does not support > 1024 file descriptors,
+  new "--enable-fd-config" option for it.
+
+* Wed Nov 9 2005  Martin Stransky  7:2.5.STABLE12-1
+- update to STABLE12
+- setenv patch
+
+* Mon Oct 24 2005 Martin Stransky  7:2.5.STABLE11-6
+- fix for delay pool from upstream
+
+* Thu Oct 20 2005 Martin Stransky  7:2.5.STABLE11-5
+- fix for #171213 - CVE-2005-3258 Squid crash due to malformed FTP response
+- more fixes from upstream
+
+* Fri Oct 14 2005 Martin Stransky  7:2.5.STABLE11-4
+- enabled support for large files (#167503)
+
+* Thu Oct 13 2005 Tomas Mraz  7:2.5.STABLE11-3
+- use include instead of pam_stack in pam config
+
+* Thu Sep 29 2005 Martin Stransky  7:2.5.STABLE11-2
+- added patch for delay pools and some minor fixes
+
+* Fri Sep 23 2005 Martin Stransky  7:2.5.STABLE11-1
+- update to STABLE11
+
+* Mon Sep 5 2005 Martin Stransky  7:2.5.STABLE10-4
+- Three upstream patches for #167414
+- Spanish and Greek messages
+- patch for -D_FORTIFY_SOURCE=2 
+
+* Tue Aug 30 2005 Martin Stransky  7:2.5.STABLE10-3
+- removed "--enable-truncate" option (#165948)
+- added "--enable-cache-digests" option (#102134)
+- added "--enable-ident-lookups" option (#161640)
+- some clean up (#165949)
+
+* Fri Jul 15 2005 Martin Stransky  7:2.5.STABLE10-2
+- pam_auth and ncsa_auth have setuid (#162660)
+
+* Thu Jul 7 2005 Martin Stransky  7:2.5.STABLE10-1
+- new upstream version
+- enabled fakeauth utility (#154020)
+- enabled digest authentication scheme (#155882)
+- all error pages marked as config (#127836)
+- patch for 64bit statvfs interface (#153274)
+- added httpd config file for cachemgr.cgi (#112725)
+
+* Mon May 16 2005 Jay Fenlason  7:2.5.STABLE9-7
+- Upgrade the upstream -dns_query patch from -4 to -5
+
+* Wed May 11 2005 Jay Fenlason  7:2.5.STABLE9-6
+- More upstream patches, including a fix for
+  bz#157456 CAN-2005-1519 DNS lookups unreliable on untrusted networks
+
+* Tue Apr 26 2005 Jay Fenlason  7:2.5.STABLE9-5
+- more upstream patches, including a fix for
+  CVE-1999-0710 cachemgr malicious use
+
+* Fri Apr 22 2005 Jay Fenlason  7:2.5.STABLE9-4
+- More upstream patches, including the fixed 2GB patch.
+- include the -libbind patch, which prevents squid from using the optional
+  -lbind library, even if it's installed.
+
+* Tue Mar 15 2005 Jay Fenlason  7:2.5.STABLE9-2
+- New upstream version, with 14 upstream patches.
+
+* Wed Feb 16 2005 Jay Fenlason  7:2.5.STABLE8-2
+- new upstream version with 4 upstream patches.
+- Reorganize spec file to apply upstream patches first
+
+* Tue Feb 1 2005 Jay Fenlason  7:2.5.STABLE7-4
+- Include two more upstream patches for security vulns:
+  bz#146783 Correct handling of oversized reply headers
+  bz#146778 CAN-2005-0211 Buffer overflow in WCCP recvfrom() call
+
+* Tue Jan 25 2005 Jay Fenlason  7:2.5.STABLE7-3
+- Include more upstream patches, including two for security holes.
+
+* Tue Jan 18 2005 Jay Fenlason  7:2.5.STABLE7-2
+- Add a triggerin on samba-common to make /var/cache/samba/winbindd_privileged
+  accessible so that ntlm_auth will work.  It needs to be in this rpm,
+  because the Samba RPM can't assume the squid user exists.
+  Note that this will only work if the Samba RPM is recent enough to create
+  that directory at install time instead of at winbindd startup time.
+  That should be samba-common-3.0.0-15 or later.
+  This fixes bugzilla #103726
+- Clean up extra whitespace in this spec file.
+- Add additional upstream patches. (Now 18 upstream patches).
+- patch #112 closes CAN-2005-0096 and CAN-2005-0097, remote DOS security holes.
+- patch #113 closes CAN-2005-0094, a remote buffer-overflow DOS security hole.
+- patch #114 closes CAN-2005-0095, a remote DOS security hole.
+- Remove the -nonbl (replaced by #104) and -close (replaced by #111) patches, since
+  they're now fixed by upstream patches.
+
+* Mon Oct 25 2004 Jay Fenlason  7:2.5.STABLE7-1
+- new upstream version, with 3 upstream patches.
+  Updated the -build and -config patches
+- Include patch from Ulrich Drepper  to more
+  intelligently close all file descriptors.
+
+* Mon Oct 18 2004 Jay Fenlason  7:2.5.STABLE6-3
+- include patch from Ulrich Drepper  to stop
+  problems with O_NONBLOCK.  This closes #136049
+
+* Tue Oct 12 2004 Jay Fenlason  7:2.5.STABLE6-2
+- Include fix for CAN-2004-0918
+
+* Tue Sep 28 2004 Jay Fenlason  7:2.5.STABLE6-1
+- New upstream version, with 32 upstream patches.
+  This closes #133970, #133931, #131728, #128143, #126726
+
+- Change the permissions on /etc/squid/squid.conf to 640.  This closes
+  bugzilla #125007
+
+* Mon Jun 28 2004 Jay Fenlason  7:2.5STABLE5-5
+- Merge current upstream patches.
+- Fix the -pipe patch to have the correct name of the winbind pipe.
+
+* Tue Jun 15 2004 Elliot Lee 
+- rebuilt
+
+* Mon Apr 5 2004 Jay Fenlason  7:2.5.STABLE5-2
+- Include the first 10 upstream patches
+- Add a patch for the correct location of the winbindd pipe.  This closes
+  bugzilla #107561
+- Remove the change to ssl_support.c from squid-2.5.STABLE3-build patch
+  This closes #117851
+- Include /etc/pam.d/squid .  This closes #113404
+- Include a patch to close #111254 (assignment in assert)
+- Change squid.init to put output messages in /var/log/squid/squid.out
+  This closes #104697
+- Only useradd the squid user if it doesn't already exist, and error out
+  if the useradd fails.  This closes #118718.
+
+* Tue Mar 2 2004 Jay Fenlason  7:2.5.STABLE5-1
+- New upstream version, obsoletes many patches.
+- Fix --datadir passed to configure.  Configure automatically adds /squid
+  so we shouldn't.
+- Remove the problematic triggerpostun trigger, since it's broken, and FC2
+  never shipped with that old version.
+- add %%{?_smp_mflags} to make line.
+
+* Tue Mar 02 2004 Elliot Lee 
+- rebuilt
+
+* Mon Feb 23 2004 Tim Waugh 
+- Use ':' instead of '.' as separator for chown.
+
+* Fri Feb 20 2004 Jay Fenlason  7:2.5.STABLE4-3
+- Clean up the spec file to work on 64-bit platforms (use %%{_libdir}
+  instead of /usr/lib, etc)
+- Make the release number in the changelog section agree with reality.
+- use -fPIE rather than -fpie.  s390 fails with just -fpie
+
+* Fri Feb 13 2004 Elliot Lee 
+- rebuilt
+
+* Thu Feb 5 2004 Jay Fenlason 
+- Incorporate many upstream patches
+- Include many spec file changes from D.Johnson 
+
+* Tue Sep 23 2003 Jay Fenlason  7:2.5.STABLE4-1
+- New upstream version.
+- Fix the Source: line in this spec file to point to the correct URL.
+- redo the -location patch to work with the new upstream version.
+
+* Mon Jun 30 2003 Jay Fenlason  7:2.5.STABLE3-0
+- Spec file change to enable the nul storage module. bugzilla #74654
+- Upgrade to 2.5STABLE3 with current official patches.
+- Added --enable-auth="basic,ntlm": closes bugzilla #90145
+- Added --with-winbind-auth-challenge: closes bugzilla #78691
+- Added --enable-useragent-log and --enable-referer-log, closes
+- bugzilla #91884
+# - Changed configure line to enable pie
+# (Disabled due to broken compilers on ia64 build machines)
+#- Patched to increase the maximum number of file descriptors #72896
+#- (disabled for now--needs more testing)
+
+* Wed Jun 04 2003 Elliot Lee 
+- rebuilt
+
+* Wed Jan 22 2003 Tim Powers 
+- rebuilt
+
+* Wed Jan 15 2003 Bill Nottingham  7:2.5.STABLE1-1
+- update to 2.5.STABLE1
+
+* Wed Nov 27 2002 Tim Powers  7:2.4.STABLE7-5
+- remove unpackaged files from the buildroot
+
+* Tue Aug 27 2002 Nalin Dahyabhai  2.4.STABLE7-4
+- rebuild
+
+* Wed Jul 31 2002 Karsten Hopp 
+- don't raise an error if the config file is incomplete
+  set defaults instead (#69322, #70065)
+
+* Thu Jul 18 2002 Bill Nottingham  2.4.STABLE7-2
+- don't strip binaries
+
+* Mon Jul  8 2002 Bill Nottingham 
+- update to 2.4.STABLE7
+- fix restart (#53761)
+
+* Tue Jun 25 2002 Bill Nottingham 
+- add various upstream bugfix patches
+
+* Fri Jun 21 2002 Tim Powers 
+- automated rebuild
+
+* Thu May 23 2002 Tim Powers 
+- automated rebuild
+
+* Fri Mar 22 2002 Bill Nottingham 
+- 2.4.STABLE6
+- turn off carp
+
+* Mon Feb 18 2002 Bill Nottingham 
+- 2.4.STABLE3 + patches
+- turn off HTCP at request of maintainers
+- leave SNMP enabled in the build, but disabled in the default config
+
+* Fri Jan 25 2002 Tim Powers 
+- rebuild against new libssl
+
+* Wed Jan 09 2002 Tim Powers 
+- automated rebuild
+
+* Mon Jan 07 2002 Florian La Roche 
+- require linuxdoc-tools instead of sgml-tools
+
+* Tue Sep 25 2001 Bill Nottingham 
+- update to 2.4.STABLE2
+
+* Mon Sep 24 2001 Bill Nottingham 
+- add patch to fix FTP crash
+
+* Mon Aug  6 2001 Bill Nottingham 
+- fix uninstall (#50411)
+
+* Mon Jul 23 2001 Bill Nottingham 
+- add some buildprereqs (#49705)
+
+* Sun Jul 22 2001 Bill Nottingham 
+- update FAQ
+
+* Tue Jul 17 2001 Bill Nottingham 
+- own /etc/squid, /usr/lib/squid
+
+* Tue Jun 12 2001 Nalin Dahyabhai 
+- rebuild in new environment
+- s/Copyright:/License:/
+
+* Tue Apr 24 2001 Bill Nottingham 
+- update to 2.4.STABLE1 + patches
+- enable some more configure options (#24981)
+- oops, ship /etc/sysconfig/squid
+
+* Fri Mar  2 2001 Nalin Dahyabhai 
+- rebuild in new environment
+
+* Tue Feb  6 2001 Trond Eivind Glomsrød 
+- improve i18n
+- make the initscript use the standard OK/FAILED
+
+* Tue Jan 23 2001 Bill Nottingham 
+- change i18n mechanism
+
+* Fri Jan 19 2001 Bill Nottingham 
+- fix path references in QUICKSTART (#15114)
+- fix initscript translations (#24086)
+- fix shutdown logic (#24234), patch from 
+- add /etc/sysconfig/squid for daemon options & shutdown timeouts
+- three more bugfixes from the Squid people
+- update FAQ.sgml
+- build and ship auth modules (#23611)
+
+* Thu Jan 11 2001 Bill Nottingham 
+- initscripts translations
+
+* Mon Jan  8 2001 Bill Nottingham 
+- add patch to use mkstemp (greg@wirex.com)
+
+* Fri Dec 01 2000 Bill Nottingham 
+- rebuild because of broken fileutils
+
+* Sat Nov 11 2000 Bill Nottingham 
+- fix the acl matching cases (only need the second patch)
+
+* Tue Nov  7 2000 Bill Nottingham 
+- add two patches to fix domain ACLs
+- add 2 bugfix patches from the squid people
+
+* Fri Jul 28 2000 Bill Nottingham 
+- clean up init script; fix condrestart
+- update to STABLE4, more bugfixes
+- update FAQ
+
+* Tue Jul 18 2000 Nalin Dahyabhai 
+- fix syntax error in init script
+- finish adding condrestart support
+
+* Fri Jul 14 2000 Bill Nottingham 
+- move initscript back
+
+* Wed Jul 12 2000 Prospector 
+- automatic rebuild
+
+* Thu Jul  6 2000 Bill Nottingham 
+- prereq /etc/init.d
+- add bugfix patch
+- update FAQ
+
+* Thu Jun 29 2000 Bill Nottingham 
+- fix init script
+
+* Tue Jun 27 2000 Bill Nottingham 
+- don't prereq new initscripts
+
+* Mon Jun 26 2000 Bill Nottingham 
+- initscript munging
+
+* Sat Jun 10 2000 Bill Nottingham 
+- rebuild for exciting FHS stuff
+
+* Wed May 31 2000 Bill Nottingham 
+- fix init script again (#11699)
+- add --enable-delay-pools (#11695)
+- update to STABLE3
+- update FAQ
+
+* Fri Apr 28 2000 Bill Nottingham 
+- fix init script (#11087)
+
+* Fri Apr  7 2000 Bill Nottingham 
+- three more bugfix patches from the squid people
+- buildprereq jade, sgmltools
+
+* Sun Mar 26 2000 Florian La Roche 
+- make %%pre more portable
+
+* Thu Mar 16 2000 Bill Nottingham 
+- bugfix patches
+- fix dependency on /usr/local/bin/perl
+
+* Sat Mar  4 2000 Bill Nottingham 
+- 2.3.STABLE2
+
+* Mon Feb 14 2000 Bill Nottingham 
+- Yet More Bugfix Patches
+
+* Tue Feb  8 2000 Bill Nottingham 
+- add more bugfix patches
+- --enable-heap-replacement
+
+* Mon Jan 31 2000 Cristian Gafton 
+- rebuild to fix dependencies
+
+* Fri Jan 28 2000 Bill Nottingham 
+- grab some bugfix patches
+
+* Mon Jan 10 2000 Bill Nottingham 
+- 2.3.STABLE1 (whee, another serial number)
+
+* Tue Dec 21 1999 Bernhard Rosenkraenzer 
+- Fix compliance with ftp RFCs
+  (http://www.wu-ftpd.org/broken-clients.html)
+- Work around a bug in some versions of autoconf
+- BuildPrereq sgml-tools - we're using sgml2html
+
+* Mon Oct 18 1999 Bill Nottingham 
+- add a couple of bugfix patches
+
+* Wed Oct 13 1999 Bill Nottingham 
+- update to 2.2.STABLE5.
+- update FAQ, fix URLs.
+
+* Sat Sep 11 1999 Cristian Gafton 
+- transform restart in reload and add restart to the init script
+
+* Tue Aug 31 1999 Bill Nottingham 
+- add squid user as user 23.
+
+* Mon Aug 16 1999 Bill Nottingham 
+- initscript munging
+- fix conflict between logrotate & squid -k (#4562)
+
+* Wed Jul 28 1999 Bill Nottingham 
+- put cachemgr.cgi back in /usr/lib/squid
+
+* Wed Jul 14 1999 Bill Nottingham 
+- add webdav bugfix patch (#4027)
+
+* Mon Jul 12 1999 Bill Nottingham 
+- fix path to config in squid.init (confuses linuxconf)
+
+* Wed Jul  7 1999 Bill Nottingham 
+- 2.2.STABLE4
+
+* Wed Jun 9 1999 Dale Lovelace 
+- logrotate changes
+- errors from find when /var/spool/squid or
+- /var/log/squid didn't exist
+
+* Thu May 20 1999 Bill Nottingham 
+- 2.2.STABLE3
+
+* Thu Apr 22 1999 Bill Nottingham 
+- update to 2.2.STABLE.2
+
+* Sun Apr 18 1999 Bill Nottingham 
+- update to 2.2.STABLE1
+
+* Thu Apr 15 1999 Bill Nottingham 
+- don't need to run groupdel on remove
+- fix useradd
+
+* Mon Apr 12 1999 Bill Nottingham 
+- fix effective_user (bug #2124)
+
+* Mon Apr  5 1999 Bill Nottingham 
+- strip binaries
+
+* Thu Apr  1 1999 Bill Nottingham 
+- duh. adduser does require a user name.
+- add a serial number
+
+* Tue Mar 30 1999 Bill Nottingham 
+- add an adduser in %%pre, too
+
+* Thu Mar 25 1999 Bill Nottingham 
+- oog. chkconfig must be in %%preun, not %%postun
+
+* Wed Mar 24 1999 Bill Nottingham 
+- switch to using group squid
+- turn off icmp (insecure)
+- update to 2.2.DEVEL3
+- build FAQ docs from source
+
+* Tue Mar 23 1999 Bill Nottingham 
+- logrotate changes
+
+* Sun Mar 21 1999 Cristian Gafton 
+- auto rebuild in the new build environment (release 4)
+
+* Wed Feb 10 1999 Bill Nottingham 
+- update to 2.2.PRE2
+
+* Wed Dec 30 1998 Bill Nottingham 
+- cache & log dirs shouldn't be world readable
+- remove preun script (leave logs & cache @ uninstall)
+
+* Tue Dec 29 1998 Bill Nottingham 
+- fix initscript to get cache_dir correct
+
+* Fri Dec 18 1998 Bill Nottingham 
+- update to 2.1.PATCH2
+- merge in some changes from RHCN version
+
+* Sat Oct 10 1998 Cristian Gafton 
+- strip binaries
+- version 1.1.22
+
+* Sun May 10 1998 Cristian Gafton 
+- don't make packages conflict with each other...
+
+* Sat May 02 1998 Cristian Gafton 
+- added a proxy auth patch from Alex deVries 
+- fixed initscripts
+
+* Thu Apr 09 1998 Cristian Gafton 
+- rebuilt for Manhattan
+
+* Fri Mar 20 1998 Cristian Gafton 
+- upgraded to 1.1.21/1.NOVM.21
+
+* Mon Mar 02 1998 Cristian Gafton 
+- updated the init script to use reconfigure option to restart squid instead
+  of shutdown/restart (both safer and quicker)
+
+* Sat Feb 07 1998 Cristian Gafton 
+- upgraded to 1.1.20
+- added the NOVM package and tried to reduce the mess in the spec file
+
+* Wed Jan 7 1998 Cristian Gafton 
+- first build against glibc
+- patched out the use of setresuid(), which is available only on kernels
+  2.1.44 and later
+
diff --git a/squid.sysconfig b/squid.sysconfig
new file mode 100644
index 0000000..3864bd8
--- /dev/null
+++ b/squid.sysconfig
@@ -0,0 +1,9 @@
+# default squid options
+SQUID_OPTS=""
+
+# Time to wait for Squid to shut down when asked. Should not be necessary
+# most of the time.
+SQUID_SHUTDOWN_TIMEOUT=100
+
+# default squid conf file
+SQUID_CONF="/etc/squid/squid.conf"
-- 
Gitee