Mirror of https://github.com/DNSCrypt/dnscrypt-proxy.git, last synced 2025-03-04 02:14:40 +01:00.
commit 1a4d34dc55 (parent 751f049136)

    Add golang.org/x/net/http2 to the dependencies

228 changed files with 193316 additions and 2 deletions
Gopkg.lock (generated, 29 lines changed)
@@ -155,10 +155,14 @@
   name = "golang.org/x/net"
   packages = [
     "bpf",
+    "http2",
+    "http2/hpack",
     "idna",
     "internal/iana",
     "internal/socket",
     "ipv4",
-    "ipv6"
+    "ipv6",
+    "lex/httplex"
   ]
   revision = "61147c48b25b599e5b561d2e9c4f3e1ef489ca41"
@@ -174,6 +178,27 @@
   ]
   revision = "3b87a42e500a6dc65dae1a55d0b641295971163e"
 
+[[projects]]
+  name = "golang.org/x/text"
+  packages = [
+    "collate",
+    "collate/build",
+    "internal/colltab",
+    "internal/gen",
+    "internal/tag",
+    "internal/triegen",
+    "internal/ucd",
+    "language",
+    "secure/bidirule",
+    "transform",
+    "unicode/bidi",
+    "unicode/cldr",
+    "unicode/norm",
+    "unicode/rangetable"
+  ]
+  revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
+  version = "v0.3.0"
+
 [[projects]]
   name = "gopkg.in/natefinch/lumberjack.v2"
   packages = ["."]
@@ -183,6 +208,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "a4dd651828c61eaf4a60761a081ef914190ece4cfa682cb3391712be98bdb34b"
+  inputs-digest = "5afc7ca4456d4f6f1a1da31e3ae1eb4eaf79ffe39c72a3dc98a2d78d4d865b79"
   solver-name = "gps-cdcl"
   solver-version = 1
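For context, a minimal sketch of what vendoring golang.org/x/net/http2 enables. Nothing below is taken from this commit — the URL is a placeholder and the setup is illustrative; http2.ConfigureTransport is the package's documented way to add HTTP/2 support to an existing net/http transport:

	package main

	import (
		"crypto/tls"
		"fmt"
		"net/http"

		"golang.org/x/net/http2" // the dependency added by this commit
	)

	func main() {
		// Upgrade a standard net/http transport to speak HTTP/2 over TLS.
		tr := &http.Transport{TLSClientConfig: &tls.Config{}}
		if err := http2.ConfigureTransport(tr); err != nil {
			panic(err)
		}
		client := &http.Client{Transport: tr}

		// Hypothetical endpoint, used only to show the negotiated protocol.
		resp, err := client.Get("https://http2.golang.org/")
		if err != nil {
			panic(err)
		}
		defer resp.Body.Close()
		fmt.Println(resp.Proto) // "HTTP/2.0" when negotiation succeeds
	}

dnscrypt-proxy presumably pulls this package in for its encrypted DNS-over-HTTPS transport, which rides on exactly this kind of configured client.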
vendor/golang.org/x/net/http2/.gitignore (generated, vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
*~
h2i/h2i
vendor/golang.org/x/net/http2/Dockerfile (generated, vendored, new file, 51 lines)
@@ -0,0 +1,51 @@
#
# This Dockerfile builds a recent curl with HTTP/2 client support, using
# a recent nghttp2 build.
#
# See the Makefile for how to tag it. If Docker and that image are found, the
# Go tests use this curl binary for integration tests.
#

FROM ubuntu:trusty

RUN apt-get update && \
    apt-get upgrade -y && \
    apt-get install -y git-core build-essential wget

RUN apt-get install -y --no-install-recommends \
    autotools-dev libtool pkg-config zlib1g-dev \
    libcunit1-dev libssl-dev libxml2-dev libevent-dev \
    automake autoconf

# The list of packages nghttp2 recommends for h2load:
RUN apt-get install -y --no-install-recommends make binutils \
    autoconf automake autotools-dev \
    libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
    libev-dev libevent-dev libjansson-dev libjemalloc-dev \
    cython python3.4-dev python-setuptools

# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
ENV NGHTTP2_VER 895da9a
RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git

WORKDIR /root/nghttp2
RUN git reset --hard $NGHTTP2_VER
RUN autoreconf -i
RUN automake
RUN autoconf
RUN ./configure
RUN make
RUN make install

WORKDIR /root
RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
RUN tar -zxvf curl-7.45.0.tar.gz
WORKDIR /root/curl-7.45.0
RUN ./configure --with-ssl --with-nghttp2=/usr/local
RUN make
RUN make install
RUN ldconfig

CMD ["-h"]
ENTRYPOINT ["/usr/local/bin/curl"]
vendor/golang.org/x/net/http2/Makefile (generated, vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
curlimage:
	docker build -t gohttp2/curl .
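Taken together with the Dockerfile above, `make curlimage` produces a `gohttp2/curl` image whose entrypoint is the freshly built curl. The integration tests mentioned in the Dockerfile header can then shell out to it, presumably along the lines of `docker run gohttp2/curl --http2 https://http2.golang.org/`; the exact invocation lives in the test code, not in this diff.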
vendor/golang.org/x/net/http2/README (generated, vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
This is a work-in-progress HTTP/2 implementation for Go.

It will eventually live in the Go standard library and won't require
any changes to your code to use. It will just be automatic.

Status:

* The server support is pretty good. A few things are missing
  but are being worked on.
* The client work has just started but shares a lot of code
  and is coming along much quicker.

Docs are at https://godoc.org/golang.org/x/net/http2

Demo test server at https://http2.golang.org/

Help & bug reports welcome!

Contributing: https://golang.org/doc/contribute.html
Bugs:         https://golang.org/issue/new?title=x/net/http2:+
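The README's server claim is easiest to see in code. A minimal sketch, assuming a certificate pair on disk (cert.pem/key.pem are placeholders); http2.ConfigureServer is the package's documented entry point for wiring HTTP/2 into an existing net/http server:

	package main

	import (
		"io"
		"log"
		"net/http"

		"golang.org/x/net/http2"
	)

	func main() {
		mux := http.NewServeMux()
		mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
			// r.Proto reports "HTTP/2.0" for requests arriving over HTTP/2.
			io.WriteString(w, "served over "+r.Proto+"\n")
		})

		srv := &http.Server{Addr: ":8443", Handler: mux}
		// Enable HTTP/2 on this server; ALPN negotiation happens inside
		// ListenAndServeTLS, so no handler changes are needed.
		if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
			log.Fatal(err)
		}
		log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
	}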
vendor/golang.org/x/net/http2/ciphers.go (generated, vendored, new file, 641 lines)
@@ -0,0 +1,641 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

// A list of the possible cipher suite ids. Taken from
// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt

const (
	cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000
	cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001
	cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002
	cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003
	cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004
	cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
	cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006
	cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007
	cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008
	cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009
	cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A
	cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B
	cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C
	cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D
	cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E
	cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F
	cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010
	cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011
	cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012
	cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013
	cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014
	cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015
	cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016
	cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017
	cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018
	cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019
	cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A
	cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B
	// Reserved uint16 = 0x001C-1D
	cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E
	cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F
	cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020
	cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021
	cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022
	cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023
	cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024
	cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025
	cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026
	cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027
	cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028
	cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029
	cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A
	cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B
	cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C
	cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D
	cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E
	cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F
	cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030
	cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031
	cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032
	cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033
	cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034
	cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
	cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036
	cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037
	cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038
	cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039
	cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A
	cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B
	cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C
	cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D
	cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E
	cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F
	cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040
	cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041
	cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042
	cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043
	cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044
	cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045
	cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046
	// Reserved uint16 = 0x0047-4F
	// Reserved uint16 = 0x0050-58
	// Reserved uint16 = 0x0059-5C
	// Unassigned uint16 = 0x005D-5F
	// Reserved uint16 = 0x0060-66
	cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067
	cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068
	cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069
	cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A
	cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B
	cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C
	cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D
	// Unassigned uint16 = 0x006E-83
	cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084
	cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085
	cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086
	cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087
	cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088
	cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089
	cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A
	cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B
	cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C
	cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D
	cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E
	cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F
	cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090
	cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091
	cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092
	cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093
	cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094
	cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095
	cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096
	cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097
	cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098
	cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099
	cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A
	cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B
	cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C
	cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D
	cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E
	cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F
	cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0
	cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1
	cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2
	cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3
	cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4
	cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5
	cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6
	cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7
	cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8
	cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9
	cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA
	cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB
	cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC
	cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD
	cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE
	cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF
	cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0
	cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1
	cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2
	cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3
	cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4
	cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5
	cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6
	cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7
	cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8
	cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9
	cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA
	cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB
	cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC
	cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD
	cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE
	cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF
	cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0
	cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1
	cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2
	cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3
	cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4
	cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5
	// Unassigned uint16 = 0x00C6-FE
	cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF
	// Unassigned uint16 = 0x01-55,*
	cipher_TLS_FALLBACK_SCSV uint16 = 0x5600
	// Unassigned uint16 = 0x5601 - 0xC000
	cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001
	cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002
	cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003
	cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004
	cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005
	cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006
	cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007
	cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008
	cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009
	cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A
	cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B
	cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C
	cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D
	cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E
	cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F
	cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010
	cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011
	cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012
	cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013
	cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014
	cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015
	cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016
	cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017
	cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018
	cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019
	cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A
	cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B
	cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C
	cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D
	cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E
	cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F
	cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020
	cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021
	cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022
	cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023
	cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024
	cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025
	cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026
	cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027
	cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028
	cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029
	cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A
	cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B
	cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C
	cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D
	cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E
	cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F
	cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030
	cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031
	cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032
	cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033
	cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034
	cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035
	cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036
	cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037
	cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038
	cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039
	cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A
	cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B
	cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C
	cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D
	cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E
	cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F
	cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040
	cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041
	cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042
	cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043
	cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044
	cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045
	cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046
	cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047
	cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048
	cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049
	cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A
	cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B
	cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C
	cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D
	cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E
	cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F
	cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050
	cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051
	cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052
	cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053
	cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054
	cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055
	cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056
	cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057
	cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058
	cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059
	cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A
	cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B
	cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C
	cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D
	cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E
	cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F
	cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060
	cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061
	cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062
	cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063
	cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064
	cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065
	cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066
	cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067
	cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068
	cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069
	cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A
	cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B
	cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C
	cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D
	cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E
	cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F
	cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070
	cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071
	cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072
	cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073
	cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074
	cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075
	cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076
	cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077
	cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078
	cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079
	cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A
	cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B
	cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C
	cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D
	cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E
	cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F
	cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080
	cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081
	cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082
	cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083
	cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084
	cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085
	cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086
	cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087
	cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088
	cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089
	cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A
	cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B
	cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C
	cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D
	cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E
	cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F
	cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090
	cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091
	cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092
	cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093
	cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094
	cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095
	cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096
	cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097
	cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098
	cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099
	cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A
	cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B
	cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C
	cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D
	cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E
	cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F
	cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0
	cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1
	cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2
	cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3
	cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4
	cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5
	cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6
	cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7
	cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8
	cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9
	cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA
	cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB
	cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC
	cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD
	cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE
	cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF
	// Unassigned uint16 = 0xC0B0-FF
	// Unassigned uint16 = 0xC1-CB,*
	// Unassigned uint16 = 0xCC00-A7
	cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8
	cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9
	cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA
	cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB
	cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC
	cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD
	cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE
)

// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
// References:
// https://tools.ietf.org/html/rfc7540#appendix-A
// Reject cipher suites from Appendix A.
// "This list includes those cipher suites that do not
// offer an ephemeral key exchange and those that are
// based on the TLS null, stream or block cipher type"
func isBadCipher(cipher uint16) bool {
	switch cipher {
	case cipher_TLS_NULL_WITH_NULL_NULL,
		cipher_TLS_RSA_WITH_NULL_MD5,
		cipher_TLS_RSA_WITH_NULL_SHA,
		cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
		cipher_TLS_RSA_WITH_RC4_128_MD5,
		cipher_TLS_RSA_WITH_RC4_128_SHA,
		cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
		cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
		cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_RSA_WITH_DES_CBC_SHA,
		cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
		cipher_TLS_DH_anon_WITH_RC4_128_MD5,
		cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
		cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_KRB5_WITH_DES_CBC_SHA,
		cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_KRB5_WITH_RC4_128_SHA,
		cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
		cipher_TLS_KRB5_WITH_DES_CBC_MD5,
		cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
		cipher_TLS_KRB5_WITH_RC4_128_MD5,
		cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
		cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
		cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
		cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
		cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
		cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
		cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
		cipher_TLS_PSK_WITH_NULL_SHA,
		cipher_TLS_DHE_PSK_WITH_NULL_SHA,
		cipher_TLS_RSA_PSK_WITH_NULL_SHA,
		cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
		cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
		cipher_TLS_RSA_WITH_NULL_SHA256,
		cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
		cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
		cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
		cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
		cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
		cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
		cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
		cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
		cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
		cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_PSK_WITH_RC4_128_SHA,
		cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
		cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
		cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
		cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
		cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
		cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
		cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
		cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
		cipher_TLS_RSA_WITH_SEED_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
		cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
		cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
		cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
		cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
		cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
		cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
		cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
		cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
		cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
		cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
		cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
		cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
		cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
		cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
		cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
		cipher_TLS_PSK_WITH_NULL_SHA256,
		cipher_TLS_PSK_WITH_NULL_SHA384,
		cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
		cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
		cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
		cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
		cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
		cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
		cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
		cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
		cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
		cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
		cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
		cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
		cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
		cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
		cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
		cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDH_anon_WITH_NULL_SHA,
		cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
		cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
		cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
		cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
		cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
		cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
		cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
		cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
		cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
		cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
		cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
		cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
		cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
		cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
		cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
		cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
		cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_RSA_WITH_AES_128_CCM,
		cipher_TLS_RSA_WITH_AES_256_CCM,
		cipher_TLS_RSA_WITH_AES_128_CCM_8,
		cipher_TLS_RSA_WITH_AES_256_CCM_8,
		cipher_TLS_PSK_WITH_AES_128_CCM,
		cipher_TLS_PSK_WITH_AES_256_CCM,
		cipher_TLS_PSK_WITH_AES_128_CCM_8,
		cipher_TLS_PSK_WITH_AES_256_CCM_8:
		return true
	default:
		return false
	}
}
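isBadCipher is unexported, so nothing outside this package calls it directly; internally it is used to vet negotiated suites against the RFC 7540 Appendix A blacklist. As a hedged illustration of how such a check is typically applied — this helper does not exist in the file and is purely hypothetical, same package:

	// filterRFC7540Ciphers keeps only suites acceptable for HTTP/2 under
	// RFC 7540 Appendix A. Hypothetical helper built on isBadCipher above.
	func filterRFC7540Ciphers(suites []uint16) []uint16 {
		var ok []uint16
		for _, s := range suites {
			if !isBadCipher(s) {
				ok = append(ok, s)
			}
		}
		return ok
	}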
vendor/golang.org/x/net/http2/ciphers_test.go (generated, vendored, new file, 309 lines)
@@ -0,0 +1,309 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import "testing"

func TestIsBadCipherBad(t *testing.T) {
	for _, c := range badCiphers {
		if !isBadCipher(c) {
			t.Errorf("Wrong result for isBadCipher(%d), want true", c)
		}
	}
}

// verify we don't give false positives on ciphers not on blacklist
func TestIsBadCipherGood(t *testing.T) {
	goodCiphers := map[uint16]string{
		cipher_TLS_DHE_RSA_WITH_AES_256_CCM:                "cipher_TLS_DHE_RSA_WITH_AES_256_CCM",
		cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM:            "cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM",
		cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256: "cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256",
	}
	for c, name := range goodCiphers {
		if isBadCipher(c) {
			t.Errorf("Wrong result for isBadCipher(%d) %s, want false", c, name)
		}
	}
}

// copied from https://http2.github.io/http2-spec/#BadCipherSuites,
var badCiphers = []uint16{
	cipher_TLS_NULL_WITH_NULL_NULL,
	cipher_TLS_RSA_WITH_NULL_MD5,
	cipher_TLS_RSA_WITH_NULL_SHA,
	cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
	cipher_TLS_RSA_WITH_RC4_128_MD5,
	cipher_TLS_RSA_WITH_RC4_128_SHA,
	cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
	cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
	cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
	cipher_TLS_RSA_WITH_DES_CBC_SHA,
	cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
	cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
	cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
	cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
	cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
	cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
	cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
	cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
	cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
	cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
	cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
	cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
	cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
	cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
	cipher_TLS_DH_anon_WITH_RC4_128_MD5,
	cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
	cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
	cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
	cipher_TLS_KRB5_WITH_DES_CBC_SHA,
	cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
	cipher_TLS_KRB5_WITH_RC4_128_SHA,
	cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
	cipher_TLS_KRB5_WITH_DES_CBC_MD5,
	cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
	cipher_TLS_KRB5_WITH_RC4_128_MD5,
	cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
	cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
	cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
	cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
	cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
	cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
	cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
	cipher_TLS_PSK_WITH_NULL_SHA,
	cipher_TLS_DHE_PSK_WITH_NULL_SHA,
	cipher_TLS_RSA_PSK_WITH_NULL_SHA,
	cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
	cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
	cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
	cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
	cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
	cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
	cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
	cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
	cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
	cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
	cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
	cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
	cipher_TLS_RSA_WITH_NULL_SHA256,
	cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
	cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
	cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
	cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
	cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
	cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
	cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
	cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
	cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
	cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
	cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
	cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
	cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
	cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
	cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
	cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
	cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
	cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
	cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
	cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
	cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
	cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
	cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
	cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
	cipher_TLS_PSK_WITH_RC4_128_SHA,
	cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
	cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
	cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
	cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
	cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
	cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
	cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
	cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
	cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
	cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
	cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
	cipher_TLS_RSA_WITH_SEED_CBC_SHA,
	cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
	cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
	cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
	cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
	cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
	cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
	cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
	cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
	cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
	cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
	cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
	cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
	cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
	cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
	cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
	cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
	cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
	cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
	cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
	cipher_TLS_PSK_WITH_NULL_SHA256,
	cipher_TLS_PSK_WITH_NULL_SHA384,
	cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
	cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
	cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
	cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
	cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
	cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
	cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
	cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
	cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
	cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
	cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
	cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
	cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
	cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
	cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
	cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
	cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
	cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
	cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
	cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
	cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
	cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
	cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
	cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
	cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
	cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
	cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
	cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
	cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
	cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
	cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
	cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
	cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
	cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
	cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
	cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
	cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
	cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
	cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
	cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
	cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
	cipher_TLS_ECDH_anon_WITH_NULL_SHA,
	cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
	cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
	cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
	cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
	cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
	cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
	cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
	cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
	cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
	cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
	cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
	cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
	cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
	cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
	cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
	cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
	cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
	cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
	cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
	cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
	cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
	cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
	cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
	cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
	cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
	cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
	cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
	cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
	cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
	cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
	cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
	cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
	cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
	cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
	cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
	cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
	cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
	cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
	cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
	cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
	cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
	cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
	cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
	cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
	cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
	cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
	cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
	cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
	cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
	cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
	cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
	cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
	cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
	cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
	cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
	cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
	cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
	cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
	cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
	cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
	cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
	cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
	cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
	cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
	cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
	cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
	cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
	cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
	cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
	cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
	cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
	cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
	cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
	cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
	cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
	cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
	cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
	cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
	cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
	cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
	cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
	cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
	cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
	cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
	cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
	cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
	cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
	cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
	cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
	cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
	cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
	cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
	cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
	cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
	cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
	cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
	cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
	cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
	cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
	cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
	cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
	cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
	cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
	cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
	cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
	cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
	cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
	cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
	cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
	cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
	cipher_TLS_RSA_WITH_AES_128_CCM,
	cipher_TLS_RSA_WITH_AES_256_CCM,
	cipher_TLS_RSA_WITH_AES_128_CCM_8,
	cipher_TLS_RSA_WITH_AES_256_CCM_8,
	cipher_TLS_PSK_WITH_AES_128_CCM,
	cipher_TLS_PSK_WITH_AES_256_CCM,
	cipher_TLS_PSK_WITH_AES_128_CCM_8,
	cipher_TLS_PSK_WITH_AES_256_CCM_8,
}
vendor/golang.org/x/net/http2/client_conn_pool.go (generated, vendored, new file, 256 lines; truncated below)
@@ -0,0 +1,256 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Transport code's client connection pooling.

package http2

import (
	"crypto/tls"
	"net/http"
	"sync"
)

// ClientConnPool manages a pool of HTTP/2 client connections.
type ClientConnPool interface {
	GetClientConn(req *http.Request, addr string) (*ClientConn, error)
	MarkDead(*ClientConn)
}

// clientConnPoolIdleCloser is the interface implemented by ClientConnPool
// implementations which can close their idle connections.
type clientConnPoolIdleCloser interface {
	ClientConnPool
	closeIdleConnections()
}

var (
	_ clientConnPoolIdleCloser = (*clientConnPool)(nil)
	_ clientConnPoolIdleCloser = noDialClientConnPool{}
)

// TODO: use singleflight for dialing and addConnCalls?
type clientConnPool struct {
	t *Transport

	mu sync.Mutex // TODO: maybe switch to RWMutex
	// TODO: add support for sharing conns based on cert names
	// (e.g. share conn for googleapis.com and appspot.com)
	conns        map[string][]*ClientConn // key is host:port
	dialing      map[string]*dialCall     // currently in-flight dials
	keys         map[*ClientConn][]string
	addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls
}

func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
	return p.getClientConn(req, addr, dialOnMiss)
}

const (
	dialOnMiss   = true
	noDialOnMiss = false
)

func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
	if isConnectionCloseRequest(req) && dialOnMiss {
		// It gets its own connection.
		const singleUse = true
		cc, err := p.t.dialClientConn(addr, singleUse)
		if err != nil {
			return nil, err
		}
		return cc, nil
	}
	p.mu.Lock()
	for _, cc := range p.conns[addr] {
		if cc.CanTakeNewRequest() {
			p.mu.Unlock()
			return cc, nil
		}
	}
	if !dialOnMiss {
		p.mu.Unlock()
		return nil, ErrNoCachedConn
	}
	call := p.getStartDialLocked(addr)
	p.mu.Unlock()
	<-call.done
	return call.res, call.err
}

// dialCall is an in-flight Transport dial call to a host.
type dialCall struct {
	p    *clientConnPool
	done chan struct{} // closed when done
	res  *ClientConn   // valid after done is closed
	err  error         // valid after done is closed
}

// requires p.mu is held.
func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
	if call, ok := p.dialing[addr]; ok {
		// A dial is already in-flight. Don't start another.
		return call
	}
	call := &dialCall{p: p, done: make(chan struct{})}
	if p.dialing == nil {
		p.dialing = make(map[string]*dialCall)
	}
	p.dialing[addr] = call
	go call.dial(addr)
	return call
}

// run in its own goroutine.
func (c *dialCall) dial(addr string) {
	const singleUse = false // shared conn
	c.res, c.err = c.p.t.dialClientConn(addr, singleUse)
	close(c.done)

	c.p.mu.Lock()
	delete(c.p.dialing, addr)
	if c.err == nil {
		c.p.addConnLocked(addr, c.res)
	}
	c.p.mu.Unlock()
}

// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
|
||||
// already exist. It coalesces concurrent calls with the same key.
|
||||
// This is used by the http1 Transport code when it creates a new connection. Because
|
||||
// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know
|
||||
// the protocol), it can get into a situation where it has multiple TLS connections.
|
||||
// This code decides which ones live or die.
|
||||
// The return value used is whether c was used.
|
||||
// c is never closed.
|
||||
func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) {
|
||||
p.mu.Lock()
|
||||
for _, cc := range p.conns[key] {
|
||||
if cc.CanTakeNewRequest() {
|
||||
p.mu.Unlock()
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
call, dup := p.addConnCalls[key]
|
||||
if !dup {
|
||||
if p.addConnCalls == nil {
|
||||
p.addConnCalls = make(map[string]*addConnCall)
|
||||
}
|
||||
call = &addConnCall{
|
||||
p: p,
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
p.addConnCalls[key] = call
|
||||
go call.run(t, key, c)
|
||||
}
|
||||
p.mu.Unlock()
|
||||
|
||||
<-call.done
|
||||
if call.err != nil {
|
||||
return false, call.err
|
||||
}
|
||||
return !dup, nil
|
||||
}
|
||||
|
||||
type addConnCall struct {
|
||||
p *clientConnPool
|
||||
done chan struct{} // closed when done
|
||||
err error
|
||||
}
|
||||
|
||||
func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
|
||||
cc, err := t.NewClientConn(tc)
|
||||
|
||||
p := c.p
|
||||
p.mu.Lock()
|
||||
if err != nil {
|
||||
c.err = err
|
||||
} else {
|
||||
p.addConnLocked(key, cc)
|
||||
}
|
||||
delete(p.addConnCalls, key)
|
||||
p.mu.Unlock()
|
||||
close(c.done)
|
||||
}
|
||||
|
||||
func (p *clientConnPool) addConn(key string, cc *ClientConn) {
|
||||
p.mu.Lock()
|
||||
p.addConnLocked(key, cc)
|
||||
p.mu.Unlock()
|
||||
}
|
||||
|
||||
// p.mu must be held
|
||||
func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {
|
||||
for _, v := range p.conns[key] {
|
||||
if v == cc {
|
||||
return
|
||||
}
|
||||
}
|
||||
if p.conns == nil {
|
||||
p.conns = make(map[string][]*ClientConn)
|
||||
}
|
||||
if p.keys == nil {
|
||||
p.keys = make(map[*ClientConn][]string)
|
||||
}
|
||||
p.conns[key] = append(p.conns[key], cc)
|
||||
p.keys[cc] = append(p.keys[cc], key)
|
||||
}
|
||||
|
||||
func (p *clientConnPool) MarkDead(cc *ClientConn) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
for _, key := range p.keys[cc] {
|
||||
vv, ok := p.conns[key]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
newList := filterOutClientConn(vv, cc)
|
||||
if len(newList) > 0 {
|
||||
p.conns[key] = newList
|
||||
} else {
|
||||
delete(p.conns, key)
|
||||
}
|
||||
}
|
||||
delete(p.keys, cc)
|
||||
}
|
||||
|
||||
func (p *clientConnPool) closeIdleConnections() {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
// TODO: don't close a cc if it was just added to the pool
|
||||
// milliseconds ago and has never been used. There's currently
|
||||
// a small race window with the HTTP/1 Transport's integration
|
||||
// where it can add an idle conn just before using it, and
|
||||
// somebody else can concurrently call CloseIdleConns and
|
||||
// break some caller's RoundTrip.
|
||||
for _, vv := range p.conns {
|
||||
for _, cc := range vv {
|
||||
cc.closeIfIdle()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
|
||||
out := in[:0]
|
||||
for _, v := range in {
|
||||
if v != exclude {
|
||||
out = append(out, v)
|
||||
}
|
||||
}
|
||||
// If we filtered it out, zero out the last item to prevent
|
||||
// the GC from seeing it.
|
||||
if len(in) != len(out) {
|
||||
in[len(in)-1] = nil
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// noDialClientConnPool is an implementation of http2.ClientConnPool
|
||||
// which never dials. We let the HTTP/1.1 client dial and use its TLS
|
||||
// connection instead.
|
||||
type noDialClientConnPool struct{ *clientConnPool }
|
||||
|
||||
func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
|
||||
return p.getClientConn(req, addr, noDialOnMiss)
|
||||
}
|
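Editor's note: the dialing map and dialCall type above are a hand-rolled version of the singleflight pattern, where concurrent callers asking for the same address share one in-flight dial. A self-contained sketch of the same coalescing idea, separate from this package (the group/call names and the fake dial are illustrative only):

package main

import (
    "fmt"
    "sync"
    "time"
)

// call is one in-flight operation; done is closed when res is valid.
type call struct {
    done chan struct{}
    res  string
}

type group struct {
    mu    sync.Mutex
    calls map[string]*call
}

// do coalesces concurrent invocations with the same key onto a single
// execution of fn, mirroring how getStartDialLocked shares one dial.
func (g *group) do(key string, fn func() string) string {
    g.mu.Lock()
    if c, ok := g.calls[key]; ok {
        g.mu.Unlock()
        <-c.done // a call is already in flight; wait for its result
        return c.res
    }
    c := &call{done: make(chan struct{})}
    if g.calls == nil {
        g.calls = make(map[string]*call)
    }
    g.calls[key] = c
    g.mu.Unlock()

    c.res = fn() // written before close(done), so waiters see it
    g.mu.Lock()
    delete(g.calls, key)
    g.mu.Unlock()
    close(c.done)
    return c.res
}

func main() {
    var g group
    var wg sync.WaitGroup
    for i := 0; i < 3; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            fmt.Println(g.do("example.com:443", func() string {
                time.Sleep(10 * time.Millisecond) // pretend to dial
                return "conn#1"
            }))
        }()
    }
    wg.Wait() // all three goroutines print the same shared result
}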
80
vendor/golang.org/x/net/http2/configure_transport.go
generated
vendored
Normal file
@@ -0,0 +1,80 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.6

package http2

import (
    "crypto/tls"
    "fmt"
    "net/http"
)

func configureTransport(t1 *http.Transport) (*Transport, error) {
    connPool := new(clientConnPool)
    t2 := &Transport{
        ConnPool: noDialClientConnPool{connPool},
        t1:       t1,
    }
    connPool.t = t2
    if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
        return nil, err
    }
    if t1.TLSClientConfig == nil {
        t1.TLSClientConfig = new(tls.Config)
    }
    if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
        t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
    }
    if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
        t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
    }
    upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
        addr := authorityAddr("https", authority)
        if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
            go c.Close()
            return erringRoundTripper{err}
        } else if !used {
            // Turns out we don't need this c.
            // For example, two goroutines made requests to the same host
            // at the same time, both kicking off TCP dials. (since protocol
            // was unknown)
            go c.Close()
        }
        return t2
    }
    if m := t1.TLSNextProto; len(m) == 0 {
        t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
            "h2": upgradeFn,
        }
    } else {
        m["h2"] = upgradeFn
    }
    return t2, nil
}

// registerHTTPSProtocol calls Transport.RegisterProtocol,
// converting panics into errors.
func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
    defer func() {
        if e := recover(); e != nil {
            err = fmt.Errorf("%v", e)
        }
    }()
    t.RegisterProtocol("https", rt)
    return nil
}

// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
// if there's already a cached connection to the host.
type noDialH2RoundTripper struct{ t *Transport }

func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
    res, err := rt.t.RoundTrip(req)
    if isNoCachedConnError(err) {
        return nil, http.ErrSkipAltProtocol
    }
    return res, err
}
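Editor's note: configureTransport is the go1.6 backend of the exported http2.ConfigureTransport helper, which is the usual entry point for the ALPN wiring shown above. A minimal usage sketch (the example.com URL is a placeholder):

package main

import (
    "fmt"
    "log"
    "net/http"

    "golang.org/x/net/http2"
)

func main() {
    tr := &http.Transport{}
    // Adds "h2" to NextProtos and installs the TLSNextProto upgrade
    // function shown in the diff above.
    if err := http2.ConfigureTransport(tr); err != nil {
        log.Fatal(err)
    }
    client := &http.Client{Transport: tr}
    res, err := client.Get("https://example.com/") // placeholder URL
    if err != nil {
        log.Fatal(err)
    }
    defer res.Body.Close()
    fmt.Println(res.Proto) // "HTTP/2.0" when the server negotiates h2
}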
146
vendor/golang.org/x/net/http2/databuffer.go
generated
vendored
Normal file
@@ -0,0 +1,146 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import (
    "errors"
    "fmt"
    "sync"
)

// Buffer chunks are allocated from a pool to reduce pressure on GC.
// The maximum wasted space per dataBuffer is 2x the largest size class,
// which happens when the dataBuffer has multiple chunks and there is
// one unread byte in both the first and last chunks. We use a few size
// classes to minimize overheads for servers that typically receive very
// small request bodies.
//
// TODO: Benchmark to determine if the pools are necessary. The GC may have
// improved enough that we can instead allocate chunks like this:
// make([]byte, max(16<<10, expectedBytesRemaining))
var (
    dataChunkSizeClasses = []int{
        1 << 10,
        2 << 10,
        4 << 10,
        8 << 10,
        16 << 10,
    }
    dataChunkPools = [...]sync.Pool{
        {New: func() interface{} { return make([]byte, 1<<10) }},
        {New: func() interface{} { return make([]byte, 2<<10) }},
        {New: func() interface{} { return make([]byte, 4<<10) }},
        {New: func() interface{} { return make([]byte, 8<<10) }},
        {New: func() interface{} { return make([]byte, 16<<10) }},
    }
)

func getDataBufferChunk(size int64) []byte {
    i := 0
    for ; i < len(dataChunkSizeClasses)-1; i++ {
        if size <= int64(dataChunkSizeClasses[i]) {
            break
        }
    }
    return dataChunkPools[i].Get().([]byte)
}

func putDataBufferChunk(p []byte) {
    for i, n := range dataChunkSizeClasses {
        if len(p) == n {
            dataChunkPools[i].Put(p)
            return
        }
    }
    panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
}

// dataBuffer is an io.ReadWriter backed by a list of data chunks.
// Each dataBuffer is used to read DATA frames on a single stream.
// The buffer is divided into chunks so the server can limit the
// total memory used by a single connection without limiting the
// request body size on any single stream.
type dataBuffer struct {
    chunks   [][]byte
    r        int   // next byte to read is chunks[0][r]
    w        int   // next byte to write is chunks[len(chunks)-1][w]
    size     int   // total buffered bytes
    expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
}

var errReadEmpty = errors.New("read from empty dataBuffer")

// Read copies bytes from the buffer into p.
// It is an error to read when no data is available.
func (b *dataBuffer) Read(p []byte) (int, error) {
    if b.size == 0 {
        return 0, errReadEmpty
    }
    var ntotal int
    for len(p) > 0 && b.size > 0 {
        readFrom := b.bytesFromFirstChunk()
        n := copy(p, readFrom)
        p = p[n:]
        ntotal += n
        b.r += n
        b.size -= n
        // If the first chunk has been consumed, advance to the next chunk.
        if b.r == len(b.chunks[0]) {
            putDataBufferChunk(b.chunks[0])
            end := len(b.chunks) - 1
            copy(b.chunks[:end], b.chunks[1:])
            b.chunks[end] = nil
            b.chunks = b.chunks[:end]
            b.r = 0
        }
    }
    return ntotal, nil
}

func (b *dataBuffer) bytesFromFirstChunk() []byte {
    if len(b.chunks) == 1 {
        return b.chunks[0][b.r:b.w]
    }
    return b.chunks[0][b.r:]
}

// Len returns the number of bytes of the unread portion of the buffer.
func (b *dataBuffer) Len() int {
    return b.size
}

// Write appends p to the buffer.
func (b *dataBuffer) Write(p []byte) (int, error) {
    ntotal := len(p)
    for len(p) > 0 {
        // If the last chunk is empty, allocate a new chunk. Try to allocate
        // enough to fully copy p plus any additional bytes we expect to
        // receive. However, this may allocate less than len(p).
        want := int64(len(p))
        if b.expected > want {
            want = b.expected
        }
        chunk := b.lastChunkOrAlloc(want)
        n := copy(chunk[b.w:], p)
        p = p[n:]
        b.w += n
        b.size += n
        b.expected -= int64(n)
    }
    return ntotal, nil
}

func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte {
    if len(b.chunks) != 0 {
        last := b.chunks[len(b.chunks)-1]
        if b.w < len(last) {
            return last
        }
    }
    chunk := getDataBufferChunk(want)
    b.chunks = append(b.chunks, chunk)
    b.w = 0
    return chunk
}
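Editor's note: dataBuffer's chunk pooling is a standard size-class scheme: pick the smallest class that fits, and return each chunk to the pool it came from. A standalone sketch of the pattern with three classes (the names and sizes here are illustrative, not part of this diff):

package main

import (
    "fmt"
    "sync"
)

// Three size classes, smallest first; each class has its own pool.
var sizeClasses = []int{1 << 10, 2 << 10, 4 << 10}

var pools = [...]sync.Pool{
    {New: func() interface{} { return make([]byte, 1<<10) }},
    {New: func() interface{} { return make([]byte, 2<<10) }},
    {New: func() interface{} { return make([]byte, 4<<10) }},
}

// getChunk picks the smallest class that can hold size bytes, falling
// back to the largest class for oversized requests.
func getChunk(size int) []byte {
    i := 0
    for ; i < len(sizeClasses)-1; i++ {
        if size <= sizeClasses[i] {
            break
        }
    }
    return pools[i].Get().([]byte)
}

// putChunk returns a chunk to the pool matching its exact length;
// anything else is silently dropped in this sketch.
func putChunk(p []byte) {
    for i, n := range sizeClasses {
        if len(p) == n {
            pools[i].Put(p)
            return
        }
    }
}

func main() {
    c := getChunk(1500) // 1500 bytes fits the 2 KB class
    fmt.Println(len(c)) // 2048
    putChunk(c)
}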
157
vendor/golang.org/x/net/http2/databuffer_test.go
generated
vendored
Normal file
@@ -0,0 +1,157 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.7

package http2

import (
    "bytes"
    "fmt"
    "reflect"
    "testing"
)

func fmtDataChunk(chunk []byte) string {
    out := ""
    var last byte
    var count int
    for _, c := range chunk {
        if c != last {
            if count > 0 {
                out += fmt.Sprintf(" x %d ", count)
                count = 0
            }
            out += string([]byte{c})
            last = c
        }
        count++
    }
    if count > 0 {
        out += fmt.Sprintf(" x %d", count)
    }
    return out
}

func fmtDataChunks(chunks [][]byte) string {
    var out string
    for _, chunk := range chunks {
        out += fmt.Sprintf("{%q}", fmtDataChunk(chunk))
    }
    return out
}

func testDataBuffer(t *testing.T, wantBytes []byte, setup func(t *testing.T) *dataBuffer) {
    // Run setup, then read the remaining bytes from the dataBuffer and check
    // that they match wantBytes. We use different read sizes to check corner
    // cases in Read.
    for _, readSize := range []int{1, 2, 1 * 1024, 32 * 1024} {
        t.Run(fmt.Sprintf("ReadSize=%d", readSize), func(t *testing.T) {
            b := setup(t)
            buf := make([]byte, readSize)
            var gotRead bytes.Buffer
            for {
                n, err := b.Read(buf)
                gotRead.Write(buf[:n])
                if err == errReadEmpty {
                    break
                }
                if err != nil {
                    t.Fatalf("error after %v bytes: %v", gotRead.Len(), err)
                }
            }
            if got, want := gotRead.Bytes(), wantBytes; !bytes.Equal(got, want) {
                t.Errorf("FinalRead=%q, want %q", fmtDataChunk(got), fmtDataChunk(want))
            }
        })
    }
}

func TestDataBufferAllocation(t *testing.T) {
    writes := [][]byte{
        bytes.Repeat([]byte("a"), 1*1024-1),
        []byte("a"),
        bytes.Repeat([]byte("b"), 4*1024-1),
        []byte("b"),
        bytes.Repeat([]byte("c"), 8*1024-1),
        []byte("c"),
        bytes.Repeat([]byte("d"), 16*1024-1),
        []byte("d"),
        bytes.Repeat([]byte("e"), 32*1024),
    }
    var wantRead bytes.Buffer
    for _, p := range writes {
        wantRead.Write(p)
    }

    testDataBuffer(t, wantRead.Bytes(), func(t *testing.T) *dataBuffer {
        b := &dataBuffer{}
        for _, p := range writes {
            if n, err := b.Write(p); n != len(p) || err != nil {
                t.Fatalf("Write(%q x %d)=%v,%v want %v,nil", p[:1], len(p), n, err, len(p))
            }
        }
        want := [][]byte{
            bytes.Repeat([]byte("a"), 1*1024),
            bytes.Repeat([]byte("b"), 4*1024),
            bytes.Repeat([]byte("c"), 8*1024),
            bytes.Repeat([]byte("d"), 16*1024),
            bytes.Repeat([]byte("e"), 16*1024),
            bytes.Repeat([]byte("e"), 16*1024),
        }
        if !reflect.DeepEqual(b.chunks, want) {
            t.Errorf("dataBuffer.chunks\ngot: %s\nwant: %s", fmtDataChunks(b.chunks), fmtDataChunks(want))
        }
        return b
    })
}

func TestDataBufferAllocationWithExpected(t *testing.T) {
    writes := [][]byte{
        bytes.Repeat([]byte("a"), 1*1024), // allocates 16KB
        bytes.Repeat([]byte("b"), 14*1024),
        bytes.Repeat([]byte("c"), 15*1024), // allocates 16KB more
        bytes.Repeat([]byte("d"), 2*1024),
        bytes.Repeat([]byte("e"), 1*1024), // overflows 32KB expectation, allocates just 1KB
    }
    var wantRead bytes.Buffer
    for _, p := range writes {
        wantRead.Write(p)
    }

    testDataBuffer(t, wantRead.Bytes(), func(t *testing.T) *dataBuffer {
        b := &dataBuffer{expected: 32 * 1024}
        for _, p := range writes {
            if n, err := b.Write(p); n != len(p) || err != nil {
                t.Fatalf("Write(%q x %d)=%v,%v want %v,nil", p[:1], len(p), n, err, len(p))
            }
        }
        want := [][]byte{
            append(bytes.Repeat([]byte("a"), 1*1024), append(bytes.Repeat([]byte("b"), 14*1024), bytes.Repeat([]byte("c"), 1*1024)...)...),
            append(bytes.Repeat([]byte("c"), 14*1024), bytes.Repeat([]byte("d"), 2*1024)...),
            bytes.Repeat([]byte("e"), 1*1024),
        }
        if !reflect.DeepEqual(b.chunks, want) {
            t.Errorf("dataBuffer.chunks\ngot: %s\nwant: %s", fmtDataChunks(b.chunks), fmtDataChunks(want))
        }
        return b
    })
}

func TestDataBufferWriteAfterPartialRead(t *testing.T) {
    testDataBuffer(t, []byte("cdxyz"), func(t *testing.T) *dataBuffer {
        b := &dataBuffer{}
        if n, err := b.Write([]byte("abcd")); n != 4 || err != nil {
            t.Fatalf("Write(\"abcd\")=%v,%v want 4,nil", n, err)
        }
        p := make([]byte, 2)
        if n, err := b.Read(p); n != 2 || err != nil || !bytes.Equal(p, []byte("ab")) {
            t.Fatalf("Read()=%q,%v,%v want \"ab\",2,nil", p, n, err)
        }
        if n, err := b.Write([]byte("xyz")); n != 3 || err != nil {
            t.Fatalf("Write(\"xyz\")=%v,%v want 3,nil", n, err)
        }
        return b
    })
}
133
vendor/golang.org/x/net/http2/errors.go
generated
vendored
Normal file
@@ -0,0 +1,133 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import (
    "errors"
    "fmt"
)

// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
type ErrCode uint32

const (
    ErrCodeNo                 ErrCode = 0x0
    ErrCodeProtocol           ErrCode = 0x1
    ErrCodeInternal           ErrCode = 0x2
    ErrCodeFlowControl        ErrCode = 0x3
    ErrCodeSettingsTimeout    ErrCode = 0x4
    ErrCodeStreamClosed       ErrCode = 0x5
    ErrCodeFrameSize          ErrCode = 0x6
    ErrCodeRefusedStream      ErrCode = 0x7
    ErrCodeCancel             ErrCode = 0x8
    ErrCodeCompression        ErrCode = 0x9
    ErrCodeConnect            ErrCode = 0xa
    ErrCodeEnhanceYourCalm    ErrCode = 0xb
    ErrCodeInadequateSecurity ErrCode = 0xc
    ErrCodeHTTP11Required     ErrCode = 0xd
)

var errCodeName = map[ErrCode]string{
    ErrCodeNo:                 "NO_ERROR",
    ErrCodeProtocol:           "PROTOCOL_ERROR",
    ErrCodeInternal:           "INTERNAL_ERROR",
    ErrCodeFlowControl:        "FLOW_CONTROL_ERROR",
    ErrCodeSettingsTimeout:    "SETTINGS_TIMEOUT",
    ErrCodeStreamClosed:       "STREAM_CLOSED",
    ErrCodeFrameSize:          "FRAME_SIZE_ERROR",
    ErrCodeRefusedStream:      "REFUSED_STREAM",
    ErrCodeCancel:             "CANCEL",
    ErrCodeCompression:        "COMPRESSION_ERROR",
    ErrCodeConnect:            "CONNECT_ERROR",
    ErrCodeEnhanceYourCalm:    "ENHANCE_YOUR_CALM",
    ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
    ErrCodeHTTP11Required:     "HTTP_1_1_REQUIRED",
}

func (e ErrCode) String() string {
    if s, ok := errCodeName[e]; ok {
        return s
    }
    return fmt.Sprintf("unknown error code 0x%x", uint32(e))
}

// ConnectionError is an error that results in the termination of the
// entire connection.
type ConnectionError ErrCode

func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) }

// StreamError is an error that only affects one stream within an
// HTTP/2 connection.
type StreamError struct {
    StreamID uint32
    Code     ErrCode
    Cause    error // optional additional detail
}

func streamError(id uint32, code ErrCode) StreamError {
    return StreamError{StreamID: id, Code: code}
}

func (e StreamError) Error() string {
    if e.Cause != nil {
        return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause)
    }
    return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
}

// 6.9.1 The Flow Control Window
// "If a sender receives a WINDOW_UPDATE that causes a flow control
// window to exceed this maximum it MUST terminate either the stream
// or the connection, as appropriate. For streams, [...]; for the
// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
type goAwayFlowError struct{}

func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }

// connError represents an HTTP/2 ConnectionError error code, along
// with a string (for debugging) explaining why.
//
// Errors of this type are only returned by the frame parser functions
// and converted into ConnectionError(Code), after stashing away
// the Reason into the Framer's errDetail field, accessible via
// the (*Framer).ErrorDetail method.
type connError struct {
    Code   ErrCode // the ConnectionError error code
    Reason string  // additional reason
}

func (e connError) Error() string {
    return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason)
}

type pseudoHeaderError string

func (e pseudoHeaderError) Error() string {
    return fmt.Sprintf("invalid pseudo-header %q", string(e))
}

type duplicatePseudoHeaderError string

func (e duplicatePseudoHeaderError) Error() string {
    return fmt.Sprintf("duplicate pseudo-header %q", string(e))
}

type headerFieldNameError string

func (e headerFieldNameError) Error() string {
    return fmt.Sprintf("invalid header field name %q", string(e))
}

type headerFieldValueError string

func (e headerFieldValueError) Error() string {
    return fmt.Sprintf("invalid header field value %q", string(e))
}

var (
    errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers")
    errPseudoAfterRegular   = errors.New("pseudo header field after regular")
)
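Editor's note: ErrCode, ConnectionError, and StreamError are exported, so client code can inspect them directly. A small sketch of how they stringify (expected output shown in comments):

package main

import (
    "fmt"

    "golang.org/x/net/http2"
)

func main() {
    // ErrCode values stringify via the errCodeName table above.
    fmt.Println(http2.ErrCodeFlowControl) // FLOW_CONTROL_ERROR
    fmt.Println(http2.ErrCode(0x99))      // unknown error code 0x99

    // StreamError affects one stream; ConnectionError kills the conn.
    se := http2.StreamError{StreamID: 3, Code: http2.ErrCodeProtocol}
    fmt.Println(se.Error()) // stream error: stream ID 3; PROTOCOL_ERROR
}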
24
vendor/golang.org/x/net/http2/errors_test.go
generated
vendored
Normal file
@@ -0,0 +1,24 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import "testing"

func TestErrCodeString(t *testing.T) {
    tests := []struct {
        err  ErrCode
        want string
    }{
        {ErrCodeProtocol, "PROTOCOL_ERROR"},
        {0xd, "HTTP_1_1_REQUIRED"},
        {0xf, "unknown error code 0xf"},
    }
    for i, tt := range tests {
        got := tt.err.String()
        if got != tt.want {
            t.Errorf("%d. Error = %q; want %q", i, got, tt.want)
        }
    }
}
50
vendor/golang.org/x/net/http2/flow.go
generated
vendored
Normal file
@@ -0,0 +1,50 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Flow control

package http2

// flow is the flow control window's size.
type flow struct {
    // n is the number of DATA bytes we're allowed to send.
    // A flow is kept both on a conn and per-stream.
    n int32

    // conn points to the shared connection-level flow that is
    // shared by all streams on that conn. It is nil for the flow
    // that's on the conn directly.
    conn *flow
}

func (f *flow) setConnFlow(cf *flow) { f.conn = cf }

func (f *flow) available() int32 {
    n := f.n
    if f.conn != nil && f.conn.n < n {
        n = f.conn.n
    }
    return n
}

func (f *flow) take(n int32) {
    if n > f.available() {
        panic("internal error: took too much")
    }
    f.n -= n
    if f.conn != nil {
        f.conn.n -= n
    }
}

// add adds n bytes (positive or negative) to the flow control window.
// It returns false if the sum would exceed 2^31-1.
func (f *flow) add(n int32) bool {
    remain := (1<<31 - 1) - f.n
    if n > remain {
        return false
    }
    f.n += n
    return true
}
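Editor's note: the invariant encoded by available is that a stream may send at most min(stream window, connection window) bytes. Since flow is unexported, here is a standalone arithmetic sketch of that rule (the sendable name is illustrative):

package main

import "fmt"

// sendable mirrors flow.available: the connection-level window caps
// every stream's window.
func sendable(streamWindow, connWindow int32) int32 {
    if connWindow < streamWindow {
        return connWindow
    }
    return streamWindow
}

func main() {
    fmt.Println(sendable(65535, 1024)) // 1024: conn window is the bottleneck
    fmt.Println(sendable(512, 65535))  // 512: stream window is the bottleneck
}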
53
vendor/golang.org/x/net/http2/flow_test.go
generated
vendored
Normal file
@@ -0,0 +1,53 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import "testing"

func TestFlow(t *testing.T) {
    var st flow
    var conn flow
    st.add(3)
    conn.add(2)

    if got, want := st.available(), int32(3); got != want {
        t.Errorf("available = %d; want %d", got, want)
    }
    st.setConnFlow(&conn)
    if got, want := st.available(), int32(2); got != want {
        t.Errorf("after parent setup, available = %d; want %d", got, want)
    }

    st.take(2)
    if got, want := conn.available(), int32(0); got != want {
        t.Errorf("after taking 2, conn = %d; want %d", got, want)
    }
    if got, want := st.available(), int32(0); got != want {
        t.Errorf("after taking 2, stream = %d; want %d", got, want)
    }
}

func TestFlowAdd(t *testing.T) {
    var f flow
    if !f.add(1) {
        t.Fatal("failed to add 1")
    }
    if !f.add(-1) {
        t.Fatal("failed to add -1")
    }
    if got, want := f.available(), int32(0); got != want {
        t.Fatalf("size = %d; want %d", got, want)
    }
    if !f.add(1<<31 - 1) {
        t.Fatal("failed to add 2^31-1")
    }
    if got, want := f.available(), int32(1<<31-1); got != want {
        t.Fatalf("size = %d; want %d", got, want)
    }
    if f.add(1) {
        t.Fatal("adding 1 to max shouldn't be allowed")
    }
}
1579
vendor/golang.org/x/net/http2/frame.go
generated
vendored
Normal file
File diff suppressed because it is too large
1191
vendor/golang.org/x/net/http2/frame_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
16
vendor/golang.org/x/net/http2/go16.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.6

package http2

import (
    "net/http"
    "time"
)

func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
    return t1.ExpectContinueTimeout
}
106
vendor/golang.org/x/net/http2/go17.go
generated
vendored
Normal file
@@ -0,0 +1,106 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.7

package http2

import (
    "context"
    "net"
    "net/http"
    "net/http/httptrace"
    "time"
)

type contextContext interface {
    context.Context
}

func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
    ctx, cancel = context.WithCancel(context.Background())
    ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
    if hs := opts.baseConfig(); hs != nil {
        ctx = context.WithValue(ctx, http.ServerContextKey, hs)
    }
    return
}

func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
    return context.WithCancel(ctx)
}

func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
    return req.WithContext(ctx)
}

type clientTrace httptrace.ClientTrace

func reqContext(r *http.Request) context.Context { return r.Context() }

func (t *Transport) idleConnTimeout() time.Duration {
    if t.t1 != nil {
        return t.t1.IdleConnTimeout
    }
    return 0
}

func setResponseUncompressed(res *http.Response) { res.Uncompressed = true }

func traceGotConn(req *http.Request, cc *ClientConn) {
    trace := httptrace.ContextClientTrace(req.Context())
    if trace == nil || trace.GotConn == nil {
        return
    }
    ci := httptrace.GotConnInfo{Conn: cc.tconn}
    cc.mu.Lock()
    ci.Reused = cc.nextStreamID > 1
    ci.WasIdle = len(cc.streams) == 0 && ci.Reused
    if ci.WasIdle && !cc.lastActive.IsZero() {
        ci.IdleTime = time.Now().Sub(cc.lastActive)
    }
    cc.mu.Unlock()

    trace.GotConn(ci)
}

func traceWroteHeaders(trace *clientTrace) {
    if trace != nil && trace.WroteHeaders != nil {
        trace.WroteHeaders()
    }
}

func traceGot100Continue(trace *clientTrace) {
    if trace != nil && trace.Got100Continue != nil {
        trace.Got100Continue()
    }
}

func traceWait100Continue(trace *clientTrace) {
    if trace != nil && trace.Wait100Continue != nil {
        trace.Wait100Continue()
    }
}

func traceWroteRequest(trace *clientTrace, err error) {
    if trace != nil && trace.WroteRequest != nil {
        trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
    }
}

func traceFirstResponseByte(trace *clientTrace) {
    if trace != nil && trace.GotFirstResponseByte != nil {
        trace.GotFirstResponseByte()
    }
}

func requestTrace(req *http.Request) *clientTrace {
    trace := httptrace.ContextClientTrace(req.Context())
    return (*clientTrace)(trace)
}

// Ping sends a PING frame to the server and waits for the ack.
func (cc *ClientConn) Ping(ctx context.Context) error {
    return cc.ping(ctx)
}
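Editor's note: the trace* helpers above bridge this package to net/http/httptrace on Go 1.7+. A sketch of observing those callbacks from client code (the example.com URL is a placeholder):

package main

import (
    "fmt"
    "log"
    "net/http"
    "net/http/httptrace"
)

func main() {
    // traceGotConn and traceFirstResponseByte above fire these hooks.
    trace := &httptrace.ClientTrace{
        GotConn: func(info httptrace.GotConnInfo) {
            fmt.Printf("conn reused=%v wasIdle=%v idle=%v\n",
                info.Reused, info.WasIdle, info.IdleTime)
        },
        GotFirstResponseByte: func() { fmt.Println("first response byte") },
    }
    req, err := http.NewRequest("GET", "https://example.com/", nil) // placeholder URL
    if err != nil {
        log.Fatal(err)
    }
    req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
    res, err := http.DefaultClient.Do(req)
    if err != nil {
        log.Fatal(err)
    }
    res.Body.Close()
    fmt.Println(res.Status)
}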
36
vendor/golang.org/x/net/http2/go17_not18.go
generated
vendored
Normal file
@@ -0,0 +1,36 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.7,!go1.8

package http2

import "crypto/tls"

// temporary copy of Go 1.7's private tls.Config.clone:
func cloneTLSConfig(c *tls.Config) *tls.Config {
    return &tls.Config{
        Rand:                        c.Rand,
        Time:                        c.Time,
        Certificates:                c.Certificates,
        NameToCertificate:           c.NameToCertificate,
        GetCertificate:              c.GetCertificate,
        RootCAs:                     c.RootCAs,
        NextProtos:                  c.NextProtos,
        ServerName:                  c.ServerName,
        ClientAuth:                  c.ClientAuth,
        ClientCAs:                   c.ClientCAs,
        InsecureSkipVerify:          c.InsecureSkipVerify,
        CipherSuites:                c.CipherSuites,
        PreferServerCipherSuites:    c.PreferServerCipherSuites,
        SessionTicketsDisabled:      c.SessionTicketsDisabled,
        SessionTicketKey:            c.SessionTicketKey,
        ClientSessionCache:          c.ClientSessionCache,
        MinVersion:                  c.MinVersion,
        MaxVersion:                  c.MaxVersion,
        CurvePreferences:            c.CurvePreferences,
        DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
        Renegotiation:               c.Renegotiation,
    }
}
56
vendor/golang.org/x/net/http2/go18.go
generated
vendored
Normal file
@@ -0,0 +1,56 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.8

package http2

import (
    "crypto/tls"
    "io"
    "net/http"
)

func cloneTLSConfig(c *tls.Config) *tls.Config {
    c2 := c.Clone()
    c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264
    return c2
}

var _ http.Pusher = (*responseWriter)(nil)

// Push implements http.Pusher.
func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
    internalOpts := pushOptions{}
    if opts != nil {
        internalOpts.Method = opts.Method
        internalOpts.Header = opts.Header
    }
    return w.push(target, internalOpts)
}

func configureServer18(h1 *http.Server, h2 *Server) error {
    if h2.IdleTimeout == 0 {
        if h1.IdleTimeout != 0 {
            h2.IdleTimeout = h1.IdleTimeout
        } else {
            h2.IdleTimeout = h1.ReadTimeout
        }
    }
    return nil
}

func shouldLogPanic(panicValue interface{}) bool {
    return panicValue != nil && panicValue != http.ErrAbortHandler
}

func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
    return req.GetBody
}

func reqBodyIsNoBody(body io.ReadCloser) bool {
    return body == http.NoBody
}

func go18httpNoBody() io.ReadCloser { return http.NoBody } // for tests only
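Editor's note: on Go 1.8+, the Push method above makes this package's responseWriter satisfy http.Pusher. A sketch of a server handler using it (cert.pem/key.pem and the /style.css asset path are placeholders):

package main

import (
    "fmt"
    "log"
    "net/http"
)

func main() {
    http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        // On an HTTP/2 connection the ResponseWriter implements
        // http.Pusher (the Push method defined in the diff above).
        if p, ok := w.(http.Pusher); ok {
            if err := p.Push("/style.css", nil); err != nil {
                log.Printf("push failed: %v", err)
            }
        }
        fmt.Fprintln(w, "<link rel=stylesheet href=/style.css>")
    })
    // TLS is required for h2; the cert and key paths are placeholders.
    log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))
}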
79
vendor/golang.org/x/net/http2/go18_test.go
generated
vendored
Normal file
@@ -0,0 +1,79 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.8

package http2

import (
    "crypto/tls"
    "net/http"
    "testing"
    "time"
)

// Tests that http2.Server.IdleTimeout is initialized from
// http.Server.{Idle,Read}Timeout. http.Server.IdleTimeout was
// added in Go 1.8.
func TestConfigureServerIdleTimeout_Go18(t *testing.T) {
    const timeout = 5 * time.Second
    const notThisOne = 1 * time.Second

    // With a zero http2.Server, verify that it copies IdleTimeout:
    {
        s1 := &http.Server{
            IdleTimeout: timeout,
            ReadTimeout: notThisOne,
        }
        s2 := &Server{}
        if err := ConfigureServer(s1, s2); err != nil {
            t.Fatal(err)
        }
        if s2.IdleTimeout != timeout {
            t.Errorf("s2.IdleTimeout = %v; want %v", s2.IdleTimeout, timeout)
        }
    }

    // And that it falls back to ReadTimeout:
    {
        s1 := &http.Server{
            ReadTimeout: timeout,
        }
        s2 := &Server{}
        if err := ConfigureServer(s1, s2); err != nil {
            t.Fatal(err)
        }
        if s2.IdleTimeout != timeout {
            t.Errorf("s2.IdleTimeout = %v; want %v", s2.IdleTimeout, timeout)
        }
    }

    // Verify that s1's IdleTimeout doesn't overwrite an existing setting:
    {
        s1 := &http.Server{
            IdleTimeout: notThisOne,
        }
        s2 := &Server{
            IdleTimeout: timeout,
        }
        if err := ConfigureServer(s1, s2); err != nil {
            t.Fatal(err)
        }
        if s2.IdleTimeout != timeout {
            t.Errorf("s2.IdleTimeout = %v; want %v", s2.IdleTimeout, timeout)
        }
    }
}

func TestCertClone(t *testing.T) {
    c := &tls.Config{
        GetClientCertificate: func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
            panic("shouldn't be called")
        },
    }
    c2 := cloneTLSConfig(c)
    if c2.GetClientCertificate == nil {
        t.Error("GetClientCertificate is nil")
    }
}
16
vendor/golang.org/x/net/http2/go19.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.9

package http2

import (
    "net/http"
)

func configureServer19(s *http.Server, conf *Server) error {
    s.RegisterOnShutdown(conf.state.startGracefulShutdown)
    return nil
}
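Editor's note: configureServer19 hooks the HTTP/2 graceful-shutdown path into http.Server.RegisterOnShutdown, so on Go 1.9+ a plain http.Server.Shutdown call also starts the GOAWAY sequence on HTTP/2 connections. A sketch of triggering it (the cert/key paths are placeholders):

package main

import (
    "context"
    "log"
    "net/http"
    "os"
    "os/signal"
    "time"
)

func main() {
    srv := &http.Server{Addr: ":8443"}
    go func() {
        // HTTP/2 is enabled by default for TLS servers on modern Go;
        // cert.pem/key.pem are placeholders.
        if err := srv.ListenAndServeTLS("cert.pem", "key.pem"); err != http.ErrServerClosed {
            log.Fatal(err)
        }
    }()

    stop := make(chan os.Signal, 1)
    signal.Notify(stop, os.Interrupt)
    <-stop

    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    // Shutdown runs the hooks registered via RegisterOnShutdown, which
    // is how configureServer19 starts the HTTP/2 GOAWAY sequence.
    if err := srv.Shutdown(ctx); err != nil {
        log.Printf("shutdown: %v", err)
    }
}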
59
vendor/golang.org/x/net/http2/go19_test.go
generated
vendored
Normal file
@@ -0,0 +1,59 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.9

package http2

import (
    "context"
    "net/http"
    "reflect"
    "testing"
    "time"
)

func TestServerGracefulShutdown(t *testing.T) {
    var st *serverTester
    handlerDone := make(chan struct{})
    st = newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
        defer close(handlerDone)
        go st.ts.Config.Shutdown(context.Background())

        ga := st.wantGoAway()
        if ga.ErrCode != ErrCodeNo {
            t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode)
        }
        if ga.LastStreamID != 1 {
            t.Errorf("GOAWAY LastStreamID = %v; want 1", ga.LastStreamID)
        }

        w.Header().Set("x-foo", "bar")
    })
    defer st.Close()

    st.greet()
    st.bodylessReq1()

    select {
    case <-handlerDone:
    case <-time.After(5 * time.Second):
        t.Fatalf("server did not shutdown?")
    }
    hf := st.wantHeaders()
    goth := st.decodeHeader(hf.HeaderBlockFragment())
    wanth := [][2]string{
        {":status", "200"},
        {"x-foo", "bar"},
        {"content-length", "0"},
    }
    if !reflect.DeepEqual(goth, wanth) {
        t.Errorf("Got headers %v; want %v", goth, wanth)
    }

    n, err := st.cc.Read([]byte{0})
    if n != 0 || err == nil {
        t.Errorf("Read = %v, %v; want 0, non-nil", n, err)
    }
}
170
vendor/golang.org/x/net/http2/gotrack.go
generated
vendored
Normal file
@@ -0,0 +1,170 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Defensive debug-only utility to track that functions run on the
// goroutine that they're supposed to.

package http2

import (
    "bytes"
    "errors"
    "fmt"
    "os"
    "runtime"
    "strconv"
    "sync"
)

var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"

type goroutineLock uint64

func newGoroutineLock() goroutineLock {
    if !DebugGoroutines {
        return 0
    }
    return goroutineLock(curGoroutineID())
}

func (g goroutineLock) check() {
    if !DebugGoroutines {
        return
    }
    if curGoroutineID() != uint64(g) {
        panic("running on the wrong goroutine")
    }
}

func (g goroutineLock) checkNotOn() {
    if !DebugGoroutines {
        return
    }
    if curGoroutineID() == uint64(g) {
        panic("running on the wrong goroutine")
    }
}

var goroutineSpace = []byte("goroutine ")

func curGoroutineID() uint64 {
    bp := littleBuf.Get().(*[]byte)
    defer littleBuf.Put(bp)
    b := *bp
    b = b[:runtime.Stack(b, false)]
    // Parse the 4707 out of "goroutine 4707 ["
    b = bytes.TrimPrefix(b, goroutineSpace)
    i := bytes.IndexByte(b, ' ')
    if i < 0 {
        panic(fmt.Sprintf("No space found in %q", b))
    }
    b = b[:i]
    n, err := parseUintBytes(b, 10, 64)
    if err != nil {
        panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
    }
    return n
}

var littleBuf = sync.Pool{
    New: func() interface{} {
        buf := make([]byte, 64)
        return &buf
    },
}

// parseUintBytes is like strconv.ParseUint, but using a []byte.
func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
    var cutoff, maxVal uint64

    if bitSize == 0 {
        bitSize = int(strconv.IntSize)
    }

    s0 := s
    switch {
    case len(s) < 1:
        err = strconv.ErrSyntax
        goto Error

    case 2 <= base && base <= 36:
        // valid base; nothing to do

    case base == 0:
        // Look for octal, hex prefix.
        switch {
        case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
            base = 16
            s = s[2:]
            if len(s) < 1 {
                err = strconv.ErrSyntax
                goto Error
            }
        case s[0] == '0':
            base = 8
        default:
            base = 10
        }

    default:
        err = errors.New("invalid base " + strconv.Itoa(base))
        goto Error
    }

    n = 0
    cutoff = cutoff64(base)
    maxVal = 1<<uint(bitSize) - 1

    for i := 0; i < len(s); i++ {
        var v byte
        d := s[i]
        switch {
        case '0' <= d && d <= '9':
            v = d - '0'
        case 'a' <= d && d <= 'z':
            v = d - 'a' + 10
        case 'A' <= d && d <= 'Z':
            v = d - 'A' + 10
        default:
            n = 0
            err = strconv.ErrSyntax
            goto Error
        }
        if int(v) >= base {
            n = 0
            err = strconv.ErrSyntax
            goto Error
        }

        if n >= cutoff {
            // n*base overflows
            n = 1<<64 - 1
            err = strconv.ErrRange
            goto Error
        }
        n *= uint64(base)

        n1 := n + uint64(v)
        if n1 < n || n1 > maxVal {
            // n+v overflows
            n = 1<<64 - 1
            err = strconv.ErrRange
            goto Error
        }
        n = n1
    }

    return n, nil

Error:
    return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
}

// Return the first number n such that n*base >= 1<<64.
func cutoff64(base int) uint64 {
    if base < 2 {
        return 0
    }
    return (1<<64-1)/uint64(base) + 1
}
33
vendor/golang.org/x/net/http2/gotrack_test.go
generated
vendored
Normal file
@@ -0,0 +1,33 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import (
    "fmt"
    "strings"
    "testing"
)

func TestGoroutineLock(t *testing.T) {
    oldDebug := DebugGoroutines
    DebugGoroutines = true
    defer func() { DebugGoroutines = oldDebug }()

    g := newGoroutineLock()
    g.check()

    sawPanic := make(chan interface{})
    go func() {
        defer func() { sawPanic <- recover() }()
        g.check() // should panic
    }()
    e := <-sawPanic
    if e == nil {
        t.Fatal("did not see panic from check in other goroutine")
    }
    if !strings.Contains(fmt.Sprint(e), "wrong goroutine") {
        t.Errorf("expected to see panic about running on the wrong goroutine; got %v", e)
    }
}
78
vendor/golang.org/x/net/http2/headermap.go
generated
vendored
Normal file
@@ -0,0 +1,78 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import (
    "net/http"
    "strings"
)

var (
    commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case
    commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case
)

func init() {
    for _, v := range []string{
        "accept",
        "accept-charset",
        "accept-encoding",
        "accept-language",
        "accept-ranges",
        "age",
        "access-control-allow-origin",
        "allow",
        "authorization",
        "cache-control",
        "content-disposition",
        "content-encoding",
        "content-language",
        "content-length",
        "content-location",
        "content-range",
        "content-type",
        "cookie",
        "date",
        "etag",
        "expect",
        "expires",
        "from",
        "host",
        "if-match",
        "if-modified-since",
        "if-none-match",
        "if-unmodified-since",
        "last-modified",
        "link",
        "location",
        "max-forwards",
        "proxy-authenticate",
        "proxy-authorization",
        "range",
        "referer",
        "refresh",
        "retry-after",
        "server",
        "set-cookie",
        "strict-transport-security",
        "trailer",
        "transfer-encoding",
        "user-agent",
        "vary",
        "via",
        "www-authenticate",
    } {
        chk := http.CanonicalHeaderKey(v)
        commonLowerHeader[chk] = v
        commonCanonHeader[v] = chk
    }
}

func lowerHeader(v string) string {
    if s, ok := commonLowerHeader[v]; ok {
        return s
    }
    return strings.ToLower(v)
}
240
vendor/golang.org/x/net/http2/hpack/encode.go
generated
vendored
Normal file
240
vendor/golang.org/x/net/http2/hpack/encode.go
generated
vendored
Normal file
|
@ -0,0 +1,240 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hpack
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
const (
|
||||
uint32Max = ^uint32(0)
|
||||
initialHeaderTableSize = 4096
|
||||
)
|
||||
|
||||
type Encoder struct {
|
||||
dynTab dynamicTable
|
||||
// minSize is the minimum table size set by
|
||||
// SetMaxDynamicTableSize after the previous Header Table Size
|
||||
// Update.
|
||||
minSize uint32
|
||||
// maxSizeLimit is the maximum table size this encoder
|
||||
// supports. This will protect the encoder from too large
|
||||
// size.
|
||||
maxSizeLimit uint32
|
||||
// tableSizeUpdate indicates whether "Header Table Size
|
||||
// Update" is required.
|
||||
tableSizeUpdate bool
|
||||
w io.Writer
|
||||
buf []byte
|
||||
}
|
||||
|
||||
// NewEncoder returns a new Encoder which performs HPACK encoding. An
|
||||
// encoded data is written to w.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
e := &Encoder{
|
||||
minSize: uint32Max,
|
||||
maxSizeLimit: initialHeaderTableSize,
|
||||
tableSizeUpdate: false,
|
||||
w: w,
|
||||
}
|
||||
e.dynTab.table.init()
|
||||
e.dynTab.setMaxSize(initialHeaderTableSize)
|
||||
return e
|
||||
}
|
||||
|
||||
// WriteField encodes f into a single Write to e's underlying Writer.
|
||||
// This function may also produce bytes for "Header Table Size Update"
|
||||
// if necessary. If produced, it is done before encoding f.
|
||||
func (e *Encoder) WriteField(f HeaderField) error {
|
||||
e.buf = e.buf[:0]
|
||||
|
||||
if e.tableSizeUpdate {
|
||||
e.tableSizeUpdate = false
|
||||
if e.minSize < e.dynTab.maxSize {
|
||||
e.buf = appendTableSize(e.buf, e.minSize)
|
||||
}
|
||||
e.minSize = uint32Max
|
||||
e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
|
||||
}
|
||||
|
||||
idx, nameValueMatch := e.searchTable(f)
|
||||
if nameValueMatch {
|
||||
e.buf = appendIndexed(e.buf, idx)
|
||||
} else {
|
||||
indexing := e.shouldIndex(f)
|
||||
if indexing {
|
||||
e.dynTab.add(f)
|
||||
}
|
||||
|
||||
if idx == 0 {
|
||||
e.buf = appendNewName(e.buf, f, indexing)
|
||||
} else {
|
||||
e.buf = appendIndexedName(e.buf, f, idx, indexing)
|
||||
}
|
||||
}
|
||||
n, err := e.w.Write(e.buf)
|
||||
if err == nil && n != len(e.buf) {
|
||||
err = io.ErrShortWrite
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// searchTable searches f in both stable and dynamic header tables.
|
||||
// The static header table is searched first. Only when there is no
|
||||
// exact match for both name and value, the dynamic header table is
|
||||
// then searched. If there is no match, i is 0. If both name and value
|
||||
// match, i is the matched index and nameValueMatch becomes true. If
|
||||
// only name matches, i points to that index and nameValueMatch
|
||||
// becomes false.
|
||||
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
|
||||
i, nameValueMatch = staticTable.search(f)
|
||||
if nameValueMatch {
|
||||
return i, true
|
||||
}
|
||||
|
||||
j, nameValueMatch := e.dynTab.table.search(f)
|
||||
if nameValueMatch || (i == 0 && j != 0) {
|
||||
return j + uint64(staticTable.len()), nameValueMatch
|
||||
}
|
||||
|
||||
return i, false
|
||||
}
|
||||
|
||||
// SetMaxDynamicTableSize changes the dynamic header table size to v.
|
||||
// The actual size is bounded by the value passed to
|
||||
// SetMaxDynamicTableSizeLimit.
|
||||
func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
|
||||
if v > e.maxSizeLimit {
|
||||
v = e.maxSizeLimit
|
||||
}
|
||||
if v < e.minSize {
|
||||
e.minSize = v
|
||||
}
|
||||
e.tableSizeUpdate = true
|
||||
e.dynTab.setMaxSize(v)
|
||||
}
|
||||
|
||||
// SetMaxDynamicTableSizeLimit changes the maximum value that can be
|
||||
// specified in SetMaxDynamicTableSize to v. By default, it is set to
|
||||
// 4096, which is the same size of the default dynamic header table
|
||||
// size described in HPACK specification. If the current maximum
|
||||
// dynamic header table size is strictly greater than v, "Header Table
|
||||
// Size Update" will be done in the next WriteField call and the
|
||||
// maximum dynamic header table size is truncated to v.
|
||||
func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
|
||||
e.maxSizeLimit = v
|
||||
if e.dynTab.maxSize > v {
|
||||
e.tableSizeUpdate = true
|
||||
e.dynTab.setMaxSize(v)
|
||||
}
|
||||
}
|
||||
|
||||
// shouldIndex reports whether f should be indexed.
|
||||
func (e *Encoder) shouldIndex(f HeaderField) bool {
|
||||
return !f.Sensitive && f.Size() <= e.dynTab.maxSize
|
||||
}
|
||||
|
||||
// appendIndexed appends index i, as encoded in "Indexed Header Field"
|
||||
// representation, to dst and returns the extended buffer.
|
||||
func appendIndexed(dst []byte, i uint64) []byte {
|
||||
first := len(dst)
|
||||
dst = appendVarInt(dst, 7, i)
|
||||
dst[first] |= 0x80
|
||||
return dst
|
||||
}

// appendNewName appends f, as encoded in one of "Literal Header field
// - New Name" representation variants, to dst and returns the
// extended buffer.
//
// If f.Sensitive is true, "Never Indexed" representation is used. If
// f.Sensitive is false and indexing is true, "Incremental Indexing"
// representation is used.
func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
	dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
	dst = appendHpackString(dst, f.Name)
	return appendHpackString(dst, f.Value)
}

// appendIndexedName appends f and index i referring to the indexed
// name entry, as encoded in one of "Literal Header field - Indexed
// Name" representation variants, to dst and returns the extended
// buffer.
//
// If f.Sensitive is true, "Never Indexed" representation is used. If
// f.Sensitive is false and indexing is true, "Incremental Indexing"
// representation is used.
func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
	first := len(dst)
	var n byte
	if indexing {
		n = 6
	} else {
		n = 4
	}
	dst = appendVarInt(dst, n, i)
	dst[first] |= encodeTypeByte(indexing, f.Sensitive)
	return appendHpackString(dst, f.Value)
}

// appendTableSize appends v, as encoded in "Header Table Size Update"
// representation, to dst and returns the extended buffer.
func appendTableSize(dst []byte, v uint32) []byte {
	first := len(dst)
	dst = appendVarInt(dst, 5, uint64(v))
	dst[first] |= 0x20
	return dst
}

// appendVarInt appends i, as encoded in variable integer form using n
// bit prefix, to dst and returns the extended buffer.
//
// See
// http://http2.github.io/http2-spec/compression.html#integer.representation
func appendVarInt(dst []byte, n byte, i uint64) []byte {
	k := uint64((1 << n) - 1)
	if i < k {
		return append(dst, byte(i))
	}
	dst = append(dst, byte(k))
	i -= k
	for ; i >= 128; i >>= 7 {
		dst = append(dst, byte(0x80|(i&0x7f)))
	}
	return append(dst, byte(i))
}
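
// As a worked example with a 5-bit prefix (cf. TestAppendVarInt in
// encode_test.go and RFC 7541 appendix C.1.2): 1337 >= 31, so the
// filled prefix 31 is emitted, leaving 1306. Then 0x80|(1306&0x7f) =
// 154 is emitted, and finally 1306>>7 = 10, giving [31, 154, 10].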

// appendHpackString appends s, as encoded in "String Literal"
// representation, to dst and returns the extended buffer.
//
// s will be encoded in Huffman codes only when it produces a strictly
// shorter byte string.
func appendHpackString(dst []byte, s string) []byte {
	huffmanLength := HuffmanEncodeLength(s)
	if huffmanLength < uint64(len(s)) {
		first := len(dst)
		dst = appendVarInt(dst, 7, huffmanLength)
		dst = AppendHuffmanString(dst, s)
		dst[first] |= 0x80
	} else {
		dst = appendVarInt(dst, 7, uint64(len(s)))
		dst = append(dst, s...)
	}
	return dst
}
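
// As a worked example (cf. TestAppendHpackString in encode_test.go):
// "www.example.com" Huffman-encodes to 12 bytes, shorter than its 15
// raw bytes, so it is emitted as 0x8c (H bit set, length 12) followed
// by the Huffman data. "a" needs one byte either way, so it stays
// literal: 0x01 0x61.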

// encodeTypeByte returns the type byte. If sensitive is true, the type
// byte for "Never Indexed" representation is returned. If sensitive is
// false and indexing is true, the type byte for "Incremental Indexing"
// representation is returned. Otherwise, the type byte for "Without
// Indexing" is returned.
func encodeTypeByte(indexing, sensitive bool) byte {
	if sensitive {
		return 0x10
	}
	if indexing {
		return 0x40
	}
	return 0
}

386
vendor/golang.org/x/net/http2/hpack/encode_test.go
generated
vendored
Normal file
@@ -0,0 +1,386 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package hpack

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"math/rand"
	"reflect"
	"strings"
	"testing"
)

func TestEncoderTableSizeUpdate(t *testing.T) {
	tests := []struct {
		size1, size2 uint32
		wantHex      string
	}{
		// Should emit 2 table size updates (2048 and 4096)
		{2048, 4096, "3fe10f 3fe11f 82"},

		// Should emit 1 table size update (2048)
		{16384, 2048, "3fe10f 82"},
	}
	for _, tt := range tests {
		var buf bytes.Buffer
		e := NewEncoder(&buf)
		e.SetMaxDynamicTableSize(tt.size1)
		e.SetMaxDynamicTableSize(tt.size2)
		if err := e.WriteField(pair(":method", "GET")); err != nil {
			t.Fatal(err)
		}
		want := removeSpace(tt.wantHex)
		if got := hex.EncodeToString(buf.Bytes()); got != want {
			t.Errorf("e.SetMaxDynamicTableSize %v, %v = %q; want %q", tt.size1, tt.size2, got, want)
		}
	}
}

func TestEncoderWriteField(t *testing.T) {
	var buf bytes.Buffer
	e := NewEncoder(&buf)
	var got []HeaderField
	d := NewDecoder(4<<10, func(f HeaderField) {
		got = append(got, f)
	})

	tests := []struct {
		hdrs []HeaderField
	}{
		{[]HeaderField{
			pair(":method", "GET"),
			pair(":scheme", "http"),
			pair(":path", "/"),
			pair(":authority", "www.example.com"),
		}},
		{[]HeaderField{
			pair(":method", "GET"),
			pair(":scheme", "http"),
			pair(":path", "/"),
			pair(":authority", "www.example.com"),
			pair("cache-control", "no-cache"),
		}},
		{[]HeaderField{
			pair(":method", "GET"),
			pair(":scheme", "https"),
			pair(":path", "/index.html"),
			pair(":authority", "www.example.com"),
			pair("custom-key", "custom-value"),
		}},
	}
	for i, tt := range tests {
		buf.Reset()
		got = got[:0]
		for _, hf := range tt.hdrs {
			if err := e.WriteField(hf); err != nil {
				t.Fatal(err)
			}
		}
		_, err := d.Write(buf.Bytes())
		if err != nil {
			t.Errorf("%d. Decoder Write = %v", i, err)
		}
		if !reflect.DeepEqual(got, tt.hdrs) {
			t.Errorf("%d. Decoded %+v; want %+v", i, got, tt.hdrs)
		}
	}
}

func TestEncoderSearchTable(t *testing.T) {
	e := NewEncoder(nil)

	e.dynTab.add(pair("foo", "bar"))
	e.dynTab.add(pair("blake", "miz"))
	e.dynTab.add(pair(":method", "GET"))

	tests := []struct {
		hf        HeaderField
		wantI     uint64
		wantMatch bool
	}{
		// Name and Value match
		{pair("foo", "bar"), uint64(staticTable.len()) + 3, true},
		{pair("blake", "miz"), uint64(staticTable.len()) + 2, true},
		{pair(":method", "GET"), 2, true},

		// Only the name matches because Sensitive == true. This is allowed to
		// match any ":method" entry. The current implementation uses the last
		// entry added in newStaticTable.
		{HeaderField{":method", "GET", true}, 3, false},

		// Only Name matches
		{pair("foo", "..."), uint64(staticTable.len()) + 3, false},
		{pair("blake", "..."), uint64(staticTable.len()) + 2, false},
		// As before, this is allowed to match any ":method" entry.
		{pair(":method", "..."), 3, false},

		// None match
		{pair("foo-", "bar"), 0, false},
	}
	for _, tt := range tests {
		if gotI, gotMatch := e.searchTable(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch {
			t.Errorf("e.searchTable(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch)
		}
	}
}

func TestAppendVarInt(t *testing.T) {
	tests := []struct {
		n    byte
		i    uint64
		want []byte
	}{
		// Fits in a byte:
		{1, 0, []byte{0}},
		{2, 2, []byte{2}},
		{3, 6, []byte{6}},
		{4, 14, []byte{14}},
		{5, 30, []byte{30}},
		{6, 62, []byte{62}},
		{7, 126, []byte{126}},
		{8, 254, []byte{254}},

		// Multiple bytes:
		{5, 1337, []byte{31, 154, 10}},
	}
	for _, tt := range tests {
		got := appendVarInt(nil, tt.n, tt.i)
		if !bytes.Equal(got, tt.want) {
			t.Errorf("appendVarInt(nil, %v, %v) = %v; want %v", tt.n, tt.i, got, tt.want)
		}
	}
}

func TestAppendHpackString(t *testing.T) {
	tests := []struct {
		s, wantHex string
	}{
		// Huffman encoded
		{"www.example.com", "8c f1e3 c2e5 f23a 6ba0 ab90 f4ff"},

		// Not Huffman encoded
		{"a", "01 61"},

		// zero length
		{"", "00"},
	}
	for _, tt := range tests {
		want := removeSpace(tt.wantHex)
		buf := appendHpackString(nil, tt.s)
		if got := hex.EncodeToString(buf); want != got {
			t.Errorf("appendHpackString(nil, %q) = %q; want %q", tt.s, got, want)
		}
	}
}

func TestAppendIndexed(t *testing.T) {
	tests := []struct {
		i       uint64
		wantHex string
	}{
		// 1 byte
		{1, "81"},
		{126, "fe"},

		// 2 bytes
		{127, "ff00"},
		{128, "ff01"},
	}
	for _, tt := range tests {
		want := removeSpace(tt.wantHex)
		buf := appendIndexed(nil, tt.i)
		if got := hex.EncodeToString(buf); want != got {
			t.Errorf("appendIndexed(nil, %v) = %q; want %q", tt.i, got, want)
		}
	}
}

func TestAppendNewName(t *testing.T) {
	tests := []struct {
		f        HeaderField
		indexing bool
		wantHex  string
	}{
		// Incremental indexing
		{HeaderField{"custom-key", "custom-value", false}, true, "40 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},

		// Without indexing
		{HeaderField{"custom-key", "custom-value", false}, false, "00 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},

		// Never indexed
		{HeaderField{"custom-key", "custom-value", true}, true, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
		{HeaderField{"custom-key", "custom-value", true}, false, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
	}
	for _, tt := range tests {
		want := removeSpace(tt.wantHex)
		buf := appendNewName(nil, tt.f, tt.indexing)
		if got := hex.EncodeToString(buf); want != got {
			t.Errorf("appendNewName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want)
		}
	}
}

func TestAppendIndexedName(t *testing.T) {
	tests := []struct {
		f        HeaderField
		i        uint64
		indexing bool
		wantHex  string
	}{
		// Incremental indexing
		{HeaderField{":status", "302", false}, 8, true, "48 82 6402"},

		// Without indexing
		{HeaderField{":status", "302", false}, 8, false, "08 82 6402"},

		// Never indexed
		{HeaderField{":status", "302", true}, 8, true, "18 82 6402"},
		{HeaderField{":status", "302", true}, 8, false, "18 82 6402"},
	}
	for _, tt := range tests {
		want := removeSpace(tt.wantHex)
		buf := appendIndexedName(nil, tt.f, tt.i, tt.indexing)
		if got := hex.EncodeToString(buf); want != got {
			t.Errorf("appendIndexedName(nil, %+v, %v, %v) = %q; want %q", tt.f, tt.i, tt.indexing, got, want)
		}
	}
}

func TestAppendTableSize(t *testing.T) {
	tests := []struct {
		i       uint32
		wantHex string
	}{
		// Fits into 1 byte
		{30, "3e"},

		// Extra byte
		{31, "3f00"},
		{32, "3f01"},
	}
	for _, tt := range tests {
		want := removeSpace(tt.wantHex)
		buf := appendTableSize(nil, tt.i)
		if got := hex.EncodeToString(buf); want != got {
			t.Errorf("appendTableSize(nil, %v) = %q; want %q", tt.i, got, want)
		}
	}
}

func TestEncoderSetMaxDynamicTableSize(t *testing.T) {
	var buf bytes.Buffer
	e := NewEncoder(&buf)
	tests := []struct {
		v           uint32
		wantUpdate  bool
		wantMinSize uint32
		wantMaxSize uint32
	}{
		// Set new table size to 2048
		{2048, true, 2048, 2048},

		// Set new table size to 16384, but still limited to
		// 4096
		{16384, true, 2048, 4096},
	}
	for _, tt := range tests {
		e.SetMaxDynamicTableSize(tt.v)
		if got := e.tableSizeUpdate; tt.wantUpdate != got {
			t.Errorf("e.tableSizeUpdate = %v; want %v", got, tt.wantUpdate)
		}
		if got := e.minSize; tt.wantMinSize != got {
			t.Errorf("e.minSize = %v; want %v", got, tt.wantMinSize)
		}
		if got := e.dynTab.maxSize; tt.wantMaxSize != got {
			t.Errorf("e.dynTab.maxSize = %v; want %v", got, tt.wantMaxSize)
		}
	}
}

func TestEncoderSetMaxDynamicTableSizeLimit(t *testing.T) {
	e := NewEncoder(nil)
	// 4095 < initialHeaderTableSize means maxSize is truncated to
	// 4095.
	e.SetMaxDynamicTableSizeLimit(4095)
	if got, want := e.dynTab.maxSize, uint32(4095); got != want {
		t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
	}
	if got, want := e.maxSizeLimit, uint32(4095); got != want {
		t.Errorf("e.maxSizeLimit = %v; want %v", got, want)
	}
	if got, want := e.tableSizeUpdate, true; got != want {
		t.Errorf("e.tableSizeUpdate = %v; want %v", got, want)
	}
	// maxSize will be truncated to maxSizeLimit
	e.SetMaxDynamicTableSize(16384)
	if got, want := e.dynTab.maxSize, uint32(4095); got != want {
		t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
	}
	// 8192 > current maxSizeLimit, so maxSize does not change.
	e.SetMaxDynamicTableSizeLimit(8192)
	if got, want := e.dynTab.maxSize, uint32(4095); got != want {
		t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
	}
	if got, want := e.maxSizeLimit, uint32(8192); got != want {
		t.Errorf("e.maxSizeLimit = %v; want %v", got, want)
	}
}

func removeSpace(s string) string {
	return strings.Replace(s, " ", "", -1)
}

func BenchmarkEncoderSearchTable(b *testing.B) {
	e := NewEncoder(nil)

	// A sample of possible header fields.
	// This is not based on any actual data from HTTP/2 traces.
	var possible []HeaderField
	for _, f := range staticTable.ents {
		if f.Value == "" {
			possible = append(possible, f)
			continue
		}
		// Generate 5 random values, except for cookie and set-cookie,
		// which we know can have many values in practice.
		num := 5
		if f.Name == "cookie" || f.Name == "set-cookie" {
			num = 25
		}
		for i := 0; i < num; i++ {
			f.Value = fmt.Sprintf("%s-%d", f.Name, i)
			possible = append(possible, f)
		}
	}
	for k := 0; k < 10; k++ {
		f := HeaderField{
			Name:      fmt.Sprintf("x-header-%d", k),
			Sensitive: rand.Int()%2 == 0,
		}
		for i := 0; i < 5; i++ {
			f.Value = fmt.Sprintf("%s-%d", f.Name, i)
			possible = append(possible, f)
		}
	}

	// Add a random sample to the dynamic table. This very loosely simulates
	// a history of 100 requests with 20 header fields per request.
	for r := 0; r < 100*20; r++ {
		f := possible[rand.Int31n(int32(len(possible)))]
		// Skip if this is in the staticTable verbatim.
		if _, has := staticTable.search(f); !has {
			e.dynTab.add(f)
		}
	}

	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		for _, f := range possible {
			e.searchTable(f)
		}
	}
}

490
vendor/golang.org/x/net/http2/hpack/hpack.go
generated
vendored
Normal file
@@ -0,0 +1,490 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package hpack implements HPACK, a compression format for
// efficiently representing HTTP header fields in the context of HTTP/2.
//
// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
package hpack

import (
	"bytes"
	"errors"
	"fmt"
)

// A DecodingError is something the spec defines as a decoding error.
type DecodingError struct {
	Err error
}

func (de DecodingError) Error() string {
	return fmt.Sprintf("decoding error: %v", de.Err)
}

// An InvalidIndexError is returned when an encoder references a table
// entry before the static table or after the end of the dynamic table.
type InvalidIndexError int

func (e InvalidIndexError) Error() string {
	return fmt.Sprintf("invalid indexed representation index %d", int(e))
}

// A HeaderField is a name-value pair. Both the name and value are
// treated as opaque sequences of octets.
type HeaderField struct {
	Name, Value string

	// Sensitive means that this header field should never be
	// indexed.
	Sensitive bool
}

// IsPseudo reports whether the header field is an http2 pseudo header.
// That is, it reports whether it starts with a colon.
// It is not otherwise guaranteed to be a valid pseudo header field,
// though.
func (hf HeaderField) IsPseudo() bool {
	return len(hf.Name) != 0 && hf.Name[0] == ':'
}

func (hf HeaderField) String() string {
	var suffix string
	if hf.Sensitive {
		suffix = " (sensitive)"
	}
	return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
}

// Size returns the size of an entry per RFC 7541 section 4.1.
func (hf HeaderField) Size() uint32 {
	// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
	// "The size of the dynamic table is the sum of the size of
	// its entries. The size of an entry is the sum of its name's
	// length in octets (as defined in Section 5.2), its value's
	// length in octets (see Section 5.2), plus 32. The size of
	// an entry is calculated using the length of the name and
	// value without any Huffman encoding applied."

	// This can overflow if somebody makes a large HeaderField
	// Name and/or Value by hand, but we don't care, because that
	// won't happen on the wire because the encoding doesn't allow
	// it.
	return uint32(len(hf.Name) + len(hf.Value) + 32)
}
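
// As a worked example (cf. TestDynamicTableSizeEvict in hpack_test.go):
// the entry ("foo", "bar") has Size() = 3 + 3 + 32 = 38 octets, and
// ("blake", "eats pizza") has Size() = 5 + 10 + 32 = 47 octets; the
// 32-octet per-entry overhead comes straight from RFC 7541 section 4.1.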

// A Decoder is the decoding context for incremental processing of
// header blocks.
type Decoder struct {
	dynTab dynamicTable
	emit   func(f HeaderField)

	emitEnabled bool // whether calls to emit are enabled
	maxStrLen   int  // 0 means unlimited

	// buf is the unparsed buffer. It's only written to
	// saveBuf if it was truncated in the middle of a header
	// block. Because it's usually not owned, we can only
	// process it under Write.
	buf []byte // not owned; only valid during Write

	// saveBuf is previous data passed to Write which we weren't able
	// to fully parse before. Unlike buf, we own this data.
	saveBuf bytes.Buffer
}

// NewDecoder returns a new decoder with the provided maximum dynamic
// table size. The emitFunc will be called for each valid field
// parsed, in the same goroutine as calls to Write, before Write returns.
func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder {
	d := &Decoder{
		emit:        emitFunc,
		emitEnabled: true,
	}
	d.dynTab.table.init()
	d.dynTab.allowedMaxSize = maxDynamicTableSize
	d.dynTab.setMaxSize(maxDynamicTableSize)
	return d
}

// ErrStringLength is returned by Decoder.Write when the max string length
// (as configured by Decoder.SetMaxStringLength) would be violated.
var ErrStringLength = errors.New("hpack: string too long")

// SetMaxStringLength sets the maximum size of a HeaderField name or
// value string. If a string exceeds this length (even after any
// decompression), Write will return ErrStringLength.
// A value of 0 means unlimited and is the default from NewDecoder.
func (d *Decoder) SetMaxStringLength(n int) {
	d.maxStrLen = n
}

// SetEmitFunc changes the callback used when new header fields
// are decoded.
// It must be non-nil. It does not affect EmitEnabled.
func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
	d.emit = emitFunc
}

// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
// should be called. The default is true.
//
// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE
// while still decoding and keeping in-sync with decoder state, but
// without doing unnecessary decompression or generating unnecessary
// garbage for header fields past the limit.
func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }

// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder
// are currently enabled. The default is true.
func (d *Decoder) EmitEnabled() bool { return d.emitEnabled }

// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
// underlying buffers for garbage reasons.

func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
	d.dynTab.setMaxSize(v)
}

// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
// stream (via dynamic table size updates) may set the maximum size
// to.
func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
	d.dynTab.allowedMaxSize = v
}

type dynamicTable struct {
	// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
	table          headerFieldTable
	size           uint32 // in bytes
	maxSize        uint32 // current maxSize
	allowedMaxSize uint32 // maxSize may go up to this, inclusive
}

func (dt *dynamicTable) setMaxSize(v uint32) {
	dt.maxSize = v
	dt.evict()
}

func (dt *dynamicTable) add(f HeaderField) {
	dt.table.addEntry(f)
	dt.size += f.Size()
	dt.evict()
}

// If we're too big, evict old stuff.
func (dt *dynamicTable) evict() {
	var n int
	for dt.size > dt.maxSize && n < dt.table.len() {
		dt.size -= dt.table.ents[n].Size()
		n++
	}
	dt.table.evictOldest(n)
}

func (d *Decoder) maxTableIndex() int {
	// This should never overflow. RFC 7540 Section 6.5.2 limits the size of
	// the dynamic table to 2^32 bytes, where each entry will occupy more than
	// one byte. Further, the staticTable has a fixed, small length.
	return d.dynTab.table.len() + staticTable.len()
}

func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
	// See Section 2.3.3.
	if i == 0 {
		return
	}
	if i <= uint64(staticTable.len()) {
		return staticTable.ents[i-1], true
	}
	if i > uint64(d.maxTableIndex()) {
		return
	}
	// In the dynamic table, newer entries have lower indices.
	// However, dt.ents[0] is the oldest entry. Hence, dt.ents is
	// the reversed dynamic table.
	dt := d.dynTab.table
	return dt.ents[dt.len()-(int(i)-staticTable.len())], true
}

// DecodeFull decodes an entire block.
//
// TODO: remove this method and make it incremental later? This is
// easier for debugging now.
func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
	var hf []HeaderField
	saveFunc := d.emit
	defer func() { d.emit = saveFunc }()
	d.emit = func(f HeaderField) { hf = append(hf, f) }
	if _, err := d.Write(p); err != nil {
		return nil, err
	}
	if err := d.Close(); err != nil {
		return nil, err
	}
	return hf, nil
}

func (d *Decoder) Close() error {
	if d.saveBuf.Len() > 0 {
		d.saveBuf.Reset()
		return DecodingError{errors.New("truncated headers")}
	}
	return nil
}

func (d *Decoder) Write(p []byte) (n int, err error) {
	if len(p) == 0 {
		// Prevent state machine CPU attacks (making us redo
		// work up to the point of finding out we don't have
		// enough data)
		return
	}
	// Only copy the data if we have to. Optimistically assume
	// that p will contain a complete header block.
	if d.saveBuf.Len() == 0 {
		d.buf = p
	} else {
		d.saveBuf.Write(p)
		d.buf = d.saveBuf.Bytes()
		d.saveBuf.Reset()
	}

	for len(d.buf) > 0 {
		err = d.parseHeaderFieldRepr()
		if err == errNeedMore {
			// Extra paranoia, making sure saveBuf won't
			// get too large. All the varint and string
			// reading code earlier should already catch
			// overlong things and return ErrStringLength,
			// but keep this as a last resort.
			const varIntOverhead = 8 // conservative
			if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {
				return 0, ErrStringLength
			}
			d.saveBuf.Write(d.buf)
			return len(p), nil
		}
		if err != nil {
			break
		}
	}
	return len(p), err
}

// errNeedMore is an internal sentinel error value that means the
// buffer is truncated and we need to read more data before we can
// continue parsing.
var errNeedMore = errors.New("need more data")

type indexType int

const (
	indexedTrue indexType = iota
	indexedFalse
	indexedNever
)

func (v indexType) indexed() bool   { return v == indexedTrue }
func (v indexType) sensitive() bool { return v == indexedNever }

// returns errNeedMore if there isn't enough data available.
// any other error is fatal.
// consumes d.buf iff it returns nil.
// precondition: must be called with len(d.buf) > 0
func (d *Decoder) parseHeaderFieldRepr() error {
	b := d.buf[0]
	switch {
	case b&128 != 0:
		// Indexed representation.
		// High bit set?
		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
		return d.parseFieldIndexed()
	case b&192 == 64:
		// 6.2.1 Literal Header Field with Incremental Indexing
		// 0b01xxxxxx: top two bits are 01
		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
		return d.parseFieldLiteral(6, indexedTrue)
	case b&240 == 0:
		// 6.2.2 Literal Header Field without Indexing
		// 0b0000xxxx: top four bits are 0000
		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
		return d.parseFieldLiteral(4, indexedFalse)
	case b&240 == 16:
		// 6.2.3 Literal Header Field never Indexed
		// 0b0001xxxx: top four bits are 0001
		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
		return d.parseFieldLiteral(4, indexedNever)
	case b&224 == 32:
		// 6.3 Dynamic Table Size Update
		// Top three bits are '001'.
		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
		return d.parseDynamicTableSizeUpdate()
	}

	return DecodingError{errors.New("invalid encoding")}
}
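
// For reference, the dispatch above maps the first byte of each
// representation as follows (RFC 7541 section 6):
//
//	1xxxxxxx  Indexed Header Field                       (6.1)
//	01xxxxxx  Literal Header Field, Incremental Indexing (6.2.1)
//	0000xxxx  Literal Header Field, Without Indexing     (6.2.2)
//	0001xxxx  Literal Header Field, Never Indexed        (6.2.3)
//	001xxxxx  Dynamic Table Size Update                  (6.3)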

// (same invariants and behavior as parseHeaderFieldRepr)
func (d *Decoder) parseFieldIndexed() error {
	buf := d.buf
	idx, buf, err := readVarInt(7, buf)
	if err != nil {
		return err
	}
	hf, ok := d.at(idx)
	if !ok {
		return DecodingError{InvalidIndexError(idx)}
	}
	d.buf = buf
	return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
}

// (same invariants and behavior as parseHeaderFieldRepr)
func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
	buf := d.buf
	nameIdx, buf, err := readVarInt(n, buf)
	if err != nil {
		return err
	}

	var hf HeaderField
	wantStr := d.emitEnabled || it.indexed()
	if nameIdx > 0 {
		ihf, ok := d.at(nameIdx)
		if !ok {
			return DecodingError{InvalidIndexError(nameIdx)}
		}
		hf.Name = ihf.Name
	} else {
		hf.Name, buf, err = d.readString(buf, wantStr)
		if err != nil {
			return err
		}
	}
	hf.Value, buf, err = d.readString(buf, wantStr)
	if err != nil {
		return err
	}
	d.buf = buf
	if it.indexed() {
		d.dynTab.add(hf)
	}
	hf.Sensitive = it.sensitive()
	return d.callEmit(hf)
}

func (d *Decoder) callEmit(hf HeaderField) error {
	if d.maxStrLen != 0 {
		if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
			return ErrStringLength
		}
	}
	if d.emitEnabled {
		d.emit(hf)
	}
	return nil
}

// (same invariants and behavior as parseHeaderFieldRepr)
func (d *Decoder) parseDynamicTableSizeUpdate() error {
	buf := d.buf
	size, buf, err := readVarInt(5, buf)
	if err != nil {
		return err
	}
	if size > uint64(d.dynTab.allowedMaxSize) {
		return DecodingError{errors.New("dynamic table size update too large")}
	}
	d.dynTab.setMaxSize(uint32(size))
	d.buf = buf
	return nil
}

var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}

// readVarInt reads an unsigned variable length integer off the
// beginning of p. n is the parameter as described in
// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
//
// n must always be between 1 and 8.
//
// The returned remain buffer is either a smaller suffix of p, or err != nil.
// The error is errNeedMore if p doesn't contain a complete integer.
func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
	if n < 1 || n > 8 {
		panic("bad n")
	}
	if len(p) == 0 {
		return 0, p, errNeedMore
	}
	i = uint64(p[0])
	if n < 8 {
		i &= (1 << uint64(n)) - 1
	}
	if i < (1<<uint64(n))-1 {
		return i, p[1:], nil
	}

	origP := p
	p = p[1:]
	var m uint64
	for len(p) > 0 {
		b := p[0]
		p = p[1:]
		i += uint64(b&127) << m
		if b&128 == 0 {
			return i, p, nil
		}
		m += 7
		if m >= 63 { // TODO: proper overflow check. making this up.
			return 0, origP, errVarintOverflow
		}
	}
	return 0, origP, errNeedMore
}
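
// As a worked example (cf. TestReadVarInt in hpack_test.go): with n=5,
// the bytes {191, 154, 10} decode to 1337. The low five bits of 191
// are all ones (31), so continuation bytes follow: 154 contributes
// (154&127)<<0 = 26 and 10 contributes 10<<7 = 1280, giving
// 31 + 26 + 1280 = 1337. The three high "dummy" bits of the first
// byte are ignored.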

// readString decodes an hpack string from p.
//
// wantStr is whether s will be used. If false, decompression and
// []byte->string garbage are skipped if s will be ignored
// anyway. This does mean that huffman decoding errors for non-indexed
// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
// is returning an error anyway, and because they're not indexed, the error
// won't affect the decoding state.
func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
	if len(p) == 0 {
		return "", p, errNeedMore
	}
	isHuff := p[0]&128 != 0
	strLen, p, err := readVarInt(7, p)
	if err != nil {
		return "", p, err
	}
	if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
		return "", nil, ErrStringLength
	}
	if uint64(len(p)) < strLen {
		return "", p, errNeedMore
	}
	if !isHuff {
		if wantStr {
			s = string(p[:strLen])
		}
		return s, p[strLen:], nil
	}

	if wantStr {
		buf := bufPool.Get().(*bytes.Buffer)
		buf.Reset() // don't trust others
		defer bufPool.Put(buf)
		if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
			buf.Reset()
			return "", nil, err
		}
		s = buf.String()
		buf.Reset() // be nice to GC
	}
	return s, p[strLen:], nil
}

722
vendor/golang.org/x/net/http2/hpack/hpack_test.go
generated
vendored
Normal file
@@ -0,0 +1,722 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package hpack

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"math/rand"
	"reflect"
	"strings"
	"testing"
	"time"
)

func (d *Decoder) mustAt(idx int) HeaderField {
	if hf, ok := d.at(uint64(idx)); !ok {
		panic(fmt.Sprintf("bogus index %d", idx))
	} else {
		return hf
	}
}

func TestDynamicTableAt(t *testing.T) {
	d := NewDecoder(4096, nil)
	at := d.mustAt
	if got, want := at(2), (pair(":method", "GET")); got != want {
		t.Errorf("at(2) = %v; want %v", got, want)
	}
	d.dynTab.add(pair("foo", "bar"))
	d.dynTab.add(pair("blake", "miz"))
	if got, want := at(staticTable.len()+1), (pair("blake", "miz")); got != want {
		t.Errorf("at(dyn 1) = %v; want %v", got, want)
	}
	if got, want := at(staticTable.len()+2), (pair("foo", "bar")); got != want {
		t.Errorf("at(dyn 2) = %v; want %v", got, want)
	}
	if got, want := at(3), (pair(":method", "POST")); got != want {
		t.Errorf("at(3) = %v; want %v", got, want)
	}
}

func TestDynamicTableSizeEvict(t *testing.T) {
	d := NewDecoder(4096, nil)
	if want := uint32(0); d.dynTab.size != want {
		t.Fatalf("size = %d; want %d", d.dynTab.size, want)
	}
	add := d.dynTab.add
	add(pair("blake", "eats pizza"))
	if want := uint32(15 + 32); d.dynTab.size != want {
		t.Fatalf("after pizza, size = %d; want %d", d.dynTab.size, want)
	}
	add(pair("foo", "bar"))
	if want := uint32(15 + 32 + 6 + 32); d.dynTab.size != want {
		t.Fatalf("after foo bar, size = %d; want %d", d.dynTab.size, want)
	}
	d.dynTab.setMaxSize(15 + 32 + 1 /* slop */)
	if want := uint32(6 + 32); d.dynTab.size != want {
		t.Fatalf("after setMaxSize, size = %d; want %d", d.dynTab.size, want)
	}
	if got, want := d.mustAt(staticTable.len()+1), (pair("foo", "bar")); got != want {
		t.Errorf("at(dyn 1) = %v; want %v", got, want)
	}
	add(pair("long", strings.Repeat("x", 500)))
	if want := uint32(0); d.dynTab.size != want {
		t.Fatalf("after big one, size = %d; want %d", d.dynTab.size, want)
	}
}

func TestDecoderDecode(t *testing.T) {
	tests := []struct {
		name       string
		in         []byte
		want       []HeaderField
		wantDynTab []HeaderField // newest entry first
	}{
		// C.2.1 Literal Header Field with Indexing
		// http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.1
		{"C.2.1", dehex("400a 6375 7374 6f6d 2d6b 6579 0d63 7573 746f 6d2d 6865 6164 6572"),
			[]HeaderField{pair("custom-key", "custom-header")},
			[]HeaderField{pair("custom-key", "custom-header")},
		},

		// C.2.2 Literal Header Field without Indexing
		// http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.2
		{"C.2.2", dehex("040c 2f73 616d 706c 652f 7061 7468"),
			[]HeaderField{pair(":path", "/sample/path")},
			[]HeaderField{}},

		// C.2.3 Literal Header Field never Indexed
		// http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.3
		{"C.2.3", dehex("1008 7061 7373 776f 7264 0673 6563 7265 74"),
			[]HeaderField{{"password", "secret", true}},
			[]HeaderField{}},

		// C.2.4 Indexed Header Field
		// http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.4
		{"C.2.4", []byte("\x82"),
			[]HeaderField{pair(":method", "GET")},
			[]HeaderField{}},
	}
	for _, tt := range tests {
		d := NewDecoder(4096, nil)
		hf, err := d.DecodeFull(tt.in)
		if err != nil {
			t.Errorf("%s: %v", tt.name, err)
			continue
		}
		if !reflect.DeepEqual(hf, tt.want) {
			t.Errorf("%s: Got %v; want %v", tt.name, hf, tt.want)
		}
		gotDynTab := d.dynTab.reverseCopy()
		if !reflect.DeepEqual(gotDynTab, tt.wantDynTab) {
			t.Errorf("%s: dynamic table after = %v; want %v", tt.name, gotDynTab, tt.wantDynTab)
		}
	}
}

func (dt *dynamicTable) reverseCopy() (hf []HeaderField) {
	hf = make([]HeaderField, len(dt.table.ents))
	for i := range hf {
		hf[i] = dt.table.ents[len(dt.table.ents)-1-i]
	}
	return
}

type encAndWant struct {
	enc         []byte
	want        []HeaderField
	wantDynTab  []HeaderField
	wantDynSize uint32
}

// C.3 Request Examples without Huffman Coding
// http://http2.github.io/http2-spec/compression.html#rfc.section.C.3
func TestDecodeC3_NoHuffman(t *testing.T) {
	testDecodeSeries(t, 4096, []encAndWant{
		{dehex("8286 8441 0f77 7777 2e65 7861 6d70 6c65 2e63 6f6d"),
			[]HeaderField{
				pair(":method", "GET"),
				pair(":scheme", "http"),
				pair(":path", "/"),
				pair(":authority", "www.example.com"),
			},
			[]HeaderField{
				pair(":authority", "www.example.com"),
			},
			57,
		},
		{dehex("8286 84be 5808 6e6f 2d63 6163 6865"),
			[]HeaderField{
				pair(":method", "GET"),
				pair(":scheme", "http"),
				pair(":path", "/"),
				pair(":authority", "www.example.com"),
				pair("cache-control", "no-cache"),
			},
			[]HeaderField{
				pair("cache-control", "no-cache"),
				pair(":authority", "www.example.com"),
			},
			110,
		},
		{dehex("8287 85bf 400a 6375 7374 6f6d 2d6b 6579 0c63 7573 746f 6d2d 7661 6c75 65"),
			[]HeaderField{
				pair(":method", "GET"),
				pair(":scheme", "https"),
				pair(":path", "/index.html"),
				pair(":authority", "www.example.com"),
				pair("custom-key", "custom-value"),
			},
			[]HeaderField{
				pair("custom-key", "custom-value"),
				pair("cache-control", "no-cache"),
				pair(":authority", "www.example.com"),
			},
			164,
		},
	})
}

// C.4 Request Examples with Huffman Coding
// http://http2.github.io/http2-spec/compression.html#rfc.section.C.4
func TestDecodeC4_Huffman(t *testing.T) {
	testDecodeSeries(t, 4096, []encAndWant{
		{dehex("8286 8441 8cf1 e3c2 e5f2 3a6b a0ab 90f4 ff"),
			[]HeaderField{
				pair(":method", "GET"),
				pair(":scheme", "http"),
				pair(":path", "/"),
				pair(":authority", "www.example.com"),
			},
			[]HeaderField{
				pair(":authority", "www.example.com"),
			},
			57,
		},
		{dehex("8286 84be 5886 a8eb 1064 9cbf"),
			[]HeaderField{
				pair(":method", "GET"),
				pair(":scheme", "http"),
				pair(":path", "/"),
				pair(":authority", "www.example.com"),
				pair("cache-control", "no-cache"),
			},
			[]HeaderField{
				pair("cache-control", "no-cache"),
				pair(":authority", "www.example.com"),
			},
			110,
		},
		{dehex("8287 85bf 4088 25a8 49e9 5ba9 7d7f 8925 a849 e95b b8e8 b4bf"),
			[]HeaderField{
				pair(":method", "GET"),
				pair(":scheme", "https"),
				pair(":path", "/index.html"),
				pair(":authority", "www.example.com"),
				pair("custom-key", "custom-value"),
			},
			[]HeaderField{
				pair("custom-key", "custom-value"),
				pair("cache-control", "no-cache"),
				pair(":authority", "www.example.com"),
			},
			164,
		},
	})
}

// http://http2.github.io/http2-spec/compression.html#rfc.section.C.5
// "This section shows several consecutive header lists, corresponding
// to HTTP responses, on the same connection. The HTTP/2 setting
// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256
// octets, causing some evictions to occur."
func TestDecodeC5_ResponsesNoHuff(t *testing.T) {
	testDecodeSeries(t, 256, []encAndWant{
		{dehex(`
4803 3330 3258 0770 7269 7661 7465 611d
4d6f 6e2c 2032 3120 4f63 7420 3230 3133
2032 303a 3133 3a32 3120 474d 546e 1768
7474 7073 3a2f 2f77 7777 2e65 7861 6d70
6c65 2e63 6f6d
`),
			[]HeaderField{
				pair(":status", "302"),
				pair("cache-control", "private"),
				pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
				pair("location", "https://www.example.com"),
			},
			[]HeaderField{
				pair("location", "https://www.example.com"),
				pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
				pair("cache-control", "private"),
				pair(":status", "302"),
			},
			222,
		},
		{dehex("4803 3330 37c1 c0bf"),
			[]HeaderField{
				pair(":status", "307"),
				pair("cache-control", "private"),
				pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
				pair("location", "https://www.example.com"),
			},
			[]HeaderField{
				pair(":status", "307"),
				pair("location", "https://www.example.com"),
				pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
				pair("cache-control", "private"),
			},
			222,
		},
		{dehex(`
88c1 611d 4d6f 6e2c 2032 3120 4f63 7420
3230 3133 2032 303a 3133 3a32 3220 474d
54c0 5a04 677a 6970 7738 666f 6f3d 4153
444a 4b48 514b 425a 584f 5157 454f 5049
5541 5851 5745 4f49 553b 206d 6178 2d61
6765 3d33 3630 303b 2076 6572 7369 6f6e
3d31
`),
			[]HeaderField{
				pair(":status", "200"),
				pair("cache-control", "private"),
				pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
				pair("location", "https://www.example.com"),
				pair("content-encoding", "gzip"),
				pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
			},
			[]HeaderField{
				pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
				pair("content-encoding", "gzip"),
				pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
			},
			215,
		},
	})
}

// http://http2.github.io/http2-spec/compression.html#rfc.section.C.6
// "This section shows the same examples as the previous section, but
// using Huffman encoding for the literal values. The HTTP/2 setting
// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256
// octets, causing some evictions to occur. The eviction mechanism
// uses the length of the decoded literal values, so the same
// evictions occurs as in the previous section."
func TestDecodeC6_ResponsesHuffman(t *testing.T) {
	testDecodeSeries(t, 256, []encAndWant{
		{dehex(`
4882 6402 5885 aec3 771a 4b61 96d0 7abe
9410 54d4 44a8 2005 9504 0b81 66e0 82a6
2d1b ff6e 919d 29ad 1718 63c7 8f0b 97c8
e9ae 82ae 43d3
`),
			[]HeaderField{
				pair(":status", "302"),
				pair("cache-control", "private"),
				pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
				pair("location", "https://www.example.com"),
			},
			[]HeaderField{
				pair("location", "https://www.example.com"),
				pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
				pair("cache-control", "private"),
				pair(":status", "302"),
			},
			222,
		},
		{dehex("4883 640e ffc1 c0bf"),
			[]HeaderField{
				pair(":status", "307"),
				pair("cache-control", "private"),
				pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
				pair("location", "https://www.example.com"),
			},
			[]HeaderField{
				pair(":status", "307"),
				pair("location", "https://www.example.com"),
				pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
				pair("cache-control", "private"),
			},
			222,
		},
		{dehex(`
88c1 6196 d07a be94 1054 d444 a820 0595
040b 8166 e084 a62d 1bff c05a 839b d9ab
77ad 94e7 821d d7f2 e6c7 b335 dfdf cd5b
3960 d5af 2708 7f36 72c1 ab27 0fb5 291f
9587 3160 65c0 03ed 4ee5 b106 3d50 07
`),
			[]HeaderField{
				pair(":status", "200"),
				pair("cache-control", "private"),
				pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
				pair("location", "https://www.example.com"),
				pair("content-encoding", "gzip"),
				pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
			},
			[]HeaderField{
				pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
				pair("content-encoding", "gzip"),
				pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
			},
			215,
		},
	})
}

func testDecodeSeries(t *testing.T, size uint32, steps []encAndWant) {
	d := NewDecoder(size, nil)
	for i, step := range steps {
		hf, err := d.DecodeFull(step.enc)
		if err != nil {
			t.Fatalf("Error at step index %d: %v", i, err)
		}
		if !reflect.DeepEqual(hf, step.want) {
			t.Fatalf("At step index %d: Got headers %v; want %v", i, hf, step.want)
		}
		gotDynTab := d.dynTab.reverseCopy()
		if !reflect.DeepEqual(gotDynTab, step.wantDynTab) {
			t.Errorf("After step index %d, dynamic table = %v; want %v", i, gotDynTab, step.wantDynTab)
		}
		if d.dynTab.size != step.wantDynSize {
			t.Errorf("After step index %d, dynamic table size = %v; want %v", i, d.dynTab.size, step.wantDynSize)
		}
	}
}

func TestHuffmanDecodeExcessPadding(t *testing.T) {
	tests := [][]byte{
		{0xff},                                   // Padding exceeds 7 bits
		{0x1f, 0xff},                             // {"a", 1 byte excess padding}
		{0x1f, 0xff, 0xff},                       // {"a", 2 byte excess padding}
		{0x1f, 0xff, 0xff, 0xff},                 // {"a", 3 byte excess padding}
		{0xff, 0x9f, 0xff, 0xff, 0xff},           // {"a", 29 bit excess padding}
		{'R', 0xbc, '0', 0xff, 0xff, 0xff, 0xff}, // Padding ends on partial symbol.
	}
	for i, in := range tests {
		var buf bytes.Buffer
		if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman {
			t.Errorf("test-%d: decode(%q) = %v; want ErrInvalidHuffman", i, in, err)
		}
	}
}

func TestHuffmanDecodeEOS(t *testing.T) {
	in := []byte{0xff, 0xff, 0xff, 0xff, 0xfc} // {EOS, "?"}
	var buf bytes.Buffer
	if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman {
		t.Errorf("error = %v; want ErrInvalidHuffman", err)
	}
}

func TestHuffmanDecodeMaxLengthOnTrailingByte(t *testing.T) {
	in := []byte{0x00, 0x01} // {"0", "0", "0"}
	var buf bytes.Buffer
	if err := huffmanDecode(&buf, 2, in); err != ErrStringLength {
		t.Errorf("error = %v; want ErrStringLength", err)
	}
}

func TestHuffmanDecodeCorruptPadding(t *testing.T) {
	in := []byte{0x00}
	var buf bytes.Buffer
	if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman {
		t.Errorf("error = %v; want ErrInvalidHuffman", err)
	}
}

func TestHuffmanDecode(t *testing.T) {
	tests := []struct {
		inHex, want string
	}{
		{"f1e3 c2e5 f23a 6ba0 ab90 f4ff", "www.example.com"},
		{"a8eb 1064 9cbf", "no-cache"},
		{"25a8 49e9 5ba9 7d7f", "custom-key"},
		{"25a8 49e9 5bb8 e8b4 bf", "custom-value"},
		{"6402", "302"},
		{"aec3 771a 4b", "private"},
		{"d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff", "Mon, 21 Oct 2013 20:13:21 GMT"},
		{"9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3", "https://www.example.com"},
		{"9bd9 ab", "gzip"},
		{"94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07",
			"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"},
	}
	for i, tt := range tests {
		var buf bytes.Buffer
		in, err := hex.DecodeString(strings.Replace(tt.inHex, " ", "", -1))
		if err != nil {
			t.Errorf("%d. hex input error: %v", i, err)
			continue
		}
		if _, err := HuffmanDecode(&buf, in); err != nil {
			t.Errorf("%d. decode error: %v", i, err)
			continue
		}
		if got := buf.String(); tt.want != got {
			t.Errorf("%d. decode = %q; want %q", i, got, tt.want)
		}
	}
}

func TestAppendHuffmanString(t *testing.T) {
	tests := []struct {
		in, want string
	}{
		{"www.example.com", "f1e3 c2e5 f23a 6ba0 ab90 f4ff"},
		{"no-cache", "a8eb 1064 9cbf"},
		{"custom-key", "25a8 49e9 5ba9 7d7f"},
		{"custom-value", "25a8 49e9 5bb8 e8b4 bf"},
		{"302", "6402"},
		{"private", "aec3 771a 4b"},
		{"Mon, 21 Oct 2013 20:13:21 GMT", "d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff"},
		{"https://www.example.com", "9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3"},
		{"gzip", "9bd9 ab"},
		{"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1",
			"94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07"},
	}
	for i, tt := range tests {
		buf := []byte{}
		want := strings.Replace(tt.want, " ", "", -1)
		buf = AppendHuffmanString(buf, tt.in)
		if got := hex.EncodeToString(buf); want != got {
			t.Errorf("%d. encode = %q; want %q", i, got, want)
		}
	}
}

func TestHuffmanMaxStrLen(t *testing.T) {
	const msg = "Some string"
	huff := AppendHuffmanString(nil, msg)

	testGood := func(max int) {
		var out bytes.Buffer
		if err := huffmanDecode(&out, max, huff); err != nil {
			t.Errorf("For maxLen=%d, unexpected error: %v", max, err)
		}
		if out.String() != msg {
			t.Errorf("For maxLen=%d, out = %q; want %q", max, out.String(), msg)
		}
	}
	testGood(0)
	testGood(len(msg))
	testGood(len(msg) + 1)

	var out bytes.Buffer
	if err := huffmanDecode(&out, len(msg)-1, huff); err != ErrStringLength {
		t.Errorf("err = %v; want ErrStringLength", err)
	}
}

func TestHuffmanRoundtripStress(t *testing.T) {
	const Len = 50 // of uncompressed string
	input := make([]byte, Len)
	var output bytes.Buffer
	var huff []byte

	n := 5000
	if testing.Short() {
		n = 100
	}
	seed := time.Now().UnixNano()
	t.Logf("Seed = %v", seed)
	src := rand.New(rand.NewSource(seed))
	var encSize int64
	for i := 0; i < n; i++ {
		for l := range input {
			input[l] = byte(src.Intn(256))
		}
		huff = AppendHuffmanString(huff[:0], string(input))
		encSize += int64(len(huff))
		output.Reset()
		if err := huffmanDecode(&output, 0, huff); err != nil {
			t.Errorf("Failed to decode %q -> %q -> error %v", input, huff, err)
			continue
		}
		if !bytes.Equal(output.Bytes(), input) {
			t.Errorf("Roundtrip failure on %q -> %q -> %q", input, huff, output.Bytes())
		}
	}
	t.Logf("Compressed size of original: %0.02f%% (%v -> %v)", 100*(float64(encSize)/(Len*float64(n))), Len*n, encSize)
}

func TestHuffmanDecodeFuzz(t *testing.T) {
	const Len = 50 // of compressed
	var buf, zbuf bytes.Buffer

	n := 5000
	if testing.Short() {
		n = 100
	}
	seed := time.Now().UnixNano()
	t.Logf("Seed = %v", seed)
	src := rand.New(rand.NewSource(seed))
	numFail := 0
	for i := 0; i < n; i++ {
		zbuf.Reset()
		if i == 0 {
			// Start with at least one invalid one.
			zbuf.WriteString("00\x91\xff\xff\xff\xff\xc8")
		} else {
			for l := 0; l < Len; l++ {
				zbuf.WriteByte(byte(src.Intn(256)))
			}
		}

		buf.Reset()
		if err := huffmanDecode(&buf, 0, zbuf.Bytes()); err != nil {
			if err == ErrInvalidHuffman {
				numFail++
				continue
			}
			t.Errorf("Failed to decode %q: %v", zbuf.Bytes(), err)
			continue
		}
	}
	t.Logf("%0.02f%% are invalid (%d / %d)", 100*float64(numFail)/float64(n), numFail, n)
	if numFail < 1 {
		t.Error("expected at least one invalid huffman encoding (test starts with one)")
	}
}

func TestReadVarInt(t *testing.T) {
	type res struct {
		i        uint64
		consumed int
		err      error
	}
	tests := []struct {
		n    byte
		p    []byte
		want res
	}{
		// Fits in a byte:
		{1, []byte{0}, res{0, 1, nil}},
		{2, []byte{2}, res{2, 1, nil}},
		{3, []byte{6}, res{6, 1, nil}},
		{4, []byte{14}, res{14, 1, nil}},
		{5, []byte{30}, res{30, 1, nil}},
		{6, []byte{62}, res{62, 1, nil}},
		{7, []byte{126}, res{126, 1, nil}},
		{8, []byte{254}, res{254, 1, nil}},

		// Doesn't fit in a byte:
		{1, []byte{1}, res{0, 0, errNeedMore}},
		{2, []byte{3}, res{0, 0, errNeedMore}},
		{3, []byte{7}, res{0, 0, errNeedMore}},
		{4, []byte{15}, res{0, 0, errNeedMore}},
		{5, []byte{31}, res{0, 0, errNeedMore}},
		{6, []byte{63}, res{0, 0, errNeedMore}},
		{7, []byte{127}, res{0, 0, errNeedMore}},
		{8, []byte{255}, res{0, 0, errNeedMore}},

		// Ignoring top bits:
		{5, []byte{255, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 111
		{5, []byte{159, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 100
		{5, []byte{191, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 101

		// Extra byte:
		{5, []byte{191, 154, 10, 2}, res{1337, 3, nil}}, // extra byte

		// Short a byte:
		{5, []byte{191, 154}, res{0, 0, errNeedMore}},

		// integer overflow:
		{1, []byte{255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, res{0, 0, errVarintOverflow}},
	}
	for _, tt := range tests {
		i, remain, err := readVarInt(tt.n, tt.p)
		consumed := len(tt.p) - len(remain)
		got := res{i, consumed, err}
		if got != tt.want {
			t.Errorf("readVarInt(%d, %v ~ %x) = %+v; want %+v", tt.n, tt.p, tt.p, got, tt.want)
		}
	}
}

// Fuzz crash, originally reported at https://github.com/bradfitz/http2/issues/56
func TestHuffmanFuzzCrash(t *testing.T) {
	got, err := HuffmanDecodeToString([]byte("00\x91\xff\xff\xff\xff\xc8"))
	if got != "" {
		t.Errorf("Got %q; want empty string", got)
	}
	if err != ErrInvalidHuffman {
		t.Errorf("Err = %v; want ErrInvalidHuffman", err)
	}
}

func pair(name, value string) HeaderField {
	return HeaderField{Name: name, Value: value}
}

func dehex(s string) []byte {
	s = strings.Replace(s, " ", "", -1)
	s = strings.Replace(s, "\n", "", -1)
	b, err := hex.DecodeString(s)
	if err != nil {
		panic(err)
	}
	return b
}

func TestEmitEnabled(t *testing.T) {
	var buf bytes.Buffer
	enc := NewEncoder(&buf)
	enc.WriteField(HeaderField{Name: "foo", Value: "bar"})
	enc.WriteField(HeaderField{Name: "foo", Value: "bar"})

	numCallback := 0
	var dec *Decoder
	dec = NewDecoder(8<<20, func(HeaderField) {
		numCallback++
		dec.SetEmitEnabled(false)
	})
	if !dec.EmitEnabled() {
		t.Errorf("initial emit enabled = false; want true")
	}
	if _, err := dec.Write(buf.Bytes()); err != nil {
		t.Error(err)
	}
	if numCallback != 1 {
		t.Errorf("num callbacks = %d; want 1", numCallback)
	}
	if dec.EmitEnabled() {
		t.Errorf("emit enabled = true; want false")
	}
}

func TestSaveBufLimit(t *testing.T) {
	const maxStr = 1 << 10
	var got []HeaderField
	dec := NewDecoder(initialHeaderTableSize, func(hf HeaderField) {
		got = append(got, hf)
	})
	dec.SetMaxStringLength(maxStr)
	var frag []byte
	frag = append(frag[:0], encodeTypeByte(false, false))
	frag = appendVarInt(frag, 7, 3)
	frag = append(frag, "foo"...)
	frag = appendVarInt(frag, 7, 3)
	frag = append(frag, "bar"...)

	if _, err := dec.Write(frag); err != nil {
		t.Fatal(err)
	}

	want := []HeaderField{{Name: "foo", Value: "bar"}}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("After small writes, got %v; want %v", got, want)
	}

	frag = append(frag[:0], encodeTypeByte(false, false))
	frag = appendVarInt(frag, 7, maxStr*3)
	frag = append(frag, make([]byte, maxStr*3)...)

	_, err := dec.Write(frag)
	if err != ErrStringLength {
		t.Fatalf("Write error = %v; want ErrStringLength", err)
	}
}
|
212
vendor/golang.org/x/net/http2/hpack/huffman.go
generated
vendored
Normal file
|
@ -0,0 +1,212 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hpack
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var bufPool = sync.Pool{
|
||||
New: func() interface{} { return new(bytes.Buffer) },
|
||||
}
|
||||
|
||||
// HuffmanDecode decodes the string in v and writes the expanded
|
||||
// result to w, returning the number of bytes written to w and the
|
||||
// Write call's return value. At most one Write call is made.
|
||||
func HuffmanDecode(w io.Writer, v []byte) (int, error) {
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
defer bufPool.Put(buf)
|
||||
if err := huffmanDecode(buf, 0, v); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return w.Write(buf.Bytes())
|
||||
}
|
||||
|
||||
// HuffmanDecodeToString decodes the string in v.
|
||||
func HuffmanDecodeToString(v []byte) (string, error) {
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
defer bufPool.Put(buf)
|
||||
if err := huffmanDecode(buf, 0, v); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// ErrInvalidHuffman is returned for errors found decoding
|
||||
// Huffman-encoded strings.
|
||||
var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
|
||||
|
||||
// huffmanDecode decodes v to buf.
|
||||
// If maxLen is greater than 0, attempts to write more to buf than
|
||||
// maxLen bytes will return ErrStringLength.
|
||||
func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
|
||||
n := rootHuffmanNode
|
||||
// cur is the bit buffer that has not been fed into n.
|
||||
// cbits is the number of low order bits in cur that are valid.
|
||||
// sbits is the number of bits of the symbol prefix being decoded.
|
||||
cur, cbits, sbits := uint(0), uint8(0), uint8(0)
|
||||
for _, b := range v {
|
||||
cur = cur<<8 | uint(b)
|
||||
cbits += 8
|
||||
sbits += 8
|
||||
for cbits >= 8 {
|
||||
idx := byte(cur >> (cbits - 8))
|
||||
n = n.children[idx]
|
||||
if n == nil {
|
||||
return ErrInvalidHuffman
|
||||
}
|
||||
if n.children == nil {
|
||||
if maxLen != 0 && buf.Len() == maxLen {
|
||||
return ErrStringLength
|
||||
}
|
||||
buf.WriteByte(n.sym)
|
||||
cbits -= n.codeLen
|
||||
n = rootHuffmanNode
|
||||
sbits = cbits
|
||||
} else {
|
||||
cbits -= 8
|
||||
}
|
||||
}
|
||||
}
|
||||
for cbits > 0 {
|
||||
n = n.children[byte(cur<<(8-cbits))]
|
||||
if n == nil {
|
||||
return ErrInvalidHuffman
|
||||
}
|
||||
if n.children != nil || n.codeLen > cbits {
|
||||
break
|
||||
}
|
||||
if maxLen != 0 && buf.Len() == maxLen {
|
||||
return ErrStringLength
|
||||
}
|
||||
buf.WriteByte(n.sym)
|
||||
cbits -= n.codeLen
|
||||
n = rootHuffmanNode
|
||||
sbits = cbits
|
||||
}
|
||||
if sbits > 7 {
|
||||
// Either there was an incomplete symbol, or overlong padding.
|
||||
// Both are decoding errors per RFC 7541 section 5.2.
|
||||
return ErrInvalidHuffman
|
||||
}
|
||||
if mask := uint(1<<cbits - 1); cur&mask != mask {
|
||||
// Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
|
||||
return ErrInvalidHuffman
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type node struct {
|
||||
// children is non-nil for internal nodes
|
||||
children []*node
|
||||
|
||||
// The following are only valid if children is nil:
|
||||
codeLen uint8 // number of bits that led to the output of sym
|
||||
sym byte // output symbol
|
||||
}
|
||||
|
||||
func newInternalNode() *node {
|
||||
return &node{children: make([]*node, 256)}
|
||||
}
|
||||
|
||||
var rootHuffmanNode = newInternalNode()
|
||||
|
||||
func init() {
|
||||
if len(huffmanCodes) != 256 {
|
||||
panic("unexpected size")
|
||||
}
|
||||
for i, code := range huffmanCodes {
|
||||
addDecoderNode(byte(i), code, huffmanCodeLen[i])
|
||||
}
|
||||
}
|
||||
|
||||
func addDecoderNode(sym byte, code uint32, codeLen uint8) {
|
||||
cur := rootHuffmanNode
|
||||
for codeLen > 8 {
|
||||
codeLen -= 8
|
||||
i := uint8(code >> codeLen)
|
||||
if cur.children[i] == nil {
|
||||
cur.children[i] = newInternalNode()
|
||||
}
|
||||
cur = cur.children[i]
|
||||
}
|
||||
shift := 8 - codeLen
|
||||
start, end := int(uint8(code<<shift)), int(1<<shift)
|
||||
for i := start; i < start+end; i++ {
|
||||
cur.children[i] = &node{sym: sym, codeLen: codeLen}
|
||||
}
|
||||
}
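
// Illustrative sketch (hypothetical helper, not part of the vendored
// file): why addDecoderNode fills a range of children above. Each node
// level consumes a full byte of input, so a code whose final level has
// codeLen <= 8 remaining bits must occupy all 2^(8-codeLen) child
// slots that share those bits; decoding then needs one table lookup
// per input byte regardless of code length.
func decoderFanout(codeLen uint8) int {
	return 1 << (8 - codeLen) // child slots filled for one symbol
}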
|
||||
|
||||
// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
|
||||
// and returns the extended buffer.
|
||||
func AppendHuffmanString(dst []byte, s string) []byte {
|
||||
rembits := uint8(8)
|
||||
|
||||
for i := 0; i < len(s); i++ {
|
||||
if rembits == 8 {
|
||||
dst = append(dst, 0)
|
||||
}
|
||||
dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
|
||||
}
|
||||
|
||||
if rembits < 8 {
|
||||
// special EOS symbol
|
||||
code := uint32(0x3fffffff)
|
||||
nbits := uint8(30)
|
||||
|
||||
t := uint8(code >> (nbits - rembits))
|
||||
dst[len(dst)-1] |= t
|
||||
}
|
||||
|
||||
return dst
|
||||
}
|
||||
|
||||
// HuffmanEncodeLength returns the number of bytes required to encode
|
||||
// s in Huffman codes. The result is rounded up to the nearest byte boundary.
|
||||
func HuffmanEncodeLength(s string) uint64 {
|
||||
n := uint64(0)
|
||||
for i := 0; i < len(s); i++ {
|
||||
n += uint64(huffmanCodeLen[s[i]])
|
||||
}
|
||||
return (n + 7) / 8
|
||||
}
|
||||
|
||||
// appendByteToHuffmanCode appends Huffman code for c to dst and
|
||||
// returns the extended buffer and the remaining bits in the last
|
||||
// element. The appending is not byte aligned and the remaining bits
|
||||
// in the last element of dst are given in rembits.
|
||||
func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
|
||||
code := huffmanCodes[c]
|
||||
nbits := huffmanCodeLen[c]
|
||||
|
||||
for {
|
||||
if rembits > nbits {
|
||||
t := uint8(code << (rembits - nbits))
|
||||
dst[len(dst)-1] |= t
|
||||
rembits -= nbits
|
||||
break
|
||||
}
|
||||
|
||||
t := uint8(code >> (nbits - rembits))
|
||||
dst[len(dst)-1] |= t
|
||||
|
||||
nbits -= rembits
|
||||
rembits = 8
|
||||
|
||||
if nbits == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
dst = append(dst, 0)
|
||||
}
|
||||
|
||||
return dst, rembits
|
||||
}
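
// Illustrative sketch (hypothetical helper, not part of the vendored
// file): a round trip through the exported Huffman helpers above.
// HuffmanEncodeLength predicts the encoded size without allocating,
// and decoding the encoded bytes recovers the original string.
func huffmanRoundTrip(s string) (string, error) {
	enc := AppendHuffmanString(nil, s)
	if uint64(len(enc)) != HuffmanEncodeLength(s) {
		return "", errors.New("hpack: length prediction mismatch")
	}
	return HuffmanDecodeToString(enc) // should return s, nil
}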
|
479
vendor/golang.org/x/net/http2/hpack/tables.go
generated
vendored
Normal file
|
@ -0,0 +1,479 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hpack
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// headerFieldTable implements a list of HeaderFields.
|
||||
// This is used to implement the static and dynamic tables.
|
||||
type headerFieldTable struct {
|
||||
// For static tables, entries are never evicted.
|
||||
//
|
||||
// For dynamic tables, entries are evicted from ents[0] and added to the end.
|
||||
// Each entry has a unique id that starts at one and increments for each
|
||||
// entry that is added. This unique id is stable across evictions, meaning
|
||||
// it can be used as a pointer to a specific entry. As in hpack, unique ids
|
||||
// are 1-based. The unique id for ents[k] is k + evictCount + 1.
|
||||
//
|
||||
// Zero is not a valid unique id.
|
||||
//
|
||||
// evictCount should not overflow in any remotely practical situation. In
|
||||
// practice, we will have one dynamic table per HTTP/2 connection. If we
|
||||
// assume a very powerful server that handles 1M QPS per connection and each
|
||||
// request adds (then evicts) 100 entries from the table, it would still take
|
||||
// 2M years for evictCount to overflow.
|
||||
ents []HeaderField
|
||||
evictCount uint64
|
||||
|
||||
// byName maps a HeaderField name to the unique id of the newest entry with
|
||||
// the same name. See above for a definition of "unique id".
|
||||
byName map[string]uint64
|
||||
|
||||
// byNameValue maps a HeaderField name/value pair to the unique id of the newest
|
||||
// entry with the same name and value. See above for a definition of "unique id".
|
||||
byNameValue map[pairNameValue]uint64
|
||||
}
|
||||
|
||||
type pairNameValue struct {
|
||||
name, value string
|
||||
}
|
||||
|
||||
func (t *headerFieldTable) init() {
|
||||
t.byName = make(map[string]uint64)
|
||||
t.byNameValue = make(map[pairNameValue]uint64)
|
||||
}
|
||||
|
||||
// len reports the number of entries in the table.
|
||||
func (t *headerFieldTable) len() int {
|
||||
return len(t.ents)
|
||||
}
|
||||
|
||||
// addEntry adds a new entry.
|
||||
func (t *headerFieldTable) addEntry(f HeaderField) {
|
||||
id := uint64(t.len()) + t.evictCount + 1
|
||||
t.byName[f.Name] = id
|
||||
t.byNameValue[pairNameValue{f.Name, f.Value}] = id
|
||||
t.ents = append(t.ents, f)
|
||||
}
|
||||
|
||||
// evictOldest evicts the n oldest entries in the table.
|
||||
func (t *headerFieldTable) evictOldest(n int) {
|
||||
if n > t.len() {
|
||||
panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len()))
|
||||
}
|
||||
for k := 0; k < n; k++ {
|
||||
f := t.ents[k]
|
||||
id := t.evictCount + uint64(k) + 1
|
||||
if t.byName[f.Name] == id {
|
||||
delete(t.byName, f.Name)
|
||||
}
|
||||
if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id {
|
||||
delete(t.byNameValue, p)
|
||||
}
|
||||
}
|
||||
copy(t.ents, t.ents[n:])
|
||||
for k := t.len() - n; k < t.len(); k++ {
|
||||
t.ents[k] = HeaderField{} // so strings can be garbage collected
|
||||
}
|
||||
t.ents = t.ents[:t.len()-n]
|
||||
if t.evictCount+uint64(n) < t.evictCount {
|
||||
panic("evictCount overflow")
|
||||
}
|
||||
t.evictCount += uint64(n)
|
||||
}
|
||||
|
||||
// search finds f in the table. If there is no match, i is 0.
|
||||
// If both name and value match, i is the matched index and nameValueMatch
|
||||
// becomes true. If only name matches, i points to that index and
|
||||
// nameValueMatch becomes false.
|
||||
//
|
||||
// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says
|
||||
// that index 1 should be the newest entry, but t.ents[0] is the oldest entry,
|
||||
// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic
|
||||
// table, the return value i actually refers to the entry t.ents[t.len()-i].
|
||||
//
|
||||
// All tables are assumed to be dynamic tables except for the global
|
||||
// staticTable pointer.
|
||||
//
|
||||
// See Section 2.3.3.
|
||||
func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
|
||||
if !f.Sensitive {
|
||||
if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 {
|
||||
return t.idToIndex(id), true
|
||||
}
|
||||
}
|
||||
if id := t.byName[f.Name]; id != 0 {
|
||||
return t.idToIndex(id), false
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// idToIndex converts a unique id to an HPACK index.
|
||||
// See Section 2.3.3.
|
||||
func (t *headerFieldTable) idToIndex(id uint64) uint64 {
|
||||
if id <= t.evictCount {
|
||||
panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount))
|
||||
}
|
||||
k := id - t.evictCount - 1 // convert id to an index t.ents[k]
|
||||
if t != staticTable {
|
||||
return uint64(t.len()) - k // dynamic table
|
||||
}
|
||||
return k + 1
|
||||
}
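
// Illustrative sketch (hypothetical helper, not part of the vendored
// file): the unique-id arithmetic described above, in one place. After
// e evictions the entry stored at ents[k] has unique id k+e+1; in a
// dynamic table of length n that id maps to HPACK index n-k, so index
// 1 is always the newest entry.
func dynamicIDMath(k, evictCount, tableLen uint64) (id, hpackIndex uint64) {
	id = k + evictCount + 1
	hpackIndex = tableLen - k
	return id, hpackIndex
}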
|
||||
|
||||
// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
|
||||
var staticTable = newStaticTable()
|
||||
var staticTableEntries = [...]HeaderField{
|
||||
{Name: ":authority"},
|
||||
{Name: ":method", Value: "GET"},
|
||||
{Name: ":method", Value: "POST"},
|
||||
{Name: ":path", Value: "/"},
|
||||
{Name: ":path", Value: "/index.html"},
|
||||
{Name: ":scheme", Value: "http"},
|
||||
{Name: ":scheme", Value: "https"},
|
||||
{Name: ":status", Value: "200"},
|
||||
{Name: ":status", Value: "204"},
|
||||
{Name: ":status", Value: "206"},
|
||||
{Name: ":status", Value: "304"},
|
||||
{Name: ":status", Value: "400"},
|
||||
{Name: ":status", Value: "404"},
|
||||
{Name: ":status", Value: "500"},
|
||||
{Name: "accept-charset"},
|
||||
{Name: "accept-encoding", Value: "gzip, deflate"},
|
||||
{Name: "accept-language"},
|
||||
{Name: "accept-ranges"},
|
||||
{Name: "accept"},
|
||||
{Name: "access-control-allow-origin"},
|
||||
{Name: "age"},
|
||||
{Name: "allow"},
|
||||
{Name: "authorization"},
|
||||
{Name: "cache-control"},
|
||||
{Name: "content-disposition"},
|
||||
{Name: "content-encoding"},
|
||||
{Name: "content-language"},
|
||||
{Name: "content-length"},
|
||||
{Name: "content-location"},
|
||||
{Name: "content-range"},
|
||||
{Name: "content-type"},
|
||||
{Name: "cookie"},
|
||||
{Name: "date"},
|
||||
{Name: "etag"},
|
||||
{Name: "expect"},
|
||||
{Name: "expires"},
|
||||
{Name: "from"},
|
||||
{Name: "host"},
|
||||
{Name: "if-match"},
|
||||
{Name: "if-modified-since"},
|
||||
{Name: "if-none-match"},
|
||||
{Name: "if-range"},
|
||||
{Name: "if-unmodified-since"},
|
||||
{Name: "last-modified"},
|
||||
{Name: "link"},
|
||||
{Name: "location"},
|
||||
{Name: "max-forwards"},
|
||||
{Name: "proxy-authenticate"},
|
||||
{Name: "proxy-authorization"},
|
||||
{Name: "range"},
|
||||
{Name: "referer"},
|
||||
{Name: "refresh"},
|
||||
{Name: "retry-after"},
|
||||
{Name: "server"},
|
||||
{Name: "set-cookie"},
|
||||
{Name: "strict-transport-security"},
|
||||
{Name: "transfer-encoding"},
|
||||
{Name: "user-agent"},
|
||||
{Name: "vary"},
|
||||
{Name: "via"},
|
||||
{Name: "www-authenticate"},
|
||||
}
|
||||
|
||||
func newStaticTable() *headerFieldTable {
|
||||
t := &headerFieldTable{}
|
||||
t.init()
|
||||
for _, e := range staticTableEntries[:] {
|
||||
t.addEntry(e)
|
||||
}
|
||||
return t
|
||||
}
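
// Illustrative sketch (hypothetical helper, not part of the vendored
// file): what search returns against the static table above. A full
// name/value hit yields that entry's index; a name-only hit yields the
// newest entry with that name.
func staticSearchExamples() {
	i, exact := staticTable.search(HeaderField{Name: ":method", Value: "GET"})
	_, _ = i, exact // i == 2, exact == true
	i, exact = staticTable.search(HeaderField{Name: ":method", Value: "PATCH"})
	_, _ = i, exact // i == 3 (newest ":method" entry), exact == false
}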
|
||||
|
||||
var huffmanCodes = [256]uint32{
|
||||
0x1ff8,
|
||||
0x7fffd8,
|
||||
0xfffffe2,
|
||||
0xfffffe3,
|
||||
0xfffffe4,
|
||||
0xfffffe5,
|
||||
0xfffffe6,
|
||||
0xfffffe7,
|
||||
0xfffffe8,
|
||||
0xffffea,
|
||||
0x3ffffffc,
|
||||
0xfffffe9,
|
||||
0xfffffea,
|
||||
0x3ffffffd,
|
||||
0xfffffeb,
|
||||
0xfffffec,
|
||||
0xfffffed,
|
||||
0xfffffee,
|
||||
0xfffffef,
|
||||
0xffffff0,
|
||||
0xffffff1,
|
||||
0xffffff2,
|
||||
0x3ffffffe,
|
||||
0xffffff3,
|
||||
0xffffff4,
|
||||
0xffffff5,
|
||||
0xffffff6,
|
||||
0xffffff7,
|
||||
0xffffff8,
|
||||
0xffffff9,
|
||||
0xffffffa,
|
||||
0xffffffb,
|
||||
0x14,
|
||||
0x3f8,
|
||||
0x3f9,
|
||||
0xffa,
|
||||
0x1ff9,
|
||||
0x15,
|
||||
0xf8,
|
||||
0x7fa,
|
||||
0x3fa,
|
||||
0x3fb,
|
||||
0xf9,
|
||||
0x7fb,
|
||||
0xfa,
|
||||
0x16,
|
||||
0x17,
|
||||
0x18,
|
||||
0x0,
|
||||
0x1,
|
||||
0x2,
|
||||
0x19,
|
||||
0x1a,
|
||||
0x1b,
|
||||
0x1c,
|
||||
0x1d,
|
||||
0x1e,
|
||||
0x1f,
|
||||
0x5c,
|
||||
0xfb,
|
||||
0x7ffc,
|
||||
0x20,
|
||||
0xffb,
|
||||
0x3fc,
|
||||
0x1ffa,
|
||||
0x21,
|
||||
0x5d,
|
||||
0x5e,
|
||||
0x5f,
|
||||
0x60,
|
||||
0x61,
|
||||
0x62,
|
||||
0x63,
|
||||
0x64,
|
||||
0x65,
|
||||
0x66,
|
||||
0x67,
|
||||
0x68,
|
||||
0x69,
|
||||
0x6a,
|
||||
0x6b,
|
||||
0x6c,
|
||||
0x6d,
|
||||
0x6e,
|
||||
0x6f,
|
||||
0x70,
|
||||
0x71,
|
||||
0x72,
|
||||
0xfc,
|
||||
0x73,
|
||||
0xfd,
|
||||
0x1ffb,
|
||||
0x7fff0,
|
||||
0x1ffc,
|
||||
0x3ffc,
|
||||
0x22,
|
||||
0x7ffd,
|
||||
0x3,
|
||||
0x23,
|
||||
0x4,
|
||||
0x24,
|
||||
0x5,
|
||||
0x25,
|
||||
0x26,
|
||||
0x27,
|
||||
0x6,
|
||||
0x74,
|
||||
0x75,
|
||||
0x28,
|
||||
0x29,
|
||||
0x2a,
|
||||
0x7,
|
||||
0x2b,
|
||||
0x76,
|
||||
0x2c,
|
||||
0x8,
|
||||
0x9,
|
||||
0x2d,
|
||||
0x77,
|
||||
0x78,
|
||||
0x79,
|
||||
0x7a,
|
||||
0x7b,
|
||||
0x7ffe,
|
||||
0x7fc,
|
||||
0x3ffd,
|
||||
0x1ffd,
|
||||
0xffffffc,
|
||||
0xfffe6,
|
||||
0x3fffd2,
|
||||
0xfffe7,
|
||||
0xfffe8,
|
||||
0x3fffd3,
|
||||
0x3fffd4,
|
||||
0x3fffd5,
|
||||
0x7fffd9,
|
||||
0x3fffd6,
|
||||
0x7fffda,
|
||||
0x7fffdb,
|
||||
0x7fffdc,
|
||||
0x7fffdd,
|
||||
0x7fffde,
|
||||
0xffffeb,
|
||||
0x7fffdf,
|
||||
0xffffec,
|
||||
0xffffed,
|
||||
0x3fffd7,
|
||||
0x7fffe0,
|
||||
0xffffee,
|
||||
0x7fffe1,
|
||||
0x7fffe2,
|
||||
0x7fffe3,
|
||||
0x7fffe4,
|
||||
0x1fffdc,
|
||||
0x3fffd8,
|
||||
0x7fffe5,
|
||||
0x3fffd9,
|
||||
0x7fffe6,
|
||||
0x7fffe7,
|
||||
0xffffef,
|
||||
0x3fffda,
|
||||
0x1fffdd,
|
||||
0xfffe9,
|
||||
0x3fffdb,
|
||||
0x3fffdc,
|
||||
0x7fffe8,
|
||||
0x7fffe9,
|
||||
0x1fffde,
|
||||
0x7fffea,
|
||||
0x3fffdd,
|
||||
0x3fffde,
|
||||
0xfffff0,
|
||||
0x1fffdf,
|
||||
0x3fffdf,
|
||||
0x7fffeb,
|
||||
0x7fffec,
|
||||
0x1fffe0,
|
||||
0x1fffe1,
|
||||
0x3fffe0,
|
||||
0x1fffe2,
|
||||
0x7fffed,
|
||||
0x3fffe1,
|
||||
0x7fffee,
|
||||
0x7fffef,
|
||||
0xfffea,
|
||||
0x3fffe2,
|
||||
0x3fffe3,
|
||||
0x3fffe4,
|
||||
0x7ffff0,
|
||||
0x3fffe5,
|
||||
0x3fffe6,
|
||||
0x7ffff1,
|
||||
0x3ffffe0,
|
||||
0x3ffffe1,
|
||||
0xfffeb,
|
||||
0x7fff1,
|
||||
0x3fffe7,
|
||||
0x7ffff2,
|
||||
0x3fffe8,
|
||||
0x1ffffec,
|
||||
0x3ffffe2,
|
||||
0x3ffffe3,
|
||||
0x3ffffe4,
|
||||
0x7ffffde,
|
||||
0x7ffffdf,
|
||||
0x3ffffe5,
|
||||
0xfffff1,
|
||||
0x1ffffed,
|
||||
0x7fff2,
|
||||
0x1fffe3,
|
||||
0x3ffffe6,
|
||||
0x7ffffe0,
|
||||
0x7ffffe1,
|
||||
0x3ffffe7,
|
||||
0x7ffffe2,
|
||||
0xfffff2,
|
||||
0x1fffe4,
|
||||
0x1fffe5,
|
||||
0x3ffffe8,
|
||||
0x3ffffe9,
|
||||
0xffffffd,
|
||||
0x7ffffe3,
|
||||
0x7ffffe4,
|
||||
0x7ffffe5,
|
||||
0xfffec,
|
||||
0xfffff3,
|
||||
0xfffed,
|
||||
0x1fffe6,
|
||||
0x3fffe9,
|
||||
0x1fffe7,
|
||||
0x1fffe8,
|
||||
0x7ffff3,
|
||||
0x3fffea,
|
||||
0x3fffeb,
|
||||
0x1ffffee,
|
||||
0x1ffffef,
|
||||
0xfffff4,
|
||||
0xfffff5,
|
||||
0x3ffffea,
|
||||
0x7ffff4,
|
||||
0x3ffffeb,
|
||||
0x7ffffe6,
|
||||
0x3ffffec,
|
||||
0x3ffffed,
|
||||
0x7ffffe7,
|
||||
0x7ffffe8,
|
||||
0x7ffffe9,
|
||||
0x7ffffea,
|
||||
0x7ffffeb,
|
||||
0xffffffe,
|
||||
0x7ffffec,
|
||||
0x7ffffed,
|
||||
0x7ffffee,
|
||||
0x7ffffef,
|
||||
0x7fffff0,
|
||||
0x3ffffee,
|
||||
}
|
||||
|
||||
var huffmanCodeLen = [256]uint8{
|
||||
13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
|
||||
28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
|
||||
6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
|
||||
5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
|
||||
13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
|
||||
7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
|
||||
15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
|
||||
6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
|
||||
20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
|
||||
24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
|
||||
22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
|
||||
21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
|
||||
26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
|
||||
19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
|
||||
20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
|
||||
26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
|
||||
}
|
214
vendor/golang.org/x/net/http2/hpack/tables_test.go
generated
vendored
Normal file
|
@ -0,0 +1,214 @@
|
|||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hpack
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestHeaderFieldTable(t *testing.T) {
|
||||
table := &headerFieldTable{}
|
||||
table.init()
|
||||
table.addEntry(pair("key1", "value1-1"))
|
||||
table.addEntry(pair("key2", "value2-1"))
|
||||
table.addEntry(pair("key1", "value1-2"))
|
||||
table.addEntry(pair("key3", "value3-1"))
|
||||
table.addEntry(pair("key4", "value4-1"))
|
||||
table.addEntry(pair("key2", "value2-2"))
|
||||
|
||||
// Tests will be run twice: once before evicting anything, and
|
||||
// again after evicting the three oldest entries.
|
||||
tests := []struct {
|
||||
f HeaderField
|
||||
beforeWantStaticI uint64
|
||||
beforeWantMatch bool
|
||||
afterWantStaticI uint64
|
||||
afterWantMatch bool
|
||||
}{
|
||||
{HeaderField{"key1", "value1-1", false}, 1, true, 0, false},
|
||||
{HeaderField{"key1", "value1-2", false}, 3, true, 0, false},
|
||||
{HeaderField{"key1", "value1-3", false}, 3, false, 0, false},
|
||||
{HeaderField{"key2", "value2-1", false}, 2, true, 3, false},
|
||||
{HeaderField{"key2", "value2-2", false}, 6, true, 3, true},
|
||||
{HeaderField{"key2", "value2-3", false}, 6, false, 3, false},
|
||||
{HeaderField{"key4", "value4-1", false}, 5, true, 2, true},
|
||||
// Name match only, because sensitive.
|
||||
{HeaderField{"key4", "value4-1", true}, 5, false, 2, false},
|
||||
// Key not found.
|
||||
{HeaderField{"key5", "value5-x", false}, 0, false, 0, false},
|
||||
}
|
||||
|
||||
staticToDynamic := func(i uint64) uint64 {
|
||||
if i == 0 {
|
||||
return 0
|
||||
}
|
||||
return uint64(table.len()) - i + 1 // dynamic is the reversed table
|
||||
}
|
||||
|
||||
searchStatic := func(f HeaderField) (uint64, bool) {
|
||||
old := staticTable
|
||||
staticTable = table
|
||||
defer func() { staticTable = old }()
|
||||
return staticTable.search(f)
|
||||
}
|
||||
|
||||
searchDynamic := func(f HeaderField) (uint64, bool) {
|
||||
return table.search(f)
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
gotI, gotMatch := searchStatic(test.f)
|
||||
if wantI, wantMatch := test.beforeWantStaticI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch {
|
||||
t.Errorf("before evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch)
|
||||
}
|
||||
gotI, gotMatch = searchDynamic(test.f)
|
||||
wantDynamicI := staticToDynamic(test.beforeWantStaticI)
|
||||
if wantI, wantMatch := wantDynamicI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch {
|
||||
t.Errorf("before evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch)
|
||||
}
|
||||
}
|
||||
|
||||
table.evictOldest(3)
|
||||
|
||||
for _, test := range tests {
|
||||
gotI, gotMatch := searchStatic(test.f)
|
||||
if wantI, wantMatch := test.afterWantStaticI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch {
|
||||
t.Errorf("after evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch)
|
||||
}
|
||||
gotI, gotMatch = searchDynamic(test.f)
|
||||
wantDynamicI := staticToDynamic(test.afterWantStaticI)
|
||||
if wantI, wantMatch := wantDynamicI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch {
|
||||
t.Errorf("after evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeaderFieldTable_LookupMapEviction(t *testing.T) {
|
||||
table := &headerFieldTable{}
|
||||
table.init()
|
||||
table.addEntry(pair("key1", "value1-1"))
|
||||
table.addEntry(pair("key2", "value2-1"))
|
||||
table.addEntry(pair("key1", "value1-2"))
|
||||
table.addEntry(pair("key3", "value3-1"))
|
||||
table.addEntry(pair("key4", "value4-1"))
|
||||
table.addEntry(pair("key2", "value2-2"))
|
||||
|
||||
// evict all pairs
|
||||
table.evictOldest(table.len())
|
||||
|
||||
if l := table.len(); l > 0 {
|
||||
t.Errorf("table.len() = %d, want 0", l)
|
||||
}
|
||||
|
||||
if l := len(table.byName); l > 0 {
|
||||
t.Errorf("len(table.byName) = %d, want 0", l)
|
||||
}
|
||||
|
||||
if l := len(table.byNameValue); l > 0 {
|
||||
t.Errorf("len(table.byNameValue) = %d, want 0", l)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStaticTable(t *testing.T) {
|
||||
fromSpec := `
|
||||
+-------+-----------------------------+---------------+
|
||||
| 1 | :authority | |
|
||||
| 2 | :method | GET |
|
||||
| 3 | :method | POST |
|
||||
| 4 | :path | / |
|
||||
| 5 | :path | /index.html |
|
||||
| 6 | :scheme | http |
|
||||
| 7 | :scheme | https |
|
||||
| 8 | :status | 200 |
|
||||
| 9 | :status | 204 |
|
||||
| 10 | :status | 206 |
|
||||
| 11 | :status | 304 |
|
||||
| 12 | :status | 400 |
|
||||
| 13 | :status | 404 |
|
||||
| 14 | :status | 500 |
|
||||
| 15 | accept-charset | |
|
||||
| 16 | accept-encoding | gzip, deflate |
|
||||
| 17 | accept-language | |
|
||||
| 18 | accept-ranges | |
|
||||
| 19 | accept | |
|
||||
| 20 | access-control-allow-origin | |
|
||||
| 21 | age | |
|
||||
| 22 | allow | |
|
||||
| 23 | authorization | |
|
||||
| 24 | cache-control | |
|
||||
| 25 | content-disposition | |
|
||||
| 26 | content-encoding | |
|
||||
| 27 | content-language | |
|
||||
| 28 | content-length | |
|
||||
| 29 | content-location | |
|
||||
| 30 | content-range | |
|
||||
| 31 | content-type | |
|
||||
| 32 | cookie | |
|
||||
| 33 | date | |
|
||||
| 34 | etag | |
|
||||
| 35 | expect | |
|
||||
| 36 | expires | |
|
||||
| 37 | from | |
|
||||
| 38 | host | |
|
||||
| 39 | if-match | |
|
||||
| 40 | if-modified-since | |
|
||||
| 41 | if-none-match | |
|
||||
| 42 | if-range | |
|
||||
| 43 | if-unmodified-since | |
|
||||
| 44 | last-modified | |
|
||||
| 45 | link | |
|
||||
| 46 | location | |
|
||||
| 47 | max-forwards | |
|
||||
| 48 | proxy-authenticate | |
|
||||
| 49 | proxy-authorization | |
|
||||
| 50 | range | |
|
||||
| 51 | referer | |
|
||||
| 52 | refresh | |
|
||||
| 53 | retry-after | |
|
||||
| 54 | server | |
|
||||
| 55 | set-cookie | |
|
||||
| 56 | strict-transport-security | |
|
||||
| 57 | transfer-encoding | |
|
||||
| 58 | user-agent | |
|
||||
| 59 | vary | |
|
||||
| 60 | via | |
|
||||
| 61 | www-authenticate | |
|
||||
+-------+-----------------------------+---------------+
|
||||
`
|
||||
bs := bufio.NewScanner(strings.NewReader(fromSpec))
|
||||
re := regexp.MustCompile(`\| (\d+)\s+\| (\S+)\s*\| (\S(.*\S)?)?\s+\|`)
|
||||
for bs.Scan() {
|
||||
l := bs.Text()
|
||||
if !strings.Contains(l, "|") {
|
||||
continue
|
||||
}
|
||||
m := re.FindStringSubmatch(l)
|
||||
if m == nil {
|
||||
continue
|
||||
}
|
||||
i, err := strconv.Atoi(m[1])
|
||||
if err != nil {
|
||||
t.Errorf("Bogus integer on line %q", l)
|
||||
continue
|
||||
}
|
||||
if i < 1 || i > staticTable.len() {
|
||||
t.Errorf("Bogus index %d on line %q", i, l)
|
||||
continue
|
||||
}
|
||||
if got, want := staticTable.ents[i-1].Name, m[2]; got != want {
|
||||
t.Errorf("header index %d name = %q; want %q", i, got, want)
|
||||
}
|
||||
if got, want := staticTable.ents[i-1].Value, m[3]; got != want {
|
||||
t.Errorf("header index %d value = %q; want %q", i, got, want)
|
||||
}
|
||||
}
|
||||
if err := bs.Err(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
391
vendor/golang.org/x/net/http2/http2.go
generated
vendored
Normal file
|
@ -0,0 +1,391 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package http2 implements the HTTP/2 protocol.
|
||||
//
|
||||
// This package is low-level and intended to be used directly by very
|
||||
// few people. Most users will use it indirectly through the automatic
|
||||
// use by the net/http package (from Go 1.6 and later).
|
||||
// For use in earlier Go versions see ConfigureServer. (Transport support
|
||||
// requires Go 1.6 or later)
|
||||
//
|
||||
// See https://http2.github.io/ for more information on HTTP/2.
|
||||
//
|
||||
// See https://http2.golang.org/ for a test server running this code.
|
||||
//
|
||||
package http2 // import "golang.org/x/net/http2"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/net/lex/httplex"
|
||||
)
|
||||
|
||||
var (
|
||||
VerboseLogs bool
|
||||
logFrameWrites bool
|
||||
logFrameReads bool
|
||||
inTests bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
e := os.Getenv("GODEBUG")
|
||||
if strings.Contains(e, "http2debug=1") {
|
||||
VerboseLogs = true
|
||||
}
|
||||
if strings.Contains(e, "http2debug=2") {
|
||||
VerboseLogs = true
|
||||
logFrameWrites = true
|
||||
logFrameReads = true
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
// ClientPreface is the string that must be sent by new
|
||||
// connections from clients.
|
||||
ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
|
||||
|
||||
// SETTINGS_MAX_FRAME_SIZE default
|
||||
// http://http2.github.io/http2-spec/#rfc.section.6.5.2
|
||||
initialMaxFrameSize = 16384
|
||||
|
||||
// NextProtoTLS is the NPN/ALPN protocol negotiated during
|
||||
// HTTP/2's TLS setup.
|
||||
NextProtoTLS = "h2"
|
||||
|
||||
// http://http2.github.io/http2-spec/#SettingValues
|
||||
initialHeaderTableSize = 4096
|
||||
|
||||
initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size
|
||||
|
||||
defaultMaxReadFrameSize = 1 << 20
|
||||
)
|
||||
|
||||
var (
|
||||
clientPreface = []byte(ClientPreface)
|
||||
)
|
||||
|
||||
type streamState int
|
||||
|
||||
// HTTP/2 stream states.
|
||||
//
|
||||
// See http://tools.ietf.org/html/rfc7540#section-5.1.
|
||||
//
|
||||
// For simplicity, the server code merges "reserved (local)" into
|
||||
// "half-closed (remote)". This is one less state transition to track.
|
||||
// The only downside is that we send PUSH_PROMISEs slightly less
|
||||
// liberally than allowable. More discussion here:
|
||||
// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html
|
||||
//
|
||||
// "reserved (remote)" is omitted since the client code does not
|
||||
// support server push.
|
||||
const (
|
||||
stateIdle streamState = iota
|
||||
stateOpen
|
||||
stateHalfClosedLocal
|
||||
stateHalfClosedRemote
|
||||
stateClosed
|
||||
)
|
||||
|
||||
var stateName = [...]string{
|
||||
stateIdle: "Idle",
|
||||
stateOpen: "Open",
|
||||
stateHalfClosedLocal: "HalfClosedLocal",
|
||||
stateHalfClosedRemote: "HalfClosedRemote",
|
||||
stateClosed: "Closed",
|
||||
}
|
||||
|
||||
func (st streamState) String() string {
|
||||
return stateName[st]
|
||||
}
|
||||
|
||||
// Setting is a setting parameter: which setting it is, and its value.
|
||||
type Setting struct {
|
||||
// ID is which setting is being set.
|
||||
// See http://http2.github.io/http2-spec/#SettingValues
|
||||
ID SettingID
|
||||
|
||||
// Val is the value.
|
||||
Val uint32
|
||||
}
|
||||
|
||||
func (s Setting) String() string {
|
||||
return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
|
||||
}
|
||||
|
||||
// Valid reports whether the setting is valid.
|
||||
func (s Setting) Valid() error {
|
||||
// Limits and error codes from 6.5.2 Defined SETTINGS Parameters
|
||||
switch s.ID {
|
||||
case SettingEnablePush:
|
||||
if s.Val != 1 && s.Val != 0 {
|
||||
return ConnectionError(ErrCodeProtocol)
|
||||
}
|
||||
case SettingInitialWindowSize:
|
||||
if s.Val > 1<<31-1 {
|
||||
return ConnectionError(ErrCodeFlowControl)
|
||||
}
|
||||
case SettingMaxFrameSize:
|
||||
if s.Val < 16384 || s.Val > 1<<24-1 {
|
||||
return ConnectionError(ErrCodeProtocol)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
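
// Illustrative sketch (hypothetical values, not part of the vendored
// file): the RFC 7540 section 6.5.2 limits that Valid enforces.
func settingValidationExamples() {
	ok := Setting{ID: SettingMaxFrameSize, Val: 1 << 14}       // Valid() == nil: 16384 is the minimum allowed
	bad := Setting{ID: SettingInitialWindowSize, Val: 1 << 31} // Valid() == ConnectionError(ErrCodeFlowControl)
	_, _ = ok.Valid(), bad.Valid()
}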
|
||||
|
||||
// A SettingID is an HTTP/2 setting as defined in
|
||||
// http://http2.github.io/http2-spec/#iana-settings
|
||||
type SettingID uint16
|
||||
|
||||
const (
|
||||
SettingHeaderTableSize SettingID = 0x1
|
||||
SettingEnablePush SettingID = 0x2
|
||||
SettingMaxConcurrentStreams SettingID = 0x3
|
||||
SettingInitialWindowSize SettingID = 0x4
|
||||
SettingMaxFrameSize SettingID = 0x5
|
||||
SettingMaxHeaderListSize SettingID = 0x6
|
||||
)
|
||||
|
||||
var settingName = map[SettingID]string{
|
||||
SettingHeaderTableSize: "HEADER_TABLE_SIZE",
|
||||
SettingEnablePush: "ENABLE_PUSH",
|
||||
SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
|
||||
SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
|
||||
SettingMaxFrameSize: "MAX_FRAME_SIZE",
|
||||
SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
|
||||
}
|
||||
|
||||
func (s SettingID) String() string {
|
||||
if v, ok := settingName[s]; ok {
|
||||
return v
|
||||
}
|
||||
return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
|
||||
}
|
||||
|
||||
var (
|
||||
errInvalidHeaderFieldName = errors.New("http2: invalid header field name")
|
||||
errInvalidHeaderFieldValue = errors.New("http2: invalid header field value")
|
||||
)
|
||||
|
||||
// validWireHeaderFieldName reports whether v is a valid header field
|
||||
// name (key). See httplex.ValidHeaderName for the base rules.
|
||||
//
|
||||
// Further, http2 says:
|
||||
// "Just as in HTTP/1.x, header field names are strings of ASCII
|
||||
// characters that are compared in a case-insensitive
|
||||
// fashion. However, header field names MUST be converted to
|
||||
// lowercase prior to their encoding in HTTP/2. "
|
||||
func validWireHeaderFieldName(v string) bool {
|
||||
if len(v) == 0 {
|
||||
return false
|
||||
}
|
||||
for _, r := range v {
|
||||
if !httplex.IsTokenRune(r) {
|
||||
return false
|
||||
}
|
||||
if 'A' <= r && r <= 'Z' {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
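
// Illustrative sketch (not part of the vendored file): wire-format
// header names must be non-empty, all-lowercase tokens.
var _ = []bool{
	validWireHeaderFieldName("content-type"), // true
	validWireHeaderFieldName("Content-Type"), // false: uppercase
	validWireHeaderFieldName(""),             // false: empty
	validWireHeaderFieldName("bad header"),   // false: space is not a token rune
}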
|
||||
|
||||
var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n)
|
||||
|
||||
func init() {
|
||||
for i := 100; i <= 999; i++ {
|
||||
if v := http.StatusText(i); v != "" {
|
||||
httpCodeStringCommon[i] = strconv.Itoa(i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func httpCodeString(code int) string {
|
||||
if s, ok := httpCodeStringCommon[code]; ok {
|
||||
return s
|
||||
}
|
||||
return strconv.Itoa(code)
|
||||
}
|
||||
|
||||
// from pkg io
|
||||
type stringWriter interface {
|
||||
WriteString(s string) (n int, err error)
|
||||
}
|
||||
|
||||
// A gate lets two goroutines coordinate their activities.
|
||||
type gate chan struct{}
|
||||
|
||||
func (g gate) Done() { g <- struct{}{} }
|
||||
func (g gate) Wait() { <-g }
|
||||
|
||||
// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
|
||||
type closeWaiter chan struct{}
|
||||
|
||||
// Init makes a closeWaiter usable.
|
||||
// It exists so that a closeWaiter value can be placed inside a
|
||||
// larger struct and have the Mutex and Cond's memory in the same
|
||||
// allocation.
|
||||
func (cw *closeWaiter) Init() {
|
||||
*cw = make(chan struct{})
|
||||
}
|
||||
|
||||
// Close marks the closeWaiter as closed and unblocks any waiters.
|
||||
func (cw closeWaiter) Close() {
|
||||
close(cw)
|
||||
}
|
||||
|
||||
// Wait waits for the closeWaiter to become closed.
|
||||
func (cw closeWaiter) Wait() {
|
||||
<-cw
|
||||
}
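
// Illustrative sketch (hypothetical helper, not part of the vendored
// file): closeWaiter is a one-shot latch. Init it once, Close it once,
// and any number of goroutines may Wait before or after the Close.
func closeWaiterUsage() {
	var cw closeWaiter
	cw.Init()
	go cw.Close() // unblocks every current and future Wait
	cw.Wait()
}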
|
||||
|
||||
// bufferedWriter is a buffered writer that writes to w.
|
||||
// Its buffered writer is lazily allocated as needed, to minimize
|
||||
// idle memory usage with many connections.
|
||||
type bufferedWriter struct {
|
||||
w io.Writer // immutable
|
||||
bw *bufio.Writer // non-nil when data is buffered
|
||||
}
|
||||
|
||||
func newBufferedWriter(w io.Writer) *bufferedWriter {
|
||||
return &bufferedWriter{w: w}
|
||||
}
|
||||
|
||||
// bufWriterPoolBufferSize is the size of bufio.Writer's
|
||||
// buffers created using bufWriterPool.
|
||||
//
|
||||
// TODO: pick a less arbitrary value? this is a bit under
|
||||
// (3 x typical 1500 byte MTU) at least. Other than that,
|
||||
// not much thought went into it.
|
||||
const bufWriterPoolBufferSize = 4 << 10
|
||||
|
||||
var bufWriterPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return bufio.NewWriterSize(nil, bufWriterPoolBufferSize)
|
||||
},
|
||||
}
|
||||
|
||||
func (w *bufferedWriter) Available() int {
|
||||
if w.bw == nil {
|
||||
return bufWriterPoolBufferSize
|
||||
}
|
||||
return w.bw.Available()
|
||||
}
|
||||
|
||||
func (w *bufferedWriter) Write(p []byte) (n int, err error) {
|
||||
if w.bw == nil {
|
||||
bw := bufWriterPool.Get().(*bufio.Writer)
|
||||
bw.Reset(w.w)
|
||||
w.bw = bw
|
||||
}
|
||||
return w.bw.Write(p)
|
||||
}
|
||||
|
||||
func (w *bufferedWriter) Flush() error {
|
||||
bw := w.bw
|
||||
if bw == nil {
|
||||
return nil
|
||||
}
|
||||
err := bw.Flush()
|
||||
bw.Reset(nil)
|
||||
bufWriterPool.Put(bw)
|
||||
w.bw = nil
|
||||
return err
|
||||
}
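
// Illustrative sketch (hypothetical helper, not part of the vendored
// file): typical bufferedWriter use. The bufio.Writer is taken from
// bufWriterPool on the first Write and returned on Flush, so a
// connection that is idle between frames pins no buffer memory.
func bufferedWriterUsage(dst io.Writer) error {
	bw := newBufferedWriter(dst)
	if _, err := bw.Write([]byte("frame bytes")); err != nil {
		return err
	}
	return bw.Flush() // returns the bufio.Writer to the pool
}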
|
||||
|
||||
func mustUint31(v int32) uint32 {
|
||||
if v < 0 || v > 2147483647 {
|
||||
panic("out of range")
|
||||
}
|
||||
return uint32(v)
|
||||
}
|
||||
|
||||
// bodyAllowedForStatus reports whether a given response status code
|
||||
// permits a body. See RFC 7230, section 3.3.
|
||||
func bodyAllowedForStatus(status int) bool {
|
||||
switch {
|
||||
case status >= 100 && status <= 199:
|
||||
return false
|
||||
case status == 204:
|
||||
return false
|
||||
case status == 304:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
type httpError struct {
|
||||
msg string
|
||||
timeout bool
|
||||
}
|
||||
|
||||
func (e *httpError) Error() string { return e.msg }
|
||||
func (e *httpError) Timeout() bool { return e.timeout }
|
||||
func (e *httpError) Temporary() bool { return true }
|
||||
|
||||
var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}
|
||||
|
||||
type connectionStater interface {
|
||||
ConnectionState() tls.ConnectionState
|
||||
}
|
||||
|
||||
var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }}
|
||||
|
||||
type sorter struct {
|
||||
v []string // owned by sorter
|
||||
}
|
||||
|
||||
func (s *sorter) Len() int { return len(s.v) }
|
||||
func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] }
|
||||
func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }
|
||||
|
||||
// Keys returns the sorted keys of h.
|
||||
//
|
||||
// The returned slice is only valid until s is used again or returned to
|
||||
// its pool.
|
||||
func (s *sorter) Keys(h http.Header) []string {
|
||||
keys := s.v[:0]
|
||||
for k := range h {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
s.v = keys
|
||||
sort.Sort(s)
|
||||
return keys
|
||||
}
|
||||
|
||||
func (s *sorter) SortStrings(ss []string) {
|
||||
// Our sorter works on s.v, which sorter owns, so
|
||||
// stash it away while we sort the user's buffer.
|
||||
save := s.v
|
||||
s.v = ss
|
||||
sort.Sort(s)
|
||||
s.v = save
|
||||
}
|
||||
|
||||
// validPseudoPath reports whether v is a valid :path pseudo-header
|
||||
// value. It must be either:
|
||||
//
|
||||
// *) a non-empty string starting with '/'
|
||||
// *) the string '*', for OPTIONS requests.
|
||||
//
|
||||
// For now this is only used as a quick check for deciding when to clean
|
||||
// up Opaque URLs before sending requests from the Transport.
|
||||
// See golang.org/issue/16847
|
||||
//
|
||||
// We used to enforce that the path also didn't start with "//", but
|
||||
// Google's GFE accepts such paths and Chrome sends them, so ignore
|
||||
// that part of the spec. See golang.org/issue/19103.
|
||||
func validPseudoPath(v string) bool {
|
||||
return (len(v) > 0 && v[0] == '/') || v == "*"
|
||||
}
|
199
vendor/golang.org/x/net/http2/http2_test.go
generated
vendored
Normal file
|
@ -0,0 +1,199 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/net/http2/hpack"
|
||||
)
|
||||
|
||||
var knownFailing = flag.Bool("known_failing", false, "Run known-failing tests.")
|
||||
|
||||
func condSkipFailingTest(t *testing.T) {
|
||||
if !*knownFailing {
|
||||
t.Skip("Skipping known-failing test without --known_failing")
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
inTests = true
|
||||
DebugGoroutines = true
|
||||
flag.BoolVar(&VerboseLogs, "verboseh2", VerboseLogs, "Verbose HTTP/2 debug logging")
|
||||
}
|
||||
|
||||
func TestSettingString(t *testing.T) {
|
||||
tests := []struct {
|
||||
s Setting
|
||||
want string
|
||||
}{
|
||||
{Setting{SettingMaxFrameSize, 123}, "[MAX_FRAME_SIZE = 123]"},
|
||||
{Setting{1<<16 - 1, 123}, "[UNKNOWN_SETTING_65535 = 123]"},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
got := fmt.Sprint(tt.s)
|
||||
if got != tt.want {
|
||||
t.Errorf("%d. for %#v, string = %q; want %q", i, tt.s, got, tt.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type twriter struct {
|
||||
t testing.TB
|
||||
st *serverTester // optional
|
||||
}
|
||||
|
||||
func (w twriter) Write(p []byte) (n int, err error) {
|
||||
if w.st != nil {
|
||||
ps := string(p)
|
||||
for _, phrase := range w.st.logFilter {
|
||||
if strings.Contains(ps, phrase) {
|
||||
return len(p), nil // no logging
|
||||
}
|
||||
}
|
||||
}
|
||||
w.t.Logf("%s", p)
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// like encodeHeader, but doesn't add implicit pseudo headers.
|
||||
func encodeHeaderNoImplicit(t *testing.T, headers ...string) []byte {
|
||||
var buf bytes.Buffer
|
||||
enc := hpack.NewEncoder(&buf)
|
||||
for len(headers) > 0 {
|
||||
k, v := headers[0], headers[1]
|
||||
headers = headers[2:]
|
||||
if err := enc.WriteField(hpack.HeaderField{Name: k, Value: v}); err != nil {
|
||||
t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err)
|
||||
}
|
||||
}
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// Verify that curl has http2.
|
||||
func requireCurl(t *testing.T) {
|
||||
out, err := dockerLogs(curl(t, "--version"))
|
||||
if err != nil {
|
||||
t.Skipf("failed to determine curl features; skipping test")
|
||||
}
|
||||
if !strings.Contains(string(out), "HTTP2") {
|
||||
t.Skip("curl doesn't support HTTP2; skipping test")
|
||||
}
|
||||
}
|
||||
|
||||
func curl(t *testing.T, args ...string) (container string) {
|
||||
out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "gohttp2/curl"}, args...)...).Output()
|
||||
if err != nil {
|
||||
t.Skipf("Failed to run curl in docker: %v, %s", err, out)
|
||||
}
|
||||
return strings.TrimSpace(string(out))
|
||||
}
|
||||
|
||||
// Verify that h2load exists.
|
||||
func requireH2load(t *testing.T) {
|
||||
out, err := dockerLogs(h2load(t, "--version"))
|
||||
if err != nil {
|
||||
t.Skipf("failed to probe h2load; skipping test: %s", out)
|
||||
}
|
||||
if !strings.Contains(string(out), "h2load nghttp2/") {
|
||||
t.Skipf("h2load not present; skipping test. (Output=%q)", out)
|
||||
}
|
||||
}
|
||||
|
||||
func h2load(t *testing.T, args ...string) (container string) {
|
||||
out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl"}, args...)...).Output()
|
||||
if err != nil {
|
||||
t.Skipf("Failed to run h2load in docker: %v, %s", err, out)
|
||||
}
|
||||
return strings.TrimSpace(string(out))
|
||||
}
|
||||
|
||||
type puppetCommand struct {
|
||||
fn func(w http.ResponseWriter, r *http.Request)
|
||||
done chan<- bool
|
||||
}
|
||||
|
||||
type handlerPuppet struct {
|
||||
ch chan puppetCommand
|
||||
}
|
||||
|
||||
func newHandlerPuppet() *handlerPuppet {
|
||||
return &handlerPuppet{
|
||||
ch: make(chan puppetCommand),
|
||||
}
|
||||
}
|
||||
|
||||
func (p *handlerPuppet) act(w http.ResponseWriter, r *http.Request) {
|
||||
for cmd := range p.ch {
|
||||
cmd.fn(w, r)
|
||||
cmd.done <- true
|
||||
}
|
||||
}
|
||||
|
||||
func (p *handlerPuppet) done() { close(p.ch) }
|
||||
func (p *handlerPuppet) do(fn func(http.ResponseWriter, *http.Request)) {
|
||||
done := make(chan bool)
|
||||
p.ch <- puppetCommand{fn, done}
|
||||
<-done
|
||||
}
|
||||
func dockerLogs(container string) ([]byte, error) {
|
||||
out, err := exec.Command("docker", "wait", container).CombinedOutput()
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
exitStatus, err := strconv.Atoi(strings.TrimSpace(string(out)))
|
||||
if err != nil {
|
||||
return out, errors.New("unexpected exit status from docker wait")
|
||||
}
|
||||
out, err = exec.Command("docker", "logs", container).CombinedOutput()
|
||||
exec.Command("docker", "rm", container).Run()
|
||||
if err == nil && exitStatus != 0 {
|
||||
err = fmt.Errorf("exit status %d: %s", exitStatus, out)
|
||||
}
|
||||
return out, err
|
||||
}
|
||||
|
||||
func kill(container string) {
|
||||
exec.Command("docker", "kill", container).Run()
|
||||
exec.Command("docker", "rm", container).Run()
|
||||
}
|
||||
|
||||
func cleanDate(res *http.Response) {
|
||||
if d := res.Header["Date"]; len(d) == 1 {
|
||||
d[0] = "XXX"
|
||||
}
|
||||
}
|
||||
|
||||
func TestSorterPoolAllocs(t *testing.T) {
|
||||
ss := []string{"a", "b", "c"}
|
||||
h := http.Header{
|
||||
"a": nil,
|
||||
"b": nil,
|
||||
"c": nil,
|
||||
}
|
||||
sorter := new(sorter)
|
||||
|
||||
if allocs := testing.AllocsPerRun(100, func() {
|
||||
sorter.SortStrings(ss)
|
||||
}); allocs >= 1 {
|
||||
t.Logf("SortStrings allocs = %v; want <1", allocs)
|
||||
}
|
||||
|
||||
if allocs := testing.AllocsPerRun(5, func() {
|
||||
if len(sorter.Keys(h)) != 3 {
|
||||
t.Fatal("wrong result")
|
||||
}
|
||||
}); allocs > 0 {
|
||||
t.Logf("Keys allocs = %v; want <1", allocs)
|
||||
}
|
||||
}
|
21
vendor/golang.org/x/net/http2/not_go16.go
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.6
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
func configureTransport(t1 *http.Transport) (*Transport, error) {
|
||||
return nil, errTransportVersion
|
||||
}
|
||||
|
||||
func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
|
||||
return 0
|
||||
}
|
87
vendor/golang.org/x/net/http2/not_go17.go
generated
vendored
Normal file
|
@ -0,0 +1,87 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.7
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
type contextContext interface {
|
||||
Done() <-chan struct{}
|
||||
Err() error
|
||||
}
|
||||
|
||||
type fakeContext struct{}
|
||||
|
||||
func (fakeContext) Done() <-chan struct{} { return nil }
|
||||
func (fakeContext) Err() error { panic("should not be called") }
|
||||
|
||||
func reqContext(r *http.Request) fakeContext {
|
||||
return fakeContext{}
|
||||
}
|
||||
|
||||
func setResponseUncompressed(res *http.Response) {
|
||||
// Nothing.
|
||||
}
|
||||
|
||||
type clientTrace struct{}
|
||||
|
||||
func requestTrace(*http.Request) *clientTrace { return nil }
|
||||
func traceGotConn(*http.Request, *ClientConn) {}
|
||||
func traceFirstResponseByte(*clientTrace) {}
|
||||
func traceWroteHeaders(*clientTrace) {}
|
||||
func traceWroteRequest(*clientTrace, error) {}
|
||||
func traceGot100Continue(trace *clientTrace) {}
|
||||
func traceWait100Continue(trace *clientTrace) {}
|
||||
|
||||
func nop() {}
|
||||
|
||||
func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
|
||||
return nil, nop
|
||||
}
|
||||
|
||||
func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
|
||||
return ctx, nop
|
||||
}
|
||||
|
||||
func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
|
||||
return req
|
||||
}
|
||||
|
||||
// temporary copy of Go 1.6's private tls.Config.clone:
|
||||
func cloneTLSConfig(c *tls.Config) *tls.Config {
|
||||
return &tls.Config{
|
||||
Rand: c.Rand,
|
||||
Time: c.Time,
|
||||
Certificates: c.Certificates,
|
||||
NameToCertificate: c.NameToCertificate,
|
||||
GetCertificate: c.GetCertificate,
|
||||
RootCAs: c.RootCAs,
|
||||
NextProtos: c.NextProtos,
|
||||
ServerName: c.ServerName,
|
||||
ClientAuth: c.ClientAuth,
|
||||
ClientCAs: c.ClientCAs,
|
||||
InsecureSkipVerify: c.InsecureSkipVerify,
|
||||
CipherSuites: c.CipherSuites,
|
||||
PreferServerCipherSuites: c.PreferServerCipherSuites,
|
||||
SessionTicketsDisabled: c.SessionTicketsDisabled,
|
||||
SessionTicketKey: c.SessionTicketKey,
|
||||
ClientSessionCache: c.ClientSessionCache,
|
||||
MinVersion: c.MinVersion,
|
||||
MaxVersion: c.MaxVersion,
|
||||
CurvePreferences: c.CurvePreferences,
|
||||
}
|
||||
}
|
||||
|
||||
func (cc *ClientConn) Ping(ctx contextContext) error {
|
||||
return cc.ping(ctx)
|
||||
}
|
||||
|
||||
func (t *Transport) idleConnTimeout() time.Duration { return 0 }
|
29
vendor/golang.org/x/net/http2/not_go18.go
generated
vendored
Normal file
|
@ -0,0 +1,29 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.8
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func configureServer18(h1 *http.Server, h2 *Server) error {
|
||||
// No IdleTimeout to sync prior to Go 1.8.
|
||||
return nil
|
||||
}
|
||||
|
||||
func shouldLogPanic(panicValue interface{}) bool {
|
||||
return panicValue != nil
|
||||
}
|
||||
|
||||
func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
func reqBodyIsNoBody(io.ReadCloser) bool { return false }
|
||||
|
||||
func go18httpNoBody() io.ReadCloser { return nil } // for tests only
|
16
vendor/golang.org/x/net/http2/not_go19.go
generated
vendored
Normal file
|
@ -0,0 +1,16 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.9
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func configureServer19(s *http.Server, conf *Server) error {
|
||||
// not supported prior to go1.9
|
||||
return nil
|
||||
}
|
163
vendor/golang.org/x/net/http2/pipe.go
generated
vendored
Normal file
|
@ -0,0 +1,163 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
|
||||
// io.Pipe except there are no PipeReader/PipeWriter halves, and the
|
||||
// underlying buffer is an interface. (io.Pipe is always unbuffered)
|
||||
type pipe struct {
|
||||
mu sync.Mutex
|
||||
c sync.Cond // c.L lazily initialized to &p.mu
|
||||
b pipeBuffer // nil when done reading
|
||||
err error // read error once empty. non-nil means closed.
|
||||
breakErr error // immediate read error (caller doesn't see rest of b)
|
||||
donec chan struct{} // closed on error
|
||||
readFn func() // optional code to run in Read before error
|
||||
}
|
||||
|
||||
type pipeBuffer interface {
|
||||
Len() int
|
||||
io.Writer
|
||||
io.Reader
|
||||
}
|
||||
|
||||
func (p *pipe) Len() int {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.b == nil {
|
||||
return 0
|
||||
}
|
||||
return p.b.Len()
|
||||
}
|
||||
|
||||
// Read waits until data is available and copies bytes
|
||||
// from the buffer into p.
|
||||
func (p *pipe) Read(d []byte) (n int, err error) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.c.L == nil {
|
||||
p.c.L = &p.mu
|
||||
}
|
||||
for {
|
||||
if p.breakErr != nil {
|
||||
return 0, p.breakErr
|
||||
}
|
||||
if p.b != nil && p.b.Len() > 0 {
|
||||
return p.b.Read(d)
|
||||
}
|
||||
if p.err != nil {
|
||||
if p.readFn != nil {
|
||||
p.readFn() // e.g. copy trailers
|
||||
p.readFn = nil // not sticky like p.err
|
||||
}
|
||||
p.b = nil
|
||||
return 0, p.err
|
||||
}
|
||||
p.c.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
var errClosedPipeWrite = errors.New("write on closed buffer")
|
||||
|
||||
// Write copies bytes from p into the buffer and wakes a reader.
|
||||
// It is an error to write more data than the buffer can hold.
|
||||
func (p *pipe) Write(d []byte) (n int, err error) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.c.L == nil {
|
||||
p.c.L = &p.mu
|
||||
}
|
||||
defer p.c.Signal()
|
||||
if p.err != nil {
|
||||
return 0, errClosedPipeWrite
|
||||
}
|
||||
if p.breakErr != nil {
|
||||
return len(d), nil // discard when there is no reader
|
||||
}
|
||||
return p.b.Write(d)
|
||||
}
|
||||
|
||||
// CloseWithError causes the next Read (waking up a current blocked
|
||||
// Read if needed) to return the provided err after all data has been
|
||||
// read.
|
||||
//
|
||||
// The error must be non-nil.
|
||||
func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }
|
||||
|
||||
// BreakWithError causes the next Read (waking up a current blocked
|
||||
// Read if needed) to return the provided err immediately, without
|
||||
// waiting for unread data.
|
||||
func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }
|
||||
|
||||
// closeWithErrorAndCode is like CloseWithError but also sets some code to run
|
||||
// in the caller's goroutine before returning the error.
|
||||
func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }
|
||||
|
||||
func (p *pipe) closeWithError(dst *error, err error, fn func()) {
|
||||
if err == nil {
|
||||
panic("err must be non-nil")
|
||||
}
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.c.L == nil {
|
||||
p.c.L = &p.mu
|
||||
}
|
||||
defer p.c.Signal()
|
||||
if *dst != nil {
|
||||
// Already been done.
|
||||
return
|
||||
}
|
||||
p.readFn = fn
|
||||
if dst == &p.breakErr {
|
||||
p.b = nil
|
||||
}
|
||||
*dst = err
|
||||
p.closeDoneLocked()
|
||||
}
|
||||
|
||||
// requires p.mu be held.
|
||||
func (p *pipe) closeDoneLocked() {
|
||||
if p.donec == nil {
|
||||
return
|
||||
}
|
||||
// Close if unclosed. This isn't racy since we always
|
||||
// hold p.mu while closing.
|
||||
select {
|
||||
case <-p.donec:
|
||||
default:
|
||||
close(p.donec)
|
||||
}
|
||||
}
|
||||
|
||||
// Err returns the error (if any) first set by BreakWithError or CloseWithError.
|
||||
func (p *pipe) Err() error {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.breakErr != nil {
|
||||
return p.breakErr
|
||||
}
|
||||
return p.err
|
||||
}
|
||||
|
||||
// Done returns a channel which is closed if and when this pipe is closed
|
||||
// with CloseWithError.
|
||||
func (p *pipe) Done() <-chan struct{} {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.donec == nil {
|
||||
p.donec = make(chan struct{})
|
||||
if p.err != nil || p.breakErr != nil {
|
||||
// Already hit an error.
|
||||
p.closeDoneLocked()
|
||||
}
|
||||
}
|
||||
return p.donec
|
||||
}
|
130
vendor/golang.org/x/net/http2/pipe_test.go
generated
vendored
Normal file
|
@ -0,0 +1,130 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import (
	"bytes"
	"errors"
	"io"
	"io/ioutil"
	"testing"
)

func TestPipeClose(t *testing.T) {
	var p pipe
	p.b = new(bytes.Buffer)
	a := errors.New("a")
	b := errors.New("b")
	p.CloseWithError(a)
	p.CloseWithError(b)
	_, err := p.Read(make([]byte, 1))
	if err != a {
		t.Errorf("err = %v want %v", err, a)
	}
}

func TestPipeDoneChan(t *testing.T) {
	var p pipe
	done := p.Done()
	select {
	case <-done:
		t.Fatal("done too soon")
	default:
	}
	p.CloseWithError(io.EOF)
	select {
	case <-done:
	default:
		t.Fatal("should be done")
	}
}

func TestPipeDoneChan_ErrFirst(t *testing.T) {
	var p pipe
	p.CloseWithError(io.EOF)
	done := p.Done()
	select {
	case <-done:
	default:
		t.Fatal("should be done")
	}
}

func TestPipeDoneChan_Break(t *testing.T) {
	var p pipe
	done := p.Done()
	select {
	case <-done:
		t.Fatal("done too soon")
	default:
	}
	p.BreakWithError(io.EOF)
	select {
	case <-done:
	default:
		t.Fatal("should be done")
	}
}

func TestPipeDoneChan_Break_ErrFirst(t *testing.T) {
	var p pipe
	p.BreakWithError(io.EOF)
	done := p.Done()
	select {
	case <-done:
	default:
		t.Fatal("should be done")
	}
}

func TestPipeCloseWithError(t *testing.T) {
	p := &pipe{b: new(bytes.Buffer)}
	const body = "foo"
	io.WriteString(p, body)
	a := errors.New("test error")
	p.CloseWithError(a)
	all, err := ioutil.ReadAll(p)
	if string(all) != body {
		t.Errorf("read bytes = %q; want %q", all, body)
	}
	if err != a {
		t.Logf("read error = %v, %v", err, a)
	}
	// Read and Write should fail.
	if n, err := p.Write([]byte("abc")); err != errClosedPipeWrite || n != 0 {
		t.Errorf("Write(abc) after close\ngot %v, %v\nwant 0, %v", n, err, errClosedPipeWrite)
	}
	if n, err := p.Read(make([]byte, 1)); err == nil || n != 0 {
		t.Errorf("Read() after close\ngot %v, nil\nwant 0, %v", n, a)
	}
}

func TestPipeBreakWithError(t *testing.T) {
	p := &pipe{b: new(bytes.Buffer)}
	io.WriteString(p, "foo")
	a := errors.New("test err")
	p.BreakWithError(a)
	all, err := ioutil.ReadAll(p)
	if string(all) != "" {
		t.Errorf("read bytes = %q; want empty string", all)
	}
	if err != a {
		t.Logf("read error = %v, %v", err, a)
	}
	if p.b != nil {
		t.Errorf("buffer should be nil after BreakWithError")
	}
	// Write should succeed silently.
	if n, err := p.Write([]byte("abc")); err != nil || n != 3 {
		t.Errorf("Write(abc) after break\ngot %v, %v\nwant 3, nil", n, err)
	}
	if p.b != nil {
		t.Errorf("buffer should be nil after Write")
	}
	// Read should fail.
	if n, err := p.Read(make([]byte, 1)); err == nil || n != 0 {
		t.Errorf("Read() after close\ngot %v, nil\nwant 0, not nil", n)
	}
}
2888 vendor/golang.org/x/net/http2/server.go generated vendored Normal file
(File diff suppressed because it is too large)
521 vendor/golang.org/x/net/http2/server_push_test.go generated vendored Normal file
@@ -0,0 +1,521 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.8

package http2

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"reflect"
	"strconv"
	"sync"
	"testing"
	"time"
)

func TestServer_Push_Success(t *testing.T) {
	const (
		mainBody   = "<html>index page</html>"
		pushedBody = "<html>pushed page</html>"
		userAgent  = "testagent"
		cookie     = "testcookie"
	)

	var stURL string
	checkPromisedReq := func(r *http.Request, wantMethod string, wantH http.Header) error {
		if got, want := r.Method, wantMethod; got != want {
			return fmt.Errorf("promised Req.Method=%q, want %q", got, want)
		}
		if got, want := r.Header, wantH; !reflect.DeepEqual(got, want) {
			return fmt.Errorf("promised Req.Header=%q, want %q", got, want)
		}
		if got, want := "https://"+r.Host, stURL; got != want {
			return fmt.Errorf("promised Req.Host=%q, want %q", got, want)
		}
		if r.Body == nil {
			return fmt.Errorf("nil Body")
		}
		if buf, err := ioutil.ReadAll(r.Body); err != nil || len(buf) != 0 {
			return fmt.Errorf("ReadAll(Body)=%q,%v, want '',nil", buf, err)
		}
		return nil
	}

	errc := make(chan error, 3)
	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.RequestURI() {
		case "/":
			// Push "/pushed?get" as a GET request, using an absolute URL.
			opt := &http.PushOptions{
				Header: http.Header{
					"User-Agent": {userAgent},
				},
			}
			if err := w.(http.Pusher).Push(stURL+"/pushed?get", opt); err != nil {
				errc <- fmt.Errorf("error pushing /pushed?get: %v", err)
				return
			}
			// Push "/pushed?head" as a HEAD request, using a path.
			opt = &http.PushOptions{
				Method: "HEAD",
				Header: http.Header{
					"User-Agent": {userAgent},
					"Cookie":     {cookie},
				},
			}
			if err := w.(http.Pusher).Push("/pushed?head", opt); err != nil {
				errc <- fmt.Errorf("error pushing /pushed?head: %v", err)
				return
			}
			w.Header().Set("Content-Type", "text/html")
			w.Header().Set("Content-Length", strconv.Itoa(len(mainBody)))
			w.WriteHeader(200)
			io.WriteString(w, mainBody)
			errc <- nil

		case "/pushed?get":
			wantH := http.Header{}
			wantH.Set("User-Agent", userAgent)
			if err := checkPromisedReq(r, "GET", wantH); err != nil {
				errc <- fmt.Errorf("/pushed?get: %v", err)
				return
			}
			w.Header().Set("Content-Type", "text/html")
			w.Header().Set("Content-Length", strconv.Itoa(len(pushedBody)))
			w.WriteHeader(200)
			io.WriteString(w, pushedBody)
			errc <- nil

		case "/pushed?head":
			wantH := http.Header{}
			wantH.Set("User-Agent", userAgent)
			wantH.Set("Cookie", cookie)
			if err := checkPromisedReq(r, "HEAD", wantH); err != nil {
				errc <- fmt.Errorf("/pushed?head: %v", err)
				return
			}
			w.WriteHeader(204)
			errc <- nil

		default:
			errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI())
		}
	})
	stURL = st.ts.URL

	// Send one request, which should push two responses.
	st.greet()
	getSlash(st)
	for k := 0; k < 3; k++ {
		select {
		case <-time.After(2 * time.Second):
			t.Errorf("timeout waiting for handler %d to finish", k)
		case err := <-errc:
			if err != nil {
				t.Fatal(err)
			}
		}
	}

	checkPushPromise := func(f Frame, promiseID uint32, wantH [][2]string) error {
		pp, ok := f.(*PushPromiseFrame)
		if !ok {
			return fmt.Errorf("got a %T; want *PushPromiseFrame", f)
		}
		if !pp.HeadersEnded() {
			return fmt.Errorf("want END_HEADERS flag in PushPromiseFrame")
		}
		if got, want := pp.PromiseID, promiseID; got != want {
			return fmt.Errorf("got PromiseID %v; want %v", got, want)
		}
		gotH := st.decodeHeader(pp.HeaderBlockFragment())
		if !reflect.DeepEqual(gotH, wantH) {
			return fmt.Errorf("got promised headers %v; want %v", gotH, wantH)
		}
		return nil
	}
	checkHeaders := func(f Frame, wantH [][2]string) error {
		hf, ok := f.(*HeadersFrame)
		if !ok {
			return fmt.Errorf("got a %T; want *HeadersFrame", f)
		}
		gotH := st.decodeHeader(hf.HeaderBlockFragment())
		if !reflect.DeepEqual(gotH, wantH) {
			return fmt.Errorf("got response headers %v; want %v", gotH, wantH)
		}
		return nil
	}
	checkData := func(f Frame, wantData string) error {
		df, ok := f.(*DataFrame)
		if !ok {
			return fmt.Errorf("got a %T; want *DataFrame", f)
		}
		if gotData := string(df.Data()); gotData != wantData {
			return fmt.Errorf("got response data %q; want %q", gotData, wantData)
		}
		return nil
	}

	// Stream 1 has 2 PUSH_PROMISE + HEADERS + DATA
	// Stream 2 has HEADERS + DATA
	// Stream 4 has HEADERS
	expected := map[uint32][]func(Frame) error{
		1: {
			func(f Frame) error {
				return checkPushPromise(f, 2, [][2]string{
					{":method", "GET"},
					{":scheme", "https"},
					{":authority", st.ts.Listener.Addr().String()},
					{":path", "/pushed?get"},
					{"user-agent", userAgent},
				})
			},
			func(f Frame) error {
				return checkPushPromise(f, 4, [][2]string{
					{":method", "HEAD"},
					{":scheme", "https"},
					{":authority", st.ts.Listener.Addr().String()},
					{":path", "/pushed?head"},
					{"cookie", cookie},
					{"user-agent", userAgent},
				})
			},
			func(f Frame) error {
				return checkHeaders(f, [][2]string{
					{":status", "200"},
					{"content-type", "text/html"},
					{"content-length", strconv.Itoa(len(mainBody))},
				})
			},
			func(f Frame) error {
				return checkData(f, mainBody)
			},
		},
		2: {
			func(f Frame) error {
				return checkHeaders(f, [][2]string{
					{":status", "200"},
					{"content-type", "text/html"},
					{"content-length", strconv.Itoa(len(pushedBody))},
				})
			},
			func(f Frame) error {
				return checkData(f, pushedBody)
			},
		},
		4: {
			func(f Frame) error {
				return checkHeaders(f, [][2]string{
					{":status", "204"},
				})
			},
		},
	}

	consumed := map[uint32]int{}
	for k := 0; len(expected) > 0; k++ {
		f, err := st.readFrame()
		if err != nil {
			for id, left := range expected {
				t.Errorf("stream %d: missing %d frames", id, len(left))
			}
			t.Fatalf("readFrame %d: %v", k, err)
		}
		id := f.Header().StreamID
		label := fmt.Sprintf("stream %d, frame %d", id, consumed[id])
		if len(expected[id]) == 0 {
			t.Fatalf("%s: unexpected frame %#+v", label, f)
		}
		check := expected[id][0]
		expected[id] = expected[id][1:]
		if len(expected[id]) == 0 {
			delete(expected, id)
		}
		if err := check(f); err != nil {
			t.Fatalf("%s: %v", label, err)
		}
		consumed[id]++
	}
}

func TestServer_Push_SuccessNoRace(t *testing.T) {
	// Regression test for issue #18326. Ensure the request handler can mutate
	// pushed request headers without racing with the PUSH_PROMISE write.
	errc := make(chan error, 2)
	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.RequestURI() {
		case "/":
			opt := &http.PushOptions{
				Header: http.Header{"User-Agent": {"testagent"}},
			}
			if err := w.(http.Pusher).Push("/pushed", opt); err != nil {
				errc <- fmt.Errorf("error pushing: %v", err)
				return
			}
			w.WriteHeader(200)
			errc <- nil

		case "/pushed":
			// Update request header, ensure there is no race.
			r.Header.Set("User-Agent", "newagent")
			r.Header.Set("Cookie", "cookie")
			w.WriteHeader(200)
			errc <- nil

		default:
			errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI())
		}
	})

	// Send one request, which should push one response.
	st.greet()
	getSlash(st)
	for k := 0; k < 2; k++ {
		select {
		case <-time.After(2 * time.Second):
			t.Errorf("timeout waiting for handler %d to finish", k)
		case err := <-errc:
			if err != nil {
				t.Fatal(err)
			}
		}
	}
}

func TestServer_Push_RejectRecursivePush(t *testing.T) {
	// Expect two requests, but might get three if there's a bug and the second push succeeds.
	errc := make(chan error, 3)
	handler := func(w http.ResponseWriter, r *http.Request) error {
		baseURL := "https://" + r.Host
		switch r.URL.Path {
		case "/":
			if err := w.(http.Pusher).Push(baseURL+"/push1", nil); err != nil {
				return fmt.Errorf("first Push()=%v, want nil", err)
			}
			return nil

		case "/push1":
			if got, want := w.(http.Pusher).Push(baseURL+"/push2", nil), ErrRecursivePush; got != want {
				return fmt.Errorf("Push()=%v, want %v", got, want)
			}
			return nil

		default:
			return fmt.Errorf("unexpected path: %q", r.URL.Path)
		}
	}
	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
		errc <- handler(w, r)
	})
	defer st.Close()
	st.greet()
	getSlash(st)
	if err := <-errc; err != nil {
		t.Errorf("First request failed: %v", err)
	}
	if err := <-errc; err != nil {
		t.Errorf("Second request failed: %v", err)
	}
}

func testServer_Push_RejectSingleRequest(t *testing.T, doPush func(http.Pusher, *http.Request) error, settings ...Setting) {
	// Expect one request, but might get two if there's a bug and the push succeeds.
	errc := make(chan error, 2)
	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
		errc <- doPush(w.(http.Pusher), r)
	})
	defer st.Close()
	st.greet()
	if err := st.fr.WriteSettings(settings...); err != nil {
		st.t.Fatalf("WriteSettings: %v", err)
	}
	st.wantSettingsAck()
	getSlash(st)
	if err := <-errc; err != nil {
		t.Error(err)
	}
	// Should not get a PUSH_PROMISE frame.
	hf := st.wantHeaders()
	if !hf.StreamEnded() {
		t.Error("stream should end after headers")
	}
}

func TestServer_Push_RejectIfDisabled(t *testing.T) {
	testServer_Push_RejectSingleRequest(t,
		func(p http.Pusher, r *http.Request) error {
			if got, want := p.Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want {
				return fmt.Errorf("Push()=%v, want %v", got, want)
			}
			return nil
		},
		Setting{SettingEnablePush, 0})
}

func TestServer_Push_RejectWhenNoConcurrentStreams(t *testing.T) {
	testServer_Push_RejectSingleRequest(t,
		func(p http.Pusher, r *http.Request) error {
			if got, want := p.Push("https://"+r.Host+"/pushed", nil), ErrPushLimitReached; got != want {
				return fmt.Errorf("Push()=%v, want %v", got, want)
			}
			return nil
		},
		Setting{SettingMaxConcurrentStreams, 0})
}

func TestServer_Push_RejectWrongScheme(t *testing.T) {
	testServer_Push_RejectSingleRequest(t,
		func(p http.Pusher, r *http.Request) error {
			if err := p.Push("http://"+r.Host+"/pushed", nil); err == nil {
				return errors.New("Push() should have failed (push target URL is http)")
			}
			return nil
		})
}

func TestServer_Push_RejectMissingHost(t *testing.T) {
	testServer_Push_RejectSingleRequest(t,
		func(p http.Pusher, r *http.Request) error {
			if err := p.Push("https:pushed", nil); err == nil {
				return errors.New("Push() should have failed (push target URL missing host)")
			}
			return nil
		})
}

func TestServer_Push_RejectRelativePath(t *testing.T) {
	testServer_Push_RejectSingleRequest(t,
		func(p http.Pusher, r *http.Request) error {
			if err := p.Push("../test", nil); err == nil {
				return errors.New("Push() should have failed (push target is a relative path)")
			}
			return nil
		})
}

func TestServer_Push_RejectForbiddenMethod(t *testing.T) {
	testServer_Push_RejectSingleRequest(t,
		func(p http.Pusher, r *http.Request) error {
			if err := p.Push("https://"+r.Host+"/pushed", &http.PushOptions{Method: "POST"}); err == nil {
				return errors.New("Push() should have failed (cannot promise a POST)")
			}
			return nil
		})
}

func TestServer_Push_RejectForbiddenHeader(t *testing.T) {
	testServer_Push_RejectSingleRequest(t,
		func(p http.Pusher, r *http.Request) error {
			header := http.Header{
				"Content-Length":   {"10"},
				"Content-Encoding": {"gzip"},
				"Trailer":          {"Foo"},
				"Te":               {"trailers"},
				"Host":             {"test.com"},
				":authority":       {"test.com"},
			}
			if err := p.Push("https://"+r.Host+"/pushed", &http.PushOptions{Header: header}); err == nil {
				return errors.New("Push() should have failed (forbidden headers)")
			}
			return nil
		})
}

func TestServer_Push_StateTransitions(t *testing.T) {
	const body = "foo"

	gotPromise := make(chan bool)
	finishedPush := make(chan bool)

	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.RequestURI() {
		case "/":
			if err := w.(http.Pusher).Push("/pushed", nil); err != nil {
				t.Errorf("Push error: %v", err)
			}
			// Don't finish this request until the push finishes so we don't
			// nondeterministically interleave output frames with the push.
			<-finishedPush
		case "/pushed":
			<-gotPromise
		}
		w.Header().Set("Content-Type", "text/html")
		w.Header().Set("Content-Length", strconv.Itoa(len(body)))
		w.WriteHeader(200)
		io.WriteString(w, body)
	})
	defer st.Close()

	st.greet()
	if st.stream(2) != nil {
		t.Fatal("stream 2 should be empty")
	}
	if got, want := st.streamState(2), stateIdle; got != want {
		t.Fatalf("streamState(2)=%v, want %v", got, want)
	}
	getSlash(st)
	// After the PUSH_PROMISE is sent, the stream should be stateHalfClosedRemote.
	st.wantPushPromise()
	if got, want := st.streamState(2), stateHalfClosedRemote; got != want {
		t.Fatalf("streamState(2)=%v, want %v", got, want)
	}
	// We stall the HTTP handler for "/pushed" until the above check. If we don't
	// stall the handler, then the handler might write HEADERS and DATA and finish
	// the stream before we check st.streamState(2) -- should that happen, we'll
	// see stateClosed and fail the above check.
	close(gotPromise)
	st.wantHeaders()
	if df := st.wantData(); !df.StreamEnded() {
		t.Fatal("expected END_STREAM flag on DATA")
	}
	if got, want := st.streamState(2), stateClosed; got != want {
		t.Fatalf("streamState(2)=%v, want %v", got, want)
	}
	close(finishedPush)
}

func TestServer_Push_RejectAfterGoAway(t *testing.T) {
	var readyOnce sync.Once
	ready := make(chan struct{})
	errc := make(chan error, 2)
	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
		select {
		case <-ready:
		case <-time.After(5 * time.Second):
			errc <- fmt.Errorf("timeout waiting for GOAWAY to be processed")
		}
		if got, want := w.(http.Pusher).Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want {
			errc <- fmt.Errorf("Push()=%v, want %v", got, want)
		}
		errc <- nil
	})
	defer st.Close()
	st.greet()
	getSlash(st)

	// Send GOAWAY and wait for it to be processed.
	st.fr.WriteGoAway(1, ErrCodeNo, nil)
	go func() {
		for {
			select {
			case <-ready:
				return
			default:
			}
			st.sc.serveMsgCh <- func(loopNum int) {
				if !st.sc.pushEnabled {
					readyOnce.Do(func() { close(ready) })
				}
			}
		}
	}()
	if err := <-errc; err != nil {
		t.Error(err)
	}
}
3725 vendor/golang.org/x/net/http2/server_test.go generated vendored Normal file
(File diff suppressed because it is too large)
2303 vendor/golang.org/x/net/http2/transport.go generated vendored Normal file
(File diff suppressed because it is too large)
3847 vendor/golang.org/x/net/http2/transport_test.go generated vendored Normal file
(File diff suppressed because it is too large)
365 vendor/golang.org/x/net/http2/write.go generated vendored Normal file
@@ -0,0 +1,365 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
	"net/url"

	"golang.org/x/net/http2/hpack"
	"golang.org/x/net/lex/httplex"
)

// writeFramer is implemented by any type that is used to write frames.
type writeFramer interface {
	writeFrame(writeContext) error

	// staysWithinBuffer reports whether this writer promises that
	// it will only write less than or equal to size bytes, and it
	// won't Flush the write context.
	staysWithinBuffer(size int) bool
}

// writeContext is the interface needed by the various frame writer
// types below. All the writeFrame methods below are scheduled via the
// frame writing scheduler (see writeScheduler in writesched.go).
//
// This interface is implemented by *serverConn.
//
// TODO: decide whether to a) use this in the client code (which didn't
// end up using this yet, because it has a simpler design, not
// currently implementing priorities), or b) delete this and
// make the server code a bit more concrete.
type writeContext interface {
	Framer() *Framer
	Flush() error
	CloseConn() error
	// HeaderEncoder returns an HPACK encoder that writes to the
	// returned buffer.
	HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
}

// writeEndsStream reports whether w writes a frame that will transition
// the stream to a half-closed local state. This returns false for RST_STREAM,
// which closes the entire stream (not just the local half).
func writeEndsStream(w writeFramer) bool {
	switch v := w.(type) {
	case *writeData:
		return v.endStream
	case *writeResHeaders:
		return v.endStream
	case nil:
		// This can only happen if the caller reuses w after it's
		// been intentionally nil'ed out to prevent use. Keep this
		// here to catch future refactoring breaking it.
		panic("writeEndsStream called on nil writeFramer")
	}
	return false
}

type flushFrameWriter struct{}

func (flushFrameWriter) writeFrame(ctx writeContext) error {
	return ctx.Flush()
}

func (flushFrameWriter) staysWithinBuffer(max int) bool { return false }

type writeSettings []Setting

func (s writeSettings) staysWithinBuffer(max int) bool {
	const settingSize = 6 // uint16 + uint32
	return frameHeaderLen+settingSize*len(s) <= max
}

func (s writeSettings) writeFrame(ctx writeContext) error {
	return ctx.Framer().WriteSettings([]Setting(s)...)
}

type writeGoAway struct {
	maxStreamID uint32
	code        ErrCode
}

func (p *writeGoAway) writeFrame(ctx writeContext) error {
	err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
	ctx.Flush() // ignore error: we're hanging up on them anyway
	return err
}

func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes

type writeData struct {
	streamID  uint32
	p         []byte
	endStream bool
}

func (w *writeData) String() string {
	return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
}

func (w *writeData) writeFrame(ctx writeContext) error {
	return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
}

func (w *writeData) staysWithinBuffer(max int) bool {
	return frameHeaderLen+len(w.p) <= max
}

// handlerPanicRST is the message sent from handler goroutines when
// the handler panics.
type handlerPanicRST struct {
	StreamID uint32
}

func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
	return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
}

func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }

func (se StreamError) writeFrame(ctx writeContext) error {
	return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
}

func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }

type writePingAck struct{ pf *PingFrame }

func (w writePingAck) writeFrame(ctx writeContext) error {
	return ctx.Framer().WritePing(true, w.pf.Data)
}

func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max }

type writeSettingsAck struct{}

func (writeSettingsAck) writeFrame(ctx writeContext) error {
	return ctx.Framer().WriteSettingsAck()
}

func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max }

// splitHeaderBlock splits headerBlock into fragments so that each fragment fits
// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true
// for the first/last fragment, respectively.
func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error {
	// For now we're lazy and just pick the minimum MAX_FRAME_SIZE
	// that all peers must support (16KB). Later we could care
	// more and send larger frames if the peer advertised it, but
	// there's little point. Most headers are small anyway (so we
	// generally won't have CONTINUATION frames), and extra frames
	// only waste 9 bytes anyway.
	const maxFrameSize = 16384

	first := true
	for len(headerBlock) > 0 {
		frag := headerBlock
		if len(frag) > maxFrameSize {
			frag = frag[:maxFrameSize]
		}
		headerBlock = headerBlock[len(frag):]
		if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil {
			return err
		}
		first = false
	}
	return nil
}

// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
// for HTTP response headers or trailers from a server handler.
type writeResHeaders struct {
	streamID    uint32
	httpResCode int         // 0 means no ":status" line
	h           http.Header // may be nil
	trailers    []string    // if non-nil, which keys of h to write. nil means all.
	endStream   bool

	date          string
	contentType   string
	contentLength string
}

func encKV(enc *hpack.Encoder, k, v string) {
	if VerboseLogs {
		log.Printf("http2: server encoding header %q = %q", k, v)
	}
	enc.WriteField(hpack.HeaderField{Name: k, Value: v})
}

func (w *writeResHeaders) staysWithinBuffer(max int) bool {
	// TODO: this is a common one. It'd be nice to return true
	// here and get into the fast path if we could be clever and
	// calculate the size fast enough, or at least a conservative
	// upper bound that usually fires. (Maybe if w.h and
	// w.trailers are nil, so we don't need to enumerate it.)
	// Otherwise I'm afraid that just calculating the length to
	// answer this question would be slower than the ~2µs benefit.
	return false
}

func (w *writeResHeaders) writeFrame(ctx writeContext) error {
	enc, buf := ctx.HeaderEncoder()
	buf.Reset()

	if w.httpResCode != 0 {
		encKV(enc, ":status", httpCodeString(w.httpResCode))
	}

	encodeHeaders(enc, w.h, w.trailers)

	if w.contentType != "" {
		encKV(enc, "content-type", w.contentType)
	}
	if w.contentLength != "" {
		encKV(enc, "content-length", w.contentLength)
	}
	if w.date != "" {
		encKV(enc, "date", w.date)
	}

	headerBlock := buf.Bytes()
	if len(headerBlock) == 0 && w.trailers == nil {
		panic("unexpected empty hpack")
	}

	return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
}

func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
	if firstFrag {
		return ctx.Framer().WriteHeaders(HeadersFrameParam{
			StreamID:      w.streamID,
			BlockFragment: frag,
			EndStream:     w.endStream,
			EndHeaders:    lastFrag,
		})
	} else {
		return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
	}
}

// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames.
type writePushPromise struct {
	streamID uint32   // pusher stream
	method   string   // for :method
	url      *url.URL // for :scheme, :authority, :path
	h        http.Header

	// Creates an ID for a pushed stream. This runs on serveG just before
	// the frame is written. The returned ID is copied to promisedID.
	allocatePromisedID func() (uint32, error)
	promisedID         uint32
}

func (w *writePushPromise) staysWithinBuffer(max int) bool {
	// TODO: see writeResHeaders.staysWithinBuffer
	return false
}

func (w *writePushPromise) writeFrame(ctx writeContext) error {
	enc, buf := ctx.HeaderEncoder()
	buf.Reset()

	encKV(enc, ":method", w.method)
	encKV(enc, ":scheme", w.url.Scheme)
	encKV(enc, ":authority", w.url.Host)
	encKV(enc, ":path", w.url.RequestURI())
	encodeHeaders(enc, w.h, nil)

	headerBlock := buf.Bytes()
	if len(headerBlock) == 0 {
		panic("unexpected empty hpack")
	}

	return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
}

func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
	if firstFrag {
		return ctx.Framer().WritePushPromise(PushPromiseParam{
			StreamID:      w.streamID,
			PromiseID:     w.promisedID,
			BlockFragment: frag,
			EndHeaders:    lastFrag,
		})
	} else {
		return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
	}
}

type write100ContinueHeadersFrame struct {
	streamID uint32
}

func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
	enc, buf := ctx.HeaderEncoder()
	buf.Reset()
	encKV(enc, ":status", "100")
	return ctx.Framer().WriteHeaders(HeadersFrameParam{
		StreamID:      w.streamID,
		BlockFragment: buf.Bytes(),
		EndStream:     false,
		EndHeaders:    true,
	})
}

func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool {
	// Sloppy but conservative:
	return 9+2*(len(":status")+len("100")) <= max
}

type writeWindowUpdate struct {
	streamID uint32 // or 0 for conn-level
	n        uint32
}

func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }

func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
	return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
}

// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k])
// is encoded only if k is in keys.
func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
	if keys == nil {
		sorter := sorterPool.Get().(*sorter)
		// Using defer here, since the returned keys from the
		// sorter.Keys method is only valid until the sorter
		// is returned:
		defer sorterPool.Put(sorter)
		keys = sorter.Keys(h)
	}
	for _, k := range keys {
		vv := h[k]
		k = lowerHeader(k)
		if !validWireHeaderFieldName(k) {
			// Skip it as backup paranoia. Per
			// golang.org/issue/14048, these should
			// already be rejected at a higher level.
			continue
		}
		isTE := k == "transfer-encoding"
		for _, v := range vv {
			if !httplex.ValidHeaderFieldValue(v) {
				// TODO: return an error? golang.org/issue/14048
				// For now just omit it.
				continue
			}
			// TODO: more of "8.1.2.2 Connection-Specific Header Fields"
			if isTE && v != "trailers" {
				continue
			}
			encKV(enc, k, v)
		}
	}
}
242 vendor/golang.org/x/net/http2/writesched.go generated vendored Normal file
@@ -0,0 +1,242 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import "fmt"

// WriteScheduler is the interface implemented by HTTP/2 write schedulers.
// Methods are never called concurrently.
type WriteScheduler interface {
	// OpenStream opens a new stream in the write scheduler.
	// It is illegal to call this with streamID=0 or with a streamID that is
	// already open -- the call may panic.
	OpenStream(streamID uint32, options OpenStreamOptions)

	// CloseStream closes a stream in the write scheduler. Any frames queued on
	// this stream should be discarded. It is illegal to call this on a stream
	// that is not open -- the call may panic.
	CloseStream(streamID uint32)

	// AdjustStream adjusts the priority of the given stream. This may be called
	// on a stream that has not yet been opened or has been closed. Note that
	// RFC 7540 allows PRIORITY frames to be sent on streams in any state. See:
	// https://tools.ietf.org/html/rfc7540#section-5.1
	AdjustStream(streamID uint32, priority PriorityParam)

	// Push queues a frame in the scheduler. In most cases, this will not be
	// called with wr.StreamID()!=0 unless that stream is currently open. The one
	// exception is RST_STREAM frames, which may be sent on idle or closed streams.
	Push(wr FrameWriteRequest)

	// Pop dequeues the next frame to write. Returns false if no frames can
	// be written. Frames with a given wr.StreamID() are Pop'd in the same
	// order they are Push'd.
	Pop() (wr FrameWriteRequest, ok bool)
}
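To make the contract concrete, here is a minimal sketch of a scheduler that ignores priorities and serves frames strictly first-in first-out. It is not part of the vendored package (which ships priority-tree and random schedulers); the name fifoWriteScheduler is hypothetical, and the type names are shown unqualified as if inside package http2:

	// fifoWriteScheduler is an illustrative WriteScheduler: one global
	// queue, no priority state.
	type fifoWriteScheduler struct {
		q []FrameWriteRequest
	}

	var _ WriteScheduler = &fifoWriteScheduler{}

	func (ws *fifoWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {}
	func (ws *fifoWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam)  {}

	// CloseStream discards frames still queued for the closed stream,
	// as the interface requires.
	func (ws *fifoWriteScheduler) CloseStream(streamID uint32) {
		kept := ws.q[:0]
		for _, wr := range ws.q {
			if wr.StreamID() != streamID {
				kept = append(kept, wr)
			}
		}
		ws.q = kept
	}

	// Push appends at the tail; Pop serves from the head, which trivially
	// preserves the required per-stream FIFO ordering.
	func (ws *fifoWriteScheduler) Push(wr FrameWriteRequest) { ws.q = append(ws.q, wr) }

	func (ws *fifoWriteScheduler) Pop() (FrameWriteRequest, bool) {
		if len(ws.q) == 0 {
			return FrameWriteRequest{}, false
		}
		wr := ws.q[0]
		ws.q = ws.q[1:]
		return wr, true
	}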
// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream.
type OpenStreamOptions struct {
	// PusherID is zero if the stream was initiated by the client. Otherwise,
	// PusherID names the stream that pushed the newly opened stream.
	PusherID uint32
}

// FrameWriteRequest is a request to write a frame.
type FrameWriteRequest struct {
	// write is the interface value that does the writing, once the
	// WriteScheduler has selected this frame to write. The write
	// functions are all defined in write.go.
	write writeFramer

	// stream is the stream on which this frame will be written.
	// nil for non-stream frames like PING and SETTINGS.
	stream *stream

	// done, if non-nil, must be a buffered channel with space for
	// 1 message and is sent the return value from write (or an
	// earlier error) when the frame has been written.
	done chan error
}

// StreamID returns the id of the stream this frame will be written to.
// 0 is used for non-stream frames such as PING and SETTINGS.
func (wr FrameWriteRequest) StreamID() uint32 {
	if wr.stream == nil {
		if se, ok := wr.write.(StreamError); ok {
			// (*serverConn).resetStream doesn't set
			// stream because it doesn't necessarily have
			// one. So special case this type of write
			// message.
			return se.StreamID
		}
		return 0
	}
	return wr.stream.id
}

// DataSize returns the number of flow control bytes that must be consumed
// to write this entire frame. This is 0 for non-DATA frames.
func (wr FrameWriteRequest) DataSize() int {
	if wd, ok := wr.write.(*writeData); ok {
		return len(wd.p)
	}
	return 0
}

// Consume consumes min(n, available) bytes from this frame, where available
// is the number of flow control bytes available on the stream. Consume returns
// 0, 1, or 2 frames, where the integer return value gives the number of frames
// returned.
//
// If flow control prevents consuming any bytes, this returns (_, _, 0). If
// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this
// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and
// 'rest' contains the remaining bytes. The consumed bytes are deducted from the
// underlying stream's flow control budget.
func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) {
	var empty FrameWriteRequest

	// Non-DATA frames are always consumed whole.
	wd, ok := wr.write.(*writeData)
	if !ok || len(wd.p) == 0 {
		return wr, empty, 1
	}

	// Might need to split after applying limits.
	allowed := wr.stream.flow.available()
	if n < allowed {
		allowed = n
	}
	if wr.stream.sc.maxFrameSize < allowed {
		allowed = wr.stream.sc.maxFrameSize
	}
	if allowed <= 0 {
		return empty, empty, 0
	}
	if len(wd.p) > int(allowed) {
		wr.stream.flow.take(allowed)
		consumed := FrameWriteRequest{
			stream: wr.stream,
			write: &writeData{
				streamID: wd.streamID,
				p:        wd.p[:allowed],
				// Even if the original had endStream set, there
				// are bytes remaining because len(wd.p) > allowed,
				// so we know endStream is false.
				endStream: false,
			},
			// Our caller is blocking on the final DATA frame, not
			// this intermediate frame, so no need to wait.
			done: nil,
		}
		rest := FrameWriteRequest{
			stream: wr.stream,
			write: &writeData{
				streamID:  wd.streamID,
				p:         wd.p[allowed:],
				endStream: wd.endStream,
			},
			done: wr.done,
		}
		return consumed, rest, 2
	}

	// The frame is consumed whole.
	// NB: This cast cannot overflow because allowed is <= math.MaxInt32.
	wr.stream.flow.take(int32(len(wd.p)))
	return wr, empty, 1
}
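The three-way return is easiest to read at a call site. A hypothetical caller following the intended pattern (writeQueue.consume, later in this file, is the real user in this package):

	// consumePattern shows how a scheduler applies Consume to the head of
	// a queue; it is a sketch, not part of the vendored code.
	func consumePattern(q []FrameWriteRequest, n int32) ([]FrameWriteRequest, FrameWriteRequest, bool) {
		if len(q) == 0 {
			return q, FrameWriteRequest{}, false
		}
		consumed, rest, numresult := q[0].Consume(n)
		switch numresult {
		case 0:
			// Flow control permitted nothing; leave the queue untouched.
			return q, FrameWriteRequest{}, false
		case 1:
			// Whole frame consumed; drop it from the head.
			q = q[1:]
		case 2:
			// Partially consumed; the remainder stays at the head.
			q[0] = rest
		}
		return q, consumed, true
	}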
// String is for debugging only.
func (wr FrameWriteRequest) String() string {
	var des string
	if s, ok := wr.write.(fmt.Stringer); ok {
		des = s.String()
	} else {
		des = fmt.Sprintf("%T", wr.write)
	}
	return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des)
}

// replyToWriter sends err to wr.done and panics if the send must block.
// This does nothing if wr.done is nil.
func (wr *FrameWriteRequest) replyToWriter(err error) {
	if wr.done == nil {
		return
	}
	select {
	case wr.done <- err:
	default:
		panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write))
	}
	wr.write = nil // prevent use (assume it's tainted after wr.done send)
}

// writeQueue is used by implementations of WriteScheduler.
type writeQueue struct {
	s []FrameWriteRequest
}

func (q *writeQueue) empty() bool { return len(q.s) == 0 }

func (q *writeQueue) push(wr FrameWriteRequest) {
	q.s = append(q.s, wr)
}

func (q *writeQueue) shift() FrameWriteRequest {
	if len(q.s) == 0 {
		panic("invalid use of queue")
	}
	wr := q.s[0]
	// TODO: less copy-happy queue.
	copy(q.s, q.s[1:])
	q.s[len(q.s)-1] = FrameWriteRequest{}
	q.s = q.s[:len(q.s)-1]
	return wr
}

// consume consumes up to n bytes from q.s[0]. If the frame is
// entirely consumed, it is removed from the queue. If the frame
// is partially consumed, the frame is kept with the consumed
// bytes removed. Returns true iff any bytes were consumed.
func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) {
	if len(q.s) == 0 {
		return FrameWriteRequest{}, false
	}
	consumed, rest, numresult := q.s[0].Consume(n)
	switch numresult {
	case 0:
		return FrameWriteRequest{}, false
	case 1:
		q.shift()
	case 2:
		q.s[0] = rest
	}
	return consumed, true
}

type writeQueuePool []*writeQueue

// put inserts an unused writeQueue into the pool.
func (p *writeQueuePool) put(q *writeQueue) {
	for i := range q.s {
		q.s[i] = FrameWriteRequest{}
	}
	q.s = q.s[:0]
	*p = append(*p, q)
}

// get returns an empty writeQueue.
func (p *writeQueuePool) get() *writeQueue {
	ln := len(*p)
	if ln == 0 {
		return new(writeQueue)
	}
	x := ln - 1
	q := (*p)[x]
	(*p)[x] = nil
	*p = (*p)[:x]
	return q
}
452 vendor/golang.org/x/net/http2/writesched_priority.go generated vendored Normal file
@@ -0,0 +1,452 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import (
	"fmt"
	"math"
	"sort"
)

// RFC 7540, Section 5.3.5: the default weight is 16.
const priorityDefaultWeight = 15 // 16 = 15 + 1

// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
type PriorityWriteSchedulerConfig struct {
	// MaxClosedNodesInTree controls the maximum number of closed streams to
	// retain in the priority tree. Setting this to zero saves a small amount
	// of memory at the cost of performance.
	//
	// See RFC 7540, Section 5.3.4:
	// "It is possible for a stream to become closed while prioritization
	// information ... is in transit. ... This potentially creates suboptimal
	// prioritization, since the stream could be given a priority that is
	// different from what is intended. To avoid these problems, an endpoint
	// SHOULD retain stream prioritization state for a period after streams
	// become closed. The longer state is retained, the lower the chance that
	// streams are assigned incorrect or default priority values."
	MaxClosedNodesInTree int

	// MaxIdleNodesInTree controls the maximum number of idle streams to
	// retain in the priority tree. Setting this to zero saves a small amount
	// of memory at the cost of performance.
	//
	// See RFC 7540, Section 5.3.4:
	// Similarly, streams that are in the "idle" state can be assigned
	// priority or become a parent of other streams. This allows for the
	// creation of a grouping node in the dependency tree, which enables
	// more flexible expressions of priority. Idle streams begin with a
	// default priority (Section 5.3.5).
	MaxIdleNodesInTree int

	// ThrottleOutOfOrderWrites enables write throttling to help ensure that
	// data is delivered in priority order. This works around a race where
	// stream B depends on stream A and both streams are about to call Write
	// to queue DATA frames. If B wins the race, a naive scheduler would eagerly
	// write as much data from B as possible, but this is suboptimal because A
	// is a higher-priority stream. With throttling enabled, we write a small
	// amount of data from B to minimize the amount of bandwidth that B can
	// steal from A.
	ThrottleOutOfOrderWrites bool
}

// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
// If cfg is nil, default options are used.
func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
	if cfg == nil {
		// For justification of these defaults, see:
		// https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
		cfg = &PriorityWriteSchedulerConfig{
			MaxClosedNodesInTree:     10,
			MaxIdleNodesInTree:       10,
			ThrottleOutOfOrderWrites: false,
		}
	}

	ws := &priorityWriteScheduler{
		nodes:                make(map[uint32]*priorityNode),
		maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
		maxIdleNodesInTree:   cfg.MaxIdleNodesInTree,
		enableWriteThrottle:  cfg.ThrottleOutOfOrderWrites,
	}
	ws.nodes[0] = &ws.root
	if cfg.ThrottleOutOfOrderWrites {
		ws.writeThrottleLimit = 1024
	} else {
		ws.writeThrottleLimit = math.MaxInt32
	}
	return ws
}
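NewPriorityWriteScheduler and its config struct are the exported entry points, so a short sketch of how a server might wire one in may help; this is hypothetical wiring, assuming imports "net/http" and "golang.org/x/net/http2", and the field values are illustrative rather than recommendations:

	func configurePriorityScheduler(srv *http.Server) error {
		return http2.ConfigureServer(srv, &http2.Server{
			NewWriteScheduler: func() http2.WriteScheduler {
				return http2.NewPriorityWriteScheduler(&http2.PriorityWriteSchedulerConfig{
					MaxClosedNodesInTree:     10,   // matches the package defaults
					MaxIdleNodesInTree:       10,   // allow idle "grouping" nodes
					ThrottleOutOfOrderWrites: true, // favor parents over dependents under contention
				})
			},
		})
	}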
type priorityNodeState int

const (
	priorityNodeOpen priorityNodeState = iota
	priorityNodeClosed
	priorityNodeIdle
)

// priorityNode is a node in an HTTP/2 priority tree.
// Each node is associated with a single stream ID.
// See RFC 7540, Section 5.3.
type priorityNode struct {
	q            writeQueue        // queue of pending frames to write
	id           uint32            // id of the stream, or 0 for the root of the tree
	weight       uint8             // the actual weight is weight+1, so the value is in [1,256]
	state        priorityNodeState // open | closed | idle
	bytes        int64             // number of bytes written by this node, or 0 if closed
	subtreeBytes int64             // sum(node.bytes) of all nodes in this subtree

	// These links form the priority tree.
	parent     *priorityNode
	kids       *priorityNode // start of the kids list
	prev, next *priorityNode // doubly-linked list of siblings
}

func (n *priorityNode) setParent(parent *priorityNode) {
	if n == parent {
		panic("setParent to self")
	}
	if n.parent == parent {
		return
	}
	// Unlink from current parent.
	if parent := n.parent; parent != nil {
		if n.prev == nil {
			parent.kids = n.next
		} else {
			n.prev.next = n.next
		}
		if n.next != nil {
			n.next.prev = n.prev
		}
	}
	// Link to new parent.
	// If parent=nil, remove n from the tree.
	// Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
	n.parent = parent
	if parent == nil {
		n.next = nil
		n.prev = nil
	} else {
		n.next = parent.kids
		n.prev = nil
		if n.next != nil {
			n.next.prev = n
		}
		parent.kids = n
	}
}

func (n *priorityNode) addBytes(b int64) {
	n.bytes += b
	for ; n != nil; n = n.parent {
		n.subtreeBytes += b
	}
}

// walkReadyInOrder iterates over the tree in priority order, calling f for each node
// with a non-empty write queue. When f returns true, this function returns true and the
// walk halts. tmp is used as scratch space for sorting.
//
// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
// if any ancestor p of n is still open (ignoring the root node).
func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
	if !n.q.empty() && f(n, openParent) {
		return true
	}
	if n.kids == nil {
		return false
	}

	// Don't consider the root "open" when updating openParent since
	// we can't send data frames on the root stream (only control frames).
	if n.id != 0 {
		openParent = openParent || (n.state == priorityNodeOpen)
	}

	// Common case: only one kid or all kids have the same weight.
	// Some clients don't use weights; other clients (like web browsers)
	// use mostly-linear priority trees.
	w := n.kids.weight
	needSort := false
	for k := n.kids.next; k != nil; k = k.next {
		if k.weight != w {
			needSort = true
			break
		}
	}
	if !needSort {
		for k := n.kids; k != nil; k = k.next {
			if k.walkReadyInOrder(openParent, tmp, f) {
				return true
			}
		}
		return false
	}

	// Uncommon case: sort the child nodes. We remove the kids from the parent,
	// then re-insert after sorting so we can reuse tmp for future sort calls.
	*tmp = (*tmp)[:0]
	for n.kids != nil {
		*tmp = append(*tmp, n.kids)
		n.kids.setParent(nil)
	}
	sort.Sort(sortPriorityNodeSiblings(*tmp))
	for i := len(*tmp) - 1; i >= 0; i-- {
		(*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
	}
	for k := n.kids; k != nil; k = k.next {
		if k.walkReadyInOrder(openParent, tmp, f) {
			return true
		}
	}
	return false
}

type sortPriorityNodeSiblings []*priorityNode

func (z sortPriorityNodeSiblings) Len() int      { return len(z) }
func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
func (z sortPriorityNodeSiblings) Less(i, k int) bool {
	// Prefer the subtree that has sent fewer bytes relative to its weight.
	// See sections 5.3.2 and 5.3.4.
	wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
	wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
	if bi == 0 && bk == 0 {
		return wi >= wk
	}
	if bk == 0 {
		return false
	}
	return bi/bk <= wi/wk
}
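To see the weighted-fairness comparison in numbers, a hypothetical pair of siblings (not from the source) works through Less as follows:

	// Sibling i has actual weight 16 and has sent 1000 subtree bytes;
	// sibling k has actual weight 8 and has sent 400. Then
	// bi/bk = 1000/400 = 2.5 and wi/wk = 16/8 = 2.0, so bi/bk > wi/wk:
	// i has already used more than its weight share and Less(0, 1) is
	// false, meaning k sorts first.
	func lessExample() bool {
		i := &priorityNode{weight: 15, subtreeBytes: 1000} // actual weight 16
		k := &priorityNode{weight: 7, subtreeBytes: 400}   // actual weight 8
		return sortPriorityNodeSiblings{i, k}.Less(0, 1)   // false: k is preferred
	}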
|
||||
|
||||
type priorityWriteScheduler struct {
|
||||
// root is the root of the priority tree, where root.id = 0.
|
||||
// The root queues control frames that are not associated with any stream.
|
||||
root priorityNode
|
||||
|
||||
// nodes maps stream ids to priority tree nodes.
|
||||
nodes map[uint32]*priorityNode
|
||||
|
||||
// maxID is the maximum stream id in nodes.
|
||||
maxID uint32
|
||||
|
||||
// lists of nodes that have been closed or are idle, but are kept in
|
||||
// the tree for improved prioritization. When the lengths exceed either
|
||||
// maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
|
||||
closedNodes, idleNodes []*priorityNode
|
||||
|
||||
// From the config.
|
||||
maxClosedNodesInTree int
|
||||
maxIdleNodesInTree int
|
||||
writeThrottleLimit int32
|
||||
enableWriteThrottle bool
|
||||
|
||||
// tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
|
||||
tmp []*priorityNode
|
||||
|
||||
// pool of empty queues for reuse.
|
||||
queuePool writeQueuePool
|
||||
}
|
||||
|
||||
func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
|
||||
// The stream may be currently idle but cannot be opened or closed.
|
||||
if curr := ws.nodes[streamID]; curr != nil {
|
||||
if curr.state != priorityNodeIdle {
|
||||
panic(fmt.Sprintf("stream %d already opened", streamID))
|
||||
}
|
||||
curr.state = priorityNodeOpen
|
||||
return
|
||||
}
|
||||
|
||||
// RFC 7540, Section 5.3.5:
|
||||
// "All streams are initially assigned a non-exclusive dependency on stream 0x0.
|
||||
// Pushed streams initially depend on their associated stream. In both cases,
|
||||
// streams are assigned a default weight of 16."
|
||||
parent := ws.nodes[options.PusherID]
|
||||
if parent == nil {
|
||||
parent = &ws.root
|
||||
}
|
||||
n := &priorityNode{
|
||||
q: *ws.queuePool.get(),
|
||||
id: streamID,
|
||||
weight: priorityDefaultWeight,
|
||||
state: priorityNodeOpen,
|
||||
}
|
||||
n.setParent(parent)
|
||||
ws.nodes[streamID] = n
|
||||
if streamID > ws.maxID {
|
||||
ws.maxID = streamID
|
||||
}
|
||||
}
|
||||
|
||||
func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
|
||||
if streamID == 0 {
|
||||
panic("violation of WriteScheduler interface: cannot close stream 0")
|
||||
}
|
||||
if ws.nodes[streamID] == nil {
|
||||
panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
|
||||
}
|
||||
if ws.nodes[streamID].state != priorityNodeOpen {
|
||||
panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
|
||||
}
|
||||
|
||||
n := ws.nodes[streamID]
|
||||
n.state = priorityNodeClosed
|
||||
n.addBytes(-n.bytes)
|
||||
|
||||
q := n.q
|
||||
ws.queuePool.put(&q)
|
||||
n.q.s = nil
|
||||
if ws.maxClosedNodesInTree > 0 {
|
||||
ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
|
||||
} else {
|
||||
ws.removeNode(n)
|
||||
}
|
||||
}
|
||||
|
||||
func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
|
||||
if streamID == 0 {
|
||||
panic("adjustPriority on root")
|
||||
}
|
||||
|
||||
// If streamID does not exist, there are two cases:
|
||||
// - A closed stream that has been removed (this will have ID <= maxID)
|
||||
// - An idle stream that is being used for "grouping" (this will have ID > maxID)
|
||||
n := ws.nodes[streamID]
|
||||
if n == nil {
|
||||
if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
|
||||
return
|
||||
}
|
||||
ws.maxID = streamID
|
||||
n = &priorityNode{
|
||||
q: *ws.queuePool.get(),
|
||||
id: streamID,
|
||||
weight: priorityDefaultWeight,
|
||||
state: priorityNodeIdle,
|
||||
}
|
||||
n.setParent(&ws.root)
|
||||
ws.nodes[streamID] = n
|
||||
ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
|
||||
}
|
||||
|
||||
// Section 5.3.1: A dependency on a stream that is not currently in the tree
|
||||
// results in that stream being given a default priority (Section 5.3.5).
|
||||
parent := ws.nodes[priority.StreamDep]
|
||||
if parent == nil {
|
||||
n.setParent(&ws.root)
|
||||
n.weight = priorityDefaultWeight
|
||||
return
|
||||
}
|
||||
|
||||
// Ignore if the client tries to make a node its own parent.
|
||||
if n == parent {
|
||||
return
|
||||
}
|
||||
|
||||
// Section 5.3.3:
|
||||
// "If a stream is made dependent on one of its own dependencies, the
|
||||
// formerly dependent stream is first moved to be dependent on the
|
||||
// reprioritized stream's previous parent. The moved dependency retains
|
||||
// its weight."
|
||||
//
|
||||
// That is: if parent depends on n, move parent to depend on n.parent.
|
||||
for x := parent.parent; x != nil; x = x.parent {
|
||||
if x == n {
|
||||
parent.setParent(n.parent)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Section 5.3.3: The exclusive flag causes the stream to become the sole
|
||||
// dependency of its parent stream, causing other dependencies to become
|
||||
// dependent on the exclusive stream.
|
||||
if priority.Exclusive {
|
||||
k := parent.kids
|
||||
for k != nil {
|
||||
next := k.next
|
||||
if k != n {
|
||||
k.setParent(n)
|
||||
}
|
||||
k = next
|
||||
}
|
||||
}
|
||||
|
||||
n.setParent(parent)
|
||||
n.weight = priority.Weight
|
||||
}
|
||||
|
||||
func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
|
||||
var n *priorityNode
|
||||
if id := wr.StreamID(); id == 0 {
|
||||
n = &ws.root
|
||||
} else {
|
||||
n = ws.nodes[id]
|
||||
if n == nil {
|
||||
// id is an idle or closed stream. wr should not be a HEADERS or
|
||||
// DATA frame. However, wr can be a RST_STREAM. In this case, we
|
||||
// push wr onto the root, rather than creating a new priorityNode,
|
||||
// since RST_STREAM is tiny and the stream's priority is unknown
|
||||
// anyway. See issue #17919.
|
||||
if wr.DataSize() > 0 {
|
||||
panic("add DATA on non-open stream")
|
||||
}
|
||||
n = &ws.root
|
||||
}
|
||||
}
|
||||
n.q.push(wr)
|
||||
}
|
||||
|
||||
func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
|
||||
ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
|
||||
limit := int32(math.MaxInt32)
|
||||
if openParent {
|
||||
limit = ws.writeThrottleLimit
|
||||
}
|
||||
wr, ok = n.q.consume(limit)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
n.addBytes(int64(wr.DataSize()))
|
||||
// If B depends on A and B continuously has data available but A
|
||||
// does not, gradually increase the throttling limit to allow B to
|
||||
// steal more and more bandwidth from A.
|
||||
if openParent {
|
||||
ws.writeThrottleLimit += 1024
|
||||
if ws.writeThrottleLimit < 0 {
|
||||
ws.writeThrottleLimit = math.MaxInt32
|
||||
}
|
||||
} else if ws.enableWriteThrottle {
|
||||
ws.writeThrottleLimit = 1024
|
||||
}
|
||||
return true
|
||||
})
|
||||
return wr, ok
|
||||
}
|
||||
|
||||
func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) {
|
||||
if maxSize == 0 {
|
||||
return
|
||||
}
|
||||
if len(*list) == maxSize {
|
||||
// Remove the oldest node, then shift left.
|
||||
ws.removeNode((*list)[0])
|
||||
x := (*list)[1:]
|
||||
copy(*list, x)
|
||||
*list = (*list)[:len(x)]
|
||||
}
|
||||
*list = append(*list, n)
|
||||
}
|
||||
|
||||
func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
|
||||
for k := n.kids; k != nil; k = k.next {
|
||||
k.setParent(n.parent)
|
||||
}
|
||||
n.setParent(nil)
|
||||
delete(ws.nodes, n.id)
|
||||
}
|
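Not part of the diff: a minimal usage sketch of the scheduler above, assuming it runs inside package http2 (the types are unexported); makeWriteHeadersRequest is the test helper this commit adds in writesched_test.go. Pop drains control frames first and otherwise walks the dependency tree parent-first, so stream 1 is served before its pushed child.

func sketchPrioritySchedulerUse() {
	ws := NewPriorityWriteScheduler(nil)
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) // pushed stream, depends on stream 1
	ws.Push(makeWriteHeadersRequest(2))
	ws.Push(makeWriteHeadersRequest(1))
	for {
		wr, ok := ws.Pop() // yields stream 1's HEADERS before stream 2's
		if !ok {
			break
		}
		_ = wr // a real server would frame and send wr here
	}
}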
541
vendor/golang.org/x/net/http2/writesched_priority_test.go
generated
vendored
Normal file
@@ -0,0 +1,541 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import (
	"bytes"
	"fmt"
	"sort"
	"testing"
)

func defaultPriorityWriteScheduler() *priorityWriteScheduler {
	return NewPriorityWriteScheduler(nil).(*priorityWriteScheduler)
}

func checkPriorityWellFormed(ws *priorityWriteScheduler) error {
	for id, n := range ws.nodes {
		if id != n.id {
			return fmt.Errorf("bad ws.nodes: ws.nodes[%d] = %d", id, n.id)
		}
		if n.parent == nil {
			if n.next != nil || n.prev != nil {
				return fmt.Errorf("bad node %d: nil parent but prev/next not nil", id)
			}
			continue
		}
		found := false
		for k := n.parent.kids; k != nil; k = k.next {
			if k.id == id {
				found = true
				break
			}
		}
		if !found {
			return fmt.Errorf("bad node %d: not found in parent %d kids list", id, n.parent.id)
		}
	}
	return nil
}

func fmtTree(ws *priorityWriteScheduler, fmtNode func(*priorityNode) string) string {
	var ids []int
	for _, n := range ws.nodes {
		ids = append(ids, int(n.id))
	}
	sort.Ints(ids)

	var buf bytes.Buffer
	for _, id := range ids {
		if buf.Len() != 0 {
			buf.WriteString(" ")
		}
		if id == 0 {
			buf.WriteString(fmtNode(&ws.root))
		} else {
			buf.WriteString(fmtNode(ws.nodes[uint32(id)]))
		}
	}
	return buf.String()
}

func fmtNodeParentSkipRoot(n *priorityNode) string {
	switch {
	case n.id == 0:
		return ""
	case n.parent == nil:
		return fmt.Sprintf("%d{parent:nil}", n.id)
	default:
		return fmt.Sprintf("%d{parent:%d}", n.id, n.parent.id)
	}
}

func fmtNodeWeightParentSkipRoot(n *priorityNode) string {
	switch {
	case n.id == 0:
		return ""
	case n.parent == nil:
		return fmt.Sprintf("%d{weight:%d,parent:nil}", n.id, n.weight)
	default:
		return fmt.Sprintf("%d{weight:%d,parent:%d}", n.id, n.weight, n.parent.id)
	}
}

func TestPriorityTwoStreams(t *testing.T) {
	ws := defaultPriorityWriteScheduler()
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{})

	want := "1{weight:15,parent:0} 2{weight:15,parent:0}"
	if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
		t.Errorf("After open\ngot %q\nwant %q", got, want)
	}

	// Move 1's parent to 2.
	ws.AdjustStream(1, PriorityParam{
		StreamDep: 2,
		Weight:    32,
		Exclusive: false,
	})
	want = "1{weight:32,parent:2} 2{weight:15,parent:0}"
	if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
		t.Errorf("After adjust\ngot %q\nwant %q", got, want)
	}

	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}
}

func TestPriorityAdjustExclusiveZero(t *testing.T) {
	// 1, 2, and 3 are all children of the 0 stream.
	// Exclusive reprioritization to any of the streams should bring
	// the rest of the streams under the reprioritized stream.
	ws := defaultPriorityWriteScheduler()
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{})
	ws.OpenStream(3, OpenStreamOptions{})

	want := "1{weight:15,parent:0} 2{weight:15,parent:0} 3{weight:15,parent:0}"
	if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
		t.Errorf("After open\ngot %q\nwant %q", got, want)
	}

	ws.AdjustStream(2, PriorityParam{
		StreamDep: 0,
		Weight:    20,
		Exclusive: true,
	})
	want = "1{weight:15,parent:2} 2{weight:20,parent:0} 3{weight:15,parent:2}"
	if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
		t.Errorf("After adjust\ngot %q\nwant %q", got, want)
	}

	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}
}

func TestPriorityAdjustOwnParent(t *testing.T) {
	// Assigning a node as its own parent should have no effect.
	ws := defaultPriorityWriteScheduler()
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{})
	ws.AdjustStream(2, PriorityParam{
		StreamDep: 2,
		Weight:    20,
		Exclusive: true,
	})
	want := "1{weight:15,parent:0} 2{weight:15,parent:0}"
	if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
		t.Errorf("After adjust\ngot %q\nwant %q", got, want)
	}
	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}
}

func TestPriorityClosedStreams(t *testing.T) {
	ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{MaxClosedNodesInTree: 2}).(*priorityWriteScheduler)
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
	ws.OpenStream(3, OpenStreamOptions{PusherID: 2})
	ws.OpenStream(4, OpenStreamOptions{PusherID: 3})

	// Close the first three streams. We lose 1, but keep 2 and 3.
	ws.CloseStream(1)
	ws.CloseStream(2)
	ws.CloseStream(3)

	want := "2{weight:15,parent:0} 3{weight:15,parent:2} 4{weight:15,parent:3}"
	if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
		t.Errorf("After close\ngot %q\nwant %q", got, want)
	}
	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}

	// Adding a stream as an exclusive child of 1 gives it default
	// priorities, since 1 is gone.
	ws.OpenStream(5, OpenStreamOptions{})
	ws.AdjustStream(5, PriorityParam{StreamDep: 1, Weight: 15, Exclusive: true})

	// Adding a stream as an exclusive child of 2 should work, since 2 is not gone.
	ws.OpenStream(6, OpenStreamOptions{})
	ws.AdjustStream(6, PriorityParam{StreamDep: 2, Weight: 15, Exclusive: true})

	want = "2{weight:15,parent:0} 3{weight:15,parent:6} 4{weight:15,parent:3} 5{weight:15,parent:0} 6{weight:15,parent:2}"
	if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
		t.Errorf("After add streams\ngot %q\nwant %q", got, want)
	}
	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}
}

func TestPriorityClosedStreamsDisabled(t *testing.T) {
	ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{}).(*priorityWriteScheduler)
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
	ws.OpenStream(3, OpenStreamOptions{PusherID: 2})

	// Close the first two streams. We keep only 3.
	ws.CloseStream(1)
	ws.CloseStream(2)

	want := "3{weight:15,parent:0}"
	if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
		t.Errorf("After close\ngot %q\nwant %q", got, want)
	}
	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}
}

func TestPriorityIdleStreams(t *testing.T) {
	ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{MaxIdleNodesInTree: 2}).(*priorityWriteScheduler)
	ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 15}) // idle
	ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 15}) // idle
	ws.AdjustStream(3, PriorityParam{StreamDep: 2, Weight: 20}) // idle
	ws.OpenStream(4, OpenStreamOptions{})
	ws.OpenStream(5, OpenStreamOptions{})
	ws.OpenStream(6, OpenStreamOptions{})
	ws.AdjustStream(4, PriorityParam{StreamDep: 1, Weight: 15})
	ws.AdjustStream(5, PriorityParam{StreamDep: 2, Weight: 15})
	ws.AdjustStream(6, PriorityParam{StreamDep: 3, Weight: 15})

	want := "2{weight:15,parent:0} 3{weight:20,parent:2} 4{weight:15,parent:0} 5{weight:15,parent:2} 6{weight:15,parent:3}"
	if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
		t.Errorf("After open\ngot %q\nwant %q", got, want)
	}
	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}
}

func TestPriorityIdleStreamsDisabled(t *testing.T) {
	ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{}).(*priorityWriteScheduler)
	ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 15}) // idle
	ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 15}) // idle
	ws.AdjustStream(3, PriorityParam{StreamDep: 2, Weight: 20}) // idle
	ws.OpenStream(4, OpenStreamOptions{})

	want := "4{weight:15,parent:0}"
	if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
		t.Errorf("After open\ngot %q\nwant %q", got, want)
	}
	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}
}

func TestPrioritySection531NonExclusive(t *testing.T) {
	// Example from RFC 7540 Section 5.3.1.
	// A,B,C,D = 1,2,3,4
	ws := defaultPriorityWriteScheduler()
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
	ws.OpenStream(3, OpenStreamOptions{PusherID: 1})
	ws.OpenStream(4, OpenStreamOptions{})
	ws.AdjustStream(4, PriorityParam{
		StreamDep: 1,
		Weight:    15,
		Exclusive: false,
	})
	want := "1{parent:0} 2{parent:1} 3{parent:1} 4{parent:1}"
	if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want {
		t.Errorf("After adjust\ngot %q\nwant %q", got, want)
	}
	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}
}

func TestPrioritySection531Exclusive(t *testing.T) {
	// Example from RFC 7540 Section 5.3.1.
	// A,B,C,D = 1,2,3,4
	ws := defaultPriorityWriteScheduler()
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
	ws.OpenStream(3, OpenStreamOptions{PusherID: 1})
	ws.OpenStream(4, OpenStreamOptions{})
	ws.AdjustStream(4, PriorityParam{
		StreamDep: 1,
		Weight:    15,
		Exclusive: true,
	})
	want := "1{parent:0} 2{parent:4} 3{parent:4} 4{parent:1}"
	if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want {
		t.Errorf("After adjust\ngot %q\nwant %q", got, want)
	}
	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}
}

func makeSection533Tree() *priorityWriteScheduler {
	// Initial tree from RFC 7540 Section 5.3.3.
	// A,B,C,D,E,F = 1,2,3,4,5,6
	ws := defaultPriorityWriteScheduler()
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
	ws.OpenStream(3, OpenStreamOptions{PusherID: 1})
	ws.OpenStream(4, OpenStreamOptions{PusherID: 3})
	ws.OpenStream(5, OpenStreamOptions{PusherID: 3})
	ws.OpenStream(6, OpenStreamOptions{PusherID: 4})
	return ws
}

func TestPrioritySection533NonExclusive(t *testing.T) {
	// Example from RFC 7540 Section 5.3.3.
	// A,B,C,D,E,F = 1,2,3,4,5,6
	ws := defaultPriorityWriteScheduler()
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
	ws.OpenStream(3, OpenStreamOptions{PusherID: 1})
	ws.OpenStream(4, OpenStreamOptions{PusherID: 3})
	ws.OpenStream(5, OpenStreamOptions{PusherID: 3})
	ws.OpenStream(6, OpenStreamOptions{PusherID: 4})
	ws.AdjustStream(1, PriorityParam{
		StreamDep: 4,
		Weight:    15,
		Exclusive: false,
	})
	want := "1{parent:4} 2{parent:1} 3{parent:1} 4{parent:0} 5{parent:3} 6{parent:4}"
	if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want {
		t.Errorf("After adjust\ngot %q\nwant %q", got, want)
	}
	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}
}

func TestPrioritySection533Exclusive(t *testing.T) {
	// Example from RFC 7540 Section 5.3.3.
	// A,B,C,D,E,F = 1,2,3,4,5,6
	ws := defaultPriorityWriteScheduler()
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
	ws.OpenStream(3, OpenStreamOptions{PusherID: 1})
	ws.OpenStream(4, OpenStreamOptions{PusherID: 3})
	ws.OpenStream(5, OpenStreamOptions{PusherID: 3})
	ws.OpenStream(6, OpenStreamOptions{PusherID: 4})
	ws.AdjustStream(1, PriorityParam{
		StreamDep: 4,
		Weight:    15,
		Exclusive: true,
	})
	want := "1{parent:4} 2{parent:1} 3{parent:1} 4{parent:0} 5{parent:3} 6{parent:1}"
	if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want {
		t.Errorf("After adjust\ngot %q\nwant %q", got, want)
	}
	if err := checkPriorityWellFormed(ws); err != nil {
		t.Error(err)
	}
}

func checkPopAll(ws WriteScheduler, order []uint32) error {
	for k, id := range order {
		wr, ok := ws.Pop()
		if !ok {
			return fmt.Errorf("Pop[%d]: got ok=false, want %d (order=%v)", k, id, order)
		}
		if got := wr.StreamID(); got != id {
			return fmt.Errorf("Pop[%d]: got %v, want %d (order=%v)", k, got, id, order)
		}
	}
	wr, ok := ws.Pop()
	if ok {
		return fmt.Errorf("Pop[%d]: got %v, want ok=false (order=%v)", len(order), wr.StreamID(), order)
	}
	return nil
}

func TestPriorityPopFrom533Tree(t *testing.T) {
	ws := makeSection533Tree()

	ws.Push(makeWriteHeadersRequest(3 /*C*/))
	ws.Push(makeWriteNonStreamRequest())
	ws.Push(makeWriteHeadersRequest(5 /*E*/))
	ws.Push(makeWriteHeadersRequest(1 /*A*/))
	t.Log("tree:", fmtTree(ws, fmtNodeParentSkipRoot))

	if err := checkPopAll(ws, []uint32{0 /*NonStream*/, 1, 3, 5}); err != nil {
		t.Error(err)
	}
}

func TestPriorityPopFromLinearTree(t *testing.T) {
	ws := defaultPriorityWriteScheduler()
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
	ws.OpenStream(3, OpenStreamOptions{PusherID: 2})
	ws.OpenStream(4, OpenStreamOptions{PusherID: 3})

	ws.Push(makeWriteHeadersRequest(3))
	ws.Push(makeWriteHeadersRequest(4))
	ws.Push(makeWriteHeadersRequest(1))
	ws.Push(makeWriteHeadersRequest(2))
	ws.Push(makeWriteNonStreamRequest())
	ws.Push(makeWriteNonStreamRequest())
	t.Log("tree:", fmtTree(ws, fmtNodeParentSkipRoot))

	if err := checkPopAll(ws, []uint32{0, 0 /*NonStreams*/, 1, 2, 3, 4}); err != nil {
		t.Error(err)
	}
}

func TestPriorityFlowControl(t *testing.T) {
	ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ThrottleOutOfOrderWrites: false})
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{PusherID: 1})

	sc := &serverConn{maxFrameSize: 16}
	st1 := &stream{id: 1, sc: sc}
	st2 := &stream{id: 2, sc: sc}

	ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 16), false}, st1, nil})
	ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 16), false}, st2, nil})
	ws.AdjustStream(2, PriorityParam{StreamDep: 1})

	// No flow-control bytes available.
	if wr, ok := ws.Pop(); ok {
		t.Fatalf("Pop(limited by flow control)=%v,true, want false", wr)
	}

	// Add enough flow-control bytes to write st2 in two Pop calls.
	// Should write data from st2 even though it's lower priority than st1.
	for i := 1; i <= 2; i++ {
		st2.flow.add(8)
		wr, ok := ws.Pop()
		if !ok {
			t.Fatalf("Pop(%d)=false, want true", i)
		}
		if got, want := wr.DataSize(), 8; got != want {
			t.Fatalf("Pop(%d)=%d bytes, want %d bytes", i, got, want)
		}
	}
}

func TestPriorityThrottleOutOfOrderWrites(t *testing.T) {
	ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ThrottleOutOfOrderWrites: true})
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{PusherID: 1})

	sc := &serverConn{maxFrameSize: 4096}
	st1 := &stream{id: 1, sc: sc}
	st2 := &stream{id: 2, sc: sc}
	st1.flow.add(4096)
	st2.flow.add(4096)
	ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 4096), false}, st2, nil})
	ws.AdjustStream(2, PriorityParam{StreamDep: 1})

	// We have enough flow-control bytes to write st2 in a single Pop call.
	// However, due to out-of-order write throttling, the first call should
	// only write 1KB.
	wr, ok := ws.Pop()
	if !ok {
		t.Fatalf("Pop(st2.first)=false, want true")
	}
	if got, want := wr.StreamID(), uint32(2); got != want {
		t.Fatalf("Pop(st2.first)=stream %d, want stream %d", got, want)
	}
	if got, want := wr.DataSize(), 1024; got != want {
		t.Fatalf("Pop(st2.first)=%d bytes, want %d bytes", got, want)
	}

	// Now add data on st1. This should take precedence.
	ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 4096), false}, st1, nil})
	wr, ok = ws.Pop()
	if !ok {
		t.Fatalf("Pop(st1)=false, want true")
	}
	if got, want := wr.StreamID(), uint32(1); got != want {
		t.Fatalf("Pop(st1)=stream %d, want stream %d", got, want)
	}
	if got, want := wr.DataSize(), 4096; got != want {
		t.Fatalf("Pop(st1)=%d bytes, want %d bytes", got, want)
	}

	// Should go back to writing 1KB from st2.
	wr, ok = ws.Pop()
	if !ok {
		t.Fatalf("Pop(st2.last)=false, want true")
	}
	if got, want := wr.StreamID(), uint32(2); got != want {
		t.Fatalf("Pop(st2.last)=stream %d, want stream %d", got, want)
	}
	if got, want := wr.DataSize(), 1024; got != want {
		t.Fatalf("Pop(st2.last)=%d bytes, want %d bytes", got, want)
	}
}

func TestPriorityWeights(t *testing.T) {
	ws := defaultPriorityWriteScheduler()
	ws.OpenStream(1, OpenStreamOptions{})
	ws.OpenStream(2, OpenStreamOptions{})

	sc := &serverConn{maxFrameSize: 8}
	st1 := &stream{id: 1, sc: sc}
	st2 := &stream{id: 2, sc: sc}
	st1.flow.add(40)
	st2.flow.add(40)

	ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 40), false}, st1, nil})
	ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 40), false}, st2, nil})
	ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 34})
	ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 9})

	// st1 gets 3.5x the bandwidth of st2 (3.5 = (34+1)/(9+1)).
	// The maximum frame size is 8 bytes. The write sequence should be:
	//   st1, total bytes so far is (st1=8,  st2=0)
	//   st2, total bytes so far is (st1=8,  st2=8)
	//   st1, total bytes so far is (st1=16, st2=8)
	//   st1, total bytes so far is (st1=24, st2=8)  // 3x bandwidth
	//   st1, total bytes so far is (st1=32, st2=8)  // 4x bandwidth
	//   st2, total bytes so far is (st1=32, st2=16) // 2x bandwidth
	//   st1, total bytes so far is (st1=40, st2=16)
	//   st2, total bytes so far is (st1=40, st2=24)
	//   st2, total bytes so far is (st1=40, st2=32)
	//   st2, total bytes so far is (st1=40, st2=40)
	if err := checkPopAll(ws, []uint32{1, 2, 1, 1, 1, 2, 1, 2, 2, 2}); err != nil {
		t.Error(err)
	}
}

func TestPriorityRstStreamOnNonOpenStreams(t *testing.T) {
	ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{
		MaxClosedNodesInTree: 0,
		MaxIdleNodesInTree:   0,
	})
	ws.OpenStream(1, OpenStreamOptions{})
	ws.CloseStream(1)
	ws.Push(FrameWriteRequest{write: streamError(1, ErrCodeProtocol)})
	ws.Push(FrameWriteRequest{write: streamError(2, ErrCodeProtocol)})

	if err := checkPopAll(ws, []uint32{1, 2}); err != nil {
		t.Error(err)
	}
}
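Not part of the diff: the 3.5x ratio asserted in TestPriorityWeights above follows from RFC 7540, Section 5.3.2, which transmits a weight of 1..256 as the byte value weight-1. A helper (the name is illustrative, not from this package) makes the arithmetic explicit:

// effectiveWeight is a hypothetical helper: stored weights 34 and 9 give an
// effective ratio of (34+1)/(9+1) = 3.5.
func effectiveWeight(wireWeight uint8) int { return int(wireWeight) + 1 }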
72
vendor/golang.org/x/net/http2/writesched_random.go
generated
vendored
Normal file
@@ -0,0 +1,72 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import "math"

// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2
// priorities. Control frames like SETTINGS and PING are written before DATA
// frames, but if no control frames are queued and multiple streams have queued
// HEADERS or DATA frames, Pop selects a ready stream arbitrarily.
func NewRandomWriteScheduler() WriteScheduler {
	return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)}
}

type randomWriteScheduler struct {
	// zero are frames not associated with a specific stream.
	zero writeQueue

	// sq contains the stream-specific queues, keyed by stream ID.
	// When a stream is idle or closed, it's deleted from the map.
	sq map[uint32]*writeQueue

	// pool of empty queues for reuse.
	queuePool writeQueuePool
}

func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
	// no-op: idle streams are not tracked
}

func (ws *randomWriteScheduler) CloseStream(streamID uint32) {
	q, ok := ws.sq[streamID]
	if !ok {
		return
	}
	delete(ws.sq, streamID)
	ws.queuePool.put(q)
}

func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
	// no-op: priorities are ignored
}

func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) {
	id := wr.StreamID()
	if id == 0 {
		ws.zero.push(wr)
		return
	}
	q, ok := ws.sq[id]
	if !ok {
		q = ws.queuePool.get()
		ws.sq[id] = q
	}
	q.push(wr)
}

func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) {
	// Control frames first.
	if !ws.zero.empty() {
		return ws.zero.shift(), true
	}
	// Iterate over all non-idle streams until finding one that can be consumed.
	for _, q := range ws.sq {
		if wr, ok := q.consume(math.MaxInt32); ok {
			return wr, true
		}
	}
	return FrameWriteRequest{}, false
}
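Not part of the diff: a minimal sketch of the random scheduler's contract, again assuming package http2 and the makeWrite* helpers from this commit. Only the stream-0 ordering is deterministic; per-stream order follows Go's randomized map iteration.

func sketchRandomSchedulerUse() {
	ws := NewRandomWriteScheduler()
	ws.Push(makeWriteNonStreamRequest()) // e.g. a SETTINGS ack, carried on stream 0
	ws.Push(makeWriteHeadersRequest(1))
	ws.Push(makeWriteHeadersRequest(2))
	if wr, ok := ws.Pop(); ok {
		_ = wr.StreamID() // always 0 first; streams 1 and 2 follow in arbitrary order
	}
}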
44
vendor/golang.org/x/net/http2/writesched_random_test.go
generated
vendored
Normal file
@@ -0,0 +1,44 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import "testing"

func TestRandomScheduler(t *testing.T) {
	ws := NewRandomWriteScheduler()
	ws.Push(makeWriteHeadersRequest(3))
	ws.Push(makeWriteHeadersRequest(4))
	ws.Push(makeWriteHeadersRequest(1))
	ws.Push(makeWriteHeadersRequest(2))
	ws.Push(makeWriteNonStreamRequest())
	ws.Push(makeWriteNonStreamRequest())

	// Pop all frames. Should get the non-stream requests first,
	// followed by the stream requests in any order.
	var order []FrameWriteRequest
	for {
		wr, ok := ws.Pop()
		if !ok {
			break
		}
		order = append(order, wr)
	}
	t.Logf("got frames: %v", order)
	if len(order) != 6 {
		t.Fatalf("got %d frames, expected 6", len(order))
	}
	if order[0].StreamID() != 0 || order[1].StreamID() != 0 {
		t.Fatal("expected non-stream frames first", order[0], order[1])
	}
	got := make(map[uint32]bool)
	for _, wr := range order[2:] {
		got[wr.StreamID()] = true
	}
	for id := uint32(1); id <= 4; id++ {
		if !got[id] {
			t.Errorf("frame not found for stream %d", id)
		}
	}
}
125
vendor/golang.org/x/net/http2/writesched_test.go
generated
vendored
Normal file
@@ -0,0 +1,125 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import (
	"fmt"
	"math"
	"reflect"
	"testing"
)

func makeWriteNonStreamRequest() FrameWriteRequest {
	return FrameWriteRequest{writeSettingsAck{}, nil, nil}
}

func makeWriteHeadersRequest(streamID uint32) FrameWriteRequest {
	st := &stream{id: streamID}
	return FrameWriteRequest{&writeResHeaders{streamID: streamID, httpResCode: 200}, st, nil}
}

func checkConsume(wr FrameWriteRequest, nbytes int32, want []FrameWriteRequest) error {
	consumed, rest, n := wr.Consume(nbytes)
	var wantConsumed, wantRest FrameWriteRequest
	switch len(want) {
	case 0:
	case 1:
		wantConsumed = want[0]
	case 2:
		wantConsumed = want[0]
		wantRest = want[1]
	}
	if !reflect.DeepEqual(consumed, wantConsumed) || !reflect.DeepEqual(rest, wantRest) || n != len(want) {
		return fmt.Errorf("got %v, %v, %v\nwant %v, %v, %v", consumed, rest, n, wantConsumed, wantRest, len(want))
	}
	return nil
}

func TestFrameWriteRequestNonData(t *testing.T) {
	wr := makeWriteNonStreamRequest()
	if got, want := wr.DataSize(), 0; got != want {
		t.Errorf("DataSize: got %v, want %v", got, want)
	}

	// Non-DATA frames are always consumed whole.
	if err := checkConsume(wr, 0, []FrameWriteRequest{wr}); err != nil {
		t.Errorf("Consume:\n%v", err)
	}
}

func TestFrameWriteRequestData(t *testing.T) {
	st := &stream{
		id: 1,
		sc: &serverConn{maxFrameSize: 16},
	}
	const size = 32
	wr := FrameWriteRequest{&writeData{st.id, make([]byte, size), true}, st, make(chan error)}
	if got, want := wr.DataSize(), size; got != want {
		t.Errorf("DataSize: got %v, want %v", got, want)
	}

	// No flow-control bytes available: cannot consume anything.
	if err := checkConsume(wr, math.MaxInt32, []FrameWriteRequest{}); err != nil {
		t.Errorf("Consume(limited by flow control):\n%v", err)
	}

	// Add enough flow-control bytes to consume the entire frame,
	// but we're now restricted by st.sc.maxFrameSize.
	st.flow.add(size)
	want := []FrameWriteRequest{
		{
			write:  &writeData{st.id, make([]byte, st.sc.maxFrameSize), false},
			stream: st,
			done:   nil,
		},
		{
			write:  &writeData{st.id, make([]byte, size-st.sc.maxFrameSize), true},
			stream: st,
			done:   wr.done,
		},
	}
	if err := checkConsume(wr, math.MaxInt32, want); err != nil {
		t.Errorf("Consume(limited by maxFrameSize):\n%v", err)
	}
	rest := want[1]

	// Consume 8 bytes from the remaining frame.
	want = []FrameWriteRequest{
		{
			write:  &writeData{st.id, make([]byte, 8), false},
			stream: st,
			done:   nil,
		},
		{
			write:  &writeData{st.id, make([]byte, size-st.sc.maxFrameSize-8), true},
			stream: st,
			done:   wr.done,
		},
	}
	if err := checkConsume(rest, 8, want); err != nil {
		t.Errorf("Consume(8):\n%v", err)
	}
	rest = want[1]

	// Consume all remaining bytes.
	want = []FrameWriteRequest{
		{
			write:  &writeData{st.id, make([]byte, size-st.sc.maxFrameSize-8), true},
			stream: st,
			done:   wr.done,
		},
	}
	if err := checkConsume(rest, math.MaxInt32, want); err != nil {
		t.Errorf("Consume(remainder):\n%v", err)
	}
}

func TestFrameWriteRequest_StreamID(t *testing.T) {
	const streamID = 123
	wr := FrameWriteRequest{write: streamError(streamID, ErrCodeNo)}
	if got := wr.StreamID(); got != streamID {
		t.Errorf("FrameWriteRequest(StreamError) = %v; want %v", got, streamID)
	}
}
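Not part of the diff: the tests above pivot on FrameWriteRequest.Consume, which returns zero, one, or two requests depending on how much can be written. A sketch of the three cases, assuming package http2:

func sketchConsume(wr FrameWriteRequest) {
	// Take at most 8 flow-control bytes from wr.
	consumed, rest, n := wr.Consume(8)
	switch n {
	case 0:
		// Nothing writable: no flow-control bytes were available.
	case 1:
		// The whole request fit; non-DATA frames always take this path.
		_ = consumed
	case 2:
		// Split: consumed carries the writable prefix, while rest keeps the
		// remainder and the original done channel for a later Pop.
		_, _ = consumed, rest
	}
}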
356
vendor/golang.org/x/net/http2/z_spec_test.go
generated
vendored
Normal file
@@ -0,0 +1,356 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import (
	"bytes"
	"encoding/xml"
	"flag"
	"fmt"
	"io"
	"os"
	"reflect"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"sync"
	"testing"
)

var coverSpec = flag.Bool("coverspec", false, "Run spec coverage tests")

// The global map of sentence coverage for the http2 spec.
var defaultSpecCoverage specCoverage

var loadSpecOnce sync.Once

func loadSpec() {
	if f, err := os.Open("testdata/draft-ietf-httpbis-http2.xml"); err != nil {
		panic(err)
	} else {
		defaultSpecCoverage = readSpecCov(f)
		f.Close()
	}
}

// covers marks all sentences for section sec in defaultSpecCoverage. Sentences not
// "covered" will be included in report outputted by TestSpecCoverage.
func covers(sec, sentences string) {
	loadSpecOnce.Do(loadSpec)
	defaultSpecCoverage.cover(sec, sentences)
}

type specPart struct {
	section  string
	sentence string
}

func (ss specPart) Less(oo specPart) bool {
	atoi := func(s string) int {
		n, err := strconv.Atoi(s)
		if err != nil {
			panic(err)
		}
		return n
	}
	a := strings.Split(ss.section, ".")
	b := strings.Split(oo.section, ".")
	for len(a) > 0 {
		if len(b) == 0 {
			return false
		}
		x, y := atoi(a[0]), atoi(b[0])
		if x == y {
			a, b = a[1:], b[1:]
			continue
		}
		return x < y
	}
	if len(b) > 0 {
		return true
	}
	return false
}

type bySpecSection []specPart

func (a bySpecSection) Len() int           { return len(a) }
func (a bySpecSection) Less(i, j int) bool { return a[i].Less(a[j]) }
func (a bySpecSection) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

type specCoverage struct {
	coverage map[specPart]bool
	d        *xml.Decoder
}

func joinSection(sec []int) string {
	s := fmt.Sprintf("%d", sec[0])
	for _, n := range sec[1:] {
		s = fmt.Sprintf("%s.%d", s, n)
	}
	return s
}

func (sc specCoverage) readSection(sec []int) {
	var (
		buf = new(bytes.Buffer)
		sub = 0
	)
	for {
		tk, err := sc.d.Token()
		if err != nil {
			if err == io.EOF {
				return
			}
			panic(err)
		}
		switch v := tk.(type) {
		case xml.StartElement:
			if skipElement(v) {
				if err := sc.d.Skip(); err != nil {
					panic(err)
				}
				if v.Name.Local == "section" {
					sub++
				}
				break
			}
			switch v.Name.Local {
			case "section":
				sub++
				sc.readSection(append(sec, sub))
			case "xref":
				buf.Write(sc.readXRef(v))
			}
		case xml.CharData:
			if len(sec) == 0 {
				break
			}
			buf.Write(v)
		case xml.EndElement:
			if v.Name.Local == "section" {
				sc.addSentences(joinSection(sec), buf.String())
				return
			}
		}
	}
}

func (sc specCoverage) readXRef(se xml.StartElement) []byte {
	var b []byte
	for {
		tk, err := sc.d.Token()
		if err != nil {
			panic(err)
		}
		switch v := tk.(type) {
		case xml.CharData:
			if b != nil {
				panic("unexpected CharData")
			}
			b = []byte(string(v))
		case xml.EndElement:
			if v.Name.Local != "xref" {
				panic("expected </xref>")
			}
			if b != nil {
				return b
			}
			sig := attrSig(se)
			switch sig {
			case "target":
				return []byte(fmt.Sprintf("[%s]", attrValue(se, "target")))
			case "fmt-of,rel,target", "fmt-,,rel,target":
				return []byte(fmt.Sprintf("[%s, %s]", attrValue(se, "target"), attrValue(se, "rel")))
			case "fmt-of,sec,target", "fmt-,,sec,target":
				return []byte(fmt.Sprintf("[section %s of %s]", attrValue(se, "sec"), attrValue(se, "target")))
			case "fmt-of,rel,sec,target":
				return []byte(fmt.Sprintf("[section %s of %s, %s]", attrValue(se, "sec"), attrValue(se, "target"), attrValue(se, "rel")))
			default:
				panic(fmt.Sprintf("unknown attribute signature %q in %#v", sig, fmt.Sprintf("%#v", se)))
			}
		default:
			panic(fmt.Sprintf("unexpected tag %q", v))
		}
	}
}

var skipAnchor = map[string]bool{
	"intro":    true,
	"Overview": true,
}

var skipTitle = map[string]bool{
	"Acknowledgements":            true,
	"Change Log":                  true,
	"Document Organization":       true,
	"Conventions and Terminology": true,
}

func skipElement(s xml.StartElement) bool {
	switch s.Name.Local {
	case "artwork":
		return true
	case "section":
		for _, attr := range s.Attr {
			switch attr.Name.Local {
			case "anchor":
				if skipAnchor[attr.Value] || strings.HasPrefix(attr.Value, "changes.since.") {
					return true
				}
			case "title":
				if skipTitle[attr.Value] {
					return true
				}
			}
		}
	}
	return false
}

func readSpecCov(r io.Reader) specCoverage {
	sc := specCoverage{
		coverage: map[specPart]bool{},
		d:        xml.NewDecoder(r)}
	sc.readSection(nil)
	return sc
}

func (sc specCoverage) addSentences(sec string, sentence string) {
	for _, s := range parseSentences(sentence) {
		sc.coverage[specPart{sec, s}] = false
	}
}

func (sc specCoverage) cover(sec string, sentence string) {
	for _, s := range parseSentences(sentence) {
		p := specPart{sec, s}
		if _, ok := sc.coverage[p]; !ok {
			panic(fmt.Sprintf("Not found in spec: %q, %q", sec, s))
		}
		sc.coverage[specPart{sec, s}] = true
	}

}

var whitespaceRx = regexp.MustCompile(`\s+`)

func parseSentences(sens string) []string {
	sens = strings.TrimSpace(sens)
	if sens == "" {
		return nil
	}
	ss := strings.Split(whitespaceRx.ReplaceAllString(sens, " "), ". ")
	for i, s := range ss {
		s = strings.TrimSpace(s)
		if !strings.HasSuffix(s, ".") {
			s += "."
		}
		ss[i] = s
	}
	return ss
}

func TestSpecParseSentences(t *testing.T) {
	tests := []struct {
		ss   string
		want []string
	}{
		{"Sentence 1. Sentence 2.",
			[]string{
				"Sentence 1.",
				"Sentence 2.",
			}},
		{"Sentence 1. \nSentence 2.\tSentence 3.",
			[]string{
				"Sentence 1.",
				"Sentence 2.",
				"Sentence 3.",
			}},
	}

	for i, tt := range tests {
		got := parseSentences(tt.ss)
		if !reflect.DeepEqual(got, tt.want) {
			t.Errorf("%d: got = %q, want %q", i, got, tt.want)
		}
	}
}

func TestSpecCoverage(t *testing.T) {
	if !*coverSpec {
		t.Skip()
	}

	loadSpecOnce.Do(loadSpec)

	var (
		list     []specPart
		cv       = defaultSpecCoverage.coverage
		total    = len(cv)
		complete = 0
	)

	for sp, touched := range defaultSpecCoverage.coverage {
		if touched {
			complete++
		} else {
			list = append(list, sp)
		}
	}
	sort.Stable(bySpecSection(list))

	if testing.Short() && len(list) > 5 {
		list = list[:5]
	}

	for _, p := range list {
		t.Errorf("\tSECTION %s: %s", p.section, p.sentence)
	}

	t.Logf("%d/%d (%d%%) sentences covered", complete, total, (complete/total)*100)
}

func attrSig(se xml.StartElement) string {
	var names []string
	for _, attr := range se.Attr {
		if attr.Name.Local == "fmt" {
			names = append(names, "fmt-"+attr.Value)
		} else {
			names = append(names, attr.Name.Local)
		}
	}
	sort.Strings(names)
	return strings.Join(names, ",")
}

func attrValue(se xml.StartElement, attr string) string {
	for _, a := range se.Attr {
		if a.Name.Local == attr {
			return a.Value
		}
	}
	panic("unknown attribute " + attr)
}

func TestSpecPartLess(t *testing.T) {
	tests := []struct {
		sec1, sec2 string
		want       bool
	}{
		{"6.2.1", "6.2", false},
		{"6.2", "6.2.1", true},
		{"6.10", "6.10.1", true},
		{"6.10", "6.1.1", false}, // 10, not 1
		{"6.1", "6.1", false},    // equal, so not less
	}
	for _, tt := range tests {
		got := (specPart{tt.sec1, "foo"}).Less(specPart{tt.sec2, "foo"})
		if got != tt.want {
			t.Errorf("Less(%q, %q) = %v; want %v", tt.sec1, tt.sec2, got, tt.want)
		}
	}
}
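Not part of the diff: covers is meant to be called from other tests in this package; each call marks spec sentences as exercised, and TestSpecCoverage (which only runs when the -coverspec flag defined above is passed through to the test binary) reports whatever was never covered. A hypothetical caller, with an illustrative sentence:

func TestSomethingFromSection65(t *testing.T) {
	// covers panics unless the quoted text matches a sentence of the spec
	// XML verbatim, so the sentence here is illustrative only.
	covers("6.5", `A SETTINGS frame MUST be sent by both endpoints at the start of a connection.`)
	// ... exercise the requirement here ...
}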
70
vendor/golang.org/x/net/idna/example_test.go
generated
vendored
Normal file
@@ -0,0 +1,70 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.

// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package idna_test

import (
	"fmt"

	"golang.org/x/net/idna"
)

func ExampleProfile() {
	// Raw Punycode has no restrictions and does no mappings.
	fmt.Println(idna.ToASCII(""))
	fmt.Println(idna.ToASCII("*.faß.com"))
	fmt.Println(idna.Punycode.ToASCII("*.faß.com"))

	// Rewrite IDN for lookup. This (currently) uses transitional mappings to
	// find a balance between IDNA2003 and IDNA2008 compatibility.
	fmt.Println(idna.Lookup.ToASCII(""))
	fmt.Println(idna.Lookup.ToASCII("www.faß.com"))

	// Convert an IDN to ASCII for registration purposes. This changes the
	// encoding, but reports an error if the input was illformed.
	fmt.Println(idna.Registration.ToASCII(""))
	fmt.Println(idna.Registration.ToASCII("www.faß.com"))

	// Output:
	// <nil>
	// *.xn--fa-hia.com <nil>
	// *.xn--fa-hia.com <nil>
	// <nil>
	// www.fass.com <nil>
	// idna: invalid label ""
	// www.xn--fa-hia.com <nil>
}

func ExampleNew() {
	var p *idna.Profile

	// Raw Punycode has no restrictions and does no mappings.
	p = idna.New()
	fmt.Println(p.ToASCII("*.faß.com"))

	// Do mappings. Note that star is not allowed in a DNS lookup.
	p = idna.New(
		idna.MapForLookup(),
		idna.Transitional(true)) // Map ß -> ss
	fmt.Println(p.ToASCII("*.faß.com"))

	// Lookup for registration. Also does not allow '*'.
	p = idna.New(idna.ValidateForRegistration())
	fmt.Println(p.ToUnicode("*.faß.com"))

	// Set up a profile maps for lookup, but allows wild cards.
	p = idna.New(
		idna.MapForLookup(),
		idna.Transitional(true),      // Map ß -> ss
		idna.StrictDomainName(false)) // Set more permissive ASCII rules.
	fmt.Println(p.ToASCII("*.faß.com"))

	// Output:
	// *.xn--fa-hia.com <nil>
	// *.fass.com idna: disallowed rune U+002A
	// *.faß.com idna: disallowed rune U+002A
	// *.fass.com <nil>
}
732
vendor/golang.org/x/net/idna/idna.go
generated
vendored
Normal file
@@ -0,0 +1,732 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package idna implements IDNA2008 using the compatibility processing
|
||||
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
|
||||
// deal with the transition from IDNA2003.
|
||||
//
|
||||
// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC
|
||||
// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
|
||||
// UTS #46 is defined in http://www.unicode.org/reports/tr46.
|
||||
// See http://unicode.org/cldr/utility/idna.jsp for a visualization of the
|
||||
// differences between these two standards.
|
||||
package idna // import "golang.org/x/net/idna"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/text/secure/bidirule"
|
||||
"golang.org/x/text/unicode/bidi"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
|
||||
// NOTE: Unlike common practice in Go APIs, the functions will return a
|
||||
// sanitized domain name in case of errors. Browsers sometimes use a partially
|
||||
// evaluated string as lookup.
|
||||
// TODO: the current error handling is, in my opinion, the least opinionated.
|
||||
// Other strategies are also viable, though:
|
||||
// Option 1) Return an empty string in case of error, but allow the user to
|
||||
// specify explicitly which errors to ignore.
|
||||
// Option 2) Return the partially evaluated string if it is itself a valid
|
||||
// string, otherwise return the empty string in case of error.
|
||||
// Option 3) Option 1 and 2.
|
||||
// Option 4) Always return an empty string for now and implement Option 1 as
|
||||
// needed, and document that the return string may not be empty in case of
|
||||
// error in the future.
|
||||
// I think Option 1 is best, but it is quite opinionated.
|
||||
|
||||
// ToASCII is a wrapper for Punycode.ToASCII.
|
||||
func ToASCII(s string) (string, error) {
|
||||
return Punycode.process(s, true)
|
||||
}
|
||||
|
||||
// ToUnicode is a wrapper for Punycode.ToUnicode.
|
||||
func ToUnicode(s string) (string, error) {
|
||||
return Punycode.process(s, false)
|
||||
}
|
||||
|
||||
// An Option configures a Profile at creation time.
|
||||
type Option func(*options)
|
||||
|
||||
// Transitional sets a Profile to use the Transitional mapping as defined in UTS
|
||||
// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
|
||||
// transitional mapping provides a compromise between IDNA2003 and IDNA2008
|
||||
// compatibility. It is used by most browsers when resolving domain names. This
|
||||
// option is only meaningful if combined with MapForLookup.
|
||||
func Transitional(transitional bool) Option {
|
||||
return func(o *options) { o.transitional = true }
|
||||
}
|
||||
|
||||
// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
|
||||
// are longer than allowed by the RFC.
|
||||
func VerifyDNSLength(verify bool) Option {
|
||||
return func(o *options) { o.verifyDNSLength = verify }
|
||||
}
|
||||
|
||||
// RemoveLeadingDots removes leading label separators. Leading runes that map to
|
||||
// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
|
||||
//
|
||||
// This is the behavior suggested by the UTS #46 and is adopted by some
|
||||
// browsers.
|
||||
func RemoveLeadingDots(remove bool) Option {
|
||||
return func(o *options) { o.removeLeadingDots = remove }
|
||||
}
|
||||
|
||||
// ValidateLabels sets whether to check the mandatory label validation criteria
|
||||
// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
|
||||
// of hyphens ('-'), normalization, validity of runes, and the context rules.
|
||||
func ValidateLabels(enable bool) Option {
|
||||
return func(o *options) {
|
||||
// Don't override existing mappings, but set one that at least checks
|
||||
// normalization if it is not set.
|
||||
if o.mapping == nil && enable {
|
||||
o.mapping = normalize
|
||||
}
|
||||
o.trie = trie
|
||||
o.validateLabels = enable
|
||||
o.fromPuny = validateFromPunycode
|
||||
}
|
||||
}
|
||||
|
||||
// StrictDomainName limits the set of permissible ASCII characters to those
|
||||
// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
|
||||
// hyphen). This is set by default for MapForLookup and ValidateForRegistration.
|
||||
//
|
||||
// This option is useful, for instance, for browsers that allow characters
|
||||
// outside this range, for example a '_' (U+005F LOW LINE). See
|
||||
// http://www.rfc-editor.org/std/std3.txt for more details This option
|
||||
// corresponds to the UseSTD3ASCIIRules option in UTS #46.
|
||||
func StrictDomainName(use bool) Option {
|
||||
return func(o *options) {
|
||||
o.trie = trie
|
||||
o.useSTD3Rules = use
|
||||
o.fromPuny = validateFromPunycode
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE: the following options pull in tables. The tables should not be linked
|
||||
// in as long as the options are not used.
|
||||
|
||||
// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
|
||||
// that relies on proper validation of labels should include this rule.
|
||||
func BidiRule() Option {
|
||||
return func(o *options) { o.bidirule = bidirule.ValidString }
|
||||
}
|
||||
|
||||
// ValidateForRegistration sets validation options to verify that a given IDN is
|
||||
// properly formatted for registration as defined by Section 4 of RFC 5891.
|
||||
func ValidateForRegistration() Option {
|
||||
return func(o *options) {
|
||||
o.mapping = validateRegistration
|
||||
StrictDomainName(true)(o)
|
||||
ValidateLabels(true)(o)
|
||||
VerifyDNSLength(true)(o)
|
||||
BidiRule()(o)
|
||||
}
|
||||
}
|
||||
|
||||
// MapForLookup sets validation and mapping options such that a given IDN is
|
||||
// transformed for domain name lookup according to the requirements set out in
|
||||
// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
|
||||
// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
|
||||
// to add this check.
|
||||
//
|
||||
// The mappings include normalization and mapping case, width and other
|
||||
// compatibility mappings.
|
||||
func MapForLookup() Option {
|
||||
return func(o *options) {
|
||||
o.mapping = validateAndMap
|
||||
StrictDomainName(true)(o)
|
||||
ValidateLabels(true)(o)
|
||||
}
|
||||
}
|
||||
|
||||
type options struct {
|
||||
transitional bool
|
||||
useSTD3Rules bool
|
||||
validateLabels bool
|
||||
verifyDNSLength bool
|
||||
removeLeadingDots bool
|
||||
|
||||
trie *idnaTrie
|
||||
|
||||
// fromPuny calls validation rules when converting A-labels to U-labels.
|
||||
fromPuny func(p *Profile, s string) error
|
||||
|
||||
// mapping implements a validation and mapping step as defined in RFC 5895
|
||||
// or UTS 46, tailored to, for example, domain registration or lookup.
|
||||
mapping func(p *Profile, s string) (mapped string, isBidi bool, err error)
|
||||
|
||||
// bidirule, if specified, checks whether s conforms to the Bidi Rule
|
||||
// defined in RFC 5893.
|
||||
bidirule func(s string) bool
|
||||
}
|
||||
|
||||
// A Profile defines the configuration of an IDNA mapper.
|
||||
type Profile struct {
|
||||
options
|
||||
}
|
||||
|
||||
func apply(o *options, opts []Option) {
|
||||
for _, f := range opts {
|
||||
f(o)
|
||||
}
|
||||
}
|
||||
|
||||
// New creates a new Profile.
|
||||
//
|
||||
// With no options, the returned Profile is the most permissive and equals the
|
||||
// Punycode Profile. Options can be passed to further restrict the Profile. The
|
||||
// MapForLookup and ValidateForRegistration options set a collection of options,
|
||||
// for lookup and registration purposes respectively, which can be tailored by
|
||||
// adding more fine-grained options, where later options override earlier
|
||||
// options.
|
||||
func New(o ...Option) *Profile {
|
||||
p := &Profile{}
|
||||
apply(&p.options, o)
|
||||
return p
|
||||
}
|
||||
|
||||
// ToASCII converts a domain or domain label to its ASCII form. For example,
|
||||
// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
|
||||
// ToASCII("golang") is "golang". If an error is encountered it will return
|
||||
// an error and a (partially) processed result.
|
||||
func (p *Profile) ToASCII(s string) (string, error) {
|
||||
return p.process(s, true)
|
||||
}
|
||||
|
||||
// ToUnicode converts a domain or domain label to its Unicode form. For example,
|
||||
// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
|
||||
// ToUnicode("golang") is "golang". If an error is encountered it will return
|
||||
// an error and a (partially) processed result.
|
||||
func (p *Profile) ToUnicode(s string) (string, error) {
|
||||
pp := *p
|
||||
pp.transitional = false
|
||||
return pp.process(s, false)
|
||||
}
|
||||
|
||||
// String reports a string with a description of the profile for debugging
|
||||
// purposes. The string format may change with different versions.
|
||||
func (p *Profile) String() string {
|
||||
s := ""
|
||||
if p.transitional {
|
||||
s = "Transitional"
|
||||
} else {
|
||||
s = "NonTransitional"
|
||||
}
|
||||
if p.useSTD3Rules {
|
||||
s += ":UseSTD3Rules"
|
||||
}
|
||||
if p.validateLabels {
|
||||
s += ":ValidateLabels"
|
||||
}
|
||||
if p.verifyDNSLength {
|
||||
s += ":VerifyDNSLength"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
var (
|
||||
// Punycode is a Profile that does raw punycode processing with a minimum
|
||||
// of validation.
|
||||
Punycode *Profile = punycode
|
||||
|
||||
// Lookup is the recommended profile for looking up domain names, according
|
||||
// to Section 5 of RFC 5891. The exact configuration of this profile may
|
||||
// change over time.
|
||||
Lookup *Profile = lookup
|
||||
|
||||
// Display is the recommended profile for displaying domain names.
|
||||
// The configuration of this profile may change over time.
|
||||
Display *Profile = display
|
||||
|
||||
// Registration is the recommended profile for checking whether a given
|
||||
// IDN is valid for registration, according to Section 4 of RFC 5891.
|
||||
Registration *Profile = registration
|
||||
|
||||
punycode = &Profile{}
|
||||
lookup = &Profile{options{
|
||||
transitional: true,
|
||||
useSTD3Rules: true,
|
||||
validateLabels: true,
|
||||
trie: trie,
|
||||
fromPuny: validateFromPunycode,
|
||||
mapping: validateAndMap,
|
||||
bidirule: bidirule.ValidString,
|
||||
}}
|
||||
display = &Profile{options{
|
||||
useSTD3Rules: true,
|
||||
validateLabels: true,
|
||||
trie: trie,
|
||||
fromPuny: validateFromPunycode,
|
||||
mapping: validateAndMap,
|
||||
bidirule: bidirule.ValidString,
|
||||
}}
|
||||
registration = &Profile{options{
|
||||
useSTD3Rules: true,
|
||||
validateLabels: true,
|
||||
verifyDNSLength: true,
|
||||
trie: trie,
|
||||
fromPuny: validateFromPunycode,
|
||||
mapping: validateRegistration,
|
||||
bidirule: bidirule.ValidString,
|
||||
}}
|
||||
|
||||
// TODO: profiles
|
||||
// Register: recommended for approving domain names: don't do any mappings
|
||||
// but rather reject on invalid input. Bundle or block deviation characters.
|
||||
)

type labelError struct{ label, code_ string }

func (e labelError) code() string { return e.code_ }
func (e labelError) Error() string {
	return fmt.Sprintf("idna: invalid label %q", e.label)
}

type runeError rune

func (e runeError) code() string { return "P1" }
func (e runeError) Error() string {
	return fmt.Sprintf("idna: disallowed rune %U", e)
}

// process implements the algorithm described in section 4 of UTS #46,
// see http://www.unicode.org/reports/tr46.
func (p *Profile) process(s string, toASCII bool) (string, error) {
	var err error
	var isBidi bool
	if p.mapping != nil {
		s, isBidi, err = p.mapping(p, s)
	}
	// Remove leading empty labels.
	if p.removeLeadingDots {
		for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
		}
	}
	// TODO: allow for a quick check of the tables data.
	// It seems like we should only create this error on ToASCII, but the
	// UTS 46 conformance tests suggest we should always check this.
	if err == nil && p.verifyDNSLength && s == "" {
		err = &labelError{s, "A4"}
	}
	labels := labelIter{orig: s}
	for ; !labels.done(); labels.next() {
		label := labels.label()
		if label == "" {
			// Empty labels are not okay. The label iterator skips the last
			// label if it is empty.
			if err == nil && p.verifyDNSLength {
				err = &labelError{s, "A4"}
			}
			continue
		}
		if strings.HasPrefix(label, acePrefix) {
			u, err2 := decode(label[len(acePrefix):])
			if err2 != nil {
				if err == nil {
					err = err2
				}
				// Spec says keep the old label.
				continue
			}
			isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight
			labels.set(u)
			if err == nil && p.validateLabels {
				err = p.fromPuny(p, u)
			}
			if err == nil {
				// This should be called on NonTransitional, according to the
				// spec, but that currently does not have any effect. Use the
				// original profile to preserve options.
				err = p.validateLabel(u)
			}
		} else if err == nil {
			err = p.validateLabel(label)
		}
	}
	if isBidi && p.bidirule != nil && err == nil {
		for labels.reset(); !labels.done(); labels.next() {
			if !p.bidirule(labels.label()) {
				err = &labelError{s, "B"}
				break
			}
		}
	}
	if toASCII {
		for labels.reset(); !labels.done(); labels.next() {
			label := labels.label()
			if !ascii(label) {
				a, err2 := encode(acePrefix, label)
				if err == nil {
					err = err2
				}
				label = a
				labels.set(a)
			}
			n := len(label)
			if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
				err = &labelError{label, "A4"}
			}
		}
	}
	s = labels.result()
	if toASCII && p.verifyDNSLength && err == nil {
		// Compute the length of the domain name minus the root label and its dot.
		n := len(s)
		if n > 0 && s[n-1] == '.' {
			n--
		}
		if len(s) < 1 || n > 253 {
			err = &labelError{s, "A4"}
		}
	}
	return s, err
}

func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) {
	// TODO: consider first doing a quick check to see if any of these checks
	// need to be done. This will make it slower in the general case, but
	// faster in the common case.
	mapped = norm.NFC.String(s)
	isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft
	return mapped, isBidi, nil
}

func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) {
	// TODO: filter need for normalization in loop below.
	if !norm.NFC.IsNormalString(s) {
		return s, false, &labelError{s, "V1"}
	}
	for i := 0; i < len(s); {
		v, sz := trie.lookupString(s[i:])
		if sz == 0 {
			return s, bidi, runeError(utf8.RuneError)
		}
		bidi = bidi || info(v).isBidi(s[i:])
		// Copy bytes not copied so far.
		switch p.simplify(info(v).category()) {
		// TODO: handle the NV8 defined in the Unicode idna data set to allow
		// for strict conformance to IDNA2008.
		case valid, deviation:
		case disallowed, mapped, unknown, ignored:
			r, _ := utf8.DecodeRuneInString(s[i:])
			return s, bidi, runeError(r)
		}
		i += sz
	}
	return s, bidi, nil
}

func (c info) isBidi(s string) bool {
	if !c.isMapped() {
		return c&attributesMask == rtl
	}
	// TODO: also store bidi info for mapped data. This is possible, but a bit
	// cumbersome and not for the common case.
	p, _ := bidi.LookupString(s)
	switch p.Class() {
	case bidi.R, bidi.AL, bidi.AN:
		return true
	}
	return false
}

func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) {
	var (
		b []byte
		k int
	)
	// combinedInfoBits contains the or-ed bits of all runes. We use this
	// to derive the mayNeedNorm bit later. This may trigger normalization
	// overeagerly, but it will not do so in the common case. The end result
	// is another 10% saving on BenchmarkProfile for the common case.
	var combinedInfoBits info
	for i := 0; i < len(s); {
		v, sz := trie.lookupString(s[i:])
		if sz == 0 {
			b = append(b, s[k:i]...)
			b = append(b, "\ufffd"...)
			k = len(s)
			if err == nil {
				err = runeError(utf8.RuneError)
			}
			break
		}
		combinedInfoBits |= info(v)
		bidi = bidi || info(v).isBidi(s[i:])
		start := i
		i += sz
		// Copy bytes not copied so far.
		switch p.simplify(info(v).category()) {
		case valid:
			continue
		case disallowed:
			if err == nil {
				r, _ := utf8.DecodeRuneInString(s[start:])
				err = runeError(r)
			}
			continue
		case mapped, deviation:
			b = append(b, s[k:start]...)
			b = info(v).appendMapping(b, s[start:i])
		case ignored:
			b = append(b, s[k:start]...)
			// drop the rune
		case unknown:
			b = append(b, s[k:start]...)
			b = append(b, "\ufffd"...)
		}
		k = i
	}
	if k == 0 {
		// No changes so far.
		if combinedInfoBits&mayNeedNorm != 0 {
			s = norm.NFC.String(s)
		}
	} else {
		b = append(b, s[k:]...)
		if norm.NFC.QuickSpan(b) != len(b) {
			b = norm.NFC.Bytes(b)
		}
		// TODO: the punycode converters require strings as input.
		s = string(b)
	}
	return s, bidi, err
}

// A labelIter allows iterating over domain name labels.
type labelIter struct {
	orig     string
	slice    []string
	curStart int
	curEnd   int
	i        int
}

func (l *labelIter) reset() {
	l.curStart = 0
	l.curEnd = 0
	l.i = 0
}

func (l *labelIter) done() bool {
	return l.curStart >= len(l.orig)
}

func (l *labelIter) result() string {
	if l.slice != nil {
		return strings.Join(l.slice, ".")
	}
	return l.orig
}

func (l *labelIter) label() string {
	if l.slice != nil {
		return l.slice[l.i]
	}
	p := strings.IndexByte(l.orig[l.curStart:], '.')
	l.curEnd = l.curStart + p
	if p == -1 {
		l.curEnd = len(l.orig)
	}
	return l.orig[l.curStart:l.curEnd]
}

// next sets the value to the next label. It skips the last label if it is empty.
func (l *labelIter) next() {
	l.i++
	if l.slice != nil {
		if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
			l.curStart = len(l.orig)
		}
	} else {
		l.curStart = l.curEnd + 1
		if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
			l.curStart = len(l.orig)
		}
	}
}

func (l *labelIter) set(s string) {
	if l.slice == nil {
		l.slice = strings.Split(l.orig, ".")
	}
	l.slice[l.i] = s
}
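
// A sketch of how labelIter behaves: iterating over "www.müller.de" yields
// the labels "www", "müller" and "de" in turn; a call to set replaces the
// current label and switches the iterator to its slice-backed representation,
// after which result() re-joins the labels with '.'.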

// acePrefix is the ASCII Compatible Encoding prefix.
const acePrefix = "xn--"

func (p *Profile) simplify(cat category) category {
	switch cat {
	case disallowedSTD3Mapped:
		if p.useSTD3Rules {
			cat = disallowed
		} else {
			cat = mapped
		}
	case disallowedSTD3Valid:
		if p.useSTD3Rules {
			cat = disallowed
		} else {
			cat = valid
		}
	case deviation:
		if !p.transitional {
			cat = valid
		}
	case validNV8, validXV8:
		// TODO: handle V2008
		cat = valid
	}
	return cat
}

func validateFromPunycode(p *Profile, s string) error {
	if !norm.NFC.IsNormalString(s) {
		return &labelError{s, "V1"}
	}
	// TODO: detect whether string may have to be normalized in the following
	// loop.
	for i := 0; i < len(s); {
		v, sz := trie.lookupString(s[i:])
		if sz == 0 {
			return runeError(utf8.RuneError)
		}
		if c := p.simplify(info(v).category()); c != valid && c != deviation {
			return &labelError{s, "V6"}
		}
		i += sz
	}
	return nil
}

const (
	zwnj = "\u200c"
	zwj  = "\u200d"
)

type joinState int8

const (
	stateStart joinState = iota
	stateVirama
	stateBefore
	stateBeforeVirama
	stateAfter
	stateFAIL
)

var joinStates = [][numJoinTypes]joinState{
	stateStart: {
		joiningL:   stateBefore,
		joiningD:   stateBefore,
		joinZWNJ:   stateFAIL,
		joinZWJ:    stateFAIL,
		joinVirama: stateVirama,
	},
	stateVirama: {
		joiningL: stateBefore,
		joiningD: stateBefore,
	},
	stateBefore: {
		joiningL:   stateBefore,
		joiningD:   stateBefore,
		joiningT:   stateBefore,
		joinZWNJ:   stateAfter,
		joinZWJ:    stateFAIL,
		joinVirama: stateBeforeVirama,
	},
	stateBeforeVirama: {
		joiningL: stateBefore,
		joiningD: stateBefore,
		joiningT: stateBefore,
	},
	stateAfter: {
		joiningL:   stateFAIL,
		joiningD:   stateBefore,
		joiningT:   stateAfter,
		joiningR:   stateStart,
		joinZWNJ:   stateFAIL,
		joinZWJ:    stateFAIL,
		joinVirama: stateAfter, // no-op as we can't accept joiners here
	},
	stateFAIL: {
		0:          stateFAIL,
		joiningL:   stateFAIL,
		joiningD:   stateFAIL,
		joiningT:   stateFAIL,
		joiningR:   stateFAIL,
		joinZWNJ:   stateFAIL,
		joinZWJ:    stateFAIL,
		joinVirama: stateFAIL,
	},
}
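
// The table above encodes the CONTEXTJ rules of RFC 5892 Appendix A: for
// example, ZWNJ (joinZWNJ) is only accepted after a character of joining type
// L or D (stateBefore), and a scan that ends in stateAfter, i.e. with a joiner
// still waiting for a following R or D character, is rejected by validateLabel
// below.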

// validateLabel validates the criteria from Section 4.1. Items 1, 4, and 6 are
// already implicitly satisfied by the overall implementation.
func (p *Profile) validateLabel(s string) (err error) {
	if s == "" {
		if p.verifyDNSLength {
			return &labelError{s, "A4"}
		}
		return nil
	}
	if !p.validateLabels {
		return nil
	}
	trie := p.trie // p.validateLabels is only set if trie is set.
	if len(s) > 4 && s[2] == '-' && s[3] == '-' {
		return &labelError{s, "V2"}
	}
	if s[0] == '-' || s[len(s)-1] == '-' {
		return &labelError{s, "V3"}
	}
	// TODO: merge the use of this in the trie.
	v, sz := trie.lookupString(s)
	x := info(v)
	if x.isModifier() {
		return &labelError{s, "V5"}
	}
	// Quickly return in the absence of zero-width (non) joiners.
	if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {
		return nil
	}
	st := stateStart
	for i := 0; ; {
		jt := x.joinType()
		if s[i:i+sz] == zwj {
			jt = joinZWJ
		} else if s[i:i+sz] == zwnj {
			jt = joinZWNJ
		}
		st = joinStates[st][jt]
		if x.isViramaModifier() {
			st = joinStates[st][joinVirama]
		}
		if i += sz; i == len(s) {
			break
		}
		v, sz = trie.lookupString(s[i:])
		x = info(v)
	}
	if st == stateFAIL || st == stateAfter {
		return &labelError{s, "C"}
	}
	return nil
}

func ascii(s string) bool {
	for i := 0; i < len(s); i++ {
		if s[i] >= utf8.RuneSelf {
			return false
		}
	}
	return true
}
108
vendor/golang.org/x/net/idna/idna_test.go
generated
vendored
Normal file
@ -0,0 +1,108 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package idna

import (
	"testing"
)

var idnaTestCases = [...]struct {
	ascii, unicode string
}{
	// Labels.
	{"books", "books"},
	{"xn--bcher-kva", "bücher"},

	// Domains.
	{"foo--xn--bar.org", "foo--xn--bar.org"},
	{"golang.org", "golang.org"},
	{"example.xn--p1ai", "example.рф"},
	{"xn--czrw28b.tw", "商業.tw"},
	{"www.xn--mller-kva.de", "www.müller.de"},
}

func TestIDNA(t *testing.T) {
	for _, tc := range idnaTestCases {
		if a, err := ToASCII(tc.unicode); err != nil {
			t.Errorf("ToASCII(%q): %v", tc.unicode, err)
		} else if a != tc.ascii {
			t.Errorf("ToASCII(%q): got %q, want %q", tc.unicode, a, tc.ascii)
		}

		if u, err := ToUnicode(tc.ascii); err != nil {
			t.Errorf("ToUnicode(%q): %v", tc.ascii, err)
		} else if u != tc.unicode {
			t.Errorf("ToUnicode(%q): got %q, want %q", tc.ascii, u, tc.unicode)
		}
	}
}

func TestIDNASeparators(t *testing.T) {
	type subCase struct {
		unicode   string
		wantASCII string
		wantErr   bool
	}

	testCases := []struct {
		name     string
		profile  *Profile
		subCases []subCase
	}{
		{
			name: "Punycode", profile: Punycode,
			subCases: []subCase{
				{"example\u3002jp", "xn--examplejp-ck3h", false},
				{"東京\uFF0Ejp", "xn--jp-l92cn98g071o", false},
				{"大阪\uFF61jp", "xn--jp-ku9cz72u463f", false},
			},
		},
		{
			name: "Lookup", profile: Lookup,
			subCases: []subCase{
				{"example\u3002jp", "example.jp", false},
				{"東京\uFF0Ejp", "xn--1lqs71d.jp", false},
				{"大阪\uFF61jp", "xn--pssu33l.jp", false},
			},
		},
		{
			name: "Display", profile: Display,
			subCases: []subCase{
				{"example\u3002jp", "example.jp", false},
				{"東京\uFF0Ejp", "xn--1lqs71d.jp", false},
				{"大阪\uFF61jp", "xn--pssu33l.jp", false},
			},
		},
		{
			name: "Registration", profile: Registration,
			subCases: []subCase{
				{"example\u3002jp", "", true},
				{"東京\uFF0Ejp", "", true},
				{"大阪\uFF61jp", "", true},
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			for _, c := range tc.subCases {
				gotA, err := tc.profile.ToASCII(c.unicode)
				if c.wantErr {
					if err == nil {
						t.Errorf("ToASCII(%q): got no error, but an error expected", c.unicode)
					}
				} else {
					if err != nil {
						t.Errorf("ToASCII(%q): got err=%v, but no error expected", c.unicode, err)
					} else if gotA != c.wantASCII {
						t.Errorf("ToASCII(%q): got %q, want %q", c.unicode, gotA, c.wantASCII)
					}
				}
			}
		})
	}
}

// TODO(nigeltao): test errors, once we've specified when ToASCII and ToUnicode
// return errors.
203
vendor/golang.org/x/net/idna/punycode.go
generated
vendored
Normal file
@ -0,0 +1,203 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package idna

// This file implements the Punycode algorithm from RFC 3492.

import (
	"math"
	"strings"
	"unicode/utf8"
)

// These parameter values are specified in section 5.
//
// All computation is done with int32s, so that overflow behavior is identical
// regardless of whether int is 32-bit or 64-bit.
const (
	base        int32 = 36
	damp        int32 = 700
	initialBias int32 = 72
	initialN    int32 = 128
	skew        int32 = 38
	tmax        int32 = 26
	tmin        int32 = 1
)

func punyError(s string) error { return &labelError{s, "A3"} }

// decode decodes a string as specified in section 6.2.
func decode(encoded string) (string, error) {
	if encoded == "" {
		return "", nil
	}
	pos := 1 + strings.LastIndex(encoded, "-")
	if pos == 1 {
		return "", punyError(encoded)
	}
	if pos == len(encoded) {
		return encoded[:len(encoded)-1], nil
	}
	output := make([]rune, 0, len(encoded))
	if pos != 0 {
		for _, r := range encoded[:pos-1] {
			output = append(output, r)
		}
	}
	i, n, bias := int32(0), initialN, initialBias
	for pos < len(encoded) {
		oldI, w := i, int32(1)
		for k := base; ; k += base {
			if pos == len(encoded) {
				return "", punyError(encoded)
			}
			digit, ok := decodeDigit(encoded[pos])
			if !ok {
				return "", punyError(encoded)
			}
			pos++
			i += digit * w
			if i < 0 {
				return "", punyError(encoded)
			}
			t := k - bias
			if t < tmin {
				t = tmin
			} else if t > tmax {
				t = tmax
			}
			if digit < t {
				break
			}
			w *= base - t
			if w >= math.MaxInt32/base {
				return "", punyError(encoded)
			}
		}
		x := int32(len(output) + 1)
		bias = adapt(i-oldI, x, oldI == 0)
		n += i / x
		i %= x
		if n > utf8.MaxRune || len(output) >= 1024 {
			return "", punyError(encoded)
		}
		output = append(output, 0)
		copy(output[i+1:], output[i:])
		output[i] = n
		i++
	}
	return string(output), nil
}
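
// Worked example (taken from the test cases added alongside this file): the
// ACE suffix "bcher-kva" splits at the last '-' into the literal part "bcher"
// and the delta string "kva"; decode turns that delta into an insertion of
// 'ü' at position 1, producing "bücher".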

// encode encodes a string as specified in section 6.3 and prepends prefix to
// the result.
//
// The "while h < length(input)" line in the specification becomes "for
// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes.
func encode(prefix, s string) (string, error) {
	output := make([]byte, len(prefix), len(prefix)+1+2*len(s))
	copy(output, prefix)
	delta, n, bias := int32(0), initialN, initialBias
	b, remaining := int32(0), int32(0)
	for _, r := range s {
		if r < 0x80 {
			b++
			output = append(output, byte(r))
		} else {
			remaining++
		}
	}
	h := b
	if b > 0 {
		output = append(output, '-')
	}
	for remaining != 0 {
		m := int32(0x7fffffff)
		for _, r := range s {
			if m > r && r >= n {
				m = r
			}
		}
		delta += (m - n) * (h + 1)
		if delta < 0 {
			return "", punyError(s)
		}
		n = m
		for _, r := range s {
			if r < n {
				delta++
				if delta < 0 {
					return "", punyError(s)
				}
				continue
			}
			if r > n {
				continue
			}
			q := delta
			for k := base; ; k += base {
				t := k - bias
				if t < tmin {
					t = tmin
				} else if t > tmax {
					t = tmax
				}
				if q < t {
					break
				}
				output = append(output, encodeDigit(t+(q-t)%(base-t)))
				q = (q - t) / (base - t)
			}
			output = append(output, encodeDigit(q))
			bias = adapt(delta, h+1, h == b)
			delta = 0
			h++
			remaining--
		}
		delta++
		n++
	}
	return string(output), nil
}

func decodeDigit(x byte) (digit int32, ok bool) {
	switch {
	case '0' <= x && x <= '9':
		return int32(x - ('0' - 26)), true
	case 'A' <= x && x <= 'Z':
		return int32(x - 'A'), true
	case 'a' <= x && x <= 'z':
		return int32(x - 'a'), true
	}
	return 0, false
}
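
// decodeDigit above and encodeDigit below implement the base-36 alphabet of
// RFC 3492 section 5: digit values 0..25 map to the letters 'a'..'z'
// (case-insensitive on decode) and values 26..35 map to '0'..'9', so for
// example decodeDigit('k') == 10 and decodeDigit('9') == 35.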

func encodeDigit(digit int32) byte {
	switch {
	case 0 <= digit && digit < 26:
		return byte(digit + 'a')
	case 26 <= digit && digit < 36:
		return byte(digit + ('0' - 26))
	}
	panic("idna: internal error in punycode encoding")
}

// adapt is the bias adaptation function specified in section 6.1.
func adapt(delta, numPoints int32, firstTime bool) int32 {
	if firstTime {
		delta /= damp
	} else {
		delta /= 2
	}
	delta += delta / numPoints
	k := int32(0)
	for delta > ((base-tmin)*tmax)/2 {
		delta /= base - tmin
		k += base
	}
	return k + (base-tmin+1)*delta/(delta+skew)
}
198
vendor/golang.org/x/net/idna/punycode_test.go
generated
vendored
Normal file
@ -0,0 +1,198 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package idna

import (
	"strings"
	"testing"
)

var punycodeTestCases = [...]struct {
	s, encoded string
}{
	{"", ""},
	{"-", "--"},
	{"-a", "-a-"},
	{"-a-", "-a--"},
	{"a", "a-"},
	{"a-", "a--"},
	{"a-b", "a-b-"},
	{"books", "books-"},
	{"bücher", "bcher-kva"},
	{"Hello世界", "Hello-ck1hg65u"},
	{"ü", "tda"},
	{"üý", "tdac"},

	// The test cases below come from RFC 3492 section 7.1 with Errata 3026.
	{
		// (A) Arabic (Egyptian).
		"\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644" +
			"\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
		"egbpdaj6bu4bxfgehfvwxn",
	},
	{
		// (B) Chinese (simplified).
		"\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
		"ihqwcrb4cv8a8dqg056pqjye",
	},
	{
		// (C) Chinese (traditional).
		"\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
		"ihqwctvzc91f659drss3x8bo0yb",
	},
	{
		// (D) Czech.
		"\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074" +
			"\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D" +
			"\u0065\u0073\u006B\u0079",
		"Proprostnemluvesky-uyb24dma41a",
	},
	{
		// (E) Hebrew.
		"\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8" +
			"\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2" +
			"\u05D1\u05E8\u05D9\u05EA",
		"4dbcagdahymbxekheh6e0a7fei0b",
	},
	{
		// (F) Hindi (Devanagari).
		"\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D" +
			"\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939" +
			"\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947" +
			"\u0939\u0948\u0902",
		"i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd",
	},
	{
		// (G) Japanese (kanji and hiragana).
		"\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092" +
			"\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
		"n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa",
	},
	{
		// (H) Korean (Hangul syllables).
		"\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774" +
			"\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74" +
			"\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
		"989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j" +
			"psd879ccm6fea98c",
	},
	{
		// (I) Russian (Cyrillic).
		"\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E" +
			"\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440" +
			"\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A" +
			"\u0438",
		"b1abfaaepdrnnbgefbadotcwatmq2g4l",
	},
	{
		// (J) Spanish.
		"\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070" +
			"\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070" +
			"\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061" +
			"\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070" +
			"\u0061\u00F1\u006F\u006C",
		"PorqunopuedensimplementehablarenEspaol-fmd56a",
	},
	{
		// (K) Vietnamese.
		"\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B" +
			"\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068" +
			"\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067" +
			"\u0056\u0069\u1EC7\u0074",
		"TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g",
	},
	{
		// (L) 3<nen>B<gumi><kinpachi><sensei>.
		"\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
		"3B-ww4c5e180e575a65lsy2b",
	},
	{
		// (M) <amuro><namie>-with-SUPER-MONKEYS.
		"\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074" +
			"\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D" +
			"\u004F\u004E\u004B\u0045\u0059\u0053",
		"-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n",
	},
	{
		// (N) Hello-Another-Way-<sorezore><no><basho>.
		"\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F" +
			"\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D" +
			"\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
		"Hello-Another-Way--fc4qua05auwb3674vfr0b",
	},
	{
		// (O) <hitotsu><yane><no><shita>2.
		"\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
		"2-u9tlzr9756bt3uc0v",
	},
	{
		// (P) Maji<de>Koi<suru>5<byou><mae>
		"\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059" +
			"\u308B\u0035\u79D2\u524D",
		"MajiKoi5-783gue6qz075azm5e",
	},
	{
		// (Q) <pafii>de<runba>
		"\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
		"de-jg4avhby1noc0d",
	},
	{
		// (R) <sono><supiido><de>
		"\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
		"d9juau41awczczp",
	},
	{
		// (S) -> $1.00 <-
		"\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020" +
			"\u003C\u002D",
		"-> $1.00 <--",
	},
}

func TestPunycode(t *testing.T) {
	for _, tc := range punycodeTestCases {
		if got, err := decode(tc.encoded); err != nil {
			t.Errorf("decode(%q): %v", tc.encoded, err)
		} else if got != tc.s {
			t.Errorf("decode(%q): got %q, want %q", tc.encoded, got, tc.s)
		}

		if got, err := encode("", tc.s); err != nil {
			t.Errorf(`encode("", %q): %v`, tc.s, err)
		} else if got != tc.encoded {
			t.Errorf(`encode("", %q): got %q, want %q`, tc.s, got, tc.encoded)
		}
	}
}

var punycodeErrorTestCases = [...]string{
	"decode -",            // A sole '-' is invalid.
	"decode foo\x00bar",   // '\x00' is not in [0-9A-Za-z].
	"decode foo#bar",      // '#' is not in [0-9A-Za-z].
	"decode foo\u00A3bar", // '\u00A3' is not in [0-9A-Za-z].
	"decode 9",            // "9a" decodes to codepoint \u00A3; "9" is truncated.
	"decode 99999a",       // "99999a" decodes to codepoint \U0048A3C1, which is > \U0010FFFF.
	"decode 9999999999a",  // "9999999999a" overflows the int32 calculation.

	"encode " + strings.Repeat("x", 65536) + "\uff00", // int32 overflow.
}

func TestPunycodeErrors(t *testing.T) {
	for _, tc := range punycodeErrorTestCases {
		var err error
		switch {
		case strings.HasPrefix(tc, "decode "):
			_, err = decode(tc[7:])
		case strings.HasPrefix(tc, "encode "):
			_, err = encode("", tc[7:])
		}
		if err == nil {
			if len(tc) > 256 {
				tc = tc[:100] + "..." + tc[len(tc)-100:]
			}
			t.Errorf("no error for %s", tc)
		}
	}
}
4557
vendor/golang.org/x/net/idna/tables.go
generated
vendored
Normal file
File diff suppressed because it is too large
72
vendor/golang.org/x/net/idna/trie.go
generated
vendored
Normal file
@ -0,0 +1,72 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package idna

// appendMapping appends the mapping for the respective rune. isMapped must be
// true. A mapping is a categorization of a rune as defined in UTS #46.
func (c info) appendMapping(b []byte, s string) []byte {
	index := int(c >> indexShift)
	if c&xorBit == 0 {
		s := mappings[index:]
		return append(b, s[1:s[0]+1]...)
	}
	b = append(b, s...)
	if c&inlineXOR == inlineXOR {
		// TODO: support and handle two-byte inline masks
		b[len(b)-1] ^= byte(index)
	} else {
		for p := len(b) - int(xorData[index]); p < len(b); p++ {
			index++
			b[p] ^= xorData[index]
		}
	}
	return b
}

// Sparse block handling code.

type valueRange struct {
	value  uint16 // header: value:stride
	lo, hi byte   // header: lo:n
}

type sparseBlocks struct {
	values []valueRange
	offset []uint16
}

var idnaSparse = sparseBlocks{
	values: idnaSparseValues[:],
	offset: idnaSparseOffset[:],
}

// Don't use newIdnaTrie to avoid unconditional linking in of the table.
var trie = &idnaTrie{}

// lookup determines the type of block n and looks up the value for b.
// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block
// is a list of ranges with an accompanying value. Given a matching range r,
// the value for b is given by r.value + (b - r.lo) * stride.
func (t *sparseBlocks) lookup(n uint32, b byte) uint16 {
	offset := t.offset[n]
	header := t.values[offset]
	lo := offset + 1
	hi := lo + uint16(header.lo)
	for lo < hi {
		m := lo + (hi-lo)/2
		r := t.values[m]
		if r.lo <= b && b <= r.hi {
			return r.value + uint16(b-r.lo)*header.value
		}
		if b < r.lo {
			hi = m
		} else {
			lo = m + 1
		}
	}
	return 0
}
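
// For instance (numbers illustrative only; the real data lives in tables.go):
// with a sparse block whose header has stride 2 and a matching range
// r = {value: 100, lo: 0x80, hi: 0x8F}, lookup returns 100 + (b-0x80)*2 for
// any byte b in [0x80, 0x8F]; a byte outside every range yields 0.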
119
vendor/golang.org/x/net/idna/trieval.go
generated
vendored
Normal file
@ -0,0 +1,119 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.

package idna

// This file contains definitions for interpreting the trie value of the idna
// trie generated by "go run gen*.go". It is shared by both the generator
// program and the resultant package. Sharing is achieved by the generator
// copying gen_trieval.go to trieval.go and changing what's above this comment.

// info holds information from the IDNA mapping table for a single rune. It is
// the value returned by a trie lookup. In most cases, all information fits in
// a 16-bit value. For mappings, this value may contain an index into a slice
// with the mapped string. Such mappings can consist of the actual mapped value
// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the
// input rune. This technique is used by the cases packages and reduces the
// table size significantly.
//
// The per-rune values have the following format:
//
//	if mapped {
//		if inlinedXOR {
//			15..13 inline XOR marker
//			12..11 unused
//			10..3  inline XOR mask
//		} else {
//			15..3  index into xor or mapping table
//		}
//	} else {
//		15..14 unused
//		13     mayNeedNorm
//		12..11 attributes
//		10..8  joining type
//		7..3   category type
//	}
//	   2  use xor pattern
//	1..0  mapped category
//
// See the definitions below for a more detailed description of the various
// bits.
type info uint16
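
// As an illustration of the layout above (value chosen for this note, not
// taken from the tables): the unmapped info value 0x0808 has category bits
// 7..3 = 0x08 (valid) and attribute bit 11 set (rtl = 0x0800), while any
// value with a non-zero mapped category in bits 1..0 is interpreted through
// the index/XOR branch instead.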

const (
	catSmallMask = 0x3
	catBigMask   = 0xF8
	indexShift   = 3
	xorBit       = 0x4    // interpret the index as an xor pattern
	inlineXOR    = 0xE000 // These bits are set if the XOR pattern is inlined.

	joinShift = 8
	joinMask  = 0x07

	// Attributes
	attributesMask = 0x1800
	viramaModifier = 0x1800
	modifier       = 0x1000
	rtl            = 0x0800

	mayNeedNorm = 0x2000
)

// A category corresponds to a category defined in the IDNA mapping table.
type category uint16

const (
	unknown              category = 0 // not currently defined in unicode.
	mapped               category = 1
	disallowedSTD3Mapped category = 2
	deviation            category = 3
)

const (
	valid               category = 0x08
	validNV8            category = 0x18
	validXV8            category = 0x28
	disallowed          category = 0x40
	disallowedSTD3Valid category = 0x80
	ignored             category = 0xC0
)

// join types and additional rune information
const (
	joiningL = (iota + 1)
	joiningD
	joiningT
	joiningR

	// the following types are derived during processing
	joinZWJ
	joinZWNJ
	joinVirama
	numJoinTypes
)

func (c info) isMapped() bool {
	return c&0x3 != 0
}

func (c info) category() category {
	small := c & catSmallMask
	if small != 0 {
		return category(small)
	}
	return category(c & catBigMask)
}

func (c info) joinType() info {
	if c.isMapped() {
		return 0
	}
	return (c >> joinShift) & joinMask
}

func (c info) isModifier() bool {
	return c&(modifier|catSmallMask) == modifier
}

func (c info) isViramaModifier() bool {
	return c&(attributesMask|catSmallMask) == viramaModifier
}
351
vendor/golang.org/x/net/lex/httplex/httplex.go
generated
vendored
Normal file
@ -0,0 +1,351 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package httplex contains rules around lexical matters of various
// HTTP-related specifications.
//
// This package is shared by the standard library (which vendors it)
// and x/net/http2. It comes with no API stability promise.
package httplex

import (
	"net"
	"strings"
	"unicode/utf8"

	"golang.org/x/net/idna"
)

var isTokenTable = [127]bool{
	'!':  true,
	'#':  true,
	'$':  true,
	'%':  true,
	'&':  true,
	'\'': true,
	'*':  true,
	'+':  true,
	'-':  true,
	'.':  true,
	'0':  true,
	'1':  true,
	'2':  true,
	'3':  true,
	'4':  true,
	'5':  true,
	'6':  true,
	'7':  true,
	'8':  true,
	'9':  true,
	'A':  true,
	'B':  true,
	'C':  true,
	'D':  true,
	'E':  true,
	'F':  true,
	'G':  true,
	'H':  true,
	'I':  true,
	'J':  true,
	'K':  true,
	'L':  true,
	'M':  true,
	'N':  true,
	'O':  true,
	'P':  true,
	'Q':  true,
	'R':  true,
	'S':  true,
	'T':  true,
	'U':  true,
	'W':  true,
	'V':  true,
	'X':  true,
	'Y':  true,
	'Z':  true,
	'^':  true,
	'_':  true,
	'`':  true,
	'a':  true,
	'b':  true,
	'c':  true,
	'd':  true,
	'e':  true,
	'f':  true,
	'g':  true,
	'h':  true,
	'i':  true,
	'j':  true,
	'k':  true,
	'l':  true,
	'm':  true,
	'n':  true,
	'o':  true,
	'p':  true,
	'q':  true,
	'r':  true,
	's':  true,
	't':  true,
	'u':  true,
	'v':  true,
	'w':  true,
	'x':  true,
	'y':  true,
	'z':  true,
	'|':  true,
	'~':  true,
}

func IsTokenRune(r rune) bool {
	i := int(r)
	return i < len(isTokenTable) && isTokenTable[i]
}

func isNotToken(r rune) bool {
	return !IsTokenRune(r)
}

// HeaderValuesContainsToken reports whether any string in values
// contains the provided token, ASCII case-insensitively.
func HeaderValuesContainsToken(values []string, token string) bool {
	for _, v := range values {
		if headerValueContainsToken(v, token) {
			return true
		}
	}
	return false
}

// isOWS reports whether b is an optional whitespace byte, as defined
// by RFC 7230 section 3.2.3.
func isOWS(b byte) bool { return b == ' ' || b == '\t' }

// trimOWS returns x with all optional whitespace removed from the
// beginning and end.
func trimOWS(x string) string {
	// TODO: consider using strings.Trim(x, " \t") instead,
	// if and when it's fast enough. See issue 10292.
	// But this ASCII-only code will probably always beat UTF-8
	// aware code.
	for len(x) > 0 && isOWS(x[0]) {
		x = x[1:]
	}
	for len(x) > 0 && isOWS(x[len(x)-1]) {
		x = x[:len(x)-1]
	}
	return x
}

// headerValueContainsToken reports whether v (assumed to be a
// 0#element, in the ABNF extension described in RFC 7230 section 7)
// contains token amongst its comma-separated tokens, ASCII
// case-insensitively.
func headerValueContainsToken(v string, token string) bool {
	v = trimOWS(v)
	if comma := strings.IndexByte(v, ','); comma != -1 {
		return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token)
	}
	return tokenEqual(v, token)
}
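
// For example, HeaderValuesContainsToken([]string{"bar , foo"}, "FOO")
// reports true: the value is split on commas, OWS-trimmed, and compared
// ASCII case-insensitively (see the accompanying tests).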

// lowerASCII returns the ASCII lowercase version of b.
func lowerASCII(b byte) byte {
	if 'A' <= b && b <= 'Z' {
		return b + ('a' - 'A')
	}
	return b
}

// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively.
func tokenEqual(t1, t2 string) bool {
	if len(t1) != len(t2) {
		return false
	}
	for i, b := range t1 {
		if b >= utf8.RuneSelf {
			// No UTF-8 or non-ASCII allowed in tokens.
			return false
		}
		if lowerASCII(byte(b)) != lowerASCII(t2[i]) {
			return false
		}
	}
	return true
}

// isLWS reports whether b is linear white space, according
// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
//	LWS = [CRLF] 1*( SP | HT )
func isLWS(b byte) bool { return b == ' ' || b == '\t' }

// isCTL reports whether b is a control byte, according
// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
//	CTL = <any US-ASCII control character
//	      (octets 0 - 31) and DEL (127)>
func isCTL(b byte) bool {
	const del = 0x7f // a CTL
	return b < ' ' || b == del
}

// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name.
// HTTP/2 imposes the additional restriction that uppercase ASCII
// letters are not allowed.
//
// RFC 7230 says:
//	header-field = field-name ":" OWS field-value OWS
//	field-name   = token
//	token        = 1*tchar
//	tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
//	        "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
func ValidHeaderFieldName(v string) bool {
	if len(v) == 0 {
		return false
	}
	for _, r := range v {
		if !IsTokenRune(r) {
			return false
		}
	}
	return true
}

// ValidHostHeader reports whether h is a valid host header.
func ValidHostHeader(h string) bool {
	// The latest spec is actually this:
	//
	//	http://tools.ietf.org/html/rfc7230#section-5.4
	//	Host = uri-host [ ":" port ]
	//
	// Where uri-host is:
	//	http://tools.ietf.org/html/rfc3986#section-3.2.2
	//
	// But we're going to be much more lenient for now and just
	// search for any byte that's not a valid byte in any of those
	// expressions.
	for i := 0; i < len(h); i++ {
		if !validHostByte[h[i]] {
			return false
		}
	}
	return true
}

// See the validHostHeader comment.
var validHostByte = [256]bool{
	'0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true,
	'8': true, '9': true,

	'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true,
	'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true,
	'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true,
	'y': true, 'z': true,

	'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true,
	'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true,
	'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true,
	'Y': true, 'Z': true,

	'!':  true, // sub-delims
	'$':  true, // sub-delims
	'%':  true, // pct-encoded (and used in IPv6 zones)
	'&':  true, // sub-delims
	'(':  true, // sub-delims
	')':  true, // sub-delims
	'*':  true, // sub-delims
	'+':  true, // sub-delims
	',':  true, // sub-delims
	'-':  true, // unreserved
	'.':  true, // unreserved
	':':  true, // IPv6address + Host expression's optional port
	';':  true, // sub-delims
	'=':  true, // sub-delims
	'[':  true,
	'\'': true, // sub-delims
	']':  true,
	'_':  true, // unreserved
	'~':  true, // unreserved
}

// ValidHeaderFieldValue reports whether v is a valid "field-value" according to
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 :
//
//	message-header = field-name ":" [ field-value ]
//	field-value    = *( field-content | LWS )
//	field-content  = <the OCTETs making up the field-value
//	                 and consisting of either *TEXT or combinations
//	                 of token, separators, and quoted-string>
//
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 :
//
//	TEXT = <any OCTET except CTLs,
//	       but including LWS>
//	LWS  = [CRLF] 1*( SP | HT )
//	CTL  = <any US-ASCII control character
//	       (octets 0 - 31) and DEL (127)>
//
// RFC 7230 says:
//	field-value   = *( field-content / obs-fold )
//	obs-fold      = N/A to http2, and deprecated
//	field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
//	field-vchar   = VCHAR / obs-text
//	obs-text      = %x80-FF
//	VCHAR         = "any visible [USASCII] character"
//
// http2 further says: "Similarly, HTTP/2 allows header field values
// that are not valid. While most of the values that can be encoded
// will not alter header field parsing, carriage return (CR, ASCII
// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII
// 0x0) might be exploited by an attacker if they are translated
// verbatim. Any request or response that contains a character not
// permitted in a header field value MUST be treated as malformed
// (Section 8.1.2.6). Valid characters are defined by the
// field-content ABNF rule in Section 3.2 of [RFC7230]."
//
// This function does not (yet?) properly handle the rejection of
// strings that begin or end with SP or HTAB.
func ValidHeaderFieldValue(v string) bool {
	for i := 0; i < len(v); i++ {
		b := v[i]
		if isCTL(b) && !isLWS(b) {
			return false
		}
	}
	return true
}
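
// For example, ValidHeaderFieldValue("foo bar") is true (SP counts as LWS),
// while ValidHeaderFieldValue("foo\r\nbar") is false: CR and LF are CTLs
// that are not LWS under the definitions above.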

func isASCII(s string) bool {
	for i := 0; i < len(s); i++ {
		if s[i] >= utf8.RuneSelf {
			return false
		}
	}
	return true
}

// PunycodeHostPort returns the IDNA Punycode version
// of the provided "host" or "host:port" string.
func PunycodeHostPort(v string) (string, error) {
	if isASCII(v) {
		return v, nil
	}

	host, port, err := net.SplitHostPort(v)
	if err != nil {
		// The input 'v' argument was just a "host" argument,
		// without a port. This error should not be returned
		// to the caller.
		host = v
		port = ""
	}
	host, err = idna.ToASCII(host)
	if err != nil {
		// Non-UTF-8? Not representable in Punycode, in any
		// case.
		return "", err
	}
	if port == "" {
		return host, nil
	}
	return net.JoinHostPort(host, port), nil
}
119
vendor/golang.org/x/net/lex/httplex/httplex_test.go
generated
vendored
Normal file
@ -0,0 +1,119 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package httplex

import (
	"testing"
)

func isChar(c rune) bool { return c <= 127 }

func isCtl(c rune) bool { return c <= 31 || c == 127 }

func isSeparator(c rune) bool {
	switch c {
	case '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', '\t':
		return true
	}
	return false
}

func TestIsToken(t *testing.T) {
	for i := 0; i <= 130; i++ {
		r := rune(i)
		expected := isChar(r) && !isCtl(r) && !isSeparator(r)
		if IsTokenRune(r) != expected {
			t.Errorf("isToken(0x%x) = %v", r, !expected)
		}
	}
}

func TestHeaderValuesContainsToken(t *testing.T) {
	tests := []struct {
		vals  []string
		token string
		want  bool
	}{
		{
			vals:  []string{"foo"},
			token: "foo",
			want:  true,
		},
		{
			vals:  []string{"bar", "foo"},
			token: "foo",
			want:  true,
		},
		{
			vals:  []string{"foo"},
			token: "FOO",
			want:  true,
		},
		{
			vals:  []string{"foo"},
			token: "bar",
			want:  false,
		},
		{
			vals:  []string{" foo "},
			token: "FOO",
			want:  true,
		},
		{
			vals:  []string{"foo,bar"},
			token: "FOO",
			want:  true,
		},
		{
			vals:  []string{"bar,foo,bar"},
			token: "FOO",
			want:  true,
		},
		{
			vals:  []string{"bar , foo"},
			token: "FOO",
			want:  true,
		},
		{
			vals:  []string{"foo ,bar "},
			token: "FOO",
			want:  true,
		},
		{
			vals:  []string{"bar, foo ,bar"},
			token: "FOO",
			want:  true,
		},
		{
			vals:  []string{"bar , foo"},
			token: "FOO",
			want:  true,
		},
	}
	for _, tt := range tests {
		got := HeaderValuesContainsToken(tt.vals, tt.token)
		if got != tt.want {
			t.Errorf("headerValuesContainsToken(%q, %q) = %v; want %v", tt.vals, tt.token, got, tt.want)
		}
	}
}

func TestPunycodeHostPort(t *testing.T) {
	tests := []struct {
		in, want string
	}{
		{"www.google.com", "www.google.com"},
		{"гофер.рф", "xn--c1ae0ajs.xn--p1ai"},
		{"bücher.de", "xn--bcher-kva.de"},
		{"bücher.de:8080", "xn--bcher-kva.de:8080"},
		{"[1::6]:8080", "[1::6]:8080"},
	}
	for _, tt := range tests {
		got, err := PunycodeHostPort(tt.in)
		if tt.want != got || err != nil {
			t.Errorf("PunycodeHostPort(%q) = %q, %v, want %q, nil", tt.in, got, err, tt.want)
		}
	}
}
10
vendor/golang.org/x/text/.gitattributes
generated
vendored
Normal file
@ -0,0 +1,10 @@
# Treat all files in this repo as binary, with no git magic updating
# line endings. Windows users contributing to Go will need to use a
# modern version of git and editors capable of LF line endings.
#
# We'll prevent accidental CRLF line endings from entering the repo
# via the git-review gofmt checks.
#
# See golang.org/issue/9281

* -text
6
vendor/golang.org/x/text/.gitignore
generated
vendored
Normal file
@ -0,0 +1,6 @@
# Add no patterns to .gitignore except for files generated by the build.
last-change
/DATA
# This file is rather large and the tests really only need to be run
# after generation.
/unicode/norm/data_test.go
3
vendor/golang.org/x/text/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.
31
vendor/golang.org/x/text/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,31 @@
# Contributing to Go

Go is an open source project.

It is the work of hundreds of contributors. We appreciate your help!

## Filing issues

When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions:

1. What version of Go are you using (`go version`)?
2. What operating system and processor architecture are you using?
3. What did you do?
4. What did you expect to see?
5. What did you see instead?

General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
The gophers there will answer or ask you to file an issue if you've tripped over a bug.

## Contributing code

Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
before sending patches.

**We do not accept GitHub pull requests**
(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).

Unless otherwise noted, the Go source files are distributed under
the BSD-style license found in the LICENSE file.
3
vendor/golang.org/x/text/CONTRIBUTORS
generated
vendored
Normal file
@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.
27
vendor/golang.org/x/text/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22
vendor/golang.org/x/text/PATENTS
generated
vendored
Normal file
@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)

"This implementation" means the copyrightable works distributed by
Google as part of the Go project.

Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
93
vendor/golang.org/x/text/README.md
generated
vendored
Normal file
@ -0,0 +1,93 @@
# Go Text

This repository holds supplementary Go libraries for text processing, many involving Unicode.

## Semantic Versioning
This repo uses Semantic versioning (http://semver.org/), so
1. MAJOR version when you make incompatible API changes,
1. MINOR version when you add functionality in a backwards-compatible manner,
   and
1. PATCH version when you make backwards-compatible bug fixes.

Until version 1.0.0 of x/text is reached, the minor version is considered a
major version. So going from 0.1.0 to 0.2.0 is considered to be a major version
bump.

A major new CLDR version is mapped to a minor version increase in x/text.
Any other new CLDR version is mapped to a patch version increase in x/text.

It is important that the Unicode version used in `x/text` matches the one used
by your Go compiler. The `x/text` repository supports multiple versions of
Unicode and will match the version of Unicode to that of the Go compiler. At the
moment this is supported for Go compilers from version 1.7.

## Download/Install

The easiest way to install is to run `go get -u golang.org/x/text`. You can
also manually git clone the repository to `$GOPATH/src/golang.org/x/text`.

## Contribute
To submit changes to this repository, see http://golang.org/doc/contribute.html.

To generate the tables in this repository (except for the encoding tables),
run go generate from this directory. By default tables are generated for the
Unicode version in core and the CLDR version defined in
golang.org/x/text/unicode/cldr.

Running go generate will as a side effect create a DATA subdirectory in this
directory, which holds all files that are used as a source for generating the
tables. This directory will also serve as a cache.

## Testing
Run

    go test ./...

from this directory to run all tests. Add the "-tags icu" flag to also run
ICU conformance tests (if available). This requires that you have the correct
ICU version installed on your system.

TODO:
- updating unversioned source files.

## Generating Tables

To generate the tables in this repository (except for the encoding
tables), run `go generate` from this directory. By default tables are
generated for the Unicode version in core and the CLDR version defined in
golang.org/x/text/unicode/cldr.

Running go generate will as a side effect create a DATA subdirectory in this
directory which holds all files that are used as a source for generating the
tables. This directory will also serve as a cache.

## Versions
To update a Unicode version run

    UNICODE_VERSION=x.x.x go generate

where `x.x.x` must correspond to a directory in http://www.unicode.org/Public/.
If this version is newer than the version in core it will also update the
relevant packages there. The idna package in x/net will always be updated.

To update a CLDR version run

    CLDR_VERSION=version go generate

where `version` must correspond to a directory in
http://www.unicode.org/Public/cldr/.

Note that the code gets adapted over time to changes in the data and that
backwards compatibility is not maintained.
So updating to a different version may not work.

The files in DATA/{iana|icu|w3|whatwg} are currently not versioned.

## Report Issues / Send Patches

This repository uses Gerrit for code changes. To learn how to submit changes to
this repository, see https://golang.org/doc/contribute.html.

The main issue tracker for the text repository is located at
https://github.com/golang/go/issues. Prefix your issue with "x/text:" in the
subject line, so it is easy to find.
|
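
For a sense of how the libraries vendored here are consumed, a minimal sketch of locale-aware sorting with the collate package is shown below. It assumes golang.org/x/text/collate and golang.org/x/text/language are importable at this revision and is illustrative only, not part of this change:

package main

import (
	"fmt"

	"golang.org/x/text/collate"
	"golang.org/x/text/language"
)

func main() {
	// In Swedish, "ä" sorts after "z", so a plain byte-wise sort
	// would order these words incorrectly.
	words := []string{"äpple", "banan", "citron"}
	c := collate.New(language.Swedish)
	c.SortStrings(words)
	fmt.Println(words) // [banan citron äpple]
}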
1
vendor/golang.org/x/text/codereview.cfg
generated
vendored
Normal file
@ -0,0 +1 @@
issuerepo: golang/go
702
vendor/golang.org/x/text/collate/build/builder.go
generated
vendored
Normal file
@ -0,0 +1,702 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package build // import "golang.org/x/text/collate/build"

import (
	"fmt"
	"io"
	"log"
	"sort"
	"strings"
	"unicode/utf8"

	"golang.org/x/text/internal/colltab"
	"golang.org/x/text/language"
	"golang.org/x/text/unicode/norm"
)

// TODO: optimizations:
// - expandElem is currently 20K. By putting unique colElems in a separate
//   table and having a byte array of indexes into this table, we can reduce
//   the total size to about 7K. By also factoring out the length bytes, we
//   can reduce this to about 6K.
// - trie valueBlocks are currently 100K. There are a lot of sparse blocks
//   and many consecutive values with the same stride. This can be further
//   compacted.
// - Compress secondary weights into 8 bits.
// - Some LDML specs specify a context element. Currently we simply concatenate
//   those. Context can be implemented using the contraction trie. If Builder
//   could analyze and detect when using a context makes sense, there is no
//   need to expose this construct in the API.

// A Builder builds a root collation table. The user must specify the
// collation elements for each entry. A common use will be to base the weights
// on those specified in the allkeys* file as provided by the UCA or CLDR.
type Builder struct {
	index  *trieBuilder
	root   ordering
	locale []*Tailoring
	t      *table
	err    error
	built  bool

	minNonVar int // lowest primary recorded for a variable
	varTop    int // highest primary recorded for a non-variable

	// indexes used for reusing expansions and contractions
	expIndex map[string]int      // positions of expansions keyed by their string representation
	ctHandle map[string]ctHandle // contraction handles keyed by a concatenation of the suffixes
	ctElem   map[string]int      // contraction elements keyed by their string representation
}

// A Tailoring builds a collation table based on another collation table.
// The table is defined by specifying tailorings to the underlying table.
// See http://unicode.org/reports/tr35/ for an overview of tailoring
// collation tables. The CLDR contains pre-defined tailorings for a variety
// of languages (See http://www.unicode.org/Public/cldr/<version>/core.zip.)
type Tailoring struct {
	id      string
	builder *Builder
	index   *ordering

	anchor *entry
	before bool
}

// NewBuilder returns a new Builder.
func NewBuilder() *Builder {
	return &Builder{
		index:    newTrieBuilder(),
		root:     makeRootOrdering(),
		expIndex: make(map[string]int),
		ctHandle: make(map[string]ctHandle),
		ctElem:   make(map[string]int),
	}
}

// Tailoring returns a Tailoring for the given locale. One should
// have completed all calls to Add before calling Tailoring.
func (b *Builder) Tailoring(loc language.Tag) *Tailoring {
	t := &Tailoring{
		id:      loc.String(),
		builder: b,
		index:   b.root.clone(),
	}
	t.index.id = t.id
	b.locale = append(b.locale, t)
	return t
}

// Add adds an entry to the collation element table, mapping
// a slice of runes to a sequence of collation elements.
// A collation element is specified as list of weights: []int{primary, secondary, ...}.
// The entries are typically obtained from a collation element table
// as defined in http://www.unicode.org/reports/tr10/#Data_Table_Format.
// Note that the collation elements specified by colelems are only used
// as a guide. The actual weights generated by Builder may differ.
// The argument variables is a list of indices into colelems that should contain
// a value for each colelem that is a variable. (See the reference above.)
func (b *Builder) Add(runes []rune, colelems [][]int, variables []int) error {
	str := string(runes)
	elems := make([]rawCE, len(colelems))
	for i, ce := range colelems {
		if len(ce) == 0 {
			break
		}
		elems[i] = makeRawCE(ce, 0)
		if len(ce) == 1 {
			elems[i].w[1] = defaultSecondary
		}
		if len(ce) <= 2 {
			elems[i].w[2] = defaultTertiary
		}
		if len(ce) <= 3 {
			elems[i].w[3] = ce[0]
		}
	}
	for i, ce := range elems {
		p := ce.w[0]
		isvar := false
		for _, j := range variables {
			if i == j {
				isvar = true
			}
		}
		if isvar {
			if p >= b.minNonVar && b.minNonVar > 0 {
				return fmt.Errorf("primary value %X of variable is larger than the smallest non-variable %X", p, b.minNonVar)
			}
			if p > b.varTop {
				b.varTop = p
			}
		} else if p > 1 { // 1 is a special primary value reserved for FFFE
			if p <= b.varTop {
				return fmt.Errorf("primary value %X of non-variable is smaller than the highest variable %X", p, b.varTop)
			}
			if b.minNonVar == 0 || p < b.minNonVar {
				b.minNonVar = p
			}
		}
	}
	elems, err := convertLargeWeights(elems)
	if err != nil {
		return err
	}
	cccs := []uint8{}
	nfd := norm.NFD.String(str)
	for i := range nfd {
		cccs = append(cccs, norm.NFD.PropertiesString(nfd[i:]).CCC())
	}
	if len(cccs) < len(elems) {
		if len(cccs) > 2 {
			return fmt.Errorf("number of decomposed characters should be greater or equal to the number of collation elements for len(colelems) > 3 (%d < %d)", len(cccs), len(elems))
		}
		p := len(elems) - 1
		for ; p > 0 && elems[p].w[0] == 0; p-- {
			elems[p].ccc = cccs[len(cccs)-1]
		}
		for ; p >= 0; p-- {
			elems[p].ccc = cccs[0]
		}
	} else {
		for i := range elems {
			elems[i].ccc = cccs[i]
		}
	}
	// doNorm in collate.go assumes that the following conditions hold.
	if len(elems) > 1 && len(cccs) > 1 && cccs[0] != 0 && cccs[0] != cccs[len(cccs)-1] {
		return fmt.Errorf("incompatible CCC values for expansion %X (%d)", runes, cccs)
	}
	b.root.newEntry(str, elems)
	return nil
}

func (t *Tailoring) setAnchor(anchor string) error {
	anchor = norm.NFC.String(anchor)
	a := t.index.find(anchor)
	if a == nil {
		a = t.index.newEntry(anchor, nil)
		a.implicit = true
		a.modified = true
		for _, r := range []rune(anchor) {
			e := t.index.find(string(r))
			e.lock = true
		}
	}
	t.anchor = a
	return nil
}

// SetAnchor sets the point after which elements passed in subsequent calls to
// Insert will be inserted. It is equivalent to the reset directive in an LDML
// specification. See Insert for an example.
// SetAnchor supports the following logical reset positions:
// <first_tertiary_ignorable/>, <last_tertiary_ignorable/>, <first_primary_ignorable/>,
// and <last_non_ignorable/>.
func (t *Tailoring) SetAnchor(anchor string) error {
	if err := t.setAnchor(anchor); err != nil {
		return err
	}
	t.before = false
	return nil
}

// SetAnchorBefore is similar to SetAnchor, except that subsequent calls to
// Insert will insert entries before the anchor.
func (t *Tailoring) SetAnchorBefore(anchor string) error {
	if err := t.setAnchor(anchor); err != nil {
		return err
	}
	t.before = true
	return nil
}

// Insert sets the ordering of str relative to the entry set by the previous
// call to SetAnchor or Insert. The argument extend corresponds
// to the extend elements as defined in LDML. A non-empty value for extend
// will cause the collation elements corresponding to extend to be appended
// to the collation elements generated for the entry added by Insert.
// This has the same net effect as sorting str after the string anchor+extend.
// See http://www.unicode.org/reports/tr10/#Tailoring_Example for details
// on parametric tailoring and http://unicode.org/reports/tr35/#Collation_Elements
// for full details on LDML.
//
// Examples: create a tailoring for Swedish, where "ä" is ordered after "z"
// at the primary sorting level:
//	t := b.Tailoring("se")
//	t.SetAnchor("z")
//	t.Insert(colltab.Primary, "ä", "")
// Order "ü" after "ue" at the secondary sorting level:
//	t.SetAnchor("ue")
//	t.Insert(colltab.Secondary, "ü", "")
// or
//	t.SetAnchor("u")
//	t.Insert(colltab.Secondary, "ü", "e")
// Order "q" after "ab" at the secondary level and "Q" after "q"
// at the tertiary level:
//	t.SetAnchor("ab")
//	t.Insert(colltab.Secondary, "q", "")
//	t.Insert(colltab.Tertiary, "Q", "")
// Order "b" before "a":
//	t.SetAnchorBefore("a")
//	t.Insert(colltab.Primary, "b", "")
// Order "0" after the last primary ignorable:
//	t.SetAnchor("<last_primary_ignorable/>")
//	t.Insert(colltab.Primary, "0", "")
func (t *Tailoring) Insert(level colltab.Level, str, extend string) error {
	if t.anchor == nil {
		return fmt.Errorf("%s:Insert: no anchor point set for tailoring of %s", t.id, str)
	}
	str = norm.NFC.String(str)
	e := t.index.find(str)
	if e == nil {
		e = t.index.newEntry(str, nil)
	} else if e.logical != noAnchor {
		return fmt.Errorf("%s:Insert: cannot reinsert logical reset position %q", t.id, e.str)
	}
	if e.lock {
		return fmt.Errorf("%s:Insert: cannot reinsert element %q", t.id, e.str)
	}
	a := t.anchor
	// Find the first element after the anchor which differs at a level smaller or
	// equal to the given level. Then insert at this position.
	// See http://unicode.org/reports/tr35/#Collation_Elements, Section 5.14.5 for details.
	e.before = t.before
	if t.before {
		t.before = false
		if a.prev == nil {
			a.insertBefore(e)
		} else {
			for a = a.prev; a.level > level; a = a.prev {
			}
			a.insertAfter(e)
		}
		e.level = level
	} else {
		for ; a.level > level; a = a.next {
		}
		e.level = a.level
		if a != e {
			a.insertAfter(e)
			a.level = level
		} else {
			// We don't set a to prev itself. This has the effect of the entry
			// getting new collation elements that are an increment of itself.
			// This is intentional.
			a.prev.level = level
		}
	}
	e.extend = norm.NFD.String(extend)
	e.exclude = false
	e.modified = true
	e.elems = nil
	t.anchor = e
	return nil
}

func (o *ordering) getWeight(e *entry) []rawCE {
	if len(e.elems) == 0 && e.logical == noAnchor {
		if e.implicit {
			for _, r := range e.runes {
				e.elems = append(e.elems, o.getWeight(o.find(string(r)))...)
			}
		} else if e.before {
			count := [colltab.Identity + 1]int{}
			a := e
			for ; a.elems == nil && !a.implicit; a = a.next {
				count[a.level]++
			}
			e.elems = []rawCE{makeRawCE(a.elems[0].w, a.elems[0].ccc)}
			for i := colltab.Primary; i < colltab.Quaternary; i++ {
				if count[i] != 0 {
					e.elems[0].w[i] -= count[i]
					break
				}
			}
			if e.prev != nil {
				o.verifyWeights(e.prev, e, e.prev.level)
			}
		} else {
			prev := e.prev
			e.elems = nextWeight(prev.level, o.getWeight(prev))
			o.verifyWeights(e, e.next, e.level)
		}
	}
	return e.elems
}

func (o *ordering) addExtension(e *entry) {
	if ex := o.find(e.extend); ex != nil {
		e.elems = append(e.elems, ex.elems...)
	} else {
		for _, r := range []rune(e.extend) {
			e.elems = append(e.elems, o.find(string(r)).elems...)
		}
	}
	e.extend = ""
}

func (o *ordering) verifyWeights(a, b *entry, level colltab.Level) error {
	if level == colltab.Identity || b == nil || b.elems == nil || a.elems == nil {
		return nil
	}
	for i := colltab.Primary; i < level; i++ {
		if a.elems[0].w[i] < b.elems[0].w[i] {
			return nil
		}
	}
	if a.elems[0].w[level] >= b.elems[0].w[level] {
		err := fmt.Errorf("%s:overflow: collation elements of %q (%X) overflows those of %q (%X) at level %d (%X >= %X)", o.id, a.str, a.runes, b.str, b.runes, level, a.elems, b.elems)
		log.Println(err)
		// TODO: return the error instead, or better, fix the conflicting entry by making room.
	}
	return nil
}

func (b *Builder) error(e error) {
	if e != nil {
		b.err = e
	}
}

func (b *Builder) errorID(locale string, e error) {
	if e != nil {
		b.err = fmt.Errorf("%s:%v", locale, e)
	}
}

// patchNorm ensures that NFC and NFD counterparts are consistent.
func (o *ordering) patchNorm() {
	// Insert the NFD counterparts, if necessary.
	for _, e := range o.ordered {
		nfd := norm.NFD.String(e.str)
		if nfd != e.str {
			if e0 := o.find(nfd); e0 != nil && !e0.modified {
				e0.elems = e.elems
			} else if e.modified && !equalCEArrays(o.genColElems(nfd), e.elems) {
				e := o.newEntry(nfd, e.elems)
				e.modified = true
			}
		}
	}
	// Update unchanged composed forms if one of their parts changed.
	for _, e := range o.ordered {
		nfd := norm.NFD.String(e.str)
		if e.modified || nfd == e.str {
			continue
		}
		if e0 := o.find(nfd); e0 != nil {
			e.elems = e0.elems
		} else {
			e.elems = o.genColElems(nfd)
			if norm.NFD.LastBoundary([]byte(nfd)) == 0 {
				r := []rune(nfd)
				head := string(r[0])
				tail := ""
				for i := 1; i < len(r); i++ {
					s := norm.NFC.String(head + string(r[i]))
					if e0 := o.find(s); e0 != nil && e0.modified {
						head = s
					} else {
						tail += string(r[i])
					}
				}
				e.elems = append(o.genColElems(head), o.genColElems(tail)...)
			}
		}
	}
	// Exclude entries for which the individual runes generate the same collation elements.
	for _, e := range o.ordered {
		if len(e.runes) > 1 && equalCEArrays(o.genColElems(e.str), e.elems) {
			e.exclude = true
		}
	}
}

func (b *Builder) buildOrdering(o *ordering) {
	for _, e := range o.ordered {
		o.getWeight(e)
	}
	for _, e := range o.ordered {
		o.addExtension(e)
	}
	o.patchNorm()
	o.sort()
	simplify(o)
	b.processExpansions(o)   // requires simplify
	b.processContractions(o) // requires simplify

	t := newNode()
	for e := o.front(); e != nil; e, _ = e.nextIndexed() {
		if !e.skip() {
			ce, err := e.encode()
			b.errorID(o.id, err)
			t.insert(e.runes[0], ce)
		}
	}
	o.handle = b.index.addTrie(t)
}

func (b *Builder) build() (*table, error) {
	if b.built {
		return b.t, b.err
	}
	b.built = true
	b.t = &table{
		Table: colltab.Table{
			MaxContractLen: utf8.UTFMax,
			VariableTop:    uint32(b.varTop),
		},
	}

	b.buildOrdering(&b.root)
	b.t.root = b.root.handle
	for _, t := range b.locale {
		b.buildOrdering(t.index)
		if b.err != nil {
			break
		}
	}
	i, err := b.index.generate()
	b.t.trie = *i
	b.t.Index = colltab.Trie{
		Index:   i.index,
		Values:  i.values,
		Index0:  i.index[blockSize*b.t.root.lookupStart:],
		Values0: i.values[blockSize*b.t.root.valueStart:],
	}
	b.error(err)
	return b.t, b.err
}

// Build builds the root Collator.
func (b *Builder) Build() (colltab.Weighter, error) {
	table, err := b.build()
	if err != nil {
		return nil, err
	}
	return table, nil
}

// Build builds a Collator for Tailoring t.
func (t *Tailoring) Build() (colltab.Weighter, error) {
	// TODO: implement.
	return nil, nil
}

// Print prints the tables for b and all its Tailorings as a Go file
// that can be included in the Collate package.
func (b *Builder) Print(w io.Writer) (n int, err error) {
	p := func(nn int, e error) {
		n += nn
		if err == nil {
			err = e
		}
	}
	t, err := b.build()
	if err != nil {
		return 0, err
	}
	p(fmt.Fprintf(w, `var availableLocales = "und`))
	for _, loc := range b.locale {
		if loc.id != "und" {
			p(fmt.Fprintf(w, ",%s", loc.id))
		}
	}
	p(fmt.Fprint(w, "\"\n\n"))
	p(fmt.Fprintf(w, "const varTop = 0x%x\n\n", b.varTop))
	p(fmt.Fprintln(w, "var locales = [...]tableIndex{"))
	for _, loc := range b.locale {
		if loc.id == "und" {
			p(t.fprintIndex(w, loc.index.handle, loc.id))
		}
	}
	for _, loc := range b.locale {
		if loc.id != "und" {
			p(t.fprintIndex(w, loc.index.handle, loc.id))
		}
	}
	p(fmt.Fprint(w, "}\n\n"))
	n, _, err = t.fprint(w, "main")
	return
}

// reproducibleFromNFKD checks whether the given expansion could be generated
// from an NFKD expansion.
func reproducibleFromNFKD(e *entry, exp, nfkd []rawCE) bool {
	// Length must be equal.
	if len(exp) != len(nfkd) {
		return false
	}
	for i, ce := range exp {
		// Primary and secondary values should be equal.
		if ce.w[0] != nfkd[i].w[0] || ce.w[1] != nfkd[i].w[1] {
			return false
		}
		// Tertiary values should be equal to maxTertiary for third element onwards.
		// TODO: there seem to be a lot of cases in CLDR (e.g. ㏭ in zh.xml) that can
		// simply be dropped. Try this out by dropping the following code.
		if i >= 2 && ce.w[2] != maxTertiary {
			return false
		}
		if _, err := makeCE(ce); err != nil {
			// Simply return false. The error will be caught elsewhere.
			return false
		}
	}
	return true
}

func simplify(o *ordering) {
	// Runes that are a starter of a contraction should not be removed.
	// (To date, there is only the Kannada character 0CCA.)
	keep := make(map[rune]bool)
	for e := o.front(); e != nil; e, _ = e.nextIndexed() {
		if len(e.runes) > 1 {
			keep[e.runes[0]] = true
		}
	}
	// Tag entries for which the runes NFKD decompose to identical values.
	for e := o.front(); e != nil; e, _ = e.nextIndexed() {
		s := e.str
		nfkd := norm.NFKD.String(s)
		nfd := norm.NFD.String(s)
		if e.decompose || len(e.runes) > 1 || len(e.elems) == 1 || keep[e.runes[0]] || nfkd == nfd {
			continue
		}
		if reproducibleFromNFKD(e, e.elems, o.genColElems(nfkd)) {
			e.decompose = true
		}
	}
}

// appendExpansion converts the given collation sequence to
// collation elements and adds them to the expansion table.
// It returns an index to the expansion table.
func (b *Builder) appendExpansion(e *entry) int {
	t := b.t
	i := len(t.ExpandElem)
	ce := uint32(len(e.elems))
	t.ExpandElem = append(t.ExpandElem, ce)
	for _, w := range e.elems {
		ce, err := makeCE(w)
		if err != nil {
			b.error(err)
			return -1
		}
		t.ExpandElem = append(t.ExpandElem, ce)
	}
	return i
}

// processExpansions extracts data necessary to generate
// the expansion tables.
func (b *Builder) processExpansions(o *ordering) {
	for e := o.front(); e != nil; e, _ = e.nextIndexed() {
		if !e.expansion() {
			continue
		}
		key := fmt.Sprintf("%v", e.elems)
		i, ok := b.expIndex[key]
		if !ok {
			i = b.appendExpansion(e)
			b.expIndex[key] = i
		}
		e.expansionIndex = i
	}
}

func (b *Builder) processContractions(o *ordering) {
	// Collate contractions per starter rune.
	starters := []rune{}
	cm := make(map[rune][]*entry)
	for e := o.front(); e != nil; e, _ = e.nextIndexed() {
		if e.contraction() {
			if len(e.str) > b.t.MaxContractLen {
				b.t.MaxContractLen = len(e.str)
			}
			r := e.runes[0]
			if _, ok := cm[r]; !ok {
				starters = append(starters, r)
			}
			cm[r] = append(cm[r], e)
		}
	}
	// Add entries of single runes that are at a start of a contraction.
	for e := o.front(); e != nil; e, _ = e.nextIndexed() {
		if !e.contraction() {
			r := e.runes[0]
			if _, ok := cm[r]; ok {
				cm[r] = append(cm[r], e)
			}
		}
	}
	// Build the tries for the contractions.
	t := b.t
	for _, r := range starters {
		l := cm[r]
		// Compute suffix strings. There are 31 different contraction suffix
		// sets for 715 contractions and 82 contraction starter runes as of
		// version 6.0.0.
		sufx := []string{}
		hasSingle := false
		for _, e := range l {
			if len(e.runes) > 1 {
				sufx = append(sufx, string(e.runes[1:]))
			} else {
				hasSingle = true
			}
		}
		if !hasSingle {
			b.error(fmt.Errorf("no single entry for starter rune %U found", r))
			continue
		}
		// Unique the suffix set.
		sort.Strings(sufx)
		key := strings.Join(sufx, "\n")
		handle, ok := b.ctHandle[key]
		if !ok {
			var err error
			handle, err = appendTrie(&t.ContractTries, sufx)
			if err != nil {
				b.error(err)
			}
			b.ctHandle[key] = handle
		}
		// Bucket sort entries in index order.
		es := make([]*entry, len(l))
		for _, e := range l {
			var p, sn int
			if len(e.runes) > 1 {
				str := []byte(string(e.runes[1:]))
				p, sn = lookup(&t.ContractTries, handle, str)
				if sn != len(str) {
					log.Fatalf("%s: processContractions: unexpected length for '%X'; len=%d; want %d", o.id, e.runes, sn, len(str))
				}
			}
			if es[p] != nil {
				log.Fatalf("%s: multiple contractions for position %d for rune %U", o.id, p, e.runes[0])
			}
			es[p] = e
		}
		// Create collation elements for contractions.
		elems := []uint32{}
		for _, e := range es {
			ce, err := e.encodeBase()
			b.errorID(o.id, err)
			elems = append(elems, ce)
		}
		key = fmt.Sprintf("%v", elems)
		i, ok := b.ctElem[key]
		if !ok {
			i = len(t.ContractElem)
			b.ctElem[key] = i
			t.ContractElem = append(t.ContractElem, elems...)
		}
		// Store info in entry for starter rune.
		es[0].contractionIndex = i
		es[0].contractionHandle = handle
	}
}
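
To make the Builder flow above concrete, here is a minimal sketch of driving the exported API end to end. The weights are invented for illustration (they are not real DUCET data), and this is an editorial example, not part of the vendored file:

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/text/collate/build"
)

func main() {
	b := build.NewBuilder()
	// Invented weights: 'a' and 'b' get ascending primaries with the
	// default secondary (0x20) and tertiary (0x2) values.
	if err := b.Add([]rune("a"), [][]int{{0x100, 0x20, 0x2}}, nil); err != nil {
		log.Fatal(err)
	}
	if err := b.Add([]rune("b"), [][]int{{0x101, 0x20, 0x2}}, nil); err != nil {
		log.Fatal(err)
	}
	if _, err := b.Build(); err != nil { // root table as a weighter
		log.Fatal(err)
	}
	// Print emits the computed tables as Go source; this is how the
	// collate package's generated table files are produced.
	var buf bytes.Buffer
	if _, err := b.Print(&buf); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("generated %d bytes of table source\n", buf.Len())
}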
290
vendor/golang.org/x/text/collate/build/builder_test.go
generated
vendored
Normal file
@ -0,0 +1,290 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package build

import "testing"

// cjk returns an implicit collation element for a CJK rune.
func cjk(r rune) []rawCE {
	// A CJK character C is represented in the DUCET as
	//   [.AAAA.0020.0002.C][.BBBB.0000.0000.C]
	// Where AAAA is the most significant 15 bits plus a base value.
	// Any base value will work for the test, so we pick the common value of FB40.
	const base = 0xFB40
	return []rawCE{
		{w: []int{base + int(r>>15), defaultSecondary, defaultTertiary, int(r)}},
		{w: []int{int(r&0x7FFF) | 0x8000, 0, 0, int(r)}},
	}
}

func pCE(p int) []rawCE {
	return mkCE([]int{p, defaultSecondary, defaultTertiary, 0}, 0)
}

func pqCE(p, q int) []rawCE {
	return mkCE([]int{p, defaultSecondary, defaultTertiary, q}, 0)
}

func ptCE(p, t int) []rawCE {
	return mkCE([]int{p, defaultSecondary, t, 0}, 0)
}

func ptcCE(p, t int, ccc uint8) []rawCE {
	return mkCE([]int{p, defaultSecondary, t, 0}, ccc)
}

func sCE(s int) []rawCE {
	return mkCE([]int{0, s, defaultTertiary, 0}, 0)
}

func stCE(s, t int) []rawCE {
	return mkCE([]int{0, s, t, 0}, 0)
}

func scCE(s int, ccc uint8) []rawCE {
	return mkCE([]int{0, s, defaultTertiary, 0}, ccc)
}

func mkCE(w []int, ccc uint8) []rawCE {
	return []rawCE{rawCE{w, ccc}}
}

// ducetElem is used to define test data that is used to generate a table.
type ducetElem struct {
	str string
	ces []rawCE
}

func newBuilder(t *testing.T, ducet []ducetElem) *Builder {
	b := NewBuilder()
	for _, e := range ducet {
		ces := [][]int{}
		for _, ce := range e.ces {
			ces = append(ces, ce.w)
		}
		if err := b.Add([]rune(e.str), ces, nil); err != nil {
			t.Errorf(err.Error())
		}
	}
	b.t = &table{}
	b.root.sort()
	return b
}

type convertTest struct {
	in, out []rawCE
	err     bool
}

var convLargeTests = []convertTest{
	{pCE(0xFB39), pCE(0xFB39), false},
	{cjk(0x2F9B2), pqCE(0x3F9B2, 0x2F9B2), false},
	{pCE(0xFB40), pCE(0), true},
	{append(pCE(0xFB40), pCE(0)[0]), pCE(0), true},
	{pCE(0xFFFE), pCE(illegalOffset), false},
	{pCE(0xFFFF), pCE(illegalOffset + 1), false},
}

func TestConvertLarge(t *testing.T) {
	for i, tt := range convLargeTests {
		e := new(entry)
		for _, ce := range tt.in {
			e.elems = append(e.elems, makeRawCE(ce.w, ce.ccc))
		}
		elems, err := convertLargeWeights(e.elems)
		if tt.err {
			if err == nil {
				t.Errorf("%d: expected error; none found", i)
			}
			continue
		} else if err != nil {
			t.Errorf("%d: unexpected error: %v", i, err)
		}
		if !equalCEArrays(elems, tt.out) {
			t.Errorf("%d: conversion was %x; want %x", i, elems, tt.out)
		}
	}
}

// Collation element table for simplify tests.
var simplifyTest = []ducetElem{
	{"\u0300", sCE(30)}, // grave
	{"\u030C", sCE(40)}, // caron
	{"A", ptCE(100, 8)},
	{"D", ptCE(104, 8)},
	{"E", ptCE(105, 8)},
	{"I", ptCE(110, 8)},
	{"z", ptCE(130, 8)},
	{"\u05F2", append(ptCE(200, 4), ptCE(200, 4)[0])},
	{"\u05B7", sCE(80)},
	{"\u00C0", append(ptCE(100, 8), sCE(30)...)},                  // A with grave, can be removed
	{"\u00C8", append(ptCE(105, 8), sCE(30)...)},                  // E with grave
	{"\uFB1F", append(ptCE(200, 4), ptCE(200, 4)[0], sCE(80)[0])}, // eliminated by NFD
	{"\u00C8\u0302", ptCE(106, 8)},                                // block previous from simplifying
	{"\u01C5", append(ptCE(104, 9), ptCE(130, 4)[0], stCE(40, maxTertiary)[0])}, // eliminated by NFKD
	// no removal: tertiary value of third element is not maxTertiary
	{"\u2162", append(ptCE(110, 9), ptCE(110, 4)[0], ptCE(110, 8)[0])},
}

var genColTests = []ducetElem{
	{"\uFA70", pqCE(0x1FA70, 0xFA70)},
	{"A\u0300", append(ptCE(100, 8), sCE(30)...)},
	{"A\u0300\uFA70", append(ptCE(100, 8), sCE(30)[0], pqCE(0x1FA70, 0xFA70)[0])},
	{"A\u0300A\u0300", append(ptCE(100, 8), sCE(30)[0], ptCE(100, 8)[0], sCE(30)[0])},
}

func TestGenColElems(t *testing.T) {
	b := newBuilder(t, simplifyTest[:5])

	for i, tt := range genColTests {
		res := b.root.genColElems(tt.str)
		if !equalCEArrays(tt.ces, res) {
			t.Errorf("%d: result %X; want %X", i, res, tt.ces)
		}
	}
}

type strArray []string

func (sa strArray) contains(s string) bool {
	for _, e := range sa {
		if e == s {
			return true
		}
	}
	return false
}

var simplifyRemoved = strArray{"\u00C0", "\uFB1F"}
var simplifyMarked = strArray{"\u01C5"}

func TestSimplify(t *testing.T) {
	b := newBuilder(t, simplifyTest)
	o := &b.root
	simplify(o)

	for i, tt := range simplifyTest {
		if simplifyRemoved.contains(tt.str) {
			continue
		}
		e := o.find(tt.str)
		if e.str != tt.str || !equalCEArrays(e.elems, tt.ces) {
			t.Errorf("%d: found element %s -> %X; want %s -> %X", i, e.str, e.elems, tt.str, tt.ces)
			break
		}
	}
	var i, k int
	for e := o.front(); e != nil; e, _ = e.nextIndexed() {
		gold := simplifyMarked.contains(e.str)
		if gold {
			k++
		}
		if gold != e.decompose {
			t.Errorf("%d: %s has decompose %v; want %v", i, e.str, e.decompose, gold)
		}
		i++
	}
	if k != len(simplifyMarked) {
		t.Errorf("an entry that should be marked as decompose was deleted")
	}
}

var expandTest = []ducetElem{
	{"\u0300", append(scCE(29, 230), scCE(30, 230)...)},
	{"\u00C0", append(ptCE(100, 8), scCE(30, 230)...)},
	{"\u00C8", append(ptCE(105, 8), scCE(30, 230)...)},
	{"\u00C9", append(ptCE(105, 8), scCE(30, 230)...)}, // identical expansion
	{"\u05F2", append(ptCE(200, 4), ptCE(200, 4)[0], ptCE(200, 4)[0])},
	{"\u01FF", append(ptCE(200, 4), ptcCE(201, 4, 0)[0], scCE(30, 230)[0])},
}

func TestExpand(t *testing.T) {
	const (
		totalExpansions = 5
		totalElements   = 2 + 2 + 2 + 3 + 3 + totalExpansions
	)
	b := newBuilder(t, expandTest)
	o := &b.root
	b.processExpansions(o)

	e := o.front()
	for _, tt := range expandTest {
		exp := b.t.ExpandElem[e.expansionIndex:]
		if int(exp[0]) != len(tt.ces) {
			t.Errorf("%U: len(expansion)==%d; want %d", []rune(tt.str)[0], exp[0], len(tt.ces))
		}
		exp = exp[1:]
		for j, w := range tt.ces {
			if ce, _ := makeCE(w); exp[j] != ce {
				t.Errorf("%U: element %d is %X; want %X", []rune(tt.str)[0], j, exp[j], ce)
			}
		}
		e, _ = e.nextIndexed()
	}
	// Verify uniquing.
	if len(b.t.ExpandElem) != totalElements {
		t.Errorf("len(expandElem)==%d; want %d", len(b.t.ExpandElem), totalElements)
	}
}

var contractTest = []ducetElem{
	{"abc", pCE(102)},
	{"abd", pCE(103)},
	{"a", pCE(100)},
	{"ab", pCE(101)},
	{"ac", pCE(104)},
	{"bcd", pCE(202)},
	{"b", pCE(200)},
	{"bc", pCE(201)},
	{"bd", pCE(203)},
	// shares suffixes with a*
	{"Ab", pCE(301)},
	{"A", pCE(300)},
	{"Ac", pCE(304)},
	{"Abc", pCE(302)},
	{"Abd", pCE(303)},
	// starter to be ignored
	{"z", pCE(1000)},
}

func TestContract(t *testing.T) {
	const (
		totalElements = 5 + 5 + 4
	)
	b := newBuilder(t, contractTest)
	o := &b.root
	b.processContractions(o)

	indexMap := make(map[int]bool)
	handleMap := make(map[rune]*entry)
	for e := o.front(); e != nil; e, _ = e.nextIndexed() {
		if e.contractionHandle.n > 0 {
			handleMap[e.runes[0]] = e
			indexMap[e.contractionHandle.index] = true
		}
	}
	// Verify uniquing.
	if len(indexMap) != 2 {
		t.Errorf("number of tries is %d; want %d", len(indexMap), 2)
	}
	for _, tt := range contractTest {
		e, ok := handleMap[[]rune(tt.str)[0]]
		if !ok {
			continue
		}
		str := tt.str[1:]
		offset, n := lookup(&b.t.ContractTries, e.contractionHandle, []byte(str))
		if len(str) != n {
			t.Errorf("%s: bytes consumed==%d; want %d", tt.str, n, len(str))
		}
		ce := b.t.ContractElem[offset+e.contractionIndex]
		if want, _ := makeCE(tt.ces[0]); want != ce {
			t.Errorf("%s: element %X; want %X", tt.str, ce, want)
		}
	}
	if len(b.t.ContractElem) != totalElements {
		t.Errorf("len(contractElem)==%d; want %d", len(b.t.ContractElem), totalElements)
	}
}
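
The cjk helper and TestConvertLarge above exercise the double-primary encoding that colelem.go (the next file) converts into a single weight. Re-deriving the expected value for U+2F9B2 by hand, as a standalone sketch with the relevant constants inlined:

package main

import "fmt"

func main() {
	r := rune(0x2F9B2)
	hi := 0xFB40 + int(r>>15)    // 0xFB45: first (high-bits) primary, as produced by cjk()
	lo := int(r&0x7FFF) | 0x8000 // 0xF9B2: second primary, flagged with 0x8000
	// convertLargeWeights reassembles the rune value from the two parts and
	// shifts it into the common unified CJK range (offset 0x10000, since hi < 0xFB80).
	np := ((hi & 0x3F) << 15) + lo&0x7FFF + 0x10000
	fmt.Printf("%X\n", np) // 3F9B2, matching the expected pqCE(0x3F9B2, 0x2F9B2)
}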
294
vendor/golang.org/x/text/collate/build/colelem.go
generated
vendored
Normal file
@ -0,0 +1,294 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package build

import (
	"fmt"
	"unicode"

	"golang.org/x/text/internal/colltab"
)

const (
	defaultSecondary = 0x20
	defaultTertiary  = 0x2
	maxTertiary      = 0x1F
)

type rawCE struct {
	w   []int
	ccc uint8
}

func makeRawCE(w []int, ccc uint8) rawCE {
	ce := rawCE{w: make([]int, 4), ccc: ccc}
	copy(ce.w, w)
	return ce
}

// A collation element is represented as an uint32.
// In the typical case, a rune maps to a single collation element. If a rune
// can be the start of a contraction or expands into multiple collation elements,
// then the collation element that is associated with a rune will have a special
// form to represent such m to n mappings. Such special collation elements
// have a value >= 0x80000000.

const (
	maxPrimaryBits   = 21
	maxSecondaryBits = 12
	maxTertiaryBits  = 8
)

func makeCE(ce rawCE) (uint32, error) {
	v, e := colltab.MakeElem(ce.w[0], ce.w[1], ce.w[2], ce.ccc)
	return uint32(v), e
}

// For contractions, collation elements are of the form
// 110bbbbb bbbbbbbb iiiiiiii iiiinnnn, where
//   - n* is the size of the first node in the contraction trie.
//   - i* is the index of the first node in the contraction trie.
//   - b* is the offset into the contraction collation element table.
// See contract.go for details on the contraction trie.
const (
	contractID            = 0xC0000000
	maxNBits              = 4
	maxTrieIndexBits      = 12
	maxContractOffsetBits = 13
)

func makeContractIndex(h ctHandle, offset int) (uint32, error) {
	if h.n >= 1<<maxNBits {
		return 0, fmt.Errorf("size of contraction trie node too large: %d >= %d", h.n, 1<<maxNBits)
	}
	if h.index >= 1<<maxTrieIndexBits {
		return 0, fmt.Errorf("size of contraction trie offset too large: %d >= %d", h.index, 1<<maxTrieIndexBits)
	}
	if offset >= 1<<maxContractOffsetBits {
		return 0, fmt.Errorf("contraction offset out of bounds: %x >= %x", offset, 1<<maxContractOffsetBits)
	}
	ce := uint32(contractID)
	ce += uint32(offset << (maxNBits + maxTrieIndexBits))
	ce += uint32(h.index << maxNBits)
	ce += uint32(h.n)
	return ce, nil
}

// For expansions, collation elements are of the form
// 11100000 00000000 bbbbbbbb bbbbbbbb,
// where b* is the index into the expansion sequence table.
const (
	expandID           = 0xE0000000
	maxExpandIndexBits = 16
)

func makeExpandIndex(index int) (uint32, error) {
	if index >= 1<<maxExpandIndexBits {
		return 0, fmt.Errorf("expansion index out of bounds: %x >= %x", index, 1<<maxExpandIndexBits)
	}
	return expandID + uint32(index), nil
}

// Each list of collation elements corresponding to an expansion starts with
// a header indicating the length of the sequence.
func makeExpansionHeader(n int) (uint32, error) {
	return uint32(n), nil
}

// Some runes can be expanded using NFKD decomposition. Instead of storing the full
// sequence of collation elements, we decompose the rune and lookup the collation
// elements for each rune in the decomposition and modify the tertiary weights.
// The collation element, in this case, is of the form
// 11110000 00000000 wwwwwwww vvvvvvvv, where
//   - v* is the replacement tertiary weight for the first rune,
//   - w* is the replacement tertiary weight for the second rune.
// Tertiary weights of subsequent runes should be replaced with maxTertiary.
// See http://www.unicode.org/reports/tr10/#Compatibility_Decompositions for more details.
const (
	decompID = 0xF0000000
)

func makeDecompose(t1, t2 int) (uint32, error) {
	if t1 >= 256 || t1 < 0 {
		return 0, fmt.Errorf("first tertiary weight out of bounds: %d >= 256", t1)
	}
	if t2 >= 256 || t2 < 0 {
		return 0, fmt.Errorf("second tertiary weight out of bounds: %d >= 256", t2)
	}
	return uint32(t2<<8+t1) + decompID, nil
}

const (
	// These constants were taken from http://www.unicode.org/versions/Unicode6.0.0/ch12.pdf.
	minUnified       rune = 0x4E00
	maxUnified            = 0x9FFF
	minCompatibility      = 0xF900
	maxCompatibility      = 0xFAFF
	minRare               = 0x3400
	maxRare               = 0x4DBF
)

const (
	commonUnifiedOffset = 0x10000
	rareUnifiedOffset   = 0x20000 // largest rune in common is U+FAFF
	otherOffset         = 0x50000 // largest rune in rare is U+2FA1D
	illegalOffset       = otherOffset + int(unicode.MaxRune)
	maxPrimary          = illegalOffset + 1
)

// implicitPrimary returns the primary weight for a rune
// for which there is no entry in the collation table.
// We take a different approach from the one specified in
// http://unicode.org/reports/tr10/#Implicit_Weights,
// but preserve the resulting relative ordering of the runes.
func implicitPrimary(r rune) int {
	if unicode.Is(unicode.Ideographic, r) {
		if r >= minUnified && r <= maxUnified {
			// The most common case for CJK.
			return int(r) + commonUnifiedOffset
		}
		if r >= minCompatibility && r <= maxCompatibility {
			// This will typically not hit. The DUCET explicitly specifies mappings
			// for all characters that do not decompose.
			return int(r) + commonUnifiedOffset
		}
		return int(r) + rareUnifiedOffset
	}
	return int(r) + otherOffset
}

// convertLargeWeights converts collation elements with large
// primaries (either double primaries or for illegal runes)
// to our own representation.
// A CJK character C is represented in the DUCET as
//   [.FBxx.0020.0002.C][.BBBB.0000.0000.C]
// We will rewrite these characters to a single CE.
// We assume the CJK values start at 0x8000.
// See http://unicode.org/reports/tr10/#Implicit_Weights
func convertLargeWeights(elems []rawCE) (res []rawCE, err error) {
	const (
		cjkPrimaryStart   = 0xFB40
		rarePrimaryStart  = 0xFB80
		otherPrimaryStart = 0xFBC0
		illegalPrimary    = 0xFFFE
		highBitsMask      = 0x3F
		lowBitsMask       = 0x7FFF
		lowBitsFlag       = 0x8000
		shiftBits         = 15
	)
	for i := 0; i < len(elems); i++ {
		ce := elems[i].w
		p := ce[0]
		if p < cjkPrimaryStart {
			continue
		}
		if p > 0xFFFF {
			return elems, fmt.Errorf("found primary weight %X; should be <= 0xFFFF", p)
		}
		if p >= illegalPrimary {
			ce[0] = illegalOffset + p - illegalPrimary
		} else {
			if i+1 >= len(elems) {
				return elems, fmt.Errorf("second part of double primary weight missing: %v", elems)
			}
			if elems[i+1].w[0]&lowBitsFlag == 0 {
				return elems, fmt.Errorf("malformed second part of double primary weight: %v", elems)
			}
			np := ((p & highBitsMask) << shiftBits) + elems[i+1].w[0]&lowBitsMask
			switch {
			case p < rarePrimaryStart:
				np += commonUnifiedOffset
			case p < otherPrimaryStart:
				np += rareUnifiedOffset
			default:
				p += otherOffset
			}
			ce[0] = np
			for j := i + 1; j+1 < len(elems); j++ {
				elems[j] = elems[j+1]
			}
			elems = elems[:len(elems)-1]
		}
	}
	return elems, nil
}

// nextWeight computes the first possible collation weights following elems
// for the given level.
func nextWeight(level colltab.Level, elems []rawCE) []rawCE {
	if level == colltab.Identity {
		next := make([]rawCE, len(elems))
		copy(next, elems)
		return next
	}
	next := []rawCE{makeRawCE(elems[0].w, elems[0].ccc)}
	next[0].w[level]++
	if level < colltab.Secondary {
		next[0].w[colltab.Secondary] = defaultSecondary
	}
	if level < colltab.Tertiary {
		next[0].w[colltab.Tertiary] = defaultTertiary
	}
	// Filter entries that cannot influence ordering.
	for _, ce := range elems[1:] {
		skip := true
		for i := colltab.Primary; i < level; i++ {
			skip = skip && ce.w[i] == 0
		}
		if !skip {
			next = append(next, ce)
		}
	}
	return next
}

func nextVal(elems []rawCE, i int, level colltab.Level) (index, value int) {
	for ; i < len(elems) && elems[i].w[level] == 0; i++ {
	}
	if i < len(elems) {
		return i, elems[i].w[level]
	}
	return i, 0
}

// compareWeights returns -1 if a < b, 1 if a > b, or 0 otherwise.
// It also returns the collation level at which the difference is found.
func compareWeights(a, b []rawCE) (result int, level colltab.Level) {
	for level := colltab.Primary; level < colltab.Identity; level++ {
		var va, vb int
		for ia, ib := 0, 0; ia < len(a) || ib < len(b); ia, ib = ia+1, ib+1 {
			ia, va = nextVal(a, ia, level)
			ib, vb = nextVal(b, ib, level)
			if va != vb {
				if va < vb {
					return -1, level
				} else {
					return 1, level
				}
			}
		}
	}
	return 0, colltab.Identity
}

func equalCE(a, b rawCE) bool {
	for i := 0; i < 3; i++ {
		if b.w[i] != a.w[i] {
			return false
		}
	}
	return true
}

func equalCEArrays(a, b []rawCE) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if !equalCE(a[i], b[i]) {
			return false
		}
	}
	return true
}
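
A quick worked example of implicitPrimary's ranges, as a sketch assuming it is called from within this package (fmt is already imported there): U+4E2D (中) lies in the common unified CJK block, so its implicit weight is simply the rune value shifted by commonUnifiedOffset.

// Sketch (same package): verify the common-CJK branch of implicitPrimary.
func exampleImplicitPrimary() {
	p := implicitPrimary(0x4E2D) // U+4E2D is in [minUnified, maxUnified]
	fmt.Printf("%X\n", p)        // 14E2D == 0x4E2D + commonUnifiedOffset (0x10000)
}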
215
vendor/golang.org/x/text/collate/build/colelem_test.go
generated
vendored
Normal file
@ -0,0 +1,215 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package build

import (
	"testing"

	"golang.org/x/text/internal/colltab"
)

type ceTest struct {
	f   func(in []int) (uint32, error)
	arg []int
	val uint32
}

func normalCE(in []int) (ce uint32, err error) {
	return makeCE(rawCE{w: in[:3], ccc: uint8(in[3])})
}

func expandCE(in []int) (ce uint32, err error) {
	return makeExpandIndex(in[0])
}

func contractCE(in []int) (ce uint32, err error) {
	return makeContractIndex(ctHandle{in[0], in[1]}, in[2])
}

func decompCE(in []int) (ce uint32, err error) {
	return makeDecompose(in[0], in[1])
}

var ceTests = []ceTest{
	{normalCE, []int{0, 0, 0, 0}, 0xA0000000},
	{normalCE, []int{0, 0x28, 3, 0}, 0xA0002803},
	{normalCE, []int{0, 0x28, 3, 0xFF}, 0xAFF02803},
	{normalCE, []int{100, defaultSecondary, 3, 0}, 0x0000C883},
	// non-ignorable primary with non-default secondary
	{normalCE, []int{100, 0x28, defaultTertiary, 0}, 0x4000C828},
	{normalCE, []int{100, defaultSecondary + 8, 3, 0}, 0x0000C983},
	{normalCE, []int{100, 0, 3, 0}, 0xFFFF}, // non-ignorable primary with non-supported secondary
	{normalCE, []int{100, 1, 3, 0}, 0xFFFF},
	{normalCE, []int{1 << maxPrimaryBits, defaultSecondary, 0, 0}, 0xFFFF},
	{normalCE, []int{0, 1 << maxSecondaryBits, 0, 0}, 0xFFFF},
	{normalCE, []int{100, defaultSecondary, 1 << maxTertiaryBits, 0}, 0xFFFF},
	{normalCE, []int{0x123, defaultSecondary, 8, 0xFF}, 0x88FF0123},
	{normalCE, []int{0x123, defaultSecondary + 1, 8, 0xFF}, 0xFFFF},

	{contractCE, []int{0, 0, 0}, 0xC0000000},
	{contractCE, []int{1, 1, 1}, 0xC0010011},
	{contractCE, []int{1, (1 << maxNBits) - 1, 1}, 0xC001001F},
	{contractCE, []int{(1 << maxTrieIndexBits) - 1, 1, 1}, 0xC001FFF1},
	{contractCE, []int{1, 1, (1 << maxContractOffsetBits) - 1}, 0xDFFF0011},
	{contractCE, []int{1, (1 << maxNBits), 1}, 0xFFFF},
	{contractCE, []int{(1 << maxTrieIndexBits), 1, 1}, 0xFFFF},
	{contractCE, []int{1, (1 << maxContractOffsetBits), 1}, 0xFFFF},

	{expandCE, []int{0}, 0xE0000000},
	{expandCE, []int{5}, 0xE0000005},
	{expandCE, []int{(1 << maxExpandIndexBits) - 1}, 0xE000FFFF},
	{expandCE, []int{1 << maxExpandIndexBits}, 0xFFFF},

	{decompCE, []int{0, 0}, 0xF0000000},
	{decompCE, []int{1, 1}, 0xF0000101},
	{decompCE, []int{0x1F, 0x1F}, 0xF0001F1F},
	{decompCE, []int{256, 0x1F}, 0xFFFF},
	{decompCE, []int{0x1F, 256}, 0xFFFF},
}

func TestColElem(t *testing.T) {
	for i, tt := range ceTests {
		in := make([]int, len(tt.arg))
		copy(in, tt.arg)
		ce, err := tt.f(in)
		if tt.val == 0xFFFF {
			if err == nil {
				t.Errorf("%d: expected error for args %x", i, tt.arg)
			}
			continue
		}
		if err != nil {
			t.Errorf("%d: unexpected error: %v", i, err.Error())
		}
		if ce != tt.val {
			t.Errorf("%d: colElem=%X; want %X", i, ce, tt.val)
		}
	}
}

func mkRawCES(in [][]int) []rawCE {
	out := []rawCE{}
	for _, w := range in {
		out = append(out, rawCE{w: w})
	}
	return out
}

type weightsTest struct {
	a, b   [][]int
	level  colltab.Level
	result int
}

var nextWeightTests = []weightsTest{
	{
		a:     [][]int{{100, 20, 5, 0}},
		b:     [][]int{{101, defaultSecondary, defaultTertiary, 0}},
		level: colltab.Primary,
	},
	{
		a:     [][]int{{100, 20, 5, 0}},
		b:     [][]int{{100, 21, defaultTertiary, 0}},
		level: colltab.Secondary,
	},
	{
		a:     [][]int{{100, 20, 5, 0}},
		b:     [][]int{{100, 20, 6, 0}},
		level: colltab.Tertiary,
	},
	{
		a:     [][]int{{100, 20, 5, 0}},
		b:     [][]int{{100, 20, 5, 0}},
		level: colltab.Identity,
	},
}

var extra = [][]int{{200, 32, 8, 0}, {0, 32, 8, 0}, {0, 0, 8, 0}, {0, 0, 0, 0}}

func TestNextWeight(t *testing.T) {
	for i, tt := range nextWeightTests {
		test := func(l colltab.Level, tt weightsTest, a, gold [][]int) {
			res := nextWeight(tt.level, mkRawCES(a))
			if !equalCEArrays(mkRawCES(gold), res) {
				t.Errorf("%d:%d: expected weights %d; found %d", i, l, gold, res)
			}
		}
		test(-1, tt, tt.a, tt.b)
		for l := colltab.Primary; l <= colltab.Tertiary; l++ {
			if tt.level <= l {
				test(l, tt, append(tt.a, extra[l]), tt.b)
			} else {
				test(l, tt, append(tt.a, extra[l]), append(tt.b, extra[l]))
			}
		}
	}
}

var compareTests = []weightsTest{
	{
		[][]int{{100, 20, 5, 0}},
		[][]int{{100, 20, 5, 0}},
		colltab.Identity,
		0,
	},
	{
		[][]int{{100, 20, 5, 0}, extra[0]},
		[][]int{{100, 20, 5, 1}},
		colltab.Primary,
		1,
	},
	{
		[][]int{{100, 20, 5, 0}},
		[][]int{{101, 20, 5, 0}},
		colltab.Primary,
		-1,
	},
	{
		[][]int{{101, 20, 5, 0}},
		[][]int{{100, 20, 5, 0}},
		colltab.Primary,
		1,
	},
	{
		[][]int{{100, 0, 0, 0}, {0, 20, 5, 0}},
		[][]int{{0, 20, 5, 0}, {100, 0, 0, 0}},
		colltab.Identity,
		0,
	},
	{
		[][]int{{100, 20, 5, 0}},
		[][]int{{100, 21, 5, 0}},
		colltab.Secondary,
		-1,
	},
	{
		[][]int{{100, 20, 5, 0}},
		[][]int{{100, 20, 2, 0}},
		colltab.Tertiary,
		1,
	},
	{
		[][]int{{100, 20, 5, 1}},
		[][]int{{100, 20, 5, 2}},
		colltab.Quaternary,
		-1,
	},
}

func TestCompareWeights(t *testing.T) {
	for i, tt := range compareTests {
		test := func(tt weightsTest, a, b [][]int) {
			res, level := compareWeights(mkRawCES(a), mkRawCES(b))
			if res != tt.result {
				t.Errorf("%d: expected comparison result %d; found %d", i, tt.result, res)
			}
			if level != tt.level {
				t.Errorf("%d: expected level %d; found %d", i, tt.level, level)
			}
		}
		test(tt, tt.a, tt.b)
		test(tt, append(tt.a, extra[0]), append(tt.b, extra[0]))
	}
}
309
vendor/golang.org/x/text/collate/build/contract.go
generated
vendored
Normal file
|
@ -0,0 +1,309 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package build

import (
	"fmt"
	"io"
	"reflect"
	"sort"
	"strings"

	"golang.org/x/text/internal/colltab"
)

// This file contains code for detecting contractions and generating
// the necessary tables.
// Any Unicode Collation Algorithm (UCA) table entry that has more than
// one rune on the left-hand side is called a contraction.
// See http://www.unicode.org/reports/tr10/#Contractions for more details.
//
// We define the following terms:
//   initial:     a rune that appears as the first rune in a contraction.
//   suffix:      a sequence of runes succeeding the initial rune
//                in a given contraction.
//   non-initial: a rune that appears in a suffix.
//
// A rune may be both an initial and a non-initial and may be so in
// many contractions. An initial may typically also appear by itself.
// In case of ambiguities, the UCA requires we match the longest
// contraction.
//
// Many contraction rules share the same set of possible suffixes.
// We store sets of suffixes in a trie that associates an index with
// each suffix in the set. This index can be used to look up a
// collation element associated with the (starter rune, suffix) pair.
//
// The trie is defined on a UTF-8 byte sequence.
// The overall trie is represented as an array of ctEntries. Each node of the trie
// is represented as a subsequence of ctEntries, where each entry corresponds to
// a possible match of a next character in the search string. An entry
// also includes the length and offset to the next sequence of entries
// to check in case of a match.

const (
	final   = 0
	noIndex = 0xFF
)

// ctEntry associates to a matching byte an offset and/or next sequence of
// bytes to check. A ctEntry c is called final if a match means that the
// longest suffix has been found. An entry c is final if c.N == 0.
// A single final entry can match a range of characters to an offset.
// A non-final entry always matches a single byte. Note that a non-final
// entry might still resemble a completed suffix.
// Examples:
// The suffix strings "ab" and "ac" can be represented as:
// []ctEntry{
//     {'a', 1, 1, noIndex}, // 'a' by itself does not match, so i is 0xFF.
//     {'b', 'c', 0, 1},     // "ab" -> 1, "ac" -> 2
// }
//
// The suffix strings "ab", "abc", "abd", and "abcd" can be represented as:
// []ctEntry{
//     {'a', 1, 1, noIndex}, // 'a' must be followed by 'b'.
//     {'b', 1, 2, 1},       // "ab" -> 1, may be followed by 'c' or 'd'.
//     {'d', 'd', final, 3}, // "abd" -> 3
//     {'c', 4, 1, 2},       // "abc" -> 2, may be followed by 'd'.
//     {'d', 'd', final, 4}, // "abcd" -> 4
// }
// See genStateTests in contract_test.go for more examples.
type ctEntry struct {
	L uint8 // non-final: byte value to match; final: lowest match in range.
	H uint8 // non-final: relative index to next block; final: highest match in range.
	N uint8 // non-final: length of next block; final: final
	I uint8 // result offset. Will be noIndex if more bytes are needed to complete.
}

// contractTrieSet holds a set of contraction tries. The tries are stored
// consecutively in the entry field.
type contractTrieSet []struct{ l, h, n, i uint8 }

// ctHandle is used to identify a trie in the trie set, consisting of an offset
// in the array and the size of the first node.
type ctHandle struct {
	index, n int
}

// appendTrie adds a new trie for the given suffixes to the trie set and returns
// a handle to it. The handle will be invalid on error.
func appendTrie(ct *colltab.ContractTrieSet, suffixes []string) (ctHandle, error) {
	es := make([]stridx, len(suffixes))
	for i, s := range suffixes {
		es[i].str = s
	}
	sort.Sort(offsetSort(es))
	for i := range es {
		es[i].index = i + 1
	}
	sort.Sort(genidxSort(es))
	i := len(*ct)
	n, err := genStates(ct, es)
	if err != nil {
		*ct = (*ct)[:i]
		return ctHandle{}, err
	}
	return ctHandle{i, n}, nil
}

// genStates generates ctEntries for a given suffix set and returns
// the number of entries for the first node.
func genStates(ct *colltab.ContractTrieSet, sis []stridx) (int, error) {
	if len(sis) == 0 {
		return 0, fmt.Errorf("genStates: list of suffices must be non-empty")
	}
	start := len(*ct)
	// create entries for differing first bytes.
	for _, si := range sis {
		s := si.str
		if len(s) == 0 {
			continue
		}
		added := false
		c := s[0]
		if len(s) > 1 {
			for j := len(*ct) - 1; j >= start; j-- {
				if (*ct)[j].L == c {
					added = true
					break
				}
			}
			if !added {
				*ct = append(*ct, ctEntry{L: c, I: noIndex})
			}
		} else {
			for j := len(*ct) - 1; j >= start; j-- {
				// Update the offset for longer suffixes with the same byte.
				if (*ct)[j].L == c {
					(*ct)[j].I = uint8(si.index)
					added = true
				}
				// Extend range of final ctEntry, if possible.
				if (*ct)[j].H+1 == c {
					(*ct)[j].H = c
					added = true
				}
			}
			if !added {
				*ct = append(*ct, ctEntry{L: c, H: c, N: final, I: uint8(si.index)})
			}
		}
	}
	n := len(*ct) - start
	// Append nodes for the remainder of the suffixes for each ctEntry.
	sp := 0
	for i, end := start, len(*ct); i < end; i++ {
		fe := (*ct)[i]
		if fe.H == 0 { // uninitialized non-final
			ln := len(*ct) - start - n
			if ln > 0xFF {
				return 0, fmt.Errorf("genStates: relative block offset too large: %d > 255", ln)
			}
			fe.H = uint8(ln)
			// Find first non-final strings with same byte as current entry.
			for ; sis[sp].str[0] != fe.L; sp++ {
			}
			se := sp + 1
			for ; se < len(sis) && len(sis[se].str) > 1 && sis[se].str[0] == fe.L; se++ {
			}
			sl := sis[sp:se]
			sp = se
			for i, si := range sl {
				sl[i].str = si.str[1:]
			}
			nn, err := genStates(ct, sl)
			if err != nil {
				return 0, err
			}
			fe.N = uint8(nn)
			(*ct)[i] = fe
		}
	}
	sort.Sort(entrySort((*ct)[start : start+n]))
	return n, nil
}

// There may be both a final and non-final entry for a byte if the byte
// is implied in a range of matches in the final entry.
// We need to ensure that the non-final entry comes first in that case.
type entrySort colltab.ContractTrieSet

func (fe entrySort) Len() int      { return len(fe) }
func (fe entrySort) Swap(i, j int) { fe[i], fe[j] = fe[j], fe[i] }
func (fe entrySort) Less(i, j int) bool {
	return fe[i].L > fe[j].L
}

// stridx is used for sorting suffixes and their associated offsets.
type stridx struct {
	str   string
	index int
}

// For computing the offsets, we first sort by size, and then by string.
// This ensures that strings that only differ in the last byte by 1
// are sorted consecutively in increasing order such that they can
// be packed as a range in a final ctEntry.
type offsetSort []stridx

func (si offsetSort) Len() int      { return len(si) }
func (si offsetSort) Swap(i, j int) { si[i], si[j] = si[j], si[i] }
func (si offsetSort) Less(i, j int) bool {
	if len(si[i].str) != len(si[j].str) {
		return len(si[i].str) > len(si[j].str)
	}
	return si[i].str < si[j].str
}

// For indexing, we want to ensure that strings are sorted in string order, where
// for strings with the same prefix, we put longer strings before shorter ones.
type genidxSort []stridx

func (si genidxSort) Len() int      { return len(si) }
func (si genidxSort) Swap(i, j int) { si[i], si[j] = si[j], si[i] }
func (si genidxSort) Less(i, j int) bool {
	if strings.HasPrefix(si[j].str, si[i].str) {
		return false
	}
	if strings.HasPrefix(si[i].str, si[j].str) {
		return true
	}
	return si[i].str < si[j].str
}

// lookup matches the longest suffix in str and returns the associated offset
// and the number of bytes consumed.
func lookup(ct *colltab.ContractTrieSet, h ctHandle, str []byte) (index, ns int) {
	states := (*ct)[h.index:]
	p := 0
	n := h.n
	for i := 0; i < n && p < len(str); {
		e := states[i]
		c := str[p]
		if c >= e.L {
			if e.L == c {
				p++
				if e.I != noIndex {
					index, ns = int(e.I), p
				}
				if e.N != final {
					// set to new state
					i, states, n = 0, states[int(e.H)+n:], int(e.N)
				} else {
					return
				}
				continue
			} else if e.N == final && c <= e.H {
				p++
				return int(c-e.L) + int(e.I), p
			}
		}
		i++
	}
	return
}

// print writes the contractTrieSet t as compilable Go code to w. It returns
// the total number of bytes written and the size of the resulting data structure in bytes.
func print(t *colltab.ContractTrieSet, w io.Writer, name string) (n, size int, err error) {
	update3 := func(nn, sz int, e error) {
		n += nn
		if err == nil {
			err = e
		}
		size += sz
	}
	update2 := func(nn int, e error) { update3(nn, 0, e) }

	update3(printArray(*t, w, name))
	update2(fmt.Fprintf(w, "var %sContractTrieSet = ", name))
	update3(printStruct(*t, w, name))
	update2(fmt.Fprintln(w))
	return
}

func printArray(ct colltab.ContractTrieSet, w io.Writer, name string) (n, size int, err error) {
	p := func(f string, a ...interface{}) {
		nn, e := fmt.Fprintf(w, f, a...)
		n += nn
		if err == nil {
			err = e
		}
	}
	size = len(ct) * 4
	p("// %sCTEntries: %d entries, %d bytes\n", name, len(ct), size)
	p("var %sCTEntries = [%d]struct{L,H,N,I uint8}{\n", name, len(ct))
	for _, fe := range ct {
		p("\t{0x%X, 0x%X, %d, %d},\n", fe.L, fe.H, fe.N, fe.I)
	}
	p("}\n")
	return
}

func printStruct(ct colltab.ContractTrieSet, w io.Writer, name string) (n, size int, err error) {
	n, err = fmt.Fprintf(w, "colltab.ContractTrieSet( %sCTEntries[:] )", name)
	size = int(reflect.TypeOf(ct).Size())
	return
}
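The encoding above is easiest to see in motion. Below is a small standalone program (not part of this vendor drop; the type and walk function are local reimplementations) that replays the lookup algorithm on a hand-built two-entry trie for the suffixes "ab" and "ac". Note it uses H=0 for an adjacent next block, as genStates actually emits in the tests, rather than the illustrative values in the doc comment.

package main

import "fmt"

// ctEntry mirrors the layout from contract.go: L is the low byte, H is the
// high byte of a range (final) or the relative next-block offset (non-final),
// N is the next-block length or final, I is the result offset.
type ctEntry struct{ L, H, N, I uint8 }

const (
	final   = 0
	noIndex = 0xFF
)

// walk matches the longest suffix of s, following the same final/non-final
// rules as lookup in contract.go.
func walk(states []ctEntry, n int, s []byte) (index, consumed int) {
	p := 0
	for i := 0; i < n && p < len(s); {
		e := states[i]
		c := s[p]
		if c >= e.L {
			if e.L == c {
				p++
				if e.I != noIndex {
					index, consumed = int(e.I), p
				}
				if e.N == final {
					return
				}
				// Jump to the next block of e.N entries, e.H entries past
				// the end of the current block.
				i, states, n = 0, states[int(e.H)+n:], int(e.N)
				continue
			} else if e.N == final && c <= e.H {
				p++
				return int(c-e.L) + int(e.I), p
			}
		}
		i++
	}
	return
}

func main() {
	// 'a' alone matches nothing (I is noIndex); the next block is a single
	// final range entry mapping 'b'..'c' to offsets 1 and 2.
	trie := []ctEntry{
		{'a', 0, 1, noIndex},
		{'b', 'c', final, 1},
	}
	for _, in := range []string{"ab", "ac", "aX"} {
		idx, ns := walk(trie, 1, []byte(in))
		fmt.Printf("%q -> index %d, %d bytes consumed\n", in, idx, ns)
	}
}

Running it prints index 1 for "ab", index 2 for "ac", and index 0 (no match) for "aX", matching the behavior exercised by TestLookupContraction below.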
266
vendor/golang.org/x/text/collate/build/contract_test.go
generated
vendored
Normal file
@@ -0,0 +1,266 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package build

import (
	"bytes"
	"sort"
	"testing"

	"golang.org/x/text/internal/colltab"
)

var largetosmall = []stridx{
	{"a", 5},
	{"ab", 4},
	{"abc", 3},
	{"abcd", 2},
	{"abcde", 1},
	{"abcdef", 0},
}

var offsetSortTests = [][]stridx{
	{
		{"bcde", 1},
		{"bc", 5},
		{"ab", 4},
		{"bcd", 3},
		{"abcd", 0},
		{"abc", 2},
	},
	largetosmall,
}

func TestOffsetSort(t *testing.T) {
	for i, st := range offsetSortTests {
		sort.Sort(offsetSort(st))
		for j, si := range st {
			if j != si.index {
				t.Errorf("%d: failed: %v", i, st)
			}
		}
	}
	for i, tt := range genStateTests {
		// ensure input is well-formed
		sort.Sort(offsetSort(tt.in))
		for j, si := range tt.in {
			if si.index != j+1 {
				t.Errorf("%dth sort failed: %v", i, tt.in)
			}
		}
	}
}

var genidxtest1 = []stridx{
	{"bcde", 3},
	{"bc", 6},
	{"ab", 2},
	{"bcd", 5},
	{"abcd", 0},
	{"abc", 1},
	{"bcdf", 4},
}

var genidxSortTests = [][]stridx{
	genidxtest1,
	largetosmall,
}

func TestGenIdxSort(t *testing.T) {
	for i, st := range genidxSortTests {
		sort.Sort(genidxSort(st))
		for j, si := range st {
			if j != si.index {
				t.Errorf("%dth sort failed %v", i, st)
				break
			}
		}
	}
}

var entrySortTests = []colltab.ContractTrieSet{
	{
		{10, 0, 1, 3},
		{99, 0, 1, 0},
		{20, 50, 0, 2},
		{30, 0, 1, 1},
	},
}

func TestEntrySort(t *testing.T) {
	for i, et := range entrySortTests {
		sort.Sort(entrySort(et))
		for j, fe := range et {
			if j != int(fe.I) {
				t.Errorf("%dth sort failed %v", i, et)
				break
			}
		}
	}
}

type GenStateTest struct {
	in            []stridx
	firstBlockLen int
	out           colltab.ContractTrieSet
}

var genStateTests = []GenStateTest{
	{[]stridx{
		{"abc", 1},
	},
		1,
		colltab.ContractTrieSet{
			{'a', 0, 1, noIndex},
			{'b', 0, 1, noIndex},
			{'c', 'c', final, 1},
		},
	},
	{[]stridx{
		{"abc", 1},
		{"abd", 2},
		{"abe", 3},
	},
		1,
		colltab.ContractTrieSet{
			{'a', 0, 1, noIndex},
			{'b', 0, 1, noIndex},
			{'c', 'e', final, 1},
		},
	},
	{[]stridx{
		{"abc", 1},
		{"ab", 2},
		{"a", 3},
	},
		1,
		colltab.ContractTrieSet{
			{'a', 0, 1, 3},
			{'b', 0, 1, 2},
			{'c', 'c', final, 1},
		},
	},
	{[]stridx{
		{"abc", 1},
		{"abd", 2},
		{"ab", 3},
		{"ac", 4},
		{"a", 5},
		{"b", 6},
	},
		2,
		colltab.ContractTrieSet{
			{'b', 'b', final, 6},
			{'a', 0, 2, 5},
			{'c', 'c', final, 4},
			{'b', 0, 1, 3},
			{'c', 'd', final, 1},
		},
	},
	{[]stridx{
		{"bcde", 2},
		{"bc", 7},
		{"ab", 6},
		{"bcd", 5},
		{"abcd", 1},
		{"abc", 4},
		{"bcdf", 3},
	},
		2,
		colltab.ContractTrieSet{
			{'b', 3, 1, noIndex},
			{'a', 0, 1, noIndex},
			{'b', 0, 1, 6},
			{'c', 0, 1, 4},
			{'d', 'd', final, 1},
			{'c', 0, 1, 7},
			{'d', 0, 1, 5},
			{'e', 'f', final, 2},
		},
	},
}

func TestGenStates(t *testing.T) {
	for i, tt := range genStateTests {
		si := []stridx{}
		for _, e := range tt.in {
			si = append(si, e)
		}
		// ensure input is well-formed
		sort.Sort(genidxSort(si))
		ct := colltab.ContractTrieSet{}
		n, _ := genStates(&ct, si)
		if nn := tt.firstBlockLen; nn != n {
			t.Errorf("%d: block len %v; want %v", i, n, nn)
		}
		if lv, lw := len(ct), len(tt.out); lv != lw {
			t.Errorf("%d: len %v; want %v", i, lv, lw)
			continue
		}
		for j, fe := range tt.out {
			const msg = "%d:%d: value %s=%v; want %v"
			if fe.L != ct[j].L {
				t.Errorf(msg, i, j, "l", ct[j].L, fe.L)
			}
			if fe.H != ct[j].H {
				t.Errorf(msg, i, j, "h", ct[j].H, fe.H)
			}
			if fe.N != ct[j].N {
				t.Errorf(msg, i, j, "n", ct[j].N, fe.N)
			}
			if fe.I != ct[j].I {
				t.Errorf(msg, i, j, "i", ct[j].I, fe.I)
			}
		}
	}
}

func TestLookupContraction(t *testing.T) {
	for i, tt := range genStateTests {
		input := []string{}
		for _, e := range tt.in {
			input = append(input, e.str)
		}
		cts := colltab.ContractTrieSet{}
		h, _ := appendTrie(&cts, input)
		for j, si := range tt.in {
			str := si.str
			for _, s := range []string{str, str + "X"} {
				msg := "%d:%d: %s(%s) %v; want %v"
				idx, sn := lookup(&cts, h, []byte(s))
				if idx != si.index {
					t.Errorf(msg, i, j, "index", s, idx, si.index)
				}
				if sn != len(str) {
					t.Errorf(msg, i, j, "sn", s, sn, len(str))
				}
			}
		}
	}
}

func TestPrintContractionTrieSet(t *testing.T) {
	testdata := colltab.ContractTrieSet(genStateTests[4].out)
	buf := &bytes.Buffer{}
	print(&testdata, buf, "test")
	if contractTrieOutput != buf.String() {
		t.Errorf("output differs; found\n%s", buf.String())
		println(string(buf.Bytes()))
	}
}

const contractTrieOutput = `// testCTEntries: 8 entries, 32 bytes
var testCTEntries = [8]struct{L,H,N,I uint8}{
	{0x62, 0x3, 1, 255},
	{0x61, 0x0, 1, 255},
	{0x62, 0x0, 1, 6},
	{0x63, 0x0, 1, 4},
	{0x64, 0x64, 0, 1},
	{0x63, 0x0, 1, 7},
	{0x64, 0x0, 1, 5},
	{0x65, 0x66, 0, 2},
}
var testContractTrieSet = colltab.ContractTrieSet( testCTEntries[:] )
`
393
vendor/golang.org/x/text/collate/build/order.go
generated
vendored
Normal file
@@ -0,0 +1,393 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package build

import (
	"fmt"
	"log"
	"sort"
	"strings"
	"unicode"

	"golang.org/x/text/internal/colltab"
	"golang.org/x/text/unicode/norm"
)

type logicalAnchor int

const (
	firstAnchor logicalAnchor = -1
	noAnchor                  = 0
	lastAnchor                = 1
)

// entry is used to keep track of a single entry in the collation element table
// during building. Examples of entries can be found in the Default Unicode
// Collation Element Table.
// See http://www.unicode.org/Public/UCA/6.0.0/allkeys.txt.
type entry struct {
	str    string // same as string(runes)
	runes  []rune
	elems  []rawCE // the collation elements
	extend string  // weights of extend to be appended to elems
	before bool    // weights relative to next instead of previous.
	lock   bool    // entry is used in extension and can no longer be moved.

	// prev, next, and level are used to keep track of tailorings.
	prev, next *entry
	level      colltab.Level // next differs at this level
	skipRemove bool          // do not unlink when removed

	decompose bool // can use NFKD decomposition to generate elems
	exclude   bool // do not include in table
	implicit  bool // derived, is not included in the list
	modified  bool // entry was modified in tailoring
	logical   logicalAnchor

	expansionIndex    int // used to store index into expansion table
	contractionHandle ctHandle
	contractionIndex  int // index into contraction elements
}

func (e *entry) String() string {
	return fmt.Sprintf("%X (%q) -> %X (ch:%x; ci:%d, ei:%d)",
		e.runes, e.str, e.elems, e.contractionHandle, e.contractionIndex, e.expansionIndex)
}

func (e *entry) skip() bool {
	return e.contraction()
}

func (e *entry) expansion() bool {
	return !e.decompose && len(e.elems) > 1
}

func (e *entry) contraction() bool {
	return len(e.runes) > 1
}

func (e *entry) contractionStarter() bool {
	return e.contractionHandle.n != 0
}

// nextIndexed gets the next entry that needs to be stored in the table.
// It returns the entry and the collation level at which the next entry differs
// from the current entry.
// Entries that can be explicitly derived and logical reset positions are
// examples of entries that will not be indexed.
func (e *entry) nextIndexed() (*entry, colltab.Level) {
	level := e.level
	for e = e.next; e != nil && (e.exclude || len(e.elems) == 0); e = e.next {
		if e.level < level {
			level = e.level
		}
	}
	return e, level
}

// remove unlinks entry e from the sorted chain and clears the collation
// elements. e may not be at the front or end of the list. This should always
// be the case, as the front and end of the list are always logical anchors,
// which may not be removed.
func (e *entry) remove() {
	if e.logical != noAnchor {
		log.Fatalf("may not remove anchor %q", e.str)
	}
	// TODO: need to set e.prev.level to e.level if e.level is smaller?
	e.elems = nil
	if !e.skipRemove {
		if e.prev != nil {
			e.prev.next = e.next
		}
		if e.next != nil {
			e.next.prev = e.prev
		}
	}
	e.skipRemove = false
}

// insertAfter inserts n after e.
func (e *entry) insertAfter(n *entry) {
	if e == n {
		panic("e == anchor")
	}
	if e == nil {
		panic("unexpected nil anchor")
	}
	n.remove()
	n.decompose = false // redo decomposition test

	n.next = e.next
	n.prev = e
	if e.next != nil {
		e.next.prev = n
	}
	e.next = n
}

// insertBefore inserts n before e.
func (e *entry) insertBefore(n *entry) {
	if e == n {
		panic("e == anchor")
	}
	if e == nil {
		panic("unexpected nil anchor")
	}
	n.remove()
	n.decompose = false // redo decomposition test

	n.prev = e.prev
	n.next = e
	if e.prev != nil {
		e.prev.next = n
	}
	e.prev = n
}

func (e *entry) encodeBase() (ce uint32, err error) {
	switch {
	case e.expansion():
		ce, err = makeExpandIndex(e.expansionIndex)
	default:
		if e.decompose {
			log.Fatal("decompose should be handled elsewhere")
		}
		ce, err = makeCE(e.elems[0])
	}
	return
}

func (e *entry) encode() (ce uint32, err error) {
	if e.skip() {
		log.Fatal("cannot build colElem for entry that should be skipped")
	}
	switch {
	case e.decompose:
		t1 := e.elems[0].w[2]
		t2 := 0
		if len(e.elems) > 1 {
			t2 = e.elems[1].w[2]
		}
		ce, err = makeDecompose(t1, t2)
	case e.contractionStarter():
		ce, err = makeContractIndex(e.contractionHandle, e.contractionIndex)
	default:
		if len(e.runes) > 1 {
			log.Fatal("colElem: contractions are handled in contraction trie")
		}
		ce, err = e.encodeBase()
	}
	return
}

// entryLess returns true if a sorts before b and false otherwise.
func entryLess(a, b *entry) bool {
	if res, _ := compareWeights(a.elems, b.elems); res != 0 {
		return res == -1
	}
	if a.logical != noAnchor {
		return a.logical == firstAnchor
	}
	if b.logical != noAnchor {
		return b.logical == lastAnchor
	}
	return a.str < b.str
}

type sortedEntries []*entry

func (s sortedEntries) Len() int {
	return len(s)
}

func (s sortedEntries) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s sortedEntries) Less(i, j int) bool {
	return entryLess(s[i], s[j])
}

type ordering struct {
	id       string
	entryMap map[string]*entry
	ordered  []*entry
	handle   *trieHandle
}

// insert inserts e into both entryMap and ordered.
// Note that insert simply appends e to ordered. To reattain a sorted
// order, o.sort() should be called.
func (o *ordering) insert(e *entry) {
	if e.logical == noAnchor {
		o.entryMap[e.str] = e
	} else {
		// Use key format as used in UCA rules.
		o.entryMap[fmt.Sprintf("[%s]", e.str)] = e
		// Also add index entry for XML format.
		o.entryMap[fmt.Sprintf("<%s/>", strings.Replace(e.str, " ", "_", -1))] = e
	}
	o.ordered = append(o.ordered, e)
}

// newEntry creates a new entry for the given info and inserts it into
// the index.
func (o *ordering) newEntry(s string, ces []rawCE) *entry {
	e := &entry{
		runes: []rune(s),
		elems: ces,
		str:   s,
	}
	o.insert(e)
	return e
}

// find looks up and returns the entry for the given string.
// It returns nil if str is not in the index and if an implicit value
// cannot be derived, that is, if str represents more than one rune.
func (o *ordering) find(str string) *entry {
	e := o.entryMap[str]
	if e == nil {
		r := []rune(str)
		if len(r) == 1 {
			const (
				firstHangul = 0xAC00
				lastHangul  = 0xD7A3
			)
			if r[0] >= firstHangul && r[0] <= lastHangul {
				ce := []rawCE{}
				nfd := norm.NFD.String(str)
				for _, r := range nfd {
					ce = append(ce, o.find(string(r)).elems...)
				}
				e = o.newEntry(nfd, ce)
			} else {
				e = o.newEntry(string(r[0]), []rawCE{
					{w: []int{
						implicitPrimary(r[0]),
						defaultSecondary,
						defaultTertiary,
						int(r[0]),
					},
					},
				})
				e.modified = true
			}
			e.exclude = true // do not index implicits
		}
	}
	return e
}

// makeRootOrdering returns a newly initialized ordering value and populates
// it with a set of logical reset points that can be used as anchors.
// The anchors first_tertiary_ignorable and __END__ will always sort at
// the beginning and end, respectively. This means that prev and next are non-nil
// for any indexed entry.
func makeRootOrdering() ordering {
	const max = unicode.MaxRune
	o := ordering{
		entryMap: make(map[string]*entry),
	}
	insert := func(typ logicalAnchor, s string, ce []int) {
		e := &entry{
			elems:   []rawCE{{w: ce}},
			str:     s,
			exclude: true,
			logical: typ,
		}
		o.insert(e)
	}
	insert(firstAnchor, "first tertiary ignorable", []int{0, 0, 0, 0})
	insert(lastAnchor, "last tertiary ignorable", []int{0, 0, 0, max})
	insert(lastAnchor, "last primary ignorable", []int{0, defaultSecondary, defaultTertiary, max})
	insert(lastAnchor, "last non ignorable", []int{maxPrimary, defaultSecondary, defaultTertiary, max})
	insert(lastAnchor, "__END__", []int{1 << maxPrimaryBits, defaultSecondary, defaultTertiary, max})
	return o
}

// patchForInsert eliminates entries from the list with more than one collation element.
// The next and prev fields of the eliminated entries still point to appropriate
// values in the newly created list.
// It requires that sort has been called.
func (o *ordering) patchForInsert() {
	for i := 0; i < len(o.ordered)-1; {
		e := o.ordered[i]
		lev := e.level
		n := e.next
		for ; n != nil && len(n.elems) > 1; n = n.next {
			if n.level < lev {
				lev = n.level
			}
			n.skipRemove = true
		}
		for ; o.ordered[i] != n; i++ {
			o.ordered[i].level = lev
			o.ordered[i].next = n
			o.ordered[i+1].prev = e
		}
	}
}

// clone copies all ordering of es into a new ordering value.
func (o *ordering) clone() *ordering {
	o.sort()
	oo := ordering{
		entryMap: make(map[string]*entry),
	}
	for _, e := range o.ordered {
		ne := &entry{
			runes:     e.runes,
			elems:     e.elems,
			str:       e.str,
			decompose: e.decompose,
			exclude:   e.exclude,
			logical:   e.logical,
		}
		oo.insert(ne)
	}
	oo.sort() // link all ordering.
	oo.patchForInsert()
	return &oo
}

// front returns the first entry to be indexed.
// It assumes that sort() has been called.
func (o *ordering) front() *entry {
	e := o.ordered[0]
	if e.prev != nil {
		log.Panicf("unexpected first entry: %v", e)
	}
	// The first entry is always a logical position, which should not be indexed.
	e, _ = e.nextIndexed()
	return e
}

// sort sorts all ordering based on their collation elements and initializes
// the prev, next, and level fields accordingly.
func (o *ordering) sort() {
	sort.Sort(sortedEntries(o.ordered))
	l := o.ordered
	for i := 1; i < len(l); i++ {
		k := i - 1
		l[k].next = l[i]
		_, l[k].level = compareWeights(l[k].elems, l[i].elems)
		l[i].prev = l[k]
	}
}

// genColElems generates a collation element array from the runes in str. This
// assumes that all collation elements have already been added to the Builder.
func (o *ordering) genColElems(str string) []rawCE {
	elems := []rawCE{}
	for _, r := range []rune(str) {
		for _, ce := range o.find(string(r)).elems {
			if ce.w[0] != 0 || ce.w[1] != 0 || ce.w[2] != 0 {
				elems = append(elems, ce)
			}
		}
	}
	return elems
}
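The tailoring machinery above boils down to a doubly-linked list with excluded anchor nodes at each end. A minimal standalone sketch of the insertAfter/nextIndexed mechanics (hypothetical node type; the collation elements and level bookkeeping are omitted):

package main

import "fmt"

// node mirrors the prev/next/exclude fields of build.entry, nothing more.
type node struct {
	name       string
	exclude    bool
	prev, next *node
}

// insertAfter splices n in directly after e, as entry.insertAfter does.
func (e *node) insertAfter(n *node) {
	n.next = e.next
	n.prev = e
	if e.next != nil {
		e.next.prev = n
	}
	e.next = n
}

// nextIndexed skips excluded nodes, like entry.nextIndexed minus the
// level tracking.
func (e *node) nextIndexed() *node {
	for e = e.next; e != nil && e.exclude; e = e.next {
	}
	return e
}

func main() {
	// Anchors are excluded from indexing, exactly like the logical reset
	// points installed by makeRootOrdering.
	first := &node{name: "first", exclude: true}
	last := &node{name: "last", exclude: true}
	first.insertAfter(last)

	// Tailor: place "b" right after the first anchor, then "a" after "b".
	b := &node{name: "b"}
	first.insertAfter(b)
	a := &node{name: "a"}
	b.insertAfter(a)

	for e := first.nextIndexed(); e != nil; e = e.nextIndexed() {
		fmt.Println(e.name) // prints "b", then "a"
	}
}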
229
vendor/golang.org/x/text/collate/build/order_test.go
generated
vendored
Normal file
@@ -0,0 +1,229 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package build

import (
	"strconv"
	"testing"

	"golang.org/x/text/internal/colltab"
)

type entryTest struct {
	f   func(in []int) (uint32, error)
	arg []int
	val uint32
}

// makeList returns a list of entries of length n+2, with n normal
// entries plus a leading and trailing anchor.
func makeList(n int) []*entry {
	es := make([]*entry, n+2)
	weights := []rawCE{{w: []int{100, 20, 5, 0}}}
	for i := range es {
		runes := []rune{rune(i)}
		es[i] = &entry{
			runes: runes,
			elems: weights,
		}
		weights = nextWeight(colltab.Primary, weights)
	}
	for i := 1; i < len(es); i++ {
		es[i-1].next = es[i]
		es[i].prev = es[i-1]
		_, es[i-1].level = compareWeights(es[i-1].elems, es[i].elems)
	}
	es[0].exclude = true
	es[0].logical = firstAnchor
	es[len(es)-1].exclude = true
	es[len(es)-1].logical = lastAnchor
	return es
}

func TestNextIndexed(t *testing.T) {
	const n = 5
	es := makeList(n)
	for i := int64(0); i < 1<<n; i++ {
		mask := strconv.FormatInt(i+(1<<n), 2)
		for i, c := range mask {
			es[i].exclude = c == '1'
		}
		e := es[0]
		for i, c := range mask {
			if c == '0' {
				e, _ = e.nextIndexed()
				if e != es[i] {
					t.Errorf("%d: expected entry %d; found %d", i, es[i].elems, e.elems)
				}
			}
		}
		if e, _ = e.nextIndexed(); e != nil {
			t.Errorf("%d: expected nil entry; found %d", i, e.elems)
		}
	}
}

func TestRemove(t *testing.T) {
	const n = 5
	for i := int64(0); i < 1<<n; i++ {
		es := makeList(n)
		mask := strconv.FormatInt(i+(1<<n), 2)
		for i, c := range mask {
			if c == '0' {
				es[i].remove()
			}
		}
		e := es[0]
		for i, c := range mask {
			if c == '1' {
				if e != es[i] {
					t.Errorf("%d: expected entry %d; found %d", i, es[i].elems, e.elems)
				}
				e, _ = e.nextIndexed()
			}
		}
		if e != nil {
			t.Errorf("%d: expected nil entry; found %d", i, e.elems)
		}
	}
}

// nextPerm generates the next permutation of the array. The starting
// permutation is assumed to be a list of integers sorted in increasing order.
// It returns false if there are no more permutations left.
func nextPerm(a []int) bool {
	i := len(a) - 2
	for ; i >= 0; i-- {
		if a[i] < a[i+1] {
			break
		}
	}
	if i < 0 {
		return false
	}
	for j := len(a) - 1; j >= i; j-- {
		if a[j] > a[i] {
			a[i], a[j] = a[j], a[i]
			break
		}
	}
	for j := i + 1; j < (len(a)+i+1)/2; j++ {
		a[j], a[len(a)+i-j] = a[len(a)+i-j], a[j]
	}
	return true
}

func TestInsertAfter(t *testing.T) {
	const n = 5
	orig := makeList(n)
	perm := make([]int, n)
	for i := range perm {
		perm[i] = i + 1
	}
	for ok := true; ok; ok = nextPerm(perm) {
		es := makeList(n)
		last := es[0]
		for _, i := range perm {
			last.insertAfter(es[i])
			last = es[i]
		}
		for _, e := range es {
			e.elems = es[0].elems
		}
		e := es[0]
		for _, i := range perm {
			e, _ = e.nextIndexed()
			if e.runes[0] != orig[i].runes[0] {
				t.Errorf("%d:%d: expected entry %X; found %X", perm, i, orig[i].runes, e.runes)
				break
			}
		}
	}
}

func TestInsertBefore(t *testing.T) {
	const n = 5
	orig := makeList(n)
	perm := make([]int, n)
	for i := range perm {
		perm[i] = i + 1
	}
	for ok := true; ok; ok = nextPerm(perm) {
		es := makeList(n)
		last := es[len(es)-1]
		for _, i := range perm {
			last.insertBefore(es[i])
			last = es[i]
		}
		for _, e := range es {
			e.elems = es[0].elems
		}
		e := es[0]
		for i := n - 1; i >= 0; i-- {
			e, _ = e.nextIndexed()
			if e.runes[0] != rune(perm[i]) {
				t.Errorf("%d:%d: expected entry %X; found %X", perm, i, orig[i].runes, e.runes)
				break
			}
		}
	}
}

type entryLessTest struct {
	a, b *entry
	res  bool
}

var (
	w1 = []rawCE{{w: []int{100, 20, 5, 5}}}
	w2 = []rawCE{{w: []int{101, 20, 5, 5}}}
)

var entryLessTests = []entryLessTest{
	{&entry{str: "a", elems: w1},
		&entry{str: "a", elems: w1},
		false,
	},
	{&entry{str: "a", elems: w1},
		&entry{str: "a", elems: w2},
		true,
	},
	{&entry{str: "a", elems: w1},
		&entry{str: "b", elems: w1},
		true,
	},
	{&entry{str: "a", elems: w2},
		&entry{str: "a", elems: w1},
		false,
	},
	{&entry{str: "c", elems: w1},
		&entry{str: "b", elems: w1},
		false,
	},
	{&entry{str: "a", elems: w1, logical: firstAnchor},
		&entry{str: "a", elems: w1},
		true,
	},
	{&entry{str: "a", elems: w1},
		&entry{str: "b", elems: w1, logical: firstAnchor},
		false,
	},
	{&entry{str: "b", elems: w1},
		&entry{str: "a", elems: w1, logical: lastAnchor},
		true,
	},
	{&entry{str: "a", elems: w1, logical: lastAnchor},
		&entry{str: "c", elems: w1},
		false,
	},
}

func TestEntryLess(t *testing.T) {
	for i, tt := range entryLessTests {
		if res := entryLess(tt.a, tt.b); res != tt.res {
			t.Errorf("%d: was %v; want %v", i, res, tt.res)
		}
	}
}
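nextPerm above is the standard next-lexicographic-permutation step: find the rightmost ascent, swap in the next larger element from the tail, then reverse the tail. A standalone demo of that behavior (same function body as above, with a toy driver):

package main

import "fmt"

// nextPerm advances a to its next lexicographic permutation, returning
// false once a is in descending order (the last permutation).
func nextPerm(a []int) bool {
	i := len(a) - 2
	for ; i >= 0; i-- {
		if a[i] < a[i+1] {
			break
		}
	}
	if i < 0 {
		return false
	}
	for j := len(a) - 1; j >= i; j-- {
		if a[j] > a[i] {
			a[i], a[j] = a[j], a[i]
			break
		}
	}
	for j := i + 1; j < (len(a)+i+1)/2; j++ {
		a[j], a[len(a)+i-j] = a[len(a)+i-j], a[j]
	}
	return true
}

func main() {
	a := []int{1, 2, 3}
	for ok := true; ok; ok = nextPerm(a) {
		fmt.Println(a) // all 6 permutations, in lexicographic order
	}
}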
81
vendor/golang.org/x/text/collate/build/table.go
generated
vendored
Normal file
@@ -0,0 +1,81 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package build

import (
	"fmt"
	"io"
	"reflect"

	"golang.org/x/text/internal/colltab"
)

// table is an intermediate structure that roughly resembles the table in collate.
type table struct {
	colltab.Table
	trie trie
	root *trieHandle
}

// print writes the table as Go compilable code to w. It prefixes the
// variable names with name. It returns the number of bytes written
// and the size of the resulting table.
func (t *table) fprint(w io.Writer, name string) (n, size int, err error) {
	update := func(nn, sz int, e error) {
		n += nn
		if err == nil {
			err = e
		}
		size += sz
	}
	// Write arrays needed for the structure.
	update(printColElems(w, t.ExpandElem, name+"ExpandElem"))
	update(printColElems(w, t.ContractElem, name+"ContractElem"))
	update(t.trie.printArrays(w, name))
	update(printArray(t.ContractTries, w, name))

	nn, e := fmt.Fprintf(w, "// Total size of %sTable is %d bytes\n", name, size)
	update(nn, 0, e)
	return
}

func (t *table) fprintIndex(w io.Writer, h *trieHandle, id string) (n int, err error) {
	p := func(f string, a ...interface{}) {
		nn, e := fmt.Fprintf(w, f, a...)
		n += nn
		if err == nil {
			err = e
		}
	}
	p("\t{ // %s\n", id)
	p("\t\tlookupOffset: 0x%x,\n", h.lookupStart)
	p("\t\tvaluesOffset: 0x%x,\n", h.valueStart)
	p("\t},\n")
	return
}

func printColElems(w io.Writer, a []uint32, name string) (n, sz int, err error) {
	p := func(f string, a ...interface{}) {
		nn, e := fmt.Fprintf(w, f, a...)
		n += nn
		if err == nil {
			err = e
		}
	}
	sz = len(a) * int(reflect.TypeOf(uint32(0)).Size())
	p("// %s: %d entries, %d bytes\n", name, len(a), sz)
	p("var %s = [%d]uint32 {", name, len(a))
	for i, c := range a {
		switch {
		case i%64 == 0:
			p("\n\t// Block %d, offset 0x%x\n", i/64, i)
		case (i%64)%6 == 0:
			p("\n\t")
		}
		p("0x%.8X, ", c)
	}
	p("\n}\n\n")
	return
}
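fprint, fprintIndex, and printColElems all rely on the same closure pattern: accumulate the byte count across writes and remember only the first error, so the happy path stays unconditional. The pattern in isolation (illustrative only, not part of the vendored code):

package main

import (
	"fmt"
	"os"
)

func main() {
	var (
		n   int
		err error
	)
	// p wraps Fprintf: it adds to the running byte count and latches
	// the first error, exactly as the printers above do.
	p := func(f string, a ...interface{}) {
		nn, e := fmt.Fprintf(os.Stdout, f, a...)
		n += nn
		if err == nil {
			err = e
		}
	}
	p("hello ")
	p("world\n")
	fmt.Printf("wrote %d bytes, err=%v\n", n, err)
}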
290
vendor/golang.org/x/text/collate/build/trie.go
generated
vendored
Normal file
@@ -0,0 +1,290 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// The trie in this file is used to associate the first full character
// in a UTF-8 string with a collation element.
// All but the last byte in a UTF-8 byte sequence are
// used to look up offsets in the index table to be used for the next byte.
// The last byte is used to index into a table of collation elements.
// This file contains the code for the generation of the trie.

package build

import (
	"fmt"
	"hash/fnv"
	"io"
	"reflect"
)

const (
	blockSize   = 64
	blockOffset = 2 // Subtract 2 blocks to compensate for the 0x80 added to continuation bytes.
)

type trieHandle struct {
	lookupStart uint16 // offset in table for first byte
	valueStart  uint16 // offset in table for first byte
}

type trie struct {
	index  []uint16
	values []uint32
}

// trieNode is the intermediate trie structure used for generating a trie.
type trieNode struct {
	index    []*trieNode
	value    []uint32
	b        byte
	refValue uint16
	refIndex uint16
}

func newNode() *trieNode {
	return &trieNode{
		index: make([]*trieNode, 64),
		value: make([]uint32, 128), // root node size is 128 instead of 64
	}
}

func (n *trieNode) isInternal() bool {
	return n.value != nil
}

func (n *trieNode) insert(r rune, value uint32) {
	const maskx = 0x3F // mask out two most-significant bits
	str := string(r)
	if len(str) == 1 {
		n.value[str[0]] = value
		return
	}
	for i := 0; i < len(str)-1; i++ {
		b := str[i] & maskx
		if n.index == nil {
			n.index = make([]*trieNode, blockSize)
		}
		nn := n.index[b]
		if nn == nil {
			nn = &trieNode{}
			nn.b = b
			n.index[b] = nn
		}
		n = nn
	}
	if n.value == nil {
		n.value = make([]uint32, blockSize)
	}
	b := str[len(str)-1] & maskx
	n.value[b] = value
}

type trieBuilder struct {
	t *trie

	roots []*trieHandle

	lookupBlocks []*trieNode
	valueBlocks  []*trieNode

	lookupBlockIdx map[uint32]*trieNode
	valueBlockIdx  map[uint32]*trieNode
}

func newTrieBuilder() *trieBuilder {
	index := &trieBuilder{}
	index.lookupBlocks = make([]*trieNode, 0)
	index.valueBlocks = make([]*trieNode, 0)
	index.lookupBlockIdx = make(map[uint32]*trieNode)
	index.valueBlockIdx = make(map[uint32]*trieNode)
	// The third nil is the default null block. The other two blocks
	// are used to guarantee an offset of at least 3 for each block.
	index.lookupBlocks = append(index.lookupBlocks, nil, nil, nil)
	index.t = &trie{}
	return index
}

func (b *trieBuilder) computeOffsets(n *trieNode) *trieNode {
	hasher := fnv.New32()
	if n.index != nil {
		for i, nn := range n.index {
			var vi, vv uint16
			if nn != nil {
				nn = b.computeOffsets(nn)
				n.index[i] = nn
				vi = nn.refIndex
				vv = nn.refValue
			}
			hasher.Write([]byte{byte(vi >> 8), byte(vi)})
			hasher.Write([]byte{byte(vv >> 8), byte(vv)})
		}
		h := hasher.Sum32()
		nn, ok := b.lookupBlockIdx[h]
		if !ok {
			n.refIndex = uint16(len(b.lookupBlocks)) - blockOffset
			b.lookupBlocks = append(b.lookupBlocks, n)
			b.lookupBlockIdx[h] = n
		} else {
			n = nn
		}
	} else {
		for _, v := range n.value {
			hasher.Write([]byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)})
		}
		h := hasher.Sum32()
		nn, ok := b.valueBlockIdx[h]
		if !ok {
			n.refValue = uint16(len(b.valueBlocks)) - blockOffset
			n.refIndex = n.refValue
			b.valueBlocks = append(b.valueBlocks, n)
			b.valueBlockIdx[h] = n
		} else {
			n = nn
		}
	}
	return n
}

func (b *trieBuilder) addStartValueBlock(n *trieNode) uint16 {
	hasher := fnv.New32()
	for _, v := range n.value[:2*blockSize] {
		hasher.Write([]byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)})
	}
	h := hasher.Sum32()
	nn, ok := b.valueBlockIdx[h]
	if !ok {
		n.refValue = uint16(len(b.valueBlocks))
		n.refIndex = n.refValue
		b.valueBlocks = append(b.valueBlocks, n)
		// Add a dummy block to accommodate the double block size.
		b.valueBlocks = append(b.valueBlocks, nil)
		b.valueBlockIdx[h] = n
	} else {
		n = nn
	}
	return n.refValue
}

func genValueBlock(t *trie, n *trieNode) {
	if n != nil {
		for _, v := range n.value {
			t.values = append(t.values, v)
		}
	}
}

func genLookupBlock(t *trie, n *trieNode) {
	for _, nn := range n.index {
		v := uint16(0)
		if nn != nil {
			if n.index != nil {
				v = nn.refIndex
			} else {
				v = nn.refValue
			}
		}
		t.index = append(t.index, v)
	}
}

func (b *trieBuilder) addTrie(n *trieNode) *trieHandle {
	h := &trieHandle{}
	b.roots = append(b.roots, h)
	h.valueStart = b.addStartValueBlock(n)
	if len(b.roots) == 1 {
		// We insert a null block after the first start value block.
		// This ensures that continuation bytes of UTF-8 sequences of length
		// greater than 2 will automatically hit a null block if there
		// was an undefined entry.
		b.valueBlocks = append(b.valueBlocks, nil)
	}
	n = b.computeOffsets(n)
	// Offset by one extra block as the first byte starts at 0xC0 instead of 0x80.
	h.lookupStart = n.refIndex - 1
	return h
}

// generate generates and returns the trie for n.
func (b *trieBuilder) generate() (t *trie, err error) {
	t = b.t
	if len(b.valueBlocks) >= 1<<16 {
		return nil, fmt.Errorf("maximum number of value blocks exceeded (%d > %d)", len(b.valueBlocks), 1<<16)
	}
	if len(b.lookupBlocks) >= 1<<16 {
		return nil, fmt.Errorf("maximum number of lookup blocks exceeded (%d > %d)", len(b.lookupBlocks), 1<<16)
	}
	genValueBlock(t, b.valueBlocks[0])
	genValueBlock(t, &trieNode{value: make([]uint32, 64)})
	for i := 2; i < len(b.valueBlocks); i++ {
		genValueBlock(t, b.valueBlocks[i])
	}
	n := &trieNode{index: make([]*trieNode, 64)}
	genLookupBlock(t, n)
	genLookupBlock(t, n)
	genLookupBlock(t, n)
	for i := 3; i < len(b.lookupBlocks); i++ {
		genLookupBlock(t, b.lookupBlocks[i])
	}
	return b.t, nil
}

func (t *trie) printArrays(w io.Writer, name string) (n, size int, err error) {
	p := func(f string, a ...interface{}) {
		nn, e := fmt.Fprintf(w, f, a...)
		n += nn
		if err == nil {
			err = e
		}
	}
	nv := len(t.values)
	p("// %sValues: %d entries, %d bytes\n", name, nv, nv*4)
	p("// Block 2 is the null block.\n")
	p("var %sValues = [%d]uint32 {", name, nv)
	var printnewline bool
	for i, v := range t.values {
		if i%blockSize == 0 {
			p("\n\t// Block %#x, offset %#x", i/blockSize, i)
		}
		if i%4 == 0 {
			printnewline = true
		}
		if v != 0 {
			if printnewline {
				p("\n\t")
				printnewline = false
			}
			p("%#04x:%#08x, ", i, v)
		}
	}
	p("\n}\n\n")
	ni := len(t.index)
	p("// %sLookup: %d entries, %d bytes\n", name, ni, ni*2)
	p("// Block 0 is the null block.\n")
	p("var %sLookup = [%d]uint16 {", name, ni)
	printnewline = false
	for i, v := range t.index {
		if i%blockSize == 0 {
			p("\n\t// Block %#x, offset %#x", i/blockSize, i)
		}
		if i%8 == 0 {
			printnewline = true
		}
		if v != 0 {
			if printnewline {
				p("\n\t")
				printnewline = false
			}
			p("%#03x:%#02x, ", i, v)
		}
	}
	p("\n}\n\n")
	return n, nv*4 + ni*2, err
}

func (t *trie) printStruct(w io.Writer, handle *trieHandle, name string) (n, sz int, err error) {
	const msg = "trie{ %sLookup[%d:], %sValues[%d:], %sLookup[:], %sValues[:]}"
	n, err = fmt.Fprintf(w, msg, name, handle.lookupStart*blockSize, name, handle.valueStart*blockSize, name, name)
	sz += int(reflect.TypeOf(trie{}).Size())
	return
}
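A quick standalone illustration (not part of the vendored code) of how trieNode.insert maps a multi-byte UTF-8 sequence onto block indices with the 0x3F mask: every byte except the last walks the lookup blocks, and the last byte, masked to its low six bits, indexes a 64-entry value block.

package main

import "fmt"

func main() {
	const maskx = 0x3F // same mask as in trieNode.insert
	r := 'é'           // U+00E9, encoded in UTF-8 as 0xC3 0xA9
	s := string(r)
	fmt.Printf("UTF-8 bytes: % X\n", []byte(s))
	// All but the last byte select lookup blocks for the next step.
	for i := 0; i < len(s)-1; i++ {
		fmt.Printf("lookup step %d: byte %#02x -> block index %#02x\n", i, s[i], s[i]&maskx)
	}
	// The last byte indexes into a value block of collation elements.
	last := s[len(s)-1]
	fmt.Printf("value index: byte %#02x -> %#02x\n", last, last&maskx)
}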
107
vendor/golang.org/x/text/collate/build/trie_test.go
generated
vendored
Normal file
@@ -0,0 +1,107 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package build

import (
	"bytes"
	"fmt"
	"testing"
)

// We take the smallest, largest and an arbitrary value for each
// of the UTF-8 sequence lengths.
var testRunes = []rune{
	0x01, 0x0C, 0x7F, // 1-byte sequences
	0x80, 0x100, 0x7FF, // 2-byte sequences
	0x800, 0x999, 0xFFFF, // 3-byte sequences
	0x10000, 0x10101, 0x10FFFF, // 4-byte sequences
	0x200, 0x201, 0x202, 0x210, 0x215, // five entries in one sparse block
}

func makeTestTrie(t *testing.T) trie {
	n := newNode()
	for i, r := range testRunes {
		n.insert(r, uint32(i))
	}
	idx := newTrieBuilder()
	idx.addTrie(n)
	tr, err := idx.generate()
	if err != nil {
		t.Errorf(err.Error())
	}
	return *tr
}

func TestGenerateTrie(t *testing.T) {
	testdata := makeTestTrie(t)
	buf := &bytes.Buffer{}
	testdata.printArrays(buf, "test")
	fmt.Fprintf(buf, "var testTrie = ")
	testdata.printStruct(buf, &trieHandle{19, 0}, "test")
	if output != buf.String() {
		t.Error("output differs")
	}
}

var output = `// testValues: 832 entries, 3328 bytes
// Block 2 is the null block.
var testValues = [832]uint32 {
	// Block 0x0, offset 0x0
	0x000c:0x00000001, 
	// Block 0x1, offset 0x40
	0x007f:0x00000002, 
	// Block 0x2, offset 0x80
	// Block 0x3, offset 0xc0
	0x00c0:0x00000003, 
	// Block 0x4, offset 0x100
	0x0100:0x00000004, 
	// Block 0x5, offset 0x140
	0x0140:0x0000000c, 0x0141:0x0000000d, 0x0142:0x0000000e, 
	0x0150:0x0000000f, 
	0x0155:0x00000010, 
	// Block 0x6, offset 0x180
	0x01bf:0x00000005, 
	// Block 0x7, offset 0x1c0
	0x01c0:0x00000006, 
	// Block 0x8, offset 0x200
	0x0219:0x00000007, 
	// Block 0x9, offset 0x240
	0x027f:0x00000008, 
	// Block 0xa, offset 0x280
	0x0280:0x00000009, 
	// Block 0xb, offset 0x2c0
	0x02c1:0x0000000a, 
	// Block 0xc, offset 0x300
	0x033f:0x0000000b, 
}

// testLookup: 640 entries, 1280 bytes
// Block 0 is the null block.
var testLookup = [640]uint16 {
	// Block 0x0, offset 0x0
	// Block 0x1, offset 0x40
	// Block 0x2, offset 0x80
	// Block 0x3, offset 0xc0
	0x0e0:0x05, 0x0e6:0x06, 
	// Block 0x4, offset 0x100
	0x13f:0x07, 
	// Block 0x5, offset 0x140
	0x140:0x08, 0x144:0x09, 
	// Block 0x6, offset 0x180
	0x190:0x03, 
	// Block 0x7, offset 0x1c0
	0x1ff:0x0a, 
	// Block 0x8, offset 0x200
	0x20f:0x05, 
	// Block 0x9, offset 0x240
	0x242:0x01, 0x244:0x02, 
	0x248:0x03, 
	0x25f:0x04, 
	0x260:0x01, 
	0x26f:0x02, 
	0x270:0x04, 0x274:0x06, 
}

var testTrie = trie{ testLookup[1216:], testValues[0:], testLookup[:], testValues[:]}`
403
vendor/golang.org/x/text/collate/collate.go
generated
vendored
Normal file
@@ -0,0 +1,403 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// TODO: remove hard-coded versions when we have implemented fractional weights.
|
||||
// The current implementation is incompatible with later CLDR versions.
|
||||
//go:generate go run maketables.go -cldr=23 -unicode=6.2.0
|
||||
|
||||
// Package collate contains types for comparing and sorting Unicode strings
|
||||
// according to a given collation order.
|
||||
package collate // import "golang.org/x/text/collate"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/text/internal/colltab"
|
||||
"golang.org/x/text/language"
|
||||
)
|
||||
|
||||
// Collator provides functionality for comparing strings for a given
|
||||
// collation order.
|
||||
type Collator struct {
|
||||
options
|
||||
|
||||
sorter sorter
|
||||
|
||||
_iter [2]iter
|
||||
}
|
||||
|
||||
func (c *Collator) iter(i int) *iter {
|
||||
// TODO: evaluate performance for making the second iterator optional.
|
||||
return &c._iter[i]
|
||||
}
|
||||
|
||||
// Supported returns the list of languages for which collating differs from its parent.
|
||||
func Supported() []language.Tag {
|
||||
// TODO: use language.Coverage instead.
|
||||
|
||||
t := make([]language.Tag, len(tags))
|
||||
copy(t, tags)
|
||||
return t
|
||||
}
|
||||
|
||||
func init() {
|
||||
ids := strings.Split(availableLocales, ",")
|
||||
tags = make([]language.Tag, len(ids))
|
||||
for i, s := range ids {
|
||||
tags[i] = language.Raw.MustParse(s)
|
||||
}
|
||||
}
|
||||
|
||||
var tags []language.Tag
|
||||
|
||||
// New returns a new Collator initialized for the given locale.
|
||||
func New(t language.Tag, o ...Option) *Collator {
|
||||
index := colltab.MatchLang(t, tags)
|
||||
c := newCollator(getTable(locales[index]))
|
||||
|
||||
// Set options from the user-supplied tag.
|
||||
c.setFromTag(t)
|
||||
|
||||
// Set the user-supplied options.
|
||||
c.setOptions(o)
|
||||
|
||||
c.init()
|
||||
return c
|
||||
}
|
||||
|
||||
// NewFromTable returns a new Collator for the given Weighter.
|
||||
func NewFromTable(w colltab.Weighter, o ...Option) *Collator {
|
||||
c := newCollator(w)
|
||||
c.setOptions(o)
|
||||
c.init()
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Collator) init() {
|
||||
if c.numeric {
|
||||
c.t = colltab.NewNumericWeighter(c.t)
|
||||
}
|
||||
c._iter[0].init(c)
|
||||
c._iter[1].init(c)
|
||||
}
|
||||
|
||||
// Buffer holds keys generated by Key and KeyString.
|
||||
type Buffer struct {
|
||||
buf [4096]byte
|
||||
key []byte
|
||||
}
|
||||
|
||||
func (b *Buffer) init() {
|
||||
if b.key == nil {
|
||||
b.key = b.buf[:0]
|
||||
}
|
||||
}
|
||||
|
||||
// Reset clears the buffer from previous results generated by Key and KeyString.
|
||||
func (b *Buffer) Reset() {
|
||||
b.key = b.key[:0]
|
||||
}
|
||||
|
||||
// Compare returns an integer comparing the two byte slices.
|
||||
// The result will be 0 if a==b, -1 if a < b, and +1 if a > b.
|
||||
func (c *Collator) Compare(a, b []byte) int {
|
||||
// TODO: skip identical prefixes once we have a fast way to detect if a rune is
|
||||
// part of a contraction. This would lead to roughly a 10% speedup for the colcmp regtest.
|
||||
c.iter(0).SetInput(a)
|
||||
c.iter(1).SetInput(b)
|
||||
if res := c.compare(); res != 0 {
|
||||
return res
|
||||
}
|
||||
if !c.ignore[colltab.Identity] {
|
||||
return bytes.Compare(a, b)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// CompareString returns an integer comparing the two strings.
|
||||
// The result will be 0 if a==b, -1 if a < b, and +1 if a > b.
|
||||
func (c *Collator) CompareString(a, b string) int {
|
||||
// TODO: skip identical prefixes once we have a fast way to detect if a rune is
|
||||
// part of a contraction. This would lead to roughly a 10% speedup for the colcmp regtest.
|
||||
c.iter(0).SetInputString(a)
|
||||
c.iter(1).SetInputString(b)
|
||||
if res := c.compare(); res != 0 {
|
||||
return res
|
||||
}
|
||||
if !c.ignore[colltab.Identity] {
|
||||
if a < b {
|
||||
return -1
|
||||
} else if a > b {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func compareLevel(f func(i *iter) int, a, b *iter) int {
|
||||
a.pce = 0
|
||||
b.pce = 0
|
||||
for {
|
||||
va := f(a)
|
||||
vb := f(b)
|
||||
if va != vb {
|
||||
if va < vb {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
} else if va == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (c *Collator) compare() int {
|
||||
ia, ib := c.iter(0), c.iter(1)
|
||||
// Process primary level
|
||||
if c.alternate != altShifted {
|
||||
// TODO: implement script reordering
|
||||
if res := compareLevel((*iter).nextPrimary, ia, ib); res != 0 {
|
||||
return res
|
||||
}
|
||||
} else {
|
||||
// TODO: handle shifted
|
||||
}
|
||||
if !c.ignore[colltab.Secondary] {
|
||||
f := (*iter).nextSecondary
|
||||
if c.backwards {
|
||||
f = (*iter).prevSecondary
|
||||
}
|
||||
if res := compareLevel(f, ia, ib); res != 0 {
|
||||
return res
|
||||
}
|
||||
}
|
||||
// TODO: special case handling (Danish?)
|
||||
if !c.ignore[colltab.Tertiary] || c.caseLevel {
|
||||
if res := compareLevel((*iter).nextTertiary, ia, ib); res != 0 {
|
||||
return res
|
||||
}
|
||||
if !c.ignore[colltab.Quaternary] {
|
||||
if res := compareLevel((*iter).nextQuaternary, ia, ib); res != 0 {
|
||||
return res
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Key returns the collation key for str.
|
||||
// Passing the buffer buf may avoid memory allocations.
|
||||
// The returned slice will point to an allocation in Buffer and will remain
|
||||
// valid until the next call to buf.Reset().
|
||||
func (c *Collator) Key(buf *Buffer, str []byte) []byte {
|
||||
// See http://www.unicode.org/reports/tr10/#Main_Algorithm for more details.
|
||||
buf.init()
|
||||
return c.key(buf, c.getColElems(str))
|
||||
}
|
||||
|
||||
// KeyFromString returns the collation key for str.
|
||||
// Passing the buffer buf may avoid memory allocations.
|
||||
// The returned slice will point to an allocation in Buffer and will retain
|
||||
// valid until the next call to buf.ResetKeys().
|
||||
func (c *Collator) KeyFromString(buf *Buffer, str string) []byte {
|
||||
// See http://www.unicode.org/reports/tr10/#Main_Algorithm for more details.
|
||||
buf.init()
|
||||
return c.key(buf, c.getColElemsString(str))
|
||||
}
|
||||
|
||||
func (c *Collator) key(buf *Buffer, w []colltab.Elem) []byte {
	processWeights(c.alternate, c.t.Top(), w)
	kn := len(buf.key)
	c.keyFromElems(buf, w)
	return buf.key[kn:]
}

func (c *Collator) getColElems(str []byte) []colltab.Elem {
	i := c.iter(0)
	i.SetInput(str)
	for i.Next() {
	}
	return i.Elems
}

func (c *Collator) getColElemsString(str string) []colltab.Elem {
	i := c.iter(0)
	i.SetInputString(str)
	for i.Next() {
	}
	return i.Elems
}

type iter struct {
	wa [512]colltab.Elem

	colltab.Iter
	pce int
}

func (i *iter) init(c *Collator) {
	i.Weighter = c.t
	i.Elems = i.wa[:0]
}

func (i *iter) nextPrimary() int {
	for {
		for ; i.pce < i.N; i.pce++ {
			if v := i.Elems[i.pce].Primary(); v != 0 {
				i.pce++
				return v
			}
		}
		if !i.Next() {
			return 0
		}
	}
	panic("should not reach here")
}

func (i *iter) nextSecondary() int {
	for ; i.pce < len(i.Elems); i.pce++ {
		if v := i.Elems[i.pce].Secondary(); v != 0 {
			i.pce++
			return v
		}
	}
	return 0
}

func (i *iter) prevSecondary() int {
	for ; i.pce < len(i.Elems); i.pce++ {
		if v := i.Elems[len(i.Elems)-i.pce-1].Secondary(); v != 0 {
			i.pce++
			return v
		}
	}
	return 0
}

func (i *iter) nextTertiary() int {
	for ; i.pce < len(i.Elems); i.pce++ {
		if v := i.Elems[i.pce].Tertiary(); v != 0 {
			i.pce++
			return int(v)
		}
	}
	return 0
}

func (i *iter) nextQuaternary() int {
	for ; i.pce < len(i.Elems); i.pce++ {
		if v := i.Elems[i.pce].Quaternary(); v != 0 {
			i.pce++
			return v
		}
	}
	return 0
}

func appendPrimary(key []byte, p int) []byte {
	// Convert to variable length encoding; supports up to 23 bits.
	if p <= 0x7FFF {
		key = append(key, uint8(p>>8), uint8(p))
	} else {
		key = append(key, uint8(p>>16)|0x80, uint8(p>>8), uint8(p))
	}
	return key
}

// keyFromElems converts the weights ws to a compact sequence of bytes.
// The result will be appended to the byte buffer in buf.
func (c *Collator) keyFromElems(buf *Buffer, ws []colltab.Elem) {
	for _, v := range ws {
		if w := v.Primary(); w > 0 {
			buf.key = appendPrimary(buf.key, w)
		}
	}
	if !c.ignore[colltab.Secondary] {
		buf.key = append(buf.key, 0, 0)
		// TODO: we can use one 0 if we can guarantee that all non-zero weights are > 0xFF.
		if !c.backwards {
			for _, v := range ws {
				if w := v.Secondary(); w > 0 {
					buf.key = append(buf.key, uint8(w>>8), uint8(w))
				}
			}
		} else {
			for i := len(ws) - 1; i >= 0; i-- {
				if w := ws[i].Secondary(); w > 0 {
					buf.key = append(buf.key, uint8(w>>8), uint8(w))
				}
			}
		}
	} else if c.caseLevel {
		buf.key = append(buf.key, 0, 0)
	}
	if !c.ignore[colltab.Tertiary] || c.caseLevel {
		buf.key = append(buf.key, 0, 0)
		for _, v := range ws {
			if w := v.Tertiary(); w > 0 {
				buf.key = append(buf.key, uint8(w))
			}
		}
		// Derive the quaternary weights from the options and other levels.
		// Note that we represent MaxQuaternary as 0xFF. The first byte of the
		// representation of a primary weight is always smaller than 0xFF,
		// so using this single byte value will compare correctly.
		if !c.ignore[colltab.Quaternary] && c.alternate >= altShifted {
			if c.alternate == altShiftTrimmed {
				lastNonFFFF := len(buf.key)
				buf.key = append(buf.key, 0)
				for _, v := range ws {
					if w := v.Quaternary(); w == colltab.MaxQuaternary {
						buf.key = append(buf.key, 0xFF)
					} else if w > 0 {
						buf.key = appendPrimary(buf.key, w)
						lastNonFFFF = len(buf.key)
					}
				}
				buf.key = buf.key[:lastNonFFFF]
			} else {
				buf.key = append(buf.key, 0)
				for _, v := range ws {
					if w := v.Quaternary(); w == colltab.MaxQuaternary {
						buf.key = append(buf.key, 0xFF)
					} else if w > 0 {
						buf.key = appendPrimary(buf.key, w)
					}
				}
			}
		}
	}
}

func processWeights(vw alternateHandling, top uint32, wa []colltab.Elem) {
	ignore := false
	vtop := int(top)
	switch vw {
	case altShifted, altShiftTrimmed:
		for i := range wa {
			if p := wa[i].Primary(); p <= vtop && p != 0 {
				wa[i] = colltab.MakeQuaternary(p)
				ignore = true
			} else if p == 0 {
				if ignore {
					wa[i] = colltab.Ignore
				}
			} else {
				ignore = false
			}
		}
	case altBlanked:
		for i := range wa {
			if p := wa[i].Primary(); p <= vtop && (ignore || p != 0) {
				wa[i] = colltab.Ignore
				ignore = true
			} else {
				ignore = false
			}
		}
	}
}
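
For reference, a minimal usage sketch of the Key/Compare entry points above, written against the public collate API; it is not part of the vendored tree, and the locale and inputs are arbitrary:

// Usage sketch (not part of the vendored tree): Key and KeyFromString
// produce binary-comparable sort keys, so bytes.Compare on two keys
// agrees with Compare on the source strings.
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/text/collate"
	"golang.org/x/text/language"
)

func main() {
	c := collate.New(language.English)
	buf := collate.Buffer{}
	ka := c.KeyFromString(&buf, "a\u0301") // "á"
	kb := c.KeyFromString(&buf, "b")
	fmt.Println(bytes.Compare(ka, kb))           // -1
	fmt.Println(c.CompareString("a\u0301", "b")) // -1, same ordering
}
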
482 vendor/golang.org/x/text/collate/collate_test.go generated vendored Normal file
@@ -0,0 +1,482 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package collate

import (
	"bytes"
	"testing"

	"golang.org/x/text/internal/colltab"
	"golang.org/x/text/language"
)

type weightsTest struct {
	opt     opts
	in, out ColElems
}

type opts struct {
	lev int
	alt alternateHandling
	top int

	backwards bool
	caseLevel bool
}

// ignore returns an initialized boolean array based on the given Level.
// A negative value means using the default setting of quaternary.
func ignore(level colltab.Level) (ignore [colltab.NumLevels]bool) {
	if level < 0 {
		level = colltab.Quaternary
	}
	for i := range ignore {
		ignore[i] = level < colltab.Level(i)
	}
	return ignore
}

func makeCE(w []int) colltab.Elem {
	ce, err := colltab.MakeElem(w[0], w[1], w[2], uint8(w[3]))
	if err != nil {
		panic(err)
	}
	return ce
}

func (o opts) collator() *Collator {
	c := &Collator{
		options: options{
			ignore:      ignore(colltab.Level(o.lev - 1)),
			alternate:   o.alt,
			backwards:   o.backwards,
			caseLevel:   o.caseLevel,
			variableTop: uint32(o.top),
		},
	}
	return c
}

const (
	maxQ = 0x1FFFFF
)

func wpq(p, q int) Weights {
	return W(p, defaults.Secondary, defaults.Tertiary, q)
}

func wsq(s, q int) Weights {
	return W(0, s, defaults.Tertiary, q)
}

func wq(q int) Weights {
	return W(0, 0, 0, q)
}

var zero = W(0, 0, 0, 0)

var processTests = []weightsTest{
	// Shifted
	{ // simple sequence of non-variables
		opt: opts{alt: altShifted, top: 100},
		in:  ColElems{W(200), W(300), W(400)},
		out: ColElems{wpq(200, maxQ), wpq(300, maxQ), wpq(400, maxQ)},
	},
	{ // first is a variable
		opt: opts{alt: altShifted, top: 250},
		in:  ColElems{W(200), W(300), W(400)},
		out: ColElems{wq(200), wpq(300, maxQ), wpq(400, maxQ)},
	},
	{ // all but first are variable
		opt: opts{alt: altShifted, top: 999},
		in:  ColElems{W(1000), W(200), W(300), W(400)},
		out: ColElems{wpq(1000, maxQ), wq(200), wq(300), wq(400)},
	},
	{ // first is a modifier
		opt: opts{alt: altShifted, top: 999},
		in:  ColElems{W(0, 10), W(1000)},
		out: ColElems{wsq(10, maxQ), wpq(1000, maxQ)},
	},
	{ // primary ignorables
		opt: opts{alt: altShifted, top: 250},
		in:  ColElems{W(200), W(0, 10), W(300), W(0, 15), W(400)},
		out: ColElems{wq(200), zero, wpq(300, maxQ), wsq(15, maxQ), wpq(400, maxQ)},
	},
	{ // secondary ignorables
		opt: opts{alt: altShifted, top: 250},
		in:  ColElems{W(200), W(0, 0, 10), W(300), W(0, 0, 15), W(400)},
		out: ColElems{wq(200), zero, wpq(300, maxQ), W(0, 0, 15, maxQ), wpq(400, maxQ)},
	},
	{ // tertiary ignorables, no change
		opt: opts{alt: altShifted, top: 250},
		in:  ColElems{W(200), zero, W(300), zero, W(400)},
		out: ColElems{wq(200), zero, wpq(300, maxQ), zero, wpq(400, maxQ)},
	},

	// ShiftTrimmed (same as Shifted)
	{ // simple sequence of non-variables
		opt: opts{alt: altShiftTrimmed, top: 100},
		in:  ColElems{W(200), W(300), W(400)},
		out: ColElems{wpq(200, maxQ), wpq(300, maxQ), wpq(400, maxQ)},
	},
	{ // first is a variable
		opt: opts{alt: altShiftTrimmed, top: 250},
		in:  ColElems{W(200), W(300), W(400)},
		out: ColElems{wq(200), wpq(300, maxQ), wpq(400, maxQ)},
	},
	{ // all but first are variable
		opt: opts{alt: altShiftTrimmed, top: 999},
		in:  ColElems{W(1000), W(200), W(300), W(400)},
		out: ColElems{wpq(1000, maxQ), wq(200), wq(300), wq(400)},
	},
	{ // first is a modifier
		opt: opts{alt: altShiftTrimmed, top: 999},
		in:  ColElems{W(0, 10), W(1000)},
		out: ColElems{wsq(10, maxQ), wpq(1000, maxQ)},
	},
	{ // primary ignorables
		opt: opts{alt: altShiftTrimmed, top: 250},
		in:  ColElems{W(200), W(0, 10), W(300), W(0, 15), W(400)},
		out: ColElems{wq(200), zero, wpq(300, maxQ), wsq(15, maxQ), wpq(400, maxQ)},
	},
	{ // secondary ignorables
		opt: opts{alt: altShiftTrimmed, top: 250},
		in:  ColElems{W(200), W(0, 0, 10), W(300), W(0, 0, 15), W(400)},
		out: ColElems{wq(200), zero, wpq(300, maxQ), W(0, 0, 15, maxQ), wpq(400, maxQ)},
	},
	{ // tertiary ignorables, no change
		opt: opts{alt: altShiftTrimmed, top: 250},
		in:  ColElems{W(200), zero, W(300), zero, W(400)},
		out: ColElems{wq(200), zero, wpq(300, maxQ), zero, wpq(400, maxQ)},
	},

	// Blanked
	{ // simple sequence of non-variables
		opt: opts{alt: altBlanked, top: 100},
		in:  ColElems{W(200), W(300), W(400)},
		out: ColElems{W(200), W(300), W(400)},
	},
	{ // first is a variable
		opt: opts{alt: altBlanked, top: 250},
		in:  ColElems{W(200), W(300), W(400)},
		out: ColElems{zero, W(300), W(400)},
	},
	{ // all but first are variable
		opt: opts{alt: altBlanked, top: 999},
		in:  ColElems{W(1000), W(200), W(300), W(400)},
		out: ColElems{W(1000), zero, zero, zero},
	},
	{ // first is a modifier
		opt: opts{alt: altBlanked, top: 999},
		in:  ColElems{W(0, 10), W(1000)},
		out: ColElems{W(0, 10), W(1000)},
	},
	{ // primary ignorables
		opt: opts{alt: altBlanked, top: 250},
		in:  ColElems{W(200), W(0, 10), W(300), W(0, 15), W(400)},
		out: ColElems{zero, zero, W(300), W(0, 15), W(400)},
	},
	{ // secondary ignorables
		opt: opts{alt: altBlanked, top: 250},
		in:  ColElems{W(200), W(0, 0, 10), W(300), W(0, 0, 15), W(400)},
		out: ColElems{zero, zero, W(300), W(0, 0, 15), W(400)},
	},
	{ // tertiary ignorables, no change
		opt: opts{alt: altBlanked, top: 250},
		in:  ColElems{W(200), zero, W(300), zero, W(400)},
		out: ColElems{zero, zero, W(300), zero, W(400)},
	},

	// Non-ignorable: input is always equal to output.
	{ // all but first are variable
		opt: opts{alt: altNonIgnorable, top: 999},
		in:  ColElems{W(1000), W(200), W(300), W(400)},
		out: ColElems{W(1000), W(200), W(300), W(400)},
	},
	{ // primary ignorables
		opt: opts{alt: altNonIgnorable, top: 250},
		in:  ColElems{W(200), W(0, 10), W(300), W(0, 15), W(400)},
		out: ColElems{W(200), W(0, 10), W(300), W(0, 15), W(400)},
	},
	{ // secondary ignorables
		opt: opts{alt: altNonIgnorable, top: 250},
		in:  ColElems{W(200), W(0, 0, 10), W(300), W(0, 0, 15), W(400)},
		out: ColElems{W(200), W(0, 0, 10), W(300), W(0, 0, 15), W(400)},
	},
	{ // tertiary ignorables, no change
		opt: opts{alt: altNonIgnorable, top: 250},
		in:  ColElems{W(200), zero, W(300), zero, W(400)},
		out: ColElems{W(200), zero, W(300), zero, W(400)},
	},
}

func TestProcessWeights(t *testing.T) {
	for i, tt := range processTests {
		in := convertFromWeights(tt.in)
		out := convertFromWeights(tt.out)
		processWeights(tt.opt.alt, uint32(tt.opt.top), in)
		for j, w := range in {
			if w != out[j] {
				t.Errorf("%d: Weights %d was %v; want %v", i, j, w, out[j])
			}
		}
	}
}

type keyFromElemTest struct {
	opt opts
	in  ColElems
	out []byte
}

var defS = byte(defaults.Secondary)
var defT = byte(defaults.Tertiary)

const sep = 0 // separator byte

var keyFromElemTests = []keyFromElemTest{
	{ // simple primary and secondary weights.
		opts{alt: altShifted},
		ColElems{W(0x200), W(0x7FFF), W(0, 0x30), W(0x100)},
		[]byte{0x2, 0, 0x7F, 0xFF, 0x1, 0x00, // primary
			sep, sep, 0, defS, 0, defS, 0, 0x30, 0, defS, // secondary
			sep, sep, defT, defT, defT, defT, // tertiary
			sep, 0xFF, 0xFF, 0xFF, 0xFF, // quaternary
		},
	},
	{ // same as first, but with zero elements that need to be removed
		opts{alt: altShifted},
		ColElems{W(0x200), zero, W(0x7FFF), W(0, 0x30), zero, W(0x100)},
		[]byte{0x2, 0, 0x7F, 0xFF, 0x1, 0x00, // primary
			sep, sep, 0, defS, 0, defS, 0, 0x30, 0, defS, // secondary
			sep, sep, defT, defT, defT, defT, // tertiary
			sep, 0xFF, 0xFF, 0xFF, 0xFF, // quaternary
		},
	},
	{ // same as first, with large primary values
		opts{alt: altShifted},
		ColElems{W(0x200), W(0x8000), W(0, 0x30), W(0x12345)},
		[]byte{0x2, 0, 0x80, 0x80, 0x00, 0x81, 0x23, 0x45, // primary
			sep, sep, 0, defS, 0, defS, 0, 0x30, 0, defS, // secondary
			sep, sep, defT, defT, defT, defT, // tertiary
			sep, 0xFF, 0xFF, 0xFF, 0xFF, // quaternary
		},
	},
	{ // same as first, but with the secondary level backwards
		opts{alt: altShifted, backwards: true},
		ColElems{W(0x200), W(0x7FFF), W(0, 0x30), W(0x100)},
		[]byte{0x2, 0, 0x7F, 0xFF, 0x1, 0x00, // primary
			sep, sep, 0, defS, 0, 0x30, 0, defS, 0, defS, // secondary
			sep, sep, defT, defT, defT, defT, // tertiary
			sep, 0xFF, 0xFF, 0xFF, 0xFF, // quaternary
		},
	},
	{ // same as first, ignoring quaternary level
		opts{alt: altShifted, lev: 3},
		ColElems{W(0x200), zero, W(0x7FFF), W(0, 0x30), zero, W(0x100)},
		[]byte{0x2, 0, 0x7F, 0xFF, 0x1, 0x00, // primary
			sep, sep, 0, defS, 0, defS, 0, 0x30, 0, defS, // secondary
			sep, sep, defT, defT, defT, defT, // tertiary
		},
	},
	{ // same as first, ignoring tertiary level
		opts{alt: altShifted, lev: 2},
		ColElems{W(0x200), zero, W(0x7FFF), W(0, 0x30), zero, W(0x100)},
		[]byte{0x2, 0, 0x7F, 0xFF, 0x1, 0x00, // primary
			sep, sep, 0, defS, 0, defS, 0, 0x30, 0, defS, // secondary
		},
	},
	{ // same as first, ignoring secondary level
		opts{alt: altShifted, lev: 1},
		ColElems{W(0x200), zero, W(0x7FFF), W(0, 0x30), zero, W(0x100)},
		[]byte{0x2, 0, 0x7F, 0xFF, 0x1, 0x00},
	},
	{ // simple primary and secondary weights.
		opts{alt: altShiftTrimmed, top: 0x250},
		ColElems{W(0x300), W(0x200), W(0x7FFF), W(0, 0x30), W(0x800)},
		[]byte{0x3, 0, 0x7F, 0xFF, 0x8, 0x00, // primary
			sep, sep, 0, defS, 0, defS, 0, 0x30, 0, defS, // secondary
			sep, sep, defT, defT, defT, defT, // tertiary
			sep, 0xFF, 0x2, 0, // quaternary
		},
	},
	{ // as first, primary with case level enabled
		opts{alt: altShifted, lev: 1, caseLevel: true},
		ColElems{W(0x200), W(0x7FFF), W(0, 0x30), W(0x100)},
		[]byte{0x2, 0, 0x7F, 0xFF, 0x1, 0x00, // primary
			sep, sep, // secondary
			sep, sep, defT, defT, defT, defT, // tertiary
		},
	},
}

func TestKeyFromElems(t *testing.T) {
	buf := Buffer{}
	for i, tt := range keyFromElemTests {
		buf.Reset()
		in := convertFromWeights(tt.in)
		processWeights(tt.opt.alt, uint32(tt.opt.top), in)
		tt.opt.collator().keyFromElems(&buf, in)
		res := buf.key
		if len(res) != len(tt.out) {
			t.Errorf("%d: len(ws) was %d; want %d (%X should be %X)", i, len(res), len(tt.out), res, tt.out)
		}
		n := len(res)
		if len(tt.out) < n {
			n = len(tt.out)
		}
		for j, c := range res[:n] {
			if c != tt.out[j] {
				t.Errorf("%d: byte %d was %X; want %X", i, j, c, tt.out[j])
			}
		}
	}
}

func TestGetColElems(t *testing.T) {
	for i, tt := range appendNextTests {
		c, err := makeTable(tt.in)
		if err != nil {
			// error is reported in TestAppendNext
			continue
		}
		// Create one large test per table
		str := make([]byte, 0, 4000)
		out := ColElems{}
		for len(str) < 3000 {
			for _, chk := range tt.chk {
				str = append(str, chk.in[:chk.n]...)
				out = append(out, chk.out...)
			}
		}
		for j, chk := range append(tt.chk, check{string(str), len(str), out}) {
			out := convertFromWeights(chk.out)
			ce := c.getColElems([]byte(chk.in)[:chk.n])
			if len(ce) != len(out) {
				t.Errorf("%d:%d: len(ws) was %d; want %d", i, j, len(ce), len(out))
				continue
			}
			cnt := 0
			for k, w := range ce {
				w, _ = colltab.MakeElem(w.Primary(), w.Secondary(), int(w.Tertiary()), 0)
				if w != out[k] {
					t.Errorf("%d:%d: Weights %d was %X; want %X", i, j, k, w, out[k])
					cnt++
				}
				if cnt > 10 {
					break
				}
			}
		}
	}
}

type keyTest struct {
	in  string
	out []byte
}

var keyTests = []keyTest{
	{"abc",
		[]byte{0, 100, 0, 200, 1, 44, 0, 0, 0, 32, 0, 32, 0, 32, 0, 0, 2, 2, 2, 0, 255, 255, 255},
	},
	{"a\u0301",
		[]byte{0, 102, 0, 0, 0, 32, 0, 0, 2, 0, 255},
	},
	{"aaaaa",
		[]byte{0, 100, 0, 100, 0, 100, 0, 100, 0, 100, 0, 0,
			0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 0,
			2, 2, 2, 2, 2, 0,
			255, 255, 255, 255, 255,
		},
	},
	// Issue 16391: incomplete rune at end of UTF-8 sequence.
	{"\xc2", []byte{133, 255, 253, 0, 0, 0, 32, 0, 0, 2, 0, 255}},
	{"\xc2a", []byte{133, 255, 253, 0, 100, 0, 0, 0, 32, 0, 32, 0, 0, 2, 2, 0, 255, 255}},
}

func TestKey(t *testing.T) {
	c, _ := makeTable(appendNextTests[4].in)
	c.alternate = altShifted
	c.ignore = ignore(colltab.Quaternary)
	buf := Buffer{}
	keys1 := [][]byte{}
	keys2 := [][]byte{}
	for _, tt := range keyTests {
		keys1 = append(keys1, c.Key(&buf, []byte(tt.in)))
		keys2 = append(keys2, c.KeyFromString(&buf, tt.in))
	}
	// Separate generation from testing to ensure buffers are not overwritten.
	for i, tt := range keyTests {
		if !bytes.Equal(keys1[i], tt.out) {
			t.Errorf("%d: Key(%q) = %d; want %d", i, tt.in, keys1[i], tt.out)
		}
		if !bytes.Equal(keys2[i], tt.out) {
			t.Errorf("%d: KeyFromString(%q) = %d; want %d", i, tt.in, keys2[i], tt.out)
		}
	}
}

type compareTest struct {
	a, b string
	res  int // comparison result
}

var compareTests = []compareTest{
	{"a\u0301", "a", 1},
	{"a\u0301b", "ab", 1},
	{"a", "a\u0301", -1},
	{"ab", "a\u0301b", -1},
	{"bc", "a\u0301c", 1},
	{"ab", "aB", -1},
	{"a\u0301", "a\u0301", 0},
	{"a", "a", 0},
	// Only clip prefixes of whole runes.
	{"\u302E", "\u302F", 1},
	// Don't clip prefixes when last rune of prefix may be part of contraction.
	{"a\u035E", "a\u0301\u035F", -1},
	{"a\u0301\u035Fb", "a\u0301\u035F", -1},
}

func TestCompare(t *testing.T) {
	c, _ := makeTable(appendNextTests[4].in)
	for i, tt := range compareTests {
		if res := c.Compare([]byte(tt.a), []byte(tt.b)); res != tt.res {
			t.Errorf("%d: Compare(%q, %q) == %d; want %d", i, tt.a, tt.b, res, tt.res)
		}
		if res := c.CompareString(tt.a, tt.b); res != tt.res {
			t.Errorf("%d: CompareString(%q, %q) == %d; want %d", i, tt.a, tt.b, res, tt.res)
		}
	}
}

func TestNumeric(t *testing.T) {
	c := New(language.English, Loose, Numeric)

	for i, tt := range []struct {
		a, b string
		want int
	}{
		{"1", "2", -1},
		{"2", "12", -1},
		{"２", "１２", -1}, // Fullwidth is sorted as usual.
		{"₂", "₁₂", 1},  // Subscript is not sorted as numbers.
		{"②", "①②", 1},  // Circled is not sorted as numbers.
		{ // Imperial Aramaic is not sorted as a number.
			"\U00010859",
			"\U00010858\U00010859",
			1,
		},
		{"12", "2", 1},
		{"A-1", "A-2", -1},
		{"A-2", "A-12", -1},
		{"A-12", "A-2", 1},
		{"A-0001", "A-1", 0},
	} {
		if got := c.CompareString(tt.a, tt.b); got != tt.want {
			t.Errorf("%d: CompareString(%s, %s) = %d; want %d", i, tt.a, tt.b, got, tt.want)
		}
	}
}
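
TestKey above separates key generation from checking because the returned keys point into the shared Buffer. A minimal sketch of that contract, not part of the vendored tree (locale arbitrary):

// Usage sketch (not part of the vendored tree): keys returned by
// Key/KeyFromString point into the Buffer and, per the documentation
// of Key, remain valid only until the next call to buf.Reset().
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/text/collate"
	"golang.org/x/text/language"
)

func main() {
	c := collate.New(language.Und)
	buf := collate.Buffer{}
	k1 := c.KeyFromString(&buf, "abc")
	k2 := c.KeyFromString(&buf, "abd") // both keys remain usable: no Reset yet
	fmt.Println(bytes.Compare(k1, k2)) // -1
	buf.Reset()                        // releases the backing store; k1 and k2 are now invalid
}
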
51 vendor/golang.org/x/text/collate/export_test.go generated vendored Normal file
@@ -0,0 +1,51 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package collate

// Export for testing.
// TODO: no longer necessary. Remove at some point.

import (
	"fmt"

	"golang.org/x/text/internal/colltab"
)

const (
	defaultSecondary = 0x20
	defaultTertiary  = 0x2
)

type Weights struct {
	Primary, Secondary, Tertiary, Quaternary int
}

func W(ce ...int) Weights {
	w := Weights{ce[0], defaultSecondary, defaultTertiary, 0}
	if len(ce) > 1 {
		w.Secondary = ce[1]
	}
	if len(ce) > 2 {
		w.Tertiary = ce[2]
	}
	if len(ce) > 3 {
		w.Quaternary = ce[3]
	}
	return w
}
func (w Weights) String() string {
	return fmt.Sprintf("[%X.%X.%X.%X]", w.Primary, w.Secondary, w.Tertiary, w.Quaternary)
}

func convertFromWeights(ws []Weights) []colltab.Elem {
	out := make([]colltab.Elem, len(ws))
	for i, w := range ws {
		out[i], _ = colltab.MakeElem(w.Primary, w.Secondary, w.Tertiary, 0)
		if out[i] == colltab.Ignore && w.Quaternary > 0 {
			out[i] = colltab.MakeQuaternary(w.Quaternary)
		}
	}
	return out
}
32 vendor/golang.org/x/text/collate/index.go generated vendored Normal file
@@ -0,0 +1,32 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package collate

import "golang.org/x/text/internal/colltab"

const blockSize = 64

func getTable(t tableIndex) *colltab.Table {
	return &colltab.Table{
		Index: colltab.Trie{
			Index0:  mainLookup[:][blockSize*t.lookupOffset:],
			Values0: mainValues[:][blockSize*t.valuesOffset:],
			Index:   mainLookup[:],
			Values:  mainValues[:],
		},
		ExpandElem:     mainExpandElem[:],
		ContractTries:  colltab.ContractTrieSet(mainCTEntries[:]),
		ContractElem:   mainContractElem[:],
		MaxContractLen: 18,
		VariableTop:    varTop,
	}
}

// tableIndex holds information for constructing a table
// for a certain locale based on the main table.
type tableIndex struct {
	lookupOffset uint32
	valuesOffset uint32
}
553 vendor/golang.org/x/text/collate/maketables.go generated vendored Normal file
@@ -0,0 +1,553 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

// Collation table generator.
// Data read from the web.

package main

import (
	"archive/zip"
	"bufio"
	"bytes"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"unicode/utf8"

	"golang.org/x/text/collate"
	"golang.org/x/text/collate/build"
	"golang.org/x/text/internal/colltab"
	"golang.org/x/text/internal/gen"
	"golang.org/x/text/language"
	"golang.org/x/text/unicode/cldr"
)

var (
	test = flag.Bool("test", false,
		"test existing tables; can be used to compare web data with package data.")
	short = flag.Bool("short", false, `Use "short" alternatives, when available.`)
	draft = flag.Bool("draft", false, `Use draft versions, when available.`)
	tags  = flag.String("tags", "", "build tags to be included after +build directive")
	pkg   = flag.String("package", "collate",
		"the name of the package in which the generated file is to be included")

	tables = flagStringSetAllowAll("tables", "collate", "collate,chars",
		"comma-separated list of tables to generate.")
	exclude = flagStringSet("exclude", "zh2", "",
		"comma-separated list of languages to exclude.")
	include = flagStringSet("include", "", "",
		"comma-separated list of languages to include. Include trumps exclude.")
	// TODO: Not included: unihan gb2312han zhuyin big5han (for size reasons)
	// TODO: Not included: traditional (buggy for Bengali)
	types = flagStringSetAllowAll("types", "standard,phonebook,phonetic,reformed,pinyin,stroke", "",
		"comma-separated list of types that should be included.")
)

// stringSet implements an ordered set based on a list. It implements flag.Value
// to allow a set to be specified as a comma-separated list.
type stringSet struct {
	s        []string
	allowed  *stringSet
	dirty    bool // needs compaction if true
	all      bool
	allowAll bool
}

func flagStringSet(name, def, allowed, usage string) *stringSet {
	ss := &stringSet{}
	if allowed != "" {
		usage += fmt.Sprintf(" (allowed values: any of %s)", allowed)
		ss.allowed = &stringSet{}
		failOnError(ss.allowed.Set(allowed))
	}
	ss.Set(def)
	flag.Var(ss, name, usage)
	return ss
}

func flagStringSetAllowAll(name, def, allowed, usage string) *stringSet {
	ss := &stringSet{allowAll: true}
	if allowed == "" {
		flag.Var(ss, name, usage+fmt.Sprintf(` Use "all" to select all.`))
	} else {
		ss.allowed = &stringSet{}
		failOnError(ss.allowed.Set(allowed))
		flag.Var(ss, name, usage+fmt.Sprintf(` (allowed values: "all" or any of %s)`, allowed))
	}
	ss.Set(def)
	return ss
}

func (ss stringSet) Len() int {
	return len(ss.s)
}

func (ss stringSet) String() string {
	return strings.Join(ss.s, ",")
}

func (ss *stringSet) Set(s string) error {
	if ss.allowAll && s == "all" {
		ss.s = nil
		ss.all = true
		return nil
	}
	ss.s = ss.s[:0]
	for _, s := range strings.Split(s, ",") {
		if s := strings.TrimSpace(s); s != "" {
			if ss.allowed != nil && !ss.allowed.contains(s) {
				return fmt.Errorf("unsupported value %q; must be one of %s", s, ss.allowed)
			}
			ss.add(s)
		}
	}
	ss.compact()
	return nil
}

func (ss *stringSet) add(s string) {
	ss.s = append(ss.s, s)
	ss.dirty = true
}

func (ss *stringSet) values() []string {
	ss.compact()
	return ss.s
}

func (ss *stringSet) contains(s string) bool {
	if ss.all {
		return true
	}
	for _, v := range ss.s {
		if v == s {
			return true
		}
	}
	return false
}

func (ss *stringSet) compact() {
	if !ss.dirty {
		return
	}
	a := ss.s
	sort.Strings(a)
	k := 0
	for i := 1; i < len(a); i++ {
		if a[k] != a[i] {
			a[k+1] = a[i]
			k++
		}
	}
	ss.s = a[:k+1]
	ss.dirty = false
}

func skipLang(l string) bool {
	if include.Len() > 0 {
		return !include.contains(l)
	}
	return exclude.contains(l)
}

// altInclude returns a list of alternatives (for the LDML alt attribute)
// in order of preference. An empty string in this list indicates the
// default entry.
func altInclude() []string {
	l := []string{}
	if *short {
		l = append(l, "short")
	}
	l = append(l, "")
	// TODO: handle draft using cldr.SetDraftLevel
	if *draft {
		l = append(l, "proposed")
	}
	return l
}

func failOnError(e error) {
	if e != nil {
		log.Panic(e)
	}
}

func openArchive() *zip.Reader {
	f := gen.OpenCLDRCoreZip()
	buffer, err := ioutil.ReadAll(f)
	f.Close()
	failOnError(err)
	archive, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer)))
	failOnError(err)
	return archive
}

// parseUCA parses a Default Unicode Collation Element Table of the format
// specified in http://www.unicode.org/reports/tr10/#File_Format.
// It returns the variable top.
func parseUCA(builder *build.Builder) {
	var r io.ReadCloser
	var err error
	for _, f := range openArchive().File {
		if strings.HasSuffix(f.Name, "allkeys_CLDR.txt") {
			r, err = f.Open()
		}
	}
	if r == nil {
		log.Fatal("File allkeys_CLDR.txt not found in archive.")
	}
	failOnError(err)
	defer r.Close()
	scanner := bufio.NewScanner(r)
	colelem := regexp.MustCompile(`\[([.*])([0-9A-F.]+)\]`)
	for i := 1; scanner.Scan(); i++ {
		line := scanner.Text()
		if len(line) == 0 || line[0] == '#' {
			continue
		}
		if line[0] == '@' {
			// parse properties
			switch {
			case strings.HasPrefix(line[1:], "version "):
				a := strings.Split(line[1:], " ")
				if a[1] != gen.UnicodeVersion() {
					log.Fatalf("incompatible version %s; want %s", a[1], gen.UnicodeVersion())
				}
			case strings.HasPrefix(line[1:], "backwards "):
				log.Fatalf("%d: unsupported option backwards", i)
			default:
				log.Printf("%d: unknown option %s", i, line[1:])
			}
		} else {
			// parse entries
			part := strings.Split(line, " ; ")
			if len(part) != 2 {
				log.Fatalf("%d: production rule without ';': %v", i, line)
			}
			lhs := []rune{}
			for _, v := range strings.Split(part[0], " ") {
				if v == "" {
					continue
				}
				lhs = append(lhs, rune(convHex(i, v)))
			}
			var n int
			var vars []int
			rhs := [][]int{}
			for i, m := range colelem.FindAllStringSubmatch(part[1], -1) {
				n += len(m[0])
				elem := []int{}
				for _, h := range strings.Split(m[2], ".") {
					elem = append(elem, convHex(i, h))
				}
				if m[1] == "*" {
					vars = append(vars, i)
				}
				rhs = append(rhs, elem)
			}
			if len(part[1]) < n+3 || part[1][n+1] != '#' {
				log.Fatalf("%d: expected comment; found %s", i, part[1][n:])
			}
			if *test {
				testInput.add(string(lhs))
			}
			failOnError(builder.Add(lhs, rhs, vars))
		}
	}
	if scanner.Err() != nil {
		log.Fatal(scanner.Err())
	}
}

func convHex(line int, s string) int {
	r, e := strconv.ParseInt(s, 16, 32)
	if e != nil {
		log.Fatalf("%d: %v", line, e)
	}
	return int(r)
}

var testInput = stringSet{}

var charRe = regexp.MustCompile(`&#x([0-9A-F]*);`)
var tagRe = regexp.MustCompile(`<([a-z_]*) */>`)

var mainLocales = []string{}

// charSets holds a list of exemplar characters per category.
type charSets map[string][]string

func (p charSets) fprint(w io.Writer) {
	fmt.Fprintln(w, "[exN]string{")
	for i, k := range []string{"", "contractions", "punctuation", "auxiliary", "currencySymbol", "index"} {
		if set := p[k]; len(set) != 0 {
			fmt.Fprintf(w, "\t\t%d: %q,\n", i, strings.Join(set, " "))
		}
	}
	fmt.Fprintln(w, "\t},")
}

var localeChars = make(map[string]charSets)

const exemplarHeader = `
type exemplarType int
const (
	exCharacters exemplarType = iota
	exContractions
	exPunctuation
	exAuxiliary
	exCurrency
	exIndex
	exN
)
`

func printExemplarCharacters(w io.Writer) {
	fmt.Fprintln(w, exemplarHeader)
	fmt.Fprintln(w, "var exemplarCharacters = map[string][exN]string{")
	for _, loc := range mainLocales {
		fmt.Fprintf(w, "\t%q: ", loc)
		localeChars[loc].fprint(w)
	}
	fmt.Fprintln(w, "}")
}

func decodeCLDR(d *cldr.Decoder) *cldr.CLDR {
	r := gen.OpenCLDRCoreZip()
	data, err := d.DecodeZip(r)
	failOnError(err)
	return data
}

// parseMain parses XML files in the main directory of the CLDR core.zip file.
func parseMain() {
	d := &cldr.Decoder{}
	d.SetDirFilter("main")
	d.SetSectionFilter("characters")
	data := decodeCLDR(d)
	for _, loc := range data.Locales() {
		x := data.RawLDML(loc)
		if skipLang(x.Identity.Language.Type) {
			continue
		}
		if x.Characters != nil {
			x, _ = data.LDML(loc)
			loc = language.Make(loc).String()
			for _, ec := range x.Characters.ExemplarCharacters {
				if ec.Draft != "" {
					continue
				}
				if _, ok := localeChars[loc]; !ok {
					mainLocales = append(mainLocales, loc)
					localeChars[loc] = make(charSets)
				}
				localeChars[loc][ec.Type] = parseCharacters(ec.Data())
			}
		}
	}
}

func parseCharacters(chars string) []string {
	parseSingle := func(s string) (r rune, tail string, escaped bool) {
		if s[0] == '\\' {
			return rune(s[1]), s[2:], true
		}
		r, sz := utf8.DecodeRuneInString(s)
		return r, s[sz:], false
	}
	chars = strings.TrimSpace(chars)
	if n := len(chars) - 1; chars[n] == ']' && chars[0] == '[' {
		chars = chars[1:n]
	}
	list := []string{}
	var r, last, end rune
	for len(chars) > 0 {
		if chars[0] == '{' { // character sequence
			buf := []rune{}
			for chars = chars[1:]; len(chars) > 0; {
				r, chars, _ = parseSingle(chars)
				if r == '}' {
					break
				}
				if r == ' ' {
					log.Fatalf("space not supported in sequence %q", chars)
				}
				buf = append(buf, r)
			}
			list = append(list, string(buf))
			last = 0
		} else { // single character
			escaped := false
			r, chars, escaped = parseSingle(chars)
			if r != ' ' {
				if r == '-' && !escaped {
					if last == 0 {
						log.Fatal("'-' should be preceded by a character")
					}
					end, chars, _ = parseSingle(chars)
					for ; last <= end; last++ {
						list = append(list, string(last))
					}
					last = 0
				} else {
					list = append(list, string(r))
					last = r
				}
			}
		}
	}
	return list
}

var fileRe = regexp.MustCompile(`.*/collation/(.*)\.xml`)

// typeMap translates legacy type keys to their BCP47 equivalent.
var typeMap = map[string]string{
	"phonebook":   "phonebk",
	"traditional": "trad",
}

// parseCollation parses XML files in the collation directory of the CLDR core.zip file.
func parseCollation(b *build.Builder) {
	d := &cldr.Decoder{}
	d.SetDirFilter("collation")
	data := decodeCLDR(d)
	for _, loc := range data.Locales() {
		x, err := data.LDML(loc)
		failOnError(err)
		if skipLang(x.Identity.Language.Type) {
			continue
		}
		cs := x.Collations.Collation
		sl := cldr.MakeSlice(&cs)
		if len(types.s) == 0 {
			sl.SelectAnyOf("type", x.Collations.Default())
		} else if !types.all {
			sl.SelectAnyOf("type", types.s...)
		}
		sl.SelectOnePerGroup("alt", altInclude())

		for _, c := range cs {
			id, err := language.Parse(loc)
			if err != nil {
				fmt.Fprintf(os.Stderr, "invalid locale: %q", err)
				continue
			}
			// Support both old- and new-style defaults.
			d := c.Type
			if x.Collations.DefaultCollation == nil {
				d = x.Collations.Default()
			} else {
				d = x.Collations.DefaultCollation.Data()
			}
			// We assume tables are being built either for search or collation,
			// but not both. For search the default is always "search".
			if d != c.Type && c.Type != "search" {
				typ := c.Type
				if len(c.Type) > 8 {
					typ = typeMap[c.Type]
				}
				id, err = id.SetTypeForKey("co", typ)
				failOnError(err)
			}
			t := b.Tailoring(id)
			c.Process(processor{t})
		}
	}
}

type processor struct {
	t *build.Tailoring
}

func (p processor) Reset(anchor string, before int) (err error) {
	if before != 0 {
		err = p.t.SetAnchorBefore(anchor)
	} else {
		err = p.t.SetAnchor(anchor)
	}
	failOnError(err)
	return nil
}

func (p processor) Insert(level int, str, context, extend string) error {
	str = context + str
	if *test {
		testInput.add(str)
	}
	// TODO: mimic bug in old maketables: remove.
	err := p.t.Insert(colltab.Level(level-1), str, context+extend)
	failOnError(err)
	return nil
}

func (p processor) Index(id string) {
}

func testCollator(c *collate.Collator) {
	c0 := collate.New(language.Und)

	// Iterate over all characters for all locales and check
	// whether Key is equal.
	buf := collate.Buffer{}

	// Add all common and not too uncommon runes to the test set.
	for i := rune(0); i < 0x30000; i++ {
		testInput.add(string(i))
	}
	for i := rune(0xE0000); i < 0xF0000; i++ {
		testInput.add(string(i))
	}
	for _, str := range testInput.values() {
		k0 := c0.KeyFromString(&buf, str)
		k := c.KeyFromString(&buf, str)
		if !bytes.Equal(k0, k) {
			failOnError(fmt.Errorf("test:%U: keys differ (%x vs %x)", []rune(str), k0, k))
		}
		buf.Reset()
	}
	fmt.Println("PASS")
}

func main() {
	gen.Init()
	b := build.NewBuilder()
	parseUCA(b)
	if tables.contains("chars") {
		parseMain()
	}
	parseCollation(b)

	c, err := b.Build()
	failOnError(err)

	if *test {
		testCollator(collate.NewFromTable(c))
	} else {
		w := &bytes.Buffer{}

		gen.WriteUnicodeVersion(w)
		gen.WriteCLDRVersion(w)

		if tables.contains("collate") {
			_, err = b.Print(w)
			failOnError(err)
		}
		if tables.contains("chars") {
			printExemplarCharacters(w)
		}
		gen.WriteGoFile("tables.go", *pkg, w.Bytes())
	}
}
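
The stringSet type above doubles as a flag.Value so sets can be passed on the command line as comma-separated lists. A reduced standalone sketch of the same pattern, not part of the vendored tree (the flag name and program are illustrative only):

// Standalone sketch (not part of the vendored tree): the flag.Value
// pattern used by stringSet, reduced to its essentials.
package main

import (
	"flag"
	"fmt"
	"strings"
)

// csv collects a comma-separated flag value into a slice.
type csv []string

func (c *csv) String() string { return strings.Join(*c, ",") }

// Set splits the raw flag value on commas, trimming blanks.
func (c *csv) Set(s string) error {
	*c = nil
	for _, v := range strings.Split(s, ",") {
		if v = strings.TrimSpace(v); v != "" {
			*c = append(*c, v)
		}
	}
	return nil
}

func main() {
	var tables csv
	flag.Var(&tables, "tables", "comma-separated list of tables to generate")
	flag.Parse()
	fmt.Println(tables)
}
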
239 vendor/golang.org/x/text/collate/option.go generated vendored Normal file
@@ -0,0 +1,239 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package collate

import (
	"sort"

	"golang.org/x/text/internal/colltab"
	"golang.org/x/text/language"
	"golang.org/x/text/unicode/norm"
)

// newCollator creates a new collator with default options configured.
func newCollator(t colltab.Weighter) *Collator {
	// Initialize a collator with default options.
	c := &Collator{
		options: options{
			ignore: [colltab.NumLevels]bool{
				colltab.Quaternary: true,
				colltab.Identity:   true,
			},
			f: norm.NFD,
			t: t,
		},
	}

	// TODO: store vt in tags or remove.
	c.variableTop = t.Top()

	return c
}

// An Option is used to change the behavior of a Collator. Options override the
// settings passed through the locale identifier.
type Option struct {
	priority int
	f        func(o *options)
}

type prioritizedOptions []Option

func (p prioritizedOptions) Len() int {
	return len(p)
}

func (p prioritizedOptions) Swap(i, j int) {
	p[i], p[j] = p[j], p[i]
}

func (p prioritizedOptions) Less(i, j int) bool {
	return p[i].priority < p[j].priority
}

type options struct {
	// ignore specifies which levels to ignore.
	ignore [colltab.NumLevels]bool

	// caseLevel is true if there is an additional level of case matching
	// between the secondary and tertiary levels.
	caseLevel bool

	// backwards specifies the order of sorting at the secondary level.
	// This option exists predominantly to support reverse sorting of accents in French.
	backwards bool

	// numeric specifies whether any sequence of decimal digits (category is Nd)
	// is sorted at a primary level with its numeric value.
	// For example, "A-21" < "A-123".
	// This option is set by wrapping the main Weighter with NewNumericWeighter.
	numeric bool

	// alternate specifies an alternative handling of variables.
	alternate alternateHandling

	// variableTop is the largest primary value that is considered to be
	// variable.
	variableTop uint32

	t colltab.Weighter

	f norm.Form
}

func (o *options) setOptions(opts []Option) {
	sort.Sort(prioritizedOptions(opts))
	for _, x := range opts {
		x.f(o)
	}
}

// OptionsFromTag extracts the BCP47 collation options from the tag and
// configures a collator accordingly. These options are set before any other
// option.
func OptionsFromTag(t language.Tag) Option {
	return Option{0, func(o *options) {
		o.setFromTag(t)
	}}
}

func (o *options) setFromTag(t language.Tag) {
	o.caseLevel = ldmlBool(t, o.caseLevel, "kc")
	o.backwards = ldmlBool(t, o.backwards, "kb")
	o.numeric = ldmlBool(t, o.numeric, "kn")

	// Extract settings from the BCP47 u extension.
	switch t.TypeForKey("ks") { // strength
	case "level1":
		o.ignore[colltab.Secondary] = true
		o.ignore[colltab.Tertiary] = true
	case "level2":
		o.ignore[colltab.Tertiary] = true
	case "level3", "":
		// The default.
	case "level4":
		o.ignore[colltab.Quaternary] = false
	case "identic":
		o.ignore[colltab.Quaternary] = false
		o.ignore[colltab.Identity] = false
	}

	switch t.TypeForKey("ka") {
	case "shifted":
		o.alternate = altShifted
	// The following two types are not official BCP47, but we support them to
	// give access to this otherwise hidden functionality. The name blanked is
	// derived from the LDML name blanked and posix reflects the main use of
	// the shift-trimmed option.
	case "blanked":
		o.alternate = altBlanked
	case "posix":
		o.alternate = altShiftTrimmed
	}

	// TODO: caseFirst ("kf"), reorder ("kr"), and maybe variableTop ("vt").

	// Not used:
	// - normalization ("kk", not necessary for this implementation)
	// - hiraganaQuaternary ("kh", obsolete)
}

func ldmlBool(t language.Tag, old bool, key string) bool {
	switch t.TypeForKey(key) {
	case "true":
		return true
	case "false":
		return false
	default:
		return old
	}
}

var (
	// IgnoreCase sets case-insensitive comparison.
	IgnoreCase Option = ignoreCase
	ignoreCase        = Option{3, ignoreCaseF}

	// IgnoreDiacritics causes diacritical marks to be ignored. ("o" == "ö").
	IgnoreDiacritics Option = ignoreDiacritics
	ignoreDiacritics        = Option{3, ignoreDiacriticsF}

	// IgnoreWidth causes full-width characters to match their half-width
	// equivalents.
	IgnoreWidth Option = ignoreWidth
	ignoreWidth        = Option{2, ignoreWidthF}

	// Loose sets the collator to ignore diacritics, case and weight.
	Loose Option = loose
	loose        = Option{4, looseF}

	// Force ordering if strings are equivalent but not equal.
	Force Option = force
	force        = Option{5, forceF}

	// Numeric specifies that numbers should sort numerically ("2" < "12").
	Numeric Option = numeric
	numeric        = Option{5, numericF}
)

func ignoreWidthF(o *options) {
	o.ignore[colltab.Tertiary] = true
	o.caseLevel = true
}

func ignoreDiacriticsF(o *options) {
	o.ignore[colltab.Secondary] = true
}

func ignoreCaseF(o *options) {
	o.ignore[colltab.Tertiary] = true
	o.caseLevel = false
}

func looseF(o *options) {
	ignoreWidthF(o)
	ignoreDiacriticsF(o)
	ignoreCaseF(o)
}

func forceF(o *options) {
	o.ignore[colltab.Identity] = false
}

func numericF(o *options) { o.numeric = true }

// Reorder overrides the pre-defined ordering of scripts and character sets.
func Reorder(s ...string) Option {
	// TODO: need fractional weights to implement this.
	panic("TODO: implement")
}

// TODO: consider making these public again. These options cannot be fully
// specified in BCP47, so an API interface seems warranted. Still a higher-level
// interface would be nice (e.g. a POSIX option for enabling altShiftTrimmed)

// alternateHandling identifies the various ways in which variables are handled.
// A rune with a primary weight lower than the variable top is considered a
// variable.
// See http://www.unicode.org/reports/tr10/#Variable_Weighting for details.
type alternateHandling int

const (
	// altNonIgnorable turns off special handling of variables.
	altNonIgnorable alternateHandling = iota

	// altBlanked sets variables and all subsequent primary ignorables to be
	// ignorable at all levels. This is identical to removing all variables
	// and subsequent primary ignorables from the input.
	altBlanked

	// altShifted sets variables to be ignorable for levels one through three and
	// adds a fourth level based on the values of the ignored levels.
	altShifted

	// altShiftTrimmed is a slight variant of altShifted that is used to
	// emulate POSIX.
	altShiftTrimmed
)
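
Option priorities make explicit options win over tag-derived settings: setOptions sorts by priority and applies in order, and OptionsFromTag has priority 0 (see also test case 16 in option_test.go below). A small sketch against the public API, not part of the vendored tree:

// Usage sketch (not part of the vendored tree): the explicit Numeric
// option carries a higher priority than OptionsFromTag, so it is applied
// last and overrides the kn-false setting carried by the tag.
package main

import (
	"fmt"

	"golang.org/x/text/collate"
	"golang.org/x/text/language"
)

func main() {
	tag := language.MustParse("en-u-kn-false")
	c := collate.New(tag, collate.Numeric, collate.IgnoreCase)
	fmt.Println(c.CompareString("2", "12"))    // -1: numeric ordering wins
	fmt.Println(c.CompareString("abc", "ABC")) // 0: case is ignored
}
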
209 vendor/golang.org/x/text/collate/option_test.go generated vendored Normal file
@@ -0,0 +1,209 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package collate

import (
	"reflect"
	"strings"
	"testing"

	"golang.org/x/text/internal/colltab"
	"golang.org/x/text/language"
)

var (
	defaultIgnore = ignore(colltab.Tertiary)
	defaultTable  = getTable(locales[0])
)

func TestOptions(t *testing.T) {
	for i, tt := range []struct {
		in  []Option
		out options
	}{
		0: {
			out: options{
				ignore: defaultIgnore,
			},
		},
		1: {
			in: []Option{IgnoreDiacritics},
			out: options{
				ignore: [colltab.NumLevels]bool{false, true, false, true, true},
			},
		},
		2: {
			in: []Option{IgnoreCase, IgnoreDiacritics},
			out: options{
				ignore: ignore(colltab.Primary),
			},
		},
		3: {
			in: []Option{ignoreDiacritics, IgnoreWidth},
			out: options{
				ignore:    ignore(colltab.Primary),
				caseLevel: true,
			},
		},
		4: {
			in: []Option{IgnoreWidth, ignoreDiacritics},
			out: options{
				ignore:    ignore(colltab.Primary),
				caseLevel: true,
			},
		},
		5: {
			in: []Option{IgnoreCase, IgnoreWidth},
			out: options{
				ignore: ignore(colltab.Secondary),
			},
		},
		6: {
			in: []Option{IgnoreCase, IgnoreWidth, Loose},
			out: options{
				ignore: ignore(colltab.Primary),
			},
		},
		7: {
			in: []Option{Force, IgnoreCase, IgnoreWidth, Loose},
			out: options{
				ignore: [colltab.NumLevels]bool{false, true, true, true, false},
			},
		},
		8: {
			in: []Option{IgnoreDiacritics, IgnoreCase},
			out: options{
				ignore: ignore(colltab.Primary),
			},
		},
		9: {
			in: []Option{Numeric},
			out: options{
				ignore:  defaultIgnore,
				numeric: true,
			},
		},
		10: {
			in: []Option{OptionsFromTag(language.MustParse("und-u-ks-level1"))},
			out: options{
				ignore: ignore(colltab.Primary),
			},
		},
		11: {
			in: []Option{OptionsFromTag(language.MustParse("und-u-ks-level4"))},
			out: options{
				ignore: ignore(colltab.Quaternary),
			},
		},
		12: {
			in:  []Option{OptionsFromTag(language.MustParse("und-u-ks-identic"))},
			out: options{},
		},
		13: {
			in: []Option{
				OptionsFromTag(language.MustParse("und-u-kn-true-kb-true-kc-true")),
			},
			out: options{
				ignore:    defaultIgnore,
				caseLevel: true,
				backwards: true,
				numeric:   true,
			},
		},
		14: {
			in: []Option{
				OptionsFromTag(language.MustParse("und-u-kn-true-kb-true-kc-true")),
				OptionsFromTag(language.MustParse("und-u-kn-false-kb-false-kc-false")),
			},
			out: options{
				ignore: defaultIgnore,
			},
		},
		15: {
			in: []Option{
				OptionsFromTag(language.MustParse("und-u-kn-true-kb-true-kc-true")),
				OptionsFromTag(language.MustParse("und-u-kn-foo-kb-foo-kc-foo")),
			},
			out: options{
				ignore:    defaultIgnore,
				caseLevel: true,
				backwards: true,
				numeric:   true,
			},
		},
		16: { // Normal options take precedence over tag options.
			in: []Option{
				Numeric, IgnoreCase,
				OptionsFromTag(language.MustParse("und-u-kn-false-kc-true")),
			},
			out: options{
				ignore:    ignore(colltab.Secondary),
				caseLevel: false,
				numeric:   true,
			},
		},
		17: {
			in: []Option{
				OptionsFromTag(language.MustParse("und-u-ka-shifted")),
			},
			out: options{
				ignore:    defaultIgnore,
				alternate: altShifted,
			},
		},
		18: {
			in: []Option{
				OptionsFromTag(language.MustParse("und-u-ka-blanked")),
			},
			out: options{
				ignore:    defaultIgnore,
				alternate: altBlanked,
			},
		},
		19: {
			in: []Option{
				OptionsFromTag(language.MustParse("und-u-ka-posix")),
			},
			out: options{
				ignore:    defaultIgnore,
				alternate: altShiftTrimmed,
			},
		},
	} {
		c := newCollator(defaultTable)
		c.t = nil
		c.variableTop = 0
		c.f = 0

		c.setOptions(tt.in)
		if !reflect.DeepEqual(c.options, tt.out) {
			t.Errorf("%d: got %v; want %v", i, c.options, tt.out)
		}
	}
}

func TestAlternateSortTypes(t *testing.T) {
	testCases := []struct {
		lang string
		in   []string
		want []string
	}{{
		lang: "zh,cmn,zh-Hant-u-co-pinyin,zh-HK-u-co-pinyin,zh-pinyin",
		in:   []string{"爸爸", "妈妈", "儿子", "女儿"},
		want: []string{"爸爸", "儿子", "妈妈", "女儿"},
	}, {
		lang: "zh-Hant,zh-u-co-stroke,zh-Hant-u-co-stroke",
		in:   []string{"爸爸", "妈妈", "儿子", "女儿"},
		want: []string{"儿子", "女儿", "妈妈", "爸爸"},
	}}
	for _, tc := range testCases {
		for _, tag := range strings.Split(tc.lang, ",") {
			got := append([]string{}, tc.in...)
			New(language.MustParse(tag)).SortStrings(got)
			if !reflect.DeepEqual(got, tc.want) {
				t.Errorf("New(%s).SortStrings(%v) = %v; want %v", tag, tc.in, got, tc.want)
			}
		}
	}
}
230 vendor/golang.org/x/text/collate/reg_test.go generated vendored Normal file
@@ -0,0 +1,230 @@
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package collate
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bufio"
|
||||
"bytes"
|
||||
"flag"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"path"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/text/collate/build"
|
||||
"golang.org/x/text/internal/gen"
|
||||
"golang.org/x/text/language"
|
||||
)
|
||||
|
||||
var long = flag.Bool("long", false,
|
||||
"run time-consuming tests, such as tests that fetch data online")
|
||||
|
||||
// This regression test runs tests for the test files in CollationTest.zip
|
||||
// (taken from http://www.unicode.org/Public/UCA/<gen.UnicodeVersion()>/).
|
||||
//
|
||||
// The test files have the following form:
|
||||
// # header
|
||||
// 0009 0021; # ('\u0009') <CHARACTER TABULATION> [| | | 0201 025E]
|
||||
// 0009 003F; # ('\u0009') <CHARACTER TABULATION> [| | | 0201 0263]
|
||||
// 000A 0021; # ('\u000A') <LINE FEED (LF)> [| | | 0202 025E]
|
||||
// 000A 003F; # ('\u000A') <LINE FEED (LF)> [| | | 0202 0263]
|
||||
//
|
||||
// The part before the semicolon is the hex representation of a sequence
|
||||
// of runes. After the hash mark is a comment. The strings
|
||||
// represented by rune sequence are in the file in sorted order, as
|
||||
// defined by the DUCET.
|
||||
|
||||
type Test struct {
|
||||
name string
|
||||
str [][]byte
|
||||
comment []string
|
||||
}
|
||||
|
||||
var versionRe = regexp.MustCompile(`# UCA Version: (.*)\n?$`)
|
||||
var testRe = regexp.MustCompile(`^([\dA-F ]+);.*# (.*)\n?$`)
|
||||
|
||||
func TestCollation(t *testing.T) {
|
||||
if !gen.IsLocal() && !*long {
|
||||
t.Skip("skipping test to prevent downloading; to run use -long or use -local to specify a local source")
|
||||
}
|
||||
t.Skip("must first update to new file format to support test")
|
||||
for _, test := range loadTestData() {
|
||||
doTest(t, test)
|
||||
}
|
||||
}
|
||||
|
||||
func Error(e error) {
|
||||
if e != nil {
|
||||
log.Fatal(e)
|
||||
}
|
||||
}

// parseUCA parses a Default Unicode Collation Element Table of the format
// specified in http://www.unicode.org/reports/tr10/#File_Format.
func parseUCA(builder *build.Builder) {
    r := gen.OpenUnicodeFile("UCA", "", "allkeys.txt")
    defer r.Close()
    input := bufio.NewReader(r)
    colelem := regexp.MustCompile(`\[([.*])([0-9A-F.]+)\]`)
    for i := 1; true; i++ {
        l, prefix, err := input.ReadLine()
        if err == io.EOF {
            break
        }
        Error(err)
        line := string(l)
        if prefix {
            log.Fatalf("%d: buffer overflow", i)
        }
        if len(line) == 0 || line[0] == '#' {
            continue
        }
        if line[0] == '@' {
            if strings.HasPrefix(line[1:], "version ") {
                if v := strings.Split(line[1:], " ")[1]; v != gen.UnicodeVersion() {
                    log.Fatalf("incompatible version %s; want %s", v, gen.UnicodeVersion())
                }
            }
        } else {
            // parse entries
            part := strings.Split(line, " ; ")
            if len(part) != 2 {
                log.Fatalf("%d: production rule without ';': %v", i, line)
            }
            lhs := []rune{}
            for _, v := range strings.Split(part[0], " ") {
                if v != "" {
                    lhs = append(lhs, rune(convHex(i, v)))
                }
            }
            vars := []int{}
            rhs := [][]int{}
            for i, m := range colelem.FindAllStringSubmatch(part[1], -1) {
                if m[1] == "*" {
                    vars = append(vars, i)
                }
                elem := []int{}
                for _, h := range strings.Split(m[2], ".") {
                    elem = append(elem, convHex(i, h))
                }
                rhs = append(rhs, elem)
            }
            builder.Add(lhs, rhs, vars)
        }
    }
}

func convHex(line int, s string) int {
    r, e := strconv.ParseInt(s, 16, 32)
    if e != nil {
        log.Fatalf("%d: %v", line, e)
    }
    return int(r)
}
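
// For illustration only (this line is hypothetical, not taken from any
// specific Unicode version), an allkeys.txt entry of the TR10 file format
// parsed above has the shape:
//
//    0041 ; [.06D9.0020.0008] # LATIN CAPITAL LETTER A
//
// The left-hand side is a sequence of code points in hex; each bracketed
// group on the right is one collation element, and a leading '*' instead
// of '.' marks the element as variable, which is what the vars slice
// records.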

func loadTestData() []Test {
    f := gen.OpenUnicodeFile("UCA", "", "CollationTest.zip")
    buffer, err := ioutil.ReadAll(f)
    f.Close()
    Error(err)
    archive, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer)))
    Error(err)
    tests := []Test{}
    for _, f := range archive.File {
        // Skip the short versions, which are simply duplicates of the long versions.
        if strings.Contains(f.Name, "SHORT") || f.FileInfo().IsDir() {
            continue
        }
        ff, err := f.Open()
        Error(err)
        defer ff.Close()
        scanner := bufio.NewScanner(ff)
        test := Test{name: path.Base(f.Name)}
        for scanner.Scan() {
            line := scanner.Text()
            if len(line) <= 1 || line[0] == '#' {
                if m := versionRe.FindStringSubmatch(line); m != nil {
                    if m[1] != gen.UnicodeVersion() {
                        log.Printf("warning:%s: version is %s; want %s", f.Name, m[1], gen.UnicodeVersion())
                    }
                }
                continue
            }
            m := testRe.FindStringSubmatch(line)
            if m == nil || len(m) < 3 {
                log.Fatalf(`Failed to parse: "%s" result: %#v`, line, m)
            }
            str := []byte{}
            // In the regression test data (unpaired) surrogates are assigned a weight
            // corresponding to their code point value. However, utf8.DecodeRune,
            // which is used to compute the implicit weight, assigns FFFD to surrogates.
            // We therefore skip tests with surrogates. This skips about 35 entries
            // per test.
            valid := true
            for _, split := range strings.Split(m[1], " ") {
                r, err := strconv.ParseUint(split, 16, 64)
                Error(err)
                valid = valid && utf8.ValidRune(rune(r))
                str = append(str, string(rune(r))...)
            }
            if valid {
                test.str = append(test.str, str)
                test.comment = append(test.comment, m[2])
            }
        }
        if scanner.Err() != nil {
            log.Fatal(scanner.Err())
        }
        tests = append(tests, test)
    }
    return tests
}

var errorCount int

func runes(b []byte) []rune {
    return []rune(string(b))
}

var shifted = language.MustParse("und-u-ka-shifted-ks-level4")

func doTest(t *testing.T, tc Test) {
    bld := build.NewBuilder()
    parseUCA(bld)
    w, err := bld.Build()
    Error(err)
    var tag language.Tag
    if !strings.Contains(tc.name, "NON_IGNOR") {
        tag = shifted
    }
    c := NewFromTable(w, OptionsFromTag(tag))
    b := &Buffer{}
    prev := tc.str[0]
    for i := 1; i < len(tc.str); i++ {
        b.Reset()
        s := tc.str[i]
        ka := c.Key(b, prev)
        kb := c.Key(b, s)
        if r := bytes.Compare(ka, kb); r == 1 {
            t.Errorf("%s:%d: Key(%.4X) < Key(%.4X) (%X < %X) == %d; want -1 or 0", tc.name, i, []rune(string(prev)), []rune(string(s)), ka, kb, r)
            prev = s
            continue
        }
        if r := c.Compare(prev, s); r == 1 {
            t.Errorf("%s:%d: Compare(%.4X, %.4X) == %d; want -1 or 0", tc.name, i, runes(prev), runes(s), r)
        }
        if r := c.Compare(s, prev); r == -1 {
            t.Errorf("%s:%d: Compare(%.4X, %.4X) == %d; want 1 or 0", tc.name, i, runes(s), runes(prev), r)
        }
        prev = s
    }
}
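
The key-ordering property doTest exercises also holds for the exported API:
strings that are in collation order yield keys that compare less than or
equal under bytes.Compare. A minimal sketch using the public collate package
(the concrete strings are illustrative):

    package main

    import (
        "bytes"
        "fmt"

        "golang.org/x/text/collate"
        "golang.org/x/text/language"
    )

    func main() {
        c := collate.New(language.Und)
        buf := &collate.Buffer{}
        ka := c.KeyFromString(buf, "ab")
        kb := c.KeyFromString(buf, "ac")
        // Keys of strings in collation order compare <= 0.
        fmt.Println(bytes.Compare(ka, kb) <= 0) // true
    }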
81
vendor/golang.org/x/text/collate/sort.go
generated
vendored
Normal file
@@ -0,0 +1,81 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package collate

import (
    "bytes"
    "sort"
)

const (
    maxSortBuffer  = 40960
    maxSortEntries = 4096
)

type swapper interface {
    Swap(i, j int)
}

type sorter struct {
    buf  *Buffer
    keys [][]byte
    src  swapper
}

func (s *sorter) init(n int) {
    if s.buf == nil {
        s.buf = &Buffer{}
        s.buf.init()
    }
    if cap(s.keys) < n {
        s.keys = make([][]byte, n)
    }
    s.keys = s.keys[0:n]
}

func (s *sorter) sort(src swapper) {
    s.src = src
    sort.Sort(s)
}

func (s sorter) Len() int {
    return len(s.keys)
}

func (s sorter) Less(i, j int) bool {
    return bytes.Compare(s.keys[i], s.keys[j]) == -1
}

func (s sorter) Swap(i, j int) {
    s.keys[i], s.keys[j] = s.keys[j], s.keys[i]
    s.src.Swap(i, j)
}

// A Lister can be sorted by Collator's Sort method.
type Lister interface {
    Len() int
    Swap(i, j int)
    // Bytes returns the bytes of the text at index i.
    Bytes(i int) []byte
}

// Sort uses sort.Sort to sort the strings represented by x using the rules of c.
func (c *Collator) Sort(x Lister) {
    n := x.Len()
    c.sorter.init(n)
    for i := 0; i < n; i++ {
        c.sorter.keys[i] = c.Key(c.sorter.buf, x.Bytes(i))
    }
    c.sorter.sort(x)
}

// SortStrings uses sort.Sort to sort the strings in x using the rules of c.
func (c *Collator) SortStrings(x []string) {
    c.sorter.init(len(x))
    for i, s := range x {
        c.sorter.keys[i] = c.KeyFromString(c.sorter.buf, s)
    }
    c.sorter.sort(sort.StringSlice(x))
}
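
Sort computes one collation key per element up front and then delegates the
actual permutation to sort.Sort, so a caller only needs to provide Len, Swap,
and Bytes. A hedged sketch of a custom Lister over a struct slice (the type
and field names are illustrative, not part of the package):

    // profiles sorts a slice of records by their Name field under a Collator c.
    type profiles []struct{ Name string }

    func (p profiles) Len() int           { return len(p) }
    func (p profiles) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
    func (p profiles) Bytes(i int) []byte { return []byte(p[i].Name) }

    // c.Sort(profiles{...}) then orders the records by collated Name.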
55
vendor/golang.org/x/text/collate/sort_test.go
generated
vendored
Normal file
@@ -0,0 +1,55 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package collate_test

import (
    "fmt"
    "testing"

    "golang.org/x/text/collate"
    "golang.org/x/text/language"
)

func ExampleCollator_Strings() {
    c := collate.New(language.Und)
    strings := []string{
        "ad",
        "ab",
        "äb",
        "ac",
    }
    c.SortStrings(strings)
    fmt.Println(strings)
    // Output: [ab äb ac ad]
}

type sorter []string

func (s sorter) Len() int {
    return len(s)
}

func (s sorter) Swap(i, j int) {
    s[j], s[i] = s[i], s[j]
}

func (s sorter) Bytes(i int) []byte {
    return []byte(s[i])
}

func TestSort(t *testing.T) {
    c := collate.New(language.English)
    strings := []string{
        "bcd",
        "abc",
        "ddd",
    }
    c.Sort(sorter(strings))
    res := fmt.Sprint(strings)
    want := "[abc bcd ddd]"
    if res != want {
        t.Errorf("found %s; want %s", res, want)
    }
}
291
vendor/golang.org/x/text/collate/table_test.go
generated
vendored
Normal file
@@ -0,0 +1,291 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package collate

import (
    "testing"

    "golang.org/x/text/collate/build"
    "golang.org/x/text/internal/colltab"
    "golang.org/x/text/unicode/norm"
)

type ColElems []Weights

type input struct {
    str string
    ces [][]int
}

type check struct {
    in  string
    n   int
    out ColElems
}

type tableTest struct {
    in  []input
    chk []check
}

func w(ce ...int) Weights {
    return W(ce...)
}

var defaults = w(0)

func pt(p, t int) []int {
    return []int{p, defaults.Secondary, t}
}

func makeTable(in []input) (*Collator, error) {
    b := build.NewBuilder()
    for _, r := range in {
        if e := b.Add([]rune(r.str), r.ces, nil); e != nil {
            panic(e)
        }
    }
    t, err := b.Build()
    if err != nil {
        return nil, err
    }
    return NewFromTable(t), nil
}

// modSeq holds a sequence of modifiers in increasing order of CCC long enough
// to cause a segment overflow if not handled correctly. The last rune in this
// list has a CCC of 214.
var modSeq = []rune{
    0x05B1, 0x05B2, 0x05B3, 0x05B4, 0x05B5, 0x05B6, 0x05B7, 0x05B8, 0x05B9, 0x05BB,
    0x05BC, 0x05BD, 0x05BF, 0x05C1, 0x05C2, 0xFB1E, 0x064B, 0x064C, 0x064D, 0x064E,
    0x064F, 0x0650, 0x0651, 0x0652, 0x0670, 0x0711, 0x0C55, 0x0C56, 0x0E38, 0x0E48,
    0x0EB8, 0x0EC8, 0x0F71, 0x0F72, 0x0F74, 0x0321, 0x1DCE,
}

var mods []input
var modW = func() ColElems {
    ws := ColElems{}
    for _, r := range modSeq {
        rune := norm.NFC.PropertiesString(string(r))
        ws = append(ws, w(0, int(rune.CCC())))
        mods = append(mods, input{string(r), [][]int{{0, int(rune.CCC())}}})
    }
    return ws
}()

var appendNextTests = []tableTest{
    { // test getWeights
        []input{
            {"a", [][]int{{100}}},
            {"b", [][]int{{105}}},
            {"c", [][]int{{110}}},
            {"ß", [][]int{{120}}},
        },
        []check{
            {"a", 1, ColElems{w(100)}},
            {"b", 1, ColElems{w(105)}},
            {"c", 1, ColElems{w(110)}},
            {"d", 1, ColElems{w(0x50064)}},
            {"ab", 1, ColElems{w(100)}},
            {"bc", 1, ColElems{w(105)}},
            {"dd", 1, ColElems{w(0x50064)}},
            {"ß", 2, ColElems{w(120)}},
        },
    },
    { // test expansion
        []input{
            {"u", [][]int{{100}}},
            {"U", [][]int{{100}, {0, 25}}},
            {"w", [][]int{{100}, {100}}},
            {"W", [][]int{{100}, {0, 25}, {100}, {0, 25}}},
        },
        []check{
            {"u", 1, ColElems{w(100)}},
            {"U", 1, ColElems{w(100), w(0, 25)}},
            {"w", 1, ColElems{w(100), w(100)}},
            {"W", 1, ColElems{w(100), w(0, 25), w(100), w(0, 25)}},
        },
    },
    { // test decompose
        []input{
            {"D", [][]int{pt(104, 8)}},
            {"z", [][]int{pt(130, 8)}},
            {"\u030C", [][]int{{0, 40}}},                               // Caron
            {"\u01C5", [][]int{pt(104, 9), pt(130, 4), {0, 40, 0x1F}}}, // Dž = D+z+caron
        },
        []check{
            {"\u01C5", 2, ColElems{w(pt(104, 9)...), w(pt(130, 4)...), w(0, 40, 0x1F)}},
        },
    },
    { // test basic contraction
        []input{
            {"a", [][]int{{100}}},
            {"ab", [][]int{{101}}},
            {"aab", [][]int{{101}, {101}}},
            {"abc", [][]int{{102}}},
            {"b", [][]int{{200}}},
            {"c", [][]int{{300}}},
            {"d", [][]int{{400}}},
        },
        []check{
            {"a", 1, ColElems{w(100)}},
            {"aa", 1, ColElems{w(100)}},
            {"aac", 1, ColElems{w(100)}},
            {"d", 1, ColElems{w(400)}},
            {"ab", 2, ColElems{w(101)}},
            {"abb", 2, ColElems{w(101)}},
            {"aab", 3, ColElems{w(101), w(101)}},
            {"aaba", 3, ColElems{w(101), w(101)}},
            {"abc", 3, ColElems{w(102)}},
            {"abcd", 3, ColElems{w(102)}},
        },
    },
    { // test discontinuous contraction
        append(mods, []input{
            // modifiers; secondary weight equals ccc
            {"\u0316", [][]int{{0, 220}}},
            {"\u0317", [][]int{{0, 220}, {0, 220}}},
            {"\u302D", [][]int{{0, 222}}},
            {"\u302E", [][]int{{0, 225}}}, // used as starter
            {"\u302F", [][]int{{0, 224}}}, // used as starter
            {"\u18A9", [][]int{{0, 228}}},
            {"\u0300", [][]int{{0, 230}}},
            {"\u0301", [][]int{{0, 230}}},
            {"\u0315", [][]int{{0, 232}}},
            {"\u031A", [][]int{{0, 232}}},
            {"\u035C", [][]int{{0, 233}}},
            {"\u035F", [][]int{{0, 233}}},
            {"\u035D", [][]int{{0, 234}}},
            {"\u035E", [][]int{{0, 234}}},
            {"\u0345", [][]int{{0, 240}}},

            // starters
            {"a", [][]int{{100}}},
            {"b", [][]int{{200}}},
            {"c", [][]int{{300}}},
            {"\u03B1", [][]int{{900}}},
            {"\x01", [][]int{{0, 0, 0, 0}}},

            // contractions
            {"a\u0300", [][]int{{101}}},
            {"a\u0301", [][]int{{102}}},
            {"a\u035E", [][]int{{110}}},
            {"a\u035Eb\u035E", [][]int{{115}}},
            {"ac\u035Eaca\u035E", [][]int{{116}}},
            {"a\u035Db\u035D", [][]int{{117}}},
            {"a\u0301\u035Db", [][]int{{120}}},
            {"a\u0301\u035F", [][]int{{121}}},
            {"a\u0301\u035Fb", [][]int{{119}}},
            {"\u03B1\u0345", [][]int{{901}, {902}}},
            {"\u302E\u302F", [][]int{{0, 131}, {0, 131}}},
            {"\u302F\u18A9", [][]int{{0, 130}}},
        }...),
        []check{
            {"a\x01\u0300", 1, ColElems{w(100)}},
            {"ab", 1, ColElems{w(100)}},                              // closing segment
            {"a\u0316\u0300b", 5, ColElems{w(101), w(0, 220)}},       // closing segment
            {"a\u0316\u0300", 5, ColElems{w(101), w(0, 220)}},        // no closing segment
            {"a\u0316\u0300\u035Cb", 5, ColElems{w(101), w(0, 220)}}, // completes before segment end
            {"a\u0316\u0300\u035C", 5, ColElems{w(101), w(0, 220)}},  // completes before segment end

            {"a\u0316\u0301b", 5, ColElems{w(102), w(0, 220)}},       // closing segment
            {"a\u0316\u0301", 5, ColElems{w(102), w(0, 220)}},        // no closing segment
            {"a\u0316\u0301\u035Cb", 5, ColElems{w(102), w(0, 220)}}, // completes before segment end
            {"a\u0316\u0301\u035C", 5, ColElems{w(102), w(0, 220)}},  // completes before segment end

            // match blocked by modifier with same ccc
            {"a\u0301\u0315\u031A\u035Fb", 3, ColElems{w(102)}},

            // multiple gaps
            {"a\u0301\u035Db", 6, ColElems{w(120)}},
            {"a\u0301\u035F", 5, ColElems{w(121)}},
            {"a\u0301\u035Fb", 6, ColElems{w(119)}},
            {"a\u0316\u0301\u035F", 7, ColElems{w(121), w(0, 220)}},
            {"a\u0301\u0315\u035Fb", 7, ColElems{w(121), w(0, 232)}},
            {"a\u0316\u0301\u0315\u035Db", 5, ColElems{w(102), w(0, 220)}},
            {"a\u0316\u0301\u0315\u035F", 9, ColElems{w(121), w(0, 220), w(0, 232)}},
            {"a\u0316\u0301\u0315\u035Fb", 9, ColElems{w(121), w(0, 220), w(0, 232)}},
            {"a\u0316\u0301\u0315\u035F\u035D", 9, ColElems{w(121), w(0, 220), w(0, 232)}},
            {"a\u0316\u0301\u0315\u035F\u035Db", 9, ColElems{w(121), w(0, 220), w(0, 232)}},

            // handling of segment overflow
            { // just fits within segment
                "a" + string(modSeq[:30]) + "\u0301",
                3 + len(string(modSeq[:30])),
                append(ColElems{w(102)}, modW[:30]...),
            },
            {"a" + string(modSeq[:31]) + "\u0301", 1, ColElems{w(100)}}, // overflow
            {"a" + string(modSeq) + "\u0301", 1, ColElems{w(100)}},
            { // just fits within segment with two interstitial runes
                "a" + string(modSeq[:28]) + "\u0301\u0315\u035F",
                7 + len(string(modSeq[:28])),
                append(append(ColElems{w(121)}, modW[:28]...), w(0, 232)),
            },
            { // second half does not fit within segment
                "a" + string(modSeq[:29]) + "\u0301\u0315\u035F",
                3 + len(string(modSeq[:29])),
                append(ColElems{w(102)}, modW[:29]...),
            },

            // discontinuity can only occur in last normalization segment
            {"a\u035Eb\u035E", 6, ColElems{w(115)}},
            {"a\u0316\u035Eb\u035E", 5, ColElems{w(110), w(0, 220)}},
            {"a\u035Db\u035D", 6, ColElems{w(117)}},
            {"a\u0316\u035Db\u035D", 1, ColElems{w(100)}},
            {"a\u035Eb\u0316\u035E", 8, ColElems{w(115), w(0, 220)}},
            {"a\u035Db\u0316\u035D", 8, ColElems{w(117), w(0, 220)}},
            {"ac\u035Eaca\u035E", 9, ColElems{w(116)}},
            {"a\u0316c\u035Eaca\u035E", 1, ColElems{w(100)}},
            {"ac\u035Eac\u0316a\u035E", 1, ColElems{w(100)}},

            // expanding contraction
            {"\u03B1\u0345", 4, ColElems{w(901), w(902)}},

            // Theoretical possibilities
            // contraction within a gap
            {"a\u302F\u18A9\u0301", 9, ColElems{w(102), w(0, 130)}},
            // expansion within a gap
            {"a\u0317\u0301", 5, ColElems{w(102), w(0, 220), w(0, 220)}},
            // repeating CCC blocks last modifier
            {"a\u302E\u302F\u0301", 1, ColElems{w(100)}},
            // The trailing combining characters (with lower CCC) should block the first one.
            // TODO: make the following pass.
            // {"a\u035E\u0316\u0316", 1, ColElems{w(100)}},
            {"a\u035F\u035Eb", 5, ColElems{w(110), w(0, 233)}},
            // Last combiner should match after normalization.
            // TODO: make the following pass.
            // {"a\u035D\u0301", 3, ColElems{w(102), w(0, 234)}},
            // The first combiner is blocking the second one as they have the same CCC.
            {"a\u035D\u035Eb", 1, ColElems{w(100)}},
        },
    },
}

func TestAppendNext(t *testing.T) {
    for i, tt := range appendNextTests {
        c, err := makeTable(tt.in)
        if err != nil {
            t.Errorf("%d: error creating table: %v", i, err)
            continue
        }
        for j, chk := range tt.chk {
            ws, n := c.t.AppendNext(nil, []byte(chk.in))
            if n != chk.n {
                t.Errorf("%d:%d: bytes consumed was %d; want %d", i, j, n, chk.n)
            }
            out := convertFromWeights(chk.out)
            if len(ws) != len(out) {
                t.Errorf("%d:%d: len(ws) was %d; want %d (%X vs %X)\n%X", i, j, len(ws), len(out), ws, out, chk.in)
                continue
            }
            for k, w := range ws {
                w, _ = colltab.MakeElem(w.Primary(), w.Secondary(), int(w.Tertiary()), 0)
                if w != out[k] {
                    t.Errorf("%d:%d: Weights %d was %X; want %X", i, j, k, w, out[k])
                }
            }
        }
    }
}
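
makeTable above is the essential recipe for building a tailored collator from
raw collation elements. A condensed sketch of the same flow, using only
identifiers that appear in this file (the weights are illustrative):

    b := build.NewBuilder()
    // Map 'a' to a single collation element with primary weight 100.
    if err := b.Add([]rune("a"), [][]int{{100}}, nil); err != nil {
        panic(err)
    }
    t, err := b.Build()
    if err != nil {
        panic(err)
    }
    c := NewFromTable(t) // a Collator backed by the hand-built table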
73789
vendor/golang.org/x/text/collate/tables.go
generated
vendored
Normal file
File diff suppressed because it is too large
13
vendor/golang.org/x/text/doc.go
generated
vendored
Normal file
@@ -0,0 +1,13 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:generate go run gen.go

// text is a repository of packages for internationalization (i18n) and
// localization (l10n), such as character encodings, text transformations,
// and locale-specific text handling.
package text

// TODO: more documentation on general concepts, such as Transformers, use
// of normalization, etc.
319
vendor/golang.org/x/text/gen.go
generated
vendored
Normal file
@@ -0,0 +1,319 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

// gen runs go generate on Unicode- and CLDR-related packages in the text
// repository, taking into account dependencies and versions.
package main

import (
    "bytes"
    "flag"
    "fmt"
    "go/build"
    "go/format"
    "io/ioutil"
    "os"
    "os/exec"
    "path"
    "path/filepath"
    "regexp"
    "runtime"
    "strings"
    "sync"
    "unicode"

    "golang.org/x/text/collate"
    "golang.org/x/text/internal/gen"
    "golang.org/x/text/language"
)

var (
    verbose     = flag.Bool("v", false, "verbose output")
    force       = flag.Bool("force", false, "ignore failing dependencies")
    doCore      = flag.Bool("core", false, "force an update to core")
    excludeList = flag.String("exclude", "",
        "comma-separated list of packages to exclude")

    // The user can specify a selection of packages to build on the command line.
    args []string
)

func exclude(pkg string) bool {
    if len(args) > 0 {
        return !contains(args, pkg)
    }
    return contains(strings.Split(*excludeList, ","), pkg)
}
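
// For example, one might invoke this script as (invocation illustrative):
//
//    go run gen.go -v -exclude=collate,search
//
// to regenerate everything except the collate and search packages, or list
// package arguments explicitly to generate only those.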

// TODO:
// - Better version handling.
// - Generate tables for the core unicode package?
// - Add generation for encodings. This requires some retooling here and there.
// - Running repo-wide "long" tests.

var vprintf = fmt.Printf

func main() {
    gen.Init()
    args = flag.Args()
    if !*verbose {
        // Set vprintf to a no-op.
        vprintf = func(string, ...interface{}) (int, error) { return 0, nil }
    }

    // TODO: create temporary cache directory to load files and create and set
    // a "cache" option if the user did not specify the UNICODE_DIR environment
    // variable. This will prevent duplicate downloads and also will enable long
    // tests, which really need to be run after each generated package.

    updateCore := *doCore
    if gen.UnicodeVersion() != unicode.Version {
        fmt.Printf("Requested Unicode version %s; core unicode version is %s.\n",
            gen.UnicodeVersion(),
            unicode.Version)
        c := collate.New(language.Und, collate.Numeric)
        if c.CompareString(gen.UnicodeVersion(), unicode.Version) < 0 && !*force {
            os.Exit(2)
        }
        updateCore = true
        goroot := os.Getenv("GOROOT")
        appendToFile(
            filepath.Join(goroot, "api", "except.txt"),
            fmt.Sprintf("pkg unicode, const Version = %q\n", unicode.Version),
        )
        const lines = `pkg unicode, const Version = %q
// TODO: add a new line of the following form for each new script and property.
pkg unicode, var <new script or property> *RangeTable
`
        appendToFile(
            filepath.Join(goroot, "api", "next.txt"),
            fmt.Sprintf(lines, gen.UnicodeVersion()),
        )
    }

    var unicode = &dependency{}
    if updateCore {
        fmt.Printf("Updating core to version %s...\n", gen.UnicodeVersion())
        unicode = generate("unicode")

        // Test some users of the unicode packages, especially the ones that
        // keep a mirrored table. These may need to be corrected by hand.
        generate("regexp", unicode)
        generate("strconv", unicode) // mimics Unicode table
        generate("strings", unicode)
        generate("testing", unicode) // mimics Unicode table
    }

    var (
        cldr       = generate("./unicode/cldr", unicode)
        language   = generate("./language", cldr)
        internal   = generate("./internal", unicode, language)
        norm       = generate("./unicode/norm", unicode)
        rangetable = generate("./unicode/rangetable", unicode)
        cases      = generate("./cases", unicode, norm, language, rangetable)
        width      = generate("./width", unicode)
        bidi       = generate("./unicode/bidi", unicode, norm, rangetable)
        mib        = generate("./encoding/internal/identifier", unicode)
        number     = generate("./internal/number", unicode, cldr, language, internal)
        _          = generate("./encoding/htmlindex", unicode, language, mib)
        _          = generate("./encoding/ianaindex", unicode, language, mib)
        _          = generate("./secure/precis", unicode, norm, rangetable, cases, width, bidi)
        _          = generate("./internal/cldrtree", language)
        _          = generate("./currency", unicode, cldr, language, internal, number)
        _          = generate("./feature/plural", unicode, cldr, language, internal, number)
        _          = generate("./internal/export/idna", unicode, bidi, norm)
        _          = generate("./language/display", unicode, cldr, language, internal, number)
        _          = generate("./collate", unicode, norm, cldr, language, rangetable)
        _          = generate("./search", unicode, norm, cldr, language, rangetable)
    )
    all.Wait()

    // Copy exported packages to the destination golang.org repo.
    copyExported("golang.org/x/net/idna")

    if updateCore {
        copyVendored()
    }

    if hasErrors {
        fmt.Println("FAIL")
        os.Exit(1)
    }
    vprintf("SUCCESS\n")
}
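
// Note that main compares Unicode version strings with a numeric collator,
// so that, say, "10.0.0" orders after "9.0.0" even though it is smaller in
// plain byte order. A sketch of the idea (version strings illustrative):
//
//    c := collate.New(language.Und, collate.Numeric)
//    c.CompareString("10.0.0", "9.0.0") // 1 with Numeric; byte order would give -1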

func appendToFile(file, text string) {
    fmt.Println("Augmenting", file)
    w, err := os.OpenFile(file, os.O_APPEND|os.O_WRONLY, 0600)
    if err != nil {
        fmt.Println("Failed to open file:", err)
        os.Exit(1)
    }
    defer w.Close()
    if _, err := w.WriteString(text); err != nil {
        fmt.Println("Failed to write to file:", err)
        os.Exit(1)
    }
}

var (
    all       sync.WaitGroup
    hasErrors bool
)

type dependency struct {
    sync.WaitGroup
    hasErrors bool
}

func generate(pkg string, deps ...*dependency) *dependency {
    var wg dependency
    if exclude(pkg) {
        return &wg
    }
    wg.Add(1)
    all.Add(1)
    go func() {
        defer wg.Done()
        defer all.Done()
        // Wait for dependencies to finish.
        for _, d := range deps {
            d.Wait()
            if d.hasErrors && !*force {
                fmt.Printf("--- ABORT: %s\n", pkg)
                wg.hasErrors = true
                return
            }
        }
        vprintf("=== GENERATE %s\n", pkg)
        args := []string{"generate"}
        if *verbose {
            args = append(args, "-v")
        }
        args = append(args, pkg)
        cmd := exec.Command(filepath.Join(runtime.GOROOT(), "bin", "go"), args...)
        w := &bytes.Buffer{}
        cmd.Stderr = w
        cmd.Stdout = w
        if err := cmd.Run(); err != nil {
            fmt.Printf("--- FAIL: %s:\n\t%v\n\tError: %v\n", pkg, indent(w), err)
            hasErrors = true
            wg.hasErrors = true
            return
        }

        vprintf("=== TEST %s\n", pkg)
        args[0] = "test"
        cmd = exec.Command(filepath.Join(runtime.GOROOT(), "bin", "go"), args...)
        wt := &bytes.Buffer{}
        cmd.Stderr = wt
        cmd.Stdout = wt
        if err := cmd.Run(); err != nil {
            fmt.Printf("--- FAIL: %s:\n\t%v\n\tError: %v\n", pkg, indent(wt), err)
            hasErrors = true
            wg.hasErrors = true
            return
        }
        vprintf("--- SUCCESS: %s\n\t%v\n", pkg, indent(w))
        fmt.Print(wt.String())
    }()
    return &wg
}
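
// generate implements a small dependency graph: each package gets a
// dependency value (a sync.WaitGroup plus an error flag), and its goroutine
// first Waits on all inputs, aborts if any of them failed (unless -force is
// set), and finally signals its own WaitGroup, while the global all group
// tracks overall completion. The skeleton, condensed:
//
//    var d dependency
//    d.Add(1)
//    all.Add(1)
//    go func() {
//        defer d.Done()
//        defer all.Done()
//        for _, dep := range deps {
//            dep.Wait()
//        }
//        // run go generate and go test for the package here
//    }()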

// copyExported copies a package in x/text/internal/export to the
// destination repository.
func copyExported(p string) {
    copyPackage(
        filepath.Join("internal", "export", path.Base(p)),
        filepath.Join("..", filepath.FromSlash(p[len("golang.org/x"):])),
        "golang.org/x/text/internal/export/"+path.Base(p),
        p)
}

// copyVendored copies packages used by Go core into the vendored directory.
func copyVendored() {
    root := filepath.Join(build.Default.GOROOT, filepath.FromSlash("src/vendor/golang_org/x"))

    err := filepath.Walk(root, func(dir string, info os.FileInfo, err error) error {
        if err != nil || !info.IsDir() || root == dir {
            return err
        }
        src := dir[len(root)+1:]
        const slash = string(filepath.Separator)
        if c := strings.Split(src, slash); c[0] == "text" {
            // Copy a text repo package from its normal location.
            src = strings.Join(c[1:], slash)
        } else {
            // Copy the vendored package if it exists in the export directory.
            src = filepath.Join("internal", "export", filepath.Base(src))
        }
        copyPackage(src, dir, "golang.org", "golang_org")
        return nil
    })
    if err != nil {
        fmt.Printf("Seeding directory %s has failed: %v\n", root, err)
        os.Exit(1)
    }
}

// goGenRE is used to remove go:generate lines.
var goGenRE = regexp.MustCompile("//go:generate[^\n]*\n")

// copyPackage copies relevant files from a directory in x/text to the
// destination package directory. The destination package is assumed to have
// the same name. For each copied file go:generate lines are removed and
// package comments are rewritten to the new path.
func copyPackage(dirSrc, dirDst, search, replace string) {
    err := filepath.Walk(dirSrc, func(file string, info os.FileInfo, err error) error {
        base := filepath.Base(file)
        if err != nil || info.IsDir() ||
            !strings.HasSuffix(base, ".go") ||
            strings.HasSuffix(base, "_test.go") ||
            // Don't process subdirectories.
            filepath.Dir(file) != dirSrc {
            return nil
        }
        b, err := ioutil.ReadFile(file)
        if err != nil || bytes.Contains(b, []byte("\n// +build ignore")) {
            return err
        }
        // Fix paths.
        b = bytes.Replace(b, []byte(search), []byte(replace), -1)
        // Remove go:generate lines.
        b = goGenRE.ReplaceAllLiteral(b, nil)
        comment := "// Code generated by running \"go generate\" in golang.org/x/text. DO NOT EDIT.\n\n"
        if *doCore {
            comment = "// Code generated by running \"go run gen.go -core\" in golang.org/x/text. DO NOT EDIT.\n\n"
        }
        if !bytes.HasPrefix(b, []byte(comment)) {
            b = append([]byte(comment), b...)
        }
        if b, err = format.Source(b); err != nil {
            fmt.Println("Failed to format file:", err)
            os.Exit(1)
        }
        file = filepath.Join(dirDst, base)
        vprintf("=== COPY %s\n", file)
        return ioutil.WriteFile(file, b, 0666)
    })
    if err != nil {
        fmt.Println("Copying exported files failed:", err)
        os.Exit(1)
    }
}

func contains(a []string, s string) bool {
    for _, e := range a {
        if s == e {
            return true
        }
    }
    return false
}

func indent(b *bytes.Buffer) string {
    return strings.Replace(strings.TrimSpace(b.String()), "\n", "\n\t", -1)
}
121
vendor/golang.org/x/text/internal/colltab/collate_test.go
generated
vendored
Normal file
@@ -0,0 +1,121 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package colltab_test

// This file contains tests which need to import package collate, which causes
// an import cycle when done within package colltab itself.

import (
    "bytes"
    "testing"
    "unicode"

    "golang.org/x/text/collate"
    "golang.org/x/text/language"
    "golang.org/x/text/unicode/rangetable"
)

// assigned is used to test only runes that are inside the scope of the Unicode
// version used to generate the collation table.
var assigned = rangetable.Assigned(collate.UnicodeVersion)

func TestNonDigits(t *testing.T) {
    c := collate.New(language.English, collate.Loose, collate.Numeric)

    // Verify that all non-digit numbers sort outside of the number range.
    for r, hi := rune(unicode.N.R16[0].Lo), rune(unicode.N.R32[0].Hi); r <= hi; r++ {
        if unicode.In(r, unicode.Nd) || !unicode.In(r, assigned) {
            continue
        }
        if a := string(r); c.CompareString(a, "0") != -1 && c.CompareString(a, "999999") != 1 {
            t.Errorf("%+q non-digit number is collated as digit", a)
        }
    }
}

func TestNumericCompare(t *testing.T) {
    c := collate.New(language.English, collate.Loose, collate.Numeric)

    // Iterate over all digits.
    for _, r16 := range unicode.Nd.R16 {
        testDigitCompare(t, c, rune(r16.Lo), rune(r16.Hi))
    }
    for _, r32 := range unicode.Nd.R32 {
        testDigitCompare(t, c, rune(r32.Lo), rune(r32.Hi))
    }
}

func testDigitCompare(t *testing.T, c *collate.Collator, zero, nine rune) {
    if !unicode.In(zero, assigned) {
        return
    }
    n := int(nine - zero + 1)
    if n%10 != 0 {
        t.Fatalf("len([%+q, %+q]) = %d; want a multiple of 10", zero, nine, n)
    }
    for _, tt := range []struct {
        prefix string
        b      [11]string
    }{
        {
            prefix: "",
            b: [11]string{
                "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10",
            },
        },
        {
            prefix: "1",
            b: [11]string{
                "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20",
            },
        },
        {
            prefix: "0",
            b: [11]string{
                "00", "01", "02", "03", "04", "05", "06", "07", "08", "09", "10",
            },
        },
        {
            prefix: "00",
            b: [11]string{
                "000", "001", "002", "003", "004", "005", "006", "007", "008", "009", "010",
            },
        },
        {
            prefix: "9",
            b: [11]string{
                "90", "91", "92", "93", "94", "95", "96", "97", "98", "99", "100",
            },
        },
    } {
        for k := 0; k <= n; k++ {
            i := k % 10
            a := tt.prefix + string(zero+rune(i))
            for j, b := range tt.b {
                want := 0
                switch {
                case i < j:
                    want = -1
                case i > j:
                    want = 1
                }
                got := c.CompareString(a, b)
                if got != want {
                    t.Errorf("Compare(%+q, %+q) = %d; want %d", a, b, got, want)
                    return
                }
            }
        }
    }
}

func BenchmarkNumericWeighter(b *testing.B) {
    c := collate.New(language.English, collate.Numeric)
    input := bytes.Repeat([]byte("Testing, testing 123..."), 100)
    b.SetBytes(int64(2 * len(input)))
    for i := 0; i < b.N; i++ {
        c.Compare(input, input)
    }
}
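
The Numeric option these tests exercise makes maximal runs of digits compare
by numeric value rather than code point by code point. A minimal sketch of
the user-visible effect (the file names are illustrative):

    package main

    import (
        "fmt"

        "golang.org/x/text/collate"
        "golang.org/x/text/language"
    )

    func main() {
        c := collate.New(language.English, collate.Numeric)
        // With Numeric, "file2" sorts before "file10"; plain byte order
        // would put "file10" first.
        fmt.Println(c.CompareString("file2", "file10")) // -1
    }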
Some files were not shown because too many files have changed in this diff