Mirror of https://github.com/DNSCrypt/dnscrypt-proxy.git (synced 2025-03-04 02:14:40 +01:00)
Update quic-go
commit 4c659acad9
parent 0ba23128cc
283 changed files with 6124 additions and 14020 deletions
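The exact commands used to produce this commit are not recorded in the diff, so the following is only a plausible reproduction sketch with the standard Go module tooling (module path and target version taken from the go.mod hunk below):

    go get github.com/quic-go/quic-go@v0.49.0
    go mod tidy      # recomputes the require blocks and go.sum
    go mod vendor    # rewrites vendor/, which accounts for most of the 283 changed files

Most of the additions and deletions below are in the vendor/ tree rather than in hand-written code.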
go.mod (22 changed lines)

@@ -1,6 +1,6 @@
 module github.com/dnscrypt/dnscrypt-proxy

-go 1.23.4
+go 1.23.5

 require (
 github.com/BurntSushi/toml v1.4.0
@@ -18,10 +18,10 @@ require (
 github.com/k-sone/critbitgo v1.4.0
 github.com/kardianos/service v1.2.2
 github.com/lifenjoiner/dhcpdns v0.0.6
-github.com/miekg/dns v1.1.62
+github.com/miekg/dns v1.1.63
 github.com/opencoff/go-sieve v0.2.1
 github.com/powerman/check v1.8.0
-github.com/quic-go/quic-go v0.48.2
+github.com/quic-go/quic-go v0.49.0
 golang.org/x/crypto v0.32.0
 golang.org/x/net v0.34.0
 golang.org/x/sys v0.29.0
@@ -30,24 +30,24 @@ require (

 require (
 github.com/davecgh/go-spew v1.1.1 // indirect
-github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
 github.com/golang/protobuf v1.5.3 // indirect
-github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect
 github.com/hashicorp/go-syslog v1.0.0 // indirect
-github.com/hashicorp/golang-lru v1.0.2 // indirect
-github.com/onsi/ginkgo/v2 v2.22.2 // indirect
+github.com/hashicorp/golang-lru v0.5.0 // indirect
+github.com/onsi/ginkgo/v2 v2.9.5 // indirect
 github.com/pkg/errors v0.9.1 // indirect
 github.com/pmezard/go-difflib v1.0.0 // indirect
 github.com/powerman/deepequal v0.1.0 // indirect
 github.com/quic-go/qpack v0.5.1 // indirect
 github.com/smartystreets/goconvey v1.8.1 // indirect
 go.uber.org/mock v0.5.0 // indirect
-golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect
-golang.org/x/mod v0.22.0 // indirect
+golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
+golang.org/x/mod v0.18.0 // indirect
 golang.org/x/sync v0.10.0 // indirect
 golang.org/x/text v0.21.0 // indirect
-golang.org/x/tools v0.29.0 // indirect
+golang.org/x/tools v0.22.0 // indirect
 google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
 google.golang.org/grpc v1.56.3 // indirect
-google.golang.org/protobuf v1.36.1 // indirect
+google.golang.org/protobuf v1.34.2 // indirect
 )
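Only quic-go is bumped directly; every other version change is an // indirect entry, which is consistent with Go's minimal version selection simply re-resolving the transitive graph around quic-go v0.49.0 after go mod tidy (this is an interpretation of the diff, not something stated in the commit). If needed, the provenance of any of these indirect modules can be inspected with the module tools, for example:

    go mod why -m github.com/google/pprof      # shows which require chain pulls pprof in
    go mod graph | grep 'quic-go@v0.49.0'      # lists the requirements declared by the new quic-go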
go.sum (57 changed lines)

@@ -2,24 +2,28 @@ github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0
 github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
 github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
 github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185 h1:3T8ZyTDp5QxTx3NU48JVb2u+75xc040fofcBaN+6jPA=
 github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185/go.mod h1:cFRxtTwTOJkz2x3rQUNCYKWC93yP1VKjR8NUhqFxZNU=
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
-github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
-github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
+github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
 github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
-github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
 github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
 github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
@@ -28,11 +32,11 @@ github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwM
 github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
 github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
 github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
-github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb h1:PGufWXXDq9yaev6xX1YQauaO1MV90e6Mpoq1I7Lz/VM=
 github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/jedisct1/dlog v0.0.0-20241212093805-3c5fd791b405 h1:6j/0utSiy3KhZSpFJgobk+ME1BIwXeq9jepJaDLW3Yg=
 github.com/jedisct1/dlog v0.0.0-20241212093805-3c5fd791b405/go.mod h1:OO1HpQNlMCMaPdHPuI00fhChZQZ8npbVTTjMvJUxUqQ=
 github.com/jedisct1/go-clocksmith v0.0.0-20230211133011-392c1afea73e h1:tzG4EjKgHIqKVkLIAC4pXTIapuM2BR05uXokEEysAXA=
@@ -53,12 +57,12 @@ github.com/kardianos/service v1.2.2 h1:ZvePhAHfvo0A7Mftk/tEzqEZ7Q4lgnR8sGz4xu1YX
 github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
 github.com/lifenjoiner/dhcpdns v0.0.6 h1:rn4Y5RRR5sgQ6RjWenwhA7i/uHzHW9hbZpCobA4CAJs=
 github.com/lifenjoiner/dhcpdns v0.0.6/go.mod h1:BixeaGeafYzDIuDCYIUbSOdi4m+TScpzI9cZGYgzgSk=
-github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ=
-github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ=
-github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
-github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
-github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
-github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
+github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY=
+github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs=
+github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
+github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
+github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
+github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
 github.com/opencoff/go-sieve v0.2.1 h1:5Pv6rd3zRquNmXcYHFndjVoolTgcv0ua2XTdMQ+gw0M=
 github.com/opencoff/go-sieve v0.2.1/go.mod h1:CndxLpW4R8fDq04XfBSCOZ+qWwDCcxjfUJbr0GPqWHY=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -71,27 +75,30 @@ github.com/powerman/deepequal v0.1.0 h1:sVwtyTsBuYIvdbLR1O2wzRY63YgPqdGZmk/o80l+
 github.com/powerman/deepequal v0.1.0/go.mod h1:3k7aG/slufBhUANdN67o/UPg8i5YaiJ6FmibWX0cn04=
 github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
 github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
-github.com/quic-go/quic-go v0.48.2 h1:wsKXZPeGWpMpCGSWqOcqpW2wZYic/8T3aqiOID0/KWE=
-github.com/quic-go/quic-go v0.48.2/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs=
+github.com/quic-go/quic-go v0.49.0 h1:w5iJHXwHxs1QxyBv1EHKuC50GX5to8mJAxvtnttJp94=
+github.com/quic-go/quic-go v0.49.0/go.mod h1:s2wDnmCdooUQBmQfpUSTCYBl1/D4FcqbULMMkASvR6s=
 github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY=
 github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec=
 github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY=
 github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
 go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
 golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
 golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
-golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA=
-golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
-golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
-golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
+golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
+golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
+golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
 golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
 golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
 golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
 golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
@@ -99,8 +106,8 @@ golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
 golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
 golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
-golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE=
-golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588=
+golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
+golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
 google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
@@ -108,9 +115,11 @@ google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc=
 google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
-google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
 gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
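The go.sum hunks mirror the go.mod changes: checksums for the newly selected module versions are added and entries for versions that are no longer selected are removed. go.sum is maintained by the go command rather than edited by hand; after a change like this it can be sanity-checked with, for example:

    go mod verify    # re-checks downloaded module content against the go.sum hashes
    go mod tidy      # rewrites go.sum if entries are missing or stale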
(The next three hunks are in vendored files whose path headers were not captured in this view; from their contents they appear to be the go-task/slim-sprig CHANGELOG.md, README.md and Taskfile.yml, which move back from the v3.0.0 copy to the pre-v3 snapshot selected above.)

CHANGELOG.md:

@@ -1,24 +1,5 @@
 # Changelog

-## Release 3.2.3 (2022-11-29)
-
-### Changed
-
-- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi)
-- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero)
-- #353: Updated masterminds/semver which included bug fixes
-- #354: Updated golang.org/x/crypto which included bug fixes
-
-## Release 3.2.2 (2021-02-04)
-
-This is a re-release of 3.2.1 to satisfy something with the Go module system.
-
-## Release 3.2.1 (2021-02-04)
-
-### Changed
-
-- Upgraded `Masterminds/goutils` to `v1.1.1`. see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr)
-
 ## Release 3.2.0 (2020-12-14)

 ### Added

README.md:

@@ -1,4 +1,4 @@
-# Slim-Sprig: Template functions for Go templates [](https://pkg.go.dev/github.com/go-task/slim-sprig/v3)
+# Slim-Sprig: Template functions for Go templates [](https://godoc.org/github.com/go-task/slim-sprig) [](https://goreportcard.com/report/github.com/go-task/slim-sprig)

 Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with
 all functions that depend on external (non standard library) or crypto packages

Taskfile.yml:

@@ -1,6 +1,6 @@
 # https://taskfile.dev

-version: '3'
+version: '2'

 tasks:
 default:
vendor/github.com/google/pprof/profile/encode.go (generated, vendored; 37 changed lines)

@@ -17,7 +17,6 @@ package profile
 import (
 "errors"
 "sort"
-"strings"
 )

 func (p *Profile) decoder() []decoder {
@@ -122,7 +121,6 @@ func (p *Profile) preEncode() {
 }

 p.defaultSampleTypeX = addString(strings, p.DefaultSampleType)
-p.docURLX = addString(strings, p.DocURL)

 p.stringTable = make([]string, len(strings))
 for s, i := range strings {
@@ -157,7 +155,6 @@ func (p *Profile) encode(b *buffer) {
 encodeInt64Opt(b, 12, p.Period)
 encodeInt64s(b, 13, p.commentX)
 encodeInt64(b, 14, p.defaultSampleTypeX)
-encodeInt64Opt(b, 15, p.docURLX)
 }

 var profileDecoder = []decoder{
@@ -186,13 +183,12 @@ var profileDecoder = []decoder{
 // repeated Location location = 4
 func(b *buffer, m message) error {
 x := new(Location)
-x.Line = b.tmpLines[:0] // Use shared space temporarily
+x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer
 pp := m.(*Profile)
 pp.Location = append(pp.Location, x)
 err := decodeMessage(b, x)
-b.tmpLines = x.Line[:0]
-// Copy to shrink size and detach from shared space.
-x.Line = append([]Line(nil), x.Line...)
+var tmp []Line
+x.Line = append(tmp, x.Line...) // Shrink to allocated size
 return err
 },
 // repeated Function function = 5
@@ -239,8 +235,6 @@ var profileDecoder = []decoder{
 func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) },
 // int64 defaultSampleType = 14
 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) },
-// string doc_link = 15;
-func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).docURLX) },
 }

 // postDecode takes the unexported fields populated by decode (with
@@ -258,14 +252,6 @@ func (p *Profile) postDecode() error {
 } else {
 mappings[m.ID] = m
 }
-
-// If this a main linux kernel mapping with a relocation symbol suffix
-// ("[kernel.kallsyms]_text"), extract said suffix.
-// It is fairly hacky to handle at this level, but the alternatives appear even worse.
-const prefix = "[kernel.kallsyms]"
-if strings.HasPrefix(m.File, prefix) {
-m.KernelRelocationSymbol = m.File[len(prefix):]
-}
 }

 functions := make(map[uint64]*Function, len(p.Function))
@@ -312,15 +298,7 @@ func (p *Profile) postDecode() error {
 st.Unit, err = getString(p.stringTable, &st.unitX, err)
 }

-// Pre-allocate space for all locations.
-numLocations := 0
 for _, s := range p.Sample {
-numLocations += len(s.locationIDX)
-}
-locBuffer := make([]*Location, numLocations)
-
-for _, s := range p.Sample {
-if len(s.labelX) > 0 {
 labels := make(map[string][]string, len(s.labelX))
 numLabels := make(map[string][]int64, len(s.labelX))
 numUnits := make(map[string][]string, len(s.labelX))
@@ -354,10 +332,7 @@ func (p *Profile) postDecode() error {
 }
 s.NumUnit = numUnits
 }
-}
-
-s.Location = locBuffer[:len(s.locationIDX)]
-locBuffer = locBuffer[len(s.locationIDX):]
+s.Location = make([]*Location, len(s.locationIDX))
 for i, lid := range s.locationIDX {
 if lid < uint64(len(locationIds)) {
 s.Location[i] = locationIds[lid]
@@ -388,7 +363,6 @@ func (p *Profile) postDecode() error {

 p.commentX = nil
 p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err)
-p.DocURL, err = getString(p.stringTable, &p.docURLX, err)
 p.stringTable = nil
 return err
 }
@@ -535,7 +509,6 @@ func (p *Line) decoder() []decoder {
 func (p *Line) encode(b *buffer) {
 encodeUint64Opt(b, 1, p.functionIDX)
 encodeInt64Opt(b, 2, p.Line)
-encodeInt64Opt(b, 3, p.Column)
 }

 var lineDecoder = []decoder{
@@ -544,8 +517,6 @@ var lineDecoder = []decoder{
 func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
 // optional int64 line = 2
 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
-// optional int64 column = 3
-func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Column) },
 }

 func (p *Function) decoder() []decoder {
vendor/github.com/google/pprof/profile/filter.go (generated, vendored; 4 changed lines)

@@ -22,10 +22,6 @@ import "regexp"
 // samples where at least one frame matches focus but none match ignore.
 // Returns true is the corresponding regexp matched at least one sample.
 func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) {
-if focus == nil && ignore == nil && hide == nil && show == nil {
-fm = true // Missing focus implies a match
-return
-}
 focusOrIgnore := make(map[uint64]bool)
 hidden := make(map[uint64]bool)
 for _, l := range p.Location {
vendor/github.com/google/pprof/profile/legacy_java_profile.go (generated, vendored; 4 changed lines)

@@ -56,7 +56,7 @@ func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte
 }

 // Strip out addresses for better merge.
-if err = p.Aggregate(true, true, true, true, false, false); err != nil {
+if err = p.Aggregate(true, true, true, true, false); err != nil {
 return nil, err
 }

@@ -99,7 +99,7 @@ func parseJavaProfile(b []byte) (*Profile, error) {
 }

 // Strip out addresses for better merge.
-if err = p.Aggregate(true, true, true, true, false, false); err != nil {
+if err = p.Aggregate(true, true, true, true, false); err != nil {
 return nil, err
 }

vendor/github.com/google/pprof/profile/legacy_profile.go (generated, vendored; 5 changed lines)

@@ -295,7 +295,6 @@ func get64b(b []byte) (uint64, []byte) {
 //
 // The general format for profilez samples is a sequence of words in
 // binary format. The first words are a header with the following data:
-//
 // 1st word -- 0
 // 2nd word -- 3
 // 3rd word -- 0 if a c++ application, 1 if a java application.
@@ -404,15 +403,12 @@ func cleanupDuplicateLocations(p *Profile) {
 //
 // profilez samples are a repeated sequence of stack frames of the
 // form:
-//
 // 1st word -- The number of times this stack was encountered.
 // 2nd word -- The size of the stack (StackSize).
 // 3rd word -- The first address on the stack.
 // ...
 // StackSize + 2 -- The last address on the stack
-//
 // The last stack trace is of the form:
-//
 // 1st word -- 0
 // 2nd word -- 1
 // 3rd word -- 0
@@ -865,6 +861,7 @@ func parseThread(b []byte) (*Profile, error) {
 // Recognize each thread and populate profile samples.
 for !isMemoryMapSentinel(line) {
 if strings.HasPrefix(line, "---- no stack trace for") {
+line = ""
 break
 }
 if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
vendor/github.com/google/pprof/profile/merge.go (generated, vendored; 267 changed lines)

@@ -15,7 +15,6 @@
 package profile

 import (
-"encoding/binary"
 "fmt"
 "sort"
 "strconv"
@@ -59,7 +58,7 @@ func Merge(srcs []*Profile) (*Profile, error) {

 for _, src := range srcs {
 // Clear the profile-specific hash tables
-pm.locationsByID = makeLocationIDMap(len(src.Location))
+pm.locationsByID = make(map[uint64]*Location, len(src.Location))
 pm.functionsByID = make(map[uint64]*Function, len(src.Function))
 pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))

@@ -137,7 +136,7 @@ type profileMerger struct {
 p *Profile

 // Memoization tables within a profile.
-locationsByID locationIDMap
+locationsByID map[uint64]*Location
 functionsByID map[uint64]*Function
 mappingsByID map[uint64]mapInfo

@@ -154,16 +153,6 @@ type mapInfo struct {
 }

 func (pm *profileMerger) mapSample(src *Sample) *Sample {
-// Check memoization table
-k := pm.sampleKey(src)
-if ss, ok := pm.samples[k]; ok {
-for i, v := range src.Value {
-ss.Value[i] += v
-}
-return ss
-}
-
-// Make new sample.
 s := &Sample{
 Location: make([]*Location, len(src.Location)),
 Value: make([]int64, len(src.Value)),
@@ -188,98 +177,52 @@ func (pm *profileMerger) mapSample(src *Sample) *Sample {
 s.NumLabel[k] = vv
 s.NumUnit[k] = uu
 }
+// Check memoization table. Must be done on the remapped location to
+// account for the remapped mapping. Add current values to the
+// existing sample.
+k := s.key()
+if ss, ok := pm.samples[k]; ok {
+for i, v := range src.Value {
+ss.Value[i] += v
+}
+return ss
+}
 copy(s.Value, src.Value)
 pm.samples[k] = s
 pm.p.Sample = append(pm.p.Sample, s)
 return s
 }

-func (pm *profileMerger) sampleKey(sample *Sample) sampleKey {
-// Accumulate contents into a string.
-var buf strings.Builder
-buf.Grow(64) // Heuristic to avoid extra allocs
-
-// encode a number
-putNumber := func(v uint64) {
-var num [binary.MaxVarintLen64]byte
-n := binary.PutUvarint(num[:], v)
-buf.Write(num[:n])
+// key generates sampleKey to be used as a key for maps.
+func (sample *Sample) key() sampleKey {
+ids := make([]string, len(sample.Location))
+for i, l := range sample.Location {
+ids[i] = strconv.FormatUint(l.ID, 16)
 }

-// encode a string prefixed with its length.
-putDelimitedString := func(s string) {
-putNumber(uint64(len(s)))
-buf.WriteString(s)
+labels := make([]string, 0, len(sample.Label))
+for k, v := range sample.Label {
+labels = append(labels, fmt.Sprintf("%q%q", k, v))
 }
+sort.Strings(labels)

-for _, l := range sample.Location {
-// Get the location in the merged profile, which may have a different ID.
-if loc := pm.mapLocation(l); loc != nil {
-putNumber(loc.ID)
+numlabels := make([]string, 0, len(sample.NumLabel))
+for k, v := range sample.NumLabel {
+numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k]))
 }
-}
-putNumber(0) // Delimiter
+sort.Strings(numlabels)

-for _, l := range sortedKeys1(sample.Label) {
-putDelimitedString(l)
-values := sample.Label[l]
-putNumber(uint64(len(values)))
-for _, v := range values {
-putDelimitedString(v)
+return sampleKey{
+strings.Join(ids, "|"),
+strings.Join(labels, ""),
+strings.Join(numlabels, ""),
 }
-}

-for _, l := range sortedKeys2(sample.NumLabel) {
-putDelimitedString(l)
-values := sample.NumLabel[l]
-putNumber(uint64(len(values)))
-for _, v := range values {
-putNumber(uint64(v))
-}
-units := sample.NumUnit[l]
-putNumber(uint64(len(units)))
-for _, v := range units {
-putDelimitedString(v)
-}
-}
-
-return sampleKey(buf.String())
 }

-type sampleKey string
-
-// sortedKeys1 returns the sorted keys found in a string->[]string map.
-//
-// Note: this is currently non-generic since github pprof runs golint,
-// which does not support generics. When that issue is fixed, it can
-// be merged with sortedKeys2 and made into a generic function.
-func sortedKeys1(m map[string][]string) []string {
-if len(m) == 0 {
-return nil
-}
-keys := make([]string, 0, len(m))
-for k := range m {
-keys = append(keys, k)
-}
-sort.Strings(keys)
-return keys
-}
-
-// sortedKeys2 returns the sorted keys found in a string->[]int64 map.
-//
-// Note: this is currently non-generic since github pprof runs golint,
-// which does not support generics. When that issue is fixed, it can
-// be merged with sortedKeys1 and made into a generic function.
-func sortedKeys2(m map[string][]int64) []string {
-if len(m) == 0 {
-return nil
-}
-keys := make([]string, 0, len(m))
-for k := range m {
-keys = append(keys, k)
-}
-sort.Strings(keys)
-return keys
+type sampleKey struct {
+locations string
+labels string
+numlabels string
 }

 func (pm *profileMerger) mapLocation(src *Location) *Location {
@@ -287,7 +230,7 @@ func (pm *profileMerger) mapLocation(src *Location) *Location {
 return nil
 }

-if l := pm.locationsByID.get(src.ID); l != nil {
+if l, ok := pm.locationsByID[src.ID]; ok {
 return l
 }

@@ -306,10 +249,10 @@ func (pm *profileMerger) mapLocation(src *Location) *Location {
 // account for the remapped mapping ID.
 k := l.key()
 if ll, ok := pm.locations[k]; ok {
-pm.locationsByID.set(src.ID, ll)
+pm.locationsByID[src.ID] = ll
 return ll
 }
-pm.locationsByID.set(src.ID, l)
+pm.locationsByID[src.ID] = l
 pm.locations[k] = l
 pm.p.Location = append(pm.p.Location, l)
 return l
@@ -326,13 +269,12 @@ func (l *Location) key() locationKey {
 key.addr -= l.Mapping.Start
 key.mappingID = l.Mapping.ID
 }
-lines := make([]string, len(l.Line)*3)
+lines := make([]string, len(l.Line)*2)
 for i, line := range l.Line {
 if line.Function != nil {
 lines[i*2] = strconv.FormatUint(line.Function.ID, 16)
 }
 lines[i*2+1] = strconv.FormatInt(line.Line, 16)
-lines[i*2+2] = strconv.FormatInt(line.Column, 16)
 }
 key.lines = strings.Join(lines, "|")
 return key
@@ -366,7 +308,6 @@ func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
 Limit: src.Limit,
 Offset: src.Offset,
 File: src.File,
-KernelRelocationSymbol: src.KernelRelocationSymbol,
 BuildID: src.BuildID,
 HasFunctions: src.HasFunctions,
 HasFilenames: src.HasFilenames,
@@ -419,7 +360,6 @@ func (pm *profileMerger) mapLine(src Line) Line {
 ln := Line{
 Function: pm.mapFunction(src.Function),
 Line: src.Line,
-Column: src.Column,
 }
 return ln
 }
@@ -476,7 +416,6 @@ func combineHeaders(srcs []*Profile) (*Profile, error) {
 var timeNanos, durationNanos, period int64
 var comments []string
 seenComments := map[string]bool{}
-var docURL string
 var defaultSampleType string
 for _, s := range srcs {
 if timeNanos == 0 || s.TimeNanos < timeNanos {
@@ -495,9 +434,6 @@ func combineHeaders(srcs []*Profile) (*Profile, error) {
 if defaultSampleType == "" {
 defaultSampleType = s.DefaultSampleType
 }
-if docURL == "" {
-docURL = s.DocURL
-}
 }

 p := &Profile{
@@ -513,7 +449,6 @@ func combineHeaders(srcs []*Profile) (*Profile, error) {

 Comments: comments,
 DefaultSampleType: defaultSampleType,
-DocURL: docURL,
 }
 copy(p.SampleType, srcs[0].SampleType)
 return p, nil
@@ -544,131 +479,3 @@ func (p *Profile) compatible(pb *Profile) error {
 func equalValueType(st1, st2 *ValueType) bool {
 return st1.Type == st2.Type && st1.Unit == st2.Unit
 }
-
-// locationIDMap is like a map[uint64]*Location, but provides efficiency for
-// ids that are densely numbered, which is often the case.
-type locationIDMap struct {
-dense []*Location // indexed by id for id < len(dense)
-sparse map[uint64]*Location // indexed by id for id >= len(dense)
-}
-
-func makeLocationIDMap(n int) locationIDMap {
-return locationIDMap{
-dense: make([]*Location, n),
-sparse: map[uint64]*Location{},
-}
-}
-
-func (lm locationIDMap) get(id uint64) *Location {
-if id < uint64(len(lm.dense)) {
-return lm.dense[int(id)]
-}
-return lm.sparse[id]
-}
-
-func (lm locationIDMap) set(id uint64, loc *Location) {
-if id < uint64(len(lm.dense)) {
-lm.dense[id] = loc
-return
-}
-lm.sparse[id] = loc
-}
-
-// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It
-// keeps sample types that appear in all profiles only and drops/reorders the
-// sample types as necessary.
-//
-// In the case of sample types order is not the same for given profiles the
-// order is derived from the first profile.
-//
-// Profiles are modified in-place.
-//
-// It returns an error if the sample type's intersection is empty.
-func CompatibilizeSampleTypes(ps []*Profile) error {
-sTypes := commonSampleTypes(ps)
-if len(sTypes) == 0 {
-return fmt.Errorf("profiles have empty common sample type list")
-}
-for _, p := range ps {
-if err := compatibilizeSampleTypes(p, sTypes); err != nil {
-return err
-}
-}
-return nil
-}
-
-// commonSampleTypes returns sample types that appear in all profiles in the
-// order how they ordered in the first profile.
-func commonSampleTypes(ps []*Profile) []string {
-if len(ps) == 0 {
-return nil
-}
-sTypes := map[string]int{}
-for _, p := range ps {
-for _, st := range p.SampleType {
-sTypes[st.Type]++
-}
-}
-var res []string
-for _, st := range ps[0].SampleType {
-if sTypes[st.Type] == len(ps) {
-res = append(res, st.Type)
-}
-}
-return res
-}
-
-// compatibilizeSampleTypes drops sample types that are not present in sTypes
-// list and reorder them if needed.
-//
-// It sets DefaultSampleType to sType[0] if it is not in sType list.
-//
-// It assumes that all sample types from the sTypes list are present in the
-// given profile otherwise it returns an error.
-func compatibilizeSampleTypes(p *Profile, sTypes []string) error {
-if len(sTypes) == 0 {
-return fmt.Errorf("sample type list is empty")
-}
-defaultSampleType := sTypes[0]
-reMap, needToModify := make([]int, len(sTypes)), false
-for i, st := range sTypes {
-if st == p.DefaultSampleType {
-defaultSampleType = p.DefaultSampleType
-}
-idx := searchValueType(p.SampleType, st)
-if idx < 0 {
-return fmt.Errorf("%q sample type is not found in profile", st)
-}
-reMap[i] = idx
-if idx != i {
-needToModify = true
-}
-}
-if !needToModify && len(sTypes) == len(p.SampleType) {
-return nil
-}
-p.DefaultSampleType = defaultSampleType
-oldSampleTypes := p.SampleType
-p.SampleType = make([]*ValueType, len(sTypes))
-for i, idx := range reMap {
-p.SampleType[i] = oldSampleTypes[idx]
-}
-values := make([]int64, len(sTypes))
-for _, s := range p.Sample {
-for i, idx := range reMap {
-values[i] = s.Value[idx]
-}
-s.Value = s.Value[:len(values)]
-copy(s.Value, values)
-}
-return nil
-}
-
-func searchValueType(vts []*ValueType, s string) int {
-for i, vt := range vts {
-if vt.Type == s {
-return i
-}
-}
-return -1
-}
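Everything under vendor/github.com/google/pprof/profile/ in this commit (including the profile.go changes that follow) is machine-copied from the selected pprof snapshot, not hand-edited. A hedged post-update sanity check — not necessarily what was run for this commit — is to confirm that the vendor tree matches go.mod and that the project still builds:

    go mod vendor    # should be a no-op if vendor/ already matches go.mod
    go build ./...   # confirms dnscrypt-proxy compiles against the re-vendored packages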
vendor/github.com/google/pprof/profile/profile.go (generated, vendored; 80 changed lines)

@@ -21,6 +21,7 @@ import (
 "compress/gzip"
 "fmt"
 "io"
+"io/ioutil"
 "math"
 "path/filepath"
 "regexp"
@@ -39,7 +40,6 @@ type Profile struct {
 Location []*Location
 Function []*Function
 Comments []string
-DocURL string

 DropFrames string
 KeepFrames string
@@ -54,7 +54,6 @@ type Profile struct {
 encodeMu sync.Mutex

 commentX []int64
-docURLX int64
 dropFramesX int64
 keepFramesX int64
 stringTable []string
@@ -74,22 +73,8 @@ type ValueType struct {
 type Sample struct {
 Location []*Location
 Value []int64
-// Label is a per-label-key map to values for string labels.
-//
-// In general, having multiple values for the given label key is strongly
-// discouraged - see docs for the sample label field in profile.proto. The
-// main reason this unlikely state is tracked here is to make the
-// decoding->encoding roundtrip not lossy. But we expect that the value
-// slices present in this map are always of length 1.
 Label map[string][]string
-// NumLabel is a per-label-key map to values for numeric labels. See a note
-// above on handling multiple values for a label.
 NumLabel map[string][]int64
-// NumUnit is a per-label-key map to the unit names of corresponding numeric
-// label values. The unit info may be missing even if the label is in
-// NumLabel, see the docs in profile.proto for details. When the value is
-// slice is present and not nil, its length must be equal to the length of
-// the corresponding value slice in NumLabel.
 NumUnit map[string][]string

 locationIDX []uint64
@@ -121,15 +106,6 @@ type Mapping struct {

 fileX int64
 buildIDX int64
-
-// Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File.
-// For linux kernel mappings generated by some tools, correct symbolization depends
-// on knowing which of the two possible relocation symbols was used for `Start`.
-// This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext").
-//
-// Note, this public field is not persisted in the proto. For the purposes of
-// copying / merging / hashing profiles, it is considered subsumed by `File`.
-KernelRelocationSymbol string
 }

 // Location corresponds to Profile.Location
@@ -147,7 +123,6 @@ type Location struct {
 type Line struct {
 Function *Function
 Line int64
-Column int64

 functionIDX uint64
 }
@@ -169,7 +144,7 @@ type Function struct {
 // may be a gzip-compressed encoded protobuf or one of many legacy
 // profile formats which may be unsupported in the future.
 func Parse(r io.Reader) (*Profile, error) {
-data, err := io.ReadAll(r)
+data, err := ioutil.ReadAll(r)
 if err != nil {
 return nil, err
 }
@@ -184,7 +159,7 @@ func ParseData(data []byte) (*Profile, error) {
 if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b {
 gz, err := gzip.NewReader(bytes.NewBuffer(data))
 if err == nil {
-data, err = io.ReadAll(gz)
+data, err = ioutil.ReadAll(gz)
 }
 if err != nil {
 return nil, fmt.Errorf("decompressing profile: %v", err)
@@ -439,7 +414,7 @@ func (p *Profile) CheckValid() error {
 // Aggregate merges the locations in the profile into equivalence
 // classes preserving the request attributes. It also updates the
 // samples to point to the merged locations.
-func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnnumber, address bool) error {
+func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error {
 for _, m := range p.Mapping {
 m.HasInlineFrames = m.HasInlineFrames && inlineFrame
 m.HasFunctions = m.HasFunctions && function
@@ -461,7 +436,7 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnn
 }

 // Aggregate locations
-if !inlineFrame || !address || !linenumber || !columnnumber {
+if !inlineFrame || !address || !linenumber {
 for _, l := range p.Location {
 if !inlineFrame && len(l.Line) > 1 {
 l.Line = l.Line[len(l.Line)-1:]
@@ -469,12 +444,6 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnn
 if !linenumber {
 for i := range l.Line {
 l.Line[i].Line = 0
-l.Line[i].Column = 0
-}
-}
-if !columnnumber {
-for i := range l.Line {
-l.Line[i].Column = 0
 }
 }
 if !address {
@@ -557,9 +526,6 @@ func (p *Profile) String() string {
 for _, c := range p.Comments {
 ss = append(ss, "Comment: "+c)
 }
-if url := p.DocURL; url != "" {
-ss = append(ss, fmt.Sprintf("Doc: %s", url))
-}
 if pt := p.PeriodType; pt != nil {
 ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit))
 }
@@ -639,11 +605,10 @@ func (l *Location) string() string {
 for li := range l.Line {
 lnStr := "??"
 if fn := l.Line[li].Function; fn != nil {
-lnStr = fmt.Sprintf("%s %s:%d:%d s=%d",
+lnStr = fmt.Sprintf("%s %s:%d s=%d",
 fn.Name,
 fn.Filename,
 l.Line[li].Line,
-l.Line[li].Column,
 fn.StartLine)
 if fn.Name != fn.SystemName {
 lnStr = lnStr + "(" + fn.SystemName + ")"
@@ -742,35 +707,6 @@ func (s *Sample) HasLabel(key, value string) bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetNumLabel sets the specified key to the specified value for all samples in the
|
|
||||||
// profile. "unit" is a slice that describes the units that each corresponding member
|
|
||||||
// of "values" is measured in (e.g. bytes or seconds). If there is no relevant
|
|
||||||
// unit for a given value, that member of "unit" should be the empty string.
|
|
||||||
// "unit" must either have the same length as "value", or be nil.
|
|
||||||
func (p *Profile) SetNumLabel(key string, value []int64, unit []string) {
|
|
||||||
for _, sample := range p.Sample {
|
|
||||||
if sample.NumLabel == nil {
|
|
||||||
sample.NumLabel = map[string][]int64{key: value}
|
|
||||||
} else {
|
|
||||||
sample.NumLabel[key] = value
|
|
||||||
}
|
|
||||||
if sample.NumUnit == nil {
|
|
||||||
sample.NumUnit = map[string][]string{key: unit}
|
|
||||||
} else {
|
|
||||||
sample.NumUnit[key] = unit
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveNumLabel removes all numerical labels associated with the specified key for all
|
|
||||||
// samples in the profile.
|
|
||||||
func (p *Profile) RemoveNumLabel(key string) {
|
|
||||||
for _, sample := range p.Sample {
|
|
||||||
delete(sample.NumLabel, key)
|
|
||||||
delete(sample.NumUnit, key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DiffBaseSample returns true if a sample belongs to the diff base and false
|
// DiffBaseSample returns true if a sample belongs to the diff base and false
|
||||||
// otherwise.
|
// otherwise.
|
||||||
func (s *Sample) DiffBaseSample() bool {
|
func (s *Sample) DiffBaseSample() bool {
|
||||||
|
@ -849,10 +785,10 @@ func (p *Profile) HasFileLines() bool {
|
||||||
|
|
||||||
// Unsymbolizable returns true if a mapping points to a binary for which
|
// Unsymbolizable returns true if a mapping points to a binary for which
|
||||||
// locations can't be symbolized in principle, at least now. Examples are
|
// locations can't be symbolized in principle, at least now. Examples are
|
||||||
// "[vdso]", "[vsyscall]" and some others, see the code.
|
// "[vdso]", [vsyscall]" and some others, see the code.
|
||||||
func (m *Mapping) Unsymbolizable() bool {
|
func (m *Mapping) Unsymbolizable() bool {
|
||||||
name := filepath.Base(m.File)
|
name := filepath.Base(m.File)
|
||||||
return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon"
|
return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy makes a fully independent copy of a profile.
|
// Copy makes a fully independent copy of a profile.
|
||||||
|
|
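Aside (not part of the commit): the hunks above drop newer-pprof-only surface — the columnnumber flag of Aggregate, the Line.Column field, and Sample's SetNumLabel/RemoveNumLabel helpers — so callers of this vendored copy are limited to the older five-flag Aggregate. A minimal usage sketch against that older API; the input path is a placeholder:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/google/pprof/profile"
)

func main() {
	f, err := os.Open("cpu.pprof") // placeholder input file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Parse accepts gzip-compressed or raw encoded profiles.
	p, err := profile.Parse(f)
	if err != nil {
		log.Fatal(err)
	}

	// Older signature: inlineFrame, function, filename, linenumber, address.
	// Keep function and filename detail; drop inline frames, line numbers
	// and addresses before reporting.
	if err := p.Aggregate(false, true, true, false, false); err != nil {
		log.Fatal(err)
	}
	fmt.Println(p.String())
}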
vendor/github.com/google/pprof/profile/proto.go (9 changes; generated, vendored)

@@ -44,7 +44,6 @@ type buffer struct {
 	u64  uint64
 	data []byte
 	tmp  [16]byte
-	tmpLines []Line // temporary storage used while decoding "repeated Line".
 }
 
 type decoder func(*buffer, message) error
@@ -287,6 +286,7 @@ func decodeInt64s(b *buffer, x *[]int64) error {
 	if b.typ == 2 {
 		// Packed encoding
 		data := b.data
+		tmp := make([]int64, 0, len(data)) // Maximally sized
 		for len(data) > 0 {
 			var u uint64
 			var err error
@@ -294,8 +294,9 @@ func decodeInt64s(b *buffer, x *[]int64) error {
 			if u, data, err = decodeVarint(data); err != nil {
 				return err
 			}
-			*x = append(*x, int64(u))
+			tmp = append(tmp, int64(u))
 		}
+		*x = append(*x, tmp...)
 		return nil
 	}
 	var i int64
@@ -318,6 +319,7 @@ func decodeUint64s(b *buffer, x *[]uint64) error {
 	if b.typ == 2 {
 		data := b.data
 		// Packed encoding
+		tmp := make([]uint64, 0, len(data)) // Maximally sized
 		for len(data) > 0 {
 			var u uint64
 			var err error
@@ -325,8 +327,9 @@ func decodeUint64s(b *buffer, x *[]uint64) error {
 			if u, data, err = decodeVarint(data); err != nil {
 				return err
 			}
-			*x = append(*x, u)
+			tmp = append(tmp, u)
 		}
+		*x = append(*x, tmp...)
 		return nil
 	}
 	var u uint64
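Aside (not part of the commit): the two hunks above only change how packed varint fields are accumulated before being appended to the output slice. A self-contained sketch of the same packed-varint decoding, written for illustration rather than taken from pprof's unexported decoder:

package main

import "fmt"

// decodeVarint reads one protobuf-style varint (LEB128, 7 bits per byte).
func decodeVarint(data []byte) (uint64, []byte, error) {
	var u uint64
	for i := 0; i < len(data); i++ {
		u |= uint64(data[i]&0x7f) << (7 * uint(i))
		if data[i]&0x80 == 0 {
			return u, data[i+1:], nil
		}
	}
	return 0, nil, fmt.Errorf("truncated varint")
}

func main() {
	// Packed field body for the values 1, 300, 2: 0x01, 0xac 0x02, 0x02.
	data := []byte{0x01, 0xac, 0x02, 0x02}
	var out []int64
	tmp := make([]int64, 0, len(data)) // maximally sized, as in the patch
	for len(data) > 0 {
		u, rest, err := decodeVarint(data)
		if err != nil {
			panic(err)
		}
		tmp = append(tmp, int64(u))
		data = rest
	}
	out = append(out, tmp...)
	fmt.Println(out) // [1 300 2]
}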
vendor/github.com/google/pprof/profile/prune.go (24 changes; generated, vendored)

@@ -62,34 +62,18 @@ func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
 	prune := make(map[uint64]bool)
 	pruneBeneath := make(map[uint64]bool)
 
-	// simplifyFunc can be expensive, so cache results.
-	// Note that the same function name can be encountered many times due
-	// different lines and addresses in the same function.
-	pruneCache := map[string]bool{} // Map from function to whether or not to prune
-	pruneFromHere := func(s string) bool {
-		if r, ok := pruneCache[s]; ok {
-			return r
-		}
-		funcName := simplifyFunc(s)
-		if dropRx.MatchString(funcName) {
-			if keepRx == nil || !keepRx.MatchString(funcName) {
-				pruneCache[s] = true
-				return true
-			}
-		}
-		pruneCache[s] = false
-		return false
-	}
-
 	for _, loc := range p.Location {
 		var i int
 		for i = len(loc.Line) - 1; i >= 0; i-- {
 			if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
-				if pruneFromHere(fn.Name) {
-					break
+				funcName := simplifyFunc(fn.Name)
+				if dropRx.MatchString(funcName) {
+					if keepRx == nil || !keepRx.MatchString(funcName) {
+						break
+					}
 				}
 			}
 		}
 
 		if i >= 0 {
 			// Found matching entry to prune.
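Aside (not part of the commit): the exported behaviour of Prune is unchanged above — only the per-name result caching goes away — so a caller still passes a drop regexp and an optional keep regexp. A hedged sketch of that public API; the file name and patterns are placeholders:

package main

import (
	"log"
	"os"
	"regexp"

	"github.com/google/pprof/profile"
)

// pruneRuntime drops runtime-internal frames from a profile read from path,
// keeping runtime.main as an anchor. Purely illustrative.
func pruneRuntime(path string) (*profile.Profile, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	p, err := profile.Parse(f)
	if err != nil {
		return nil, err
	}

	dropRx := regexp.MustCompile(`^runtime\.`)
	keepRx := regexp.MustCompile(`^runtime\.main$`)
	p.Prune(dropRx, keepRx) // the caching removed above was internal only
	return p, nil
}

func main() {
	if _, err := pruneRuntime("cpu.pprof"); err != nil {
		log.Fatal(err)
	}
}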
vendor/github.com/hashicorp/golang-lru/LICENSE (2 changes; generated, vendored)

@@ -1,5 +1,3 @@
-Copyright (c) 2014 HashiCorp, Inc.
-
 Mozilla Public License, version 2.0
 
 1. Definitions
vendor/github.com/hashicorp/golang-lru/simplelru/lru.go (22 changes; generated, vendored)

@@ -25,7 +25,7 @@ type entry struct {
 // NewLRU constructs an LRU of the given size
 func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
 	if size <= 0 {
-		return nil, errors.New("must provide a positive size")
+		return nil, errors.New("Must provide a positive size")
 	}
 	c := &LRU{
 		size: size,
@@ -73,9 +73,6 @@ func (c *LRU) Add(key, value interface{}) (evicted bool) {
 func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
 	if ent, ok := c.items[key]; ok {
 		c.evictList.MoveToFront(ent)
-		if ent.Value.(*entry) == nil {
-			return nil, false
-		}
 		return ent.Value.(*entry).value, true
 	}
 	return
@@ -109,7 +106,7 @@ func (c *LRU) Remove(key interface{}) (present bool) {
 }
 
 // RemoveOldest removes the oldest item from the cache.
-func (c *LRU) RemoveOldest() (key, value interface{}, ok bool) {
+func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
 	ent := c.evictList.Back()
 	if ent != nil {
 		c.removeElement(ent)
@@ -120,7 +117,7 @@ func (c *LRU) RemoveOldest() (key, value interface{}, ok bool) {
 }
 
 // GetOldest returns the oldest entry
-func (c *LRU) GetOldest() (key, value interface{}, ok bool) {
+func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
 	ent := c.evictList.Back()
 	if ent != nil {
 		kv := ent.Value.(*entry)
@@ -145,19 +142,6 @@ func (c *LRU) Len() int {
 	return c.evictList.Len()
 }
 
-// Resize changes the cache size.
-func (c *LRU) Resize(size int) (evicted int) {
-	diff := c.Len() - size
-	if diff < 0 {
-		diff = 0
-	}
-	for i := 0; i < diff; i++ {
-		c.removeOldest()
-	}
-	c.size = size
-	return diff
-}
-
 // removeOldest removes the oldest item from the cache.
 func (c *LRU) removeOldest() {
 	ent := c.evictList.Back()
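Aside (not part of the commit): the reverted simplelru keeps NewLRU, Add, Get, RemoveOldest and friends, but has no Resize, and RemoveOldest/GetOldest spell out each interface{} parameter. A small usage sketch against that older surface:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	// Evict callback signature in this version: func(key interface{}, value interface{}).
	onEvict := func(key interface{}, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	}

	cache, err := simplelru.NewLRU(2, onEvict)
	if err != nil {
		log.Fatal(err)
	}

	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Add("c", 3) // evicts "a": capacity is 2

	if v, ok := cache.Get("b"); ok {
		fmt.Println("b =", v)
	}
	// Note: Resize is not part of this vendored version, per the hunk above.
}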
vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go (8 changes; generated, vendored)

@@ -1,4 +1,3 @@
-// Package simplelru provides simple LRU implementation based on build-in container/list.
 package simplelru
 
 // LRUCache is the interface for simple LRU cache.
@@ -11,7 +10,7 @@ type LRUCache interface {
 	// updates the "recently used"-ness of the key. #value, isFound
 	Get(key interface{}) (value interface{}, ok bool)
 
-	// Checks if a key exists in cache without updating the recent-ness.
+	// Check if a key exsists in cache without updating the recent-ness.
 	Contains(key interface{}) (ok bool)
 
 	// Returns key's value without updating the "recently used"-ness of the key.
@@ -32,9 +31,6 @@ type LRUCache interface {
 	// Returns the number of items in the cache.
 	Len() int
 
-	// Clears all cache entries.
+	// Clear all cache entries
 	Purge()
-
-	// Resizes cache, returning number evicted
-	Resize(int) int
 }
vendor/github.com/miekg/dns/README.md (1 change; generated, vendored)

@@ -85,6 +85,7 @@ A not-so-up-to-date-list-that-may-be-actually-current:
 * https://github.com/wintbiit/NineDNS
 * https://linuxcontainers.org/incus/
 * https://ifconfig.es
+* https://github.com/zmap/zdns
 
 
 Send pull request if you want to be listed here.
vendor/github.com/miekg/dns/dnssec.go (42 changes; generated, vendored)

@@ -250,14 +250,6 @@ func (d *DS) ToCDS() *CDS {
 // zero, it is used as-is, otherwise the TTL of the RRset is used as the
 // OrigTTL.
 func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
-	if k == nil {
-		return ErrPrivKey
-	}
-	// s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set
-	if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
-		return ErrKey
-	}
-
 	h0 := rrset[0].Header()
 	rr.Hdr.Rrtype = TypeRRSIG
 	rr.Hdr.Name = h0.Name
@@ -272,6 +264,18 @@ func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
 		rr.Labels-- // wildcard, remove from label count
 	}
 
+	return rr.signAsIs(k, rrset)
+}
+
+func (rr *RRSIG) signAsIs(k crypto.Signer, rrset []RR) error {
+	if k == nil {
+		return ErrPrivKey
+	}
+	// s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set
+	if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
+		return ErrKey
+	}
+
 	sigwire := new(rrsigWireFmt)
 	sigwire.TypeCovered = rr.TypeCovered
 	sigwire.Algorithm = rr.Algorithm
@@ -370,9 +374,12 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
 	if rr.Algorithm != k.Algorithm {
 		return ErrKey
 	}
-	if !strings.EqualFold(rr.SignerName, k.Hdr.Name) {
+
+	signerName := CanonicalName(rr.SignerName)
+	if !equal(signerName, k.Hdr.Name) {
 		return ErrKey
 	}
+
 	if k.Protocol != 3 {
 		return ErrKey
 	}
@@ -384,9 +391,18 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
 	}
 
 	// IsRRset checked that we have at least one RR and that the RRs in
-	// the set have consistent type, class, and name. Also check that type and
-	// class matches the RRSIG record.
-	if h0 := rrset[0].Header(); h0.Class != rr.Hdr.Class || h0.Rrtype != rr.TypeCovered {
+	// the set have consistent type, class, and name. Also check that type,
+	// class and name matches the RRSIG record.
+	// Also checks RFC 4035 5.3.1 the number of labels in the RRset owner
+	// name MUST be greater than or equal to the value in the RRSIG RR's Labels field.
+	// RFC 4035 5.3.1 Signer's Name MUST be the name of the zone that [contains the RRset].
+	// Since we don't have SOA info, checking suffix may be the best we can do...?
+	if h0 := rrset[0].Header(); h0.Class != rr.Hdr.Class ||
+		h0.Rrtype != rr.TypeCovered ||
+		uint8(CountLabel(h0.Name)) < rr.Labels ||
+		!equal(h0.Name, rr.Hdr.Name) ||
+		!strings.HasSuffix(CanonicalName(h0.Name), signerName) {
+
 		return ErrRRset
 	}
 
@@ -400,7 +416,7 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
 	sigwire.Expiration = rr.Expiration
 	sigwire.Inception = rr.Inception
 	sigwire.KeyTag = rr.KeyTag
-	sigwire.SignerName = CanonicalName(rr.SignerName)
+	sigwire.SignerName = signerName
 	// Create the desired binary blob
 	signeddata := make([]byte, DefaultMsgSize)
 	n, err := packSigWire(sigwire, signeddata)
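Aside (not part of the commit): with the stricter Verify above, the RRset owner name must match the RRSIG owner, carry at least Labels labels, and fall under the signer's zone. A hedged end-to-end sketch with the public miekg/dns API; key parameters and names are illustrative only:

package main

import (
	"crypto"
	"log"
	"time"

	"github.com/miekg/dns"
)

func main() {
	// Illustrative zone key; a real zone would manage keys carefully.
	key := &dns.DNSKEY{
		Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
		Flags:     dns.ZONE | dns.SEP,
		Protocol:  3,
		Algorithm: dns.ECDSAP256SHA256,
	}
	priv, err := key.Generate(256)
	if err != nil {
		log.Fatal(err)
	}

	// RRset to sign: a single A record inside the signer's zone.
	a, err := dns.NewRR("www.example.org. 3600 IN A 192.0.2.1")
	if err != nil {
		log.Fatal(err)
	}
	rrset := []dns.RR{a}

	sig := &dns.RRSIG{
		KeyTag:     key.KeyTag(),
		SignerName: key.Hdr.Name,
		Algorithm:  key.Algorithm,
		Inception:  uint32(time.Now().Add(-time.Hour).Unix()),
		Expiration: uint32(time.Now().Add(24 * time.Hour).Unix()),
	}
	if err := sig.Sign(priv.(crypto.Signer), rrset); err != nil {
		log.Fatal(err)
	}

	// Verify now also enforces the owner-name and label-count checks added above.
	if err := sig.Verify(key, rrset); err != nil {
		log.Fatal(err)
	}
	log.Println("RRSIG verified")
}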
vendor/github.com/miekg/dns/edns.go (36 changes; generated, vendored)

@@ -58,7 +58,7 @@ func makeDataOpt(code uint16) EDNS0 {
 	case EDNS0EDE:
 		return new(EDNS0_EDE)
 	case EDNS0ESU:
-		return &EDNS0_ESU{Code: EDNS0ESU}
+		return new(EDNS0_ESU)
 	default:
 		e := new(EDNS0_LOCAL)
 		e.Code = code
@@ -66,8 +66,7 @@ func makeDataOpt(code uint16) EDNS0 {
 	}
 }
 
-// OPT is the EDNS0 RR appended to messages to convey extra (meta) information.
-// See RFC 6891.
+// OPT is the EDNS0 RR appended to messages to convey extra (meta) information. See RFC 6891.
 type OPT struct {
 	Hdr    RR_Header
 	Option []EDNS0 `dns:"opt"`
@@ -144,8 +143,6 @@ func (*OPT) parse(c *zlexer, origin string) *ParseError {
 
 func (rr *OPT) isDuplicate(r2 RR) bool { return false }
 
-// return the old value -> delete SetVersion?
-
 // Version returns the EDNS version used. Only zero is defined.
 func (rr *OPT) Version() uint8 {
 	return uint8(rr.Hdr.Ttl & 0x00FF0000 >> 16)
@@ -236,8 +233,8 @@ type EDNS0 interface {
 //	e.Nsid = "AA"
 //	o.Option = append(o.Option, e)
 type EDNS0_NSID struct {
-	Code uint16 // Always EDNS0NSID
-	Nsid string // This string needs to be hex encoded
+	Code uint16 // always EDNS0NSID
+	Nsid string // string needs to be hex encoded
 }
 
 func (e *EDNS0_NSID) pack() ([]byte, error) {
@@ -275,7 +272,7 @@ func (e *EDNS0_NSID) copy() EDNS0 { return &EDNS0_NSID{e.Code, e.Nsid}
 // When packing it will apply SourceNetmask. If you need more advanced logic,
 // patches welcome and good luck.
 type EDNS0_SUBNET struct {
-	Code          uint16 // Always EDNS0SUBNET
+	Code          uint16 // always EDNS0SUBNET
 	Family        uint16 // 1 for IP, 2 for IP6
 	SourceNetmask uint8
 	SourceScope   uint8
@@ -399,8 +396,8 @@ func (e *EDNS0_SUBNET) copy() EDNS0 {
 //
 // There is no guarantee that the Cookie string has a specific length.
 type EDNS0_COOKIE struct {
-	Code   uint16 // Always EDNS0COOKIE
-	Cookie string // Hex-encoded cookie data
+	Code   uint16 // always EDNS0COOKIE
+	Cookie string // hex encoded cookie data
 }
 
 func (e *EDNS0_COOKIE) pack() ([]byte, error) {
@@ -430,7 +427,7 @@ func (e *EDNS0_COOKIE) copy() EDNS0 { return &EDNS0_COOKIE{e.Code, e.C
 //	e.Lease = 120 // in seconds
 //	o.Option = append(o.Option, e)
 type EDNS0_UL struct {
-	Code     uint16 // Always EDNS0UL
+	Code     uint16 // always EDNS0UL
 	Lease    uint32
 	KeyLease uint32
 }
@@ -469,7 +466,7 @@ func (e *EDNS0_UL) unpack(b []byte) error {
 // EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
 // Implemented for completeness, as the EDNS0 type code is assigned.
 type EDNS0_LLQ struct {
-	Code    uint16 // Always EDNS0LLQ
+	Code    uint16 // always EDNS0LLQ
 	Version uint16
 	Opcode  uint16
 	Error   uint16
@@ -515,7 +512,7 @@ func (e *EDNS0_LLQ) copy() EDNS0 {
 
 // EDNS0_DAU implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975.
 type EDNS0_DAU struct {
-	Code    uint16 // Always EDNS0DAU
+	Code    uint16 // always EDNS0DAU
 	AlgCode []uint8
 }
 
@@ -539,7 +536,7 @@ func (e *EDNS0_DAU) copy() EDNS0 { return &EDNS0_DAU{e.Code, e.AlgCode} }
 
 // EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975.
 type EDNS0_DHU struct {
-	Code    uint16 // Always EDNS0DHU
+	Code    uint16 // always EDNS0DHU
 	AlgCode []uint8
 }
 
@@ -563,7 +560,7 @@ func (e *EDNS0_DHU) copy() EDNS0 { return &EDNS0_DHU{e.Code, e.AlgCode} }
 
 // EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975.
 type EDNS0_N3U struct {
-	Code    uint16 // Always EDNS0N3U
+	Code    uint16 // always EDNS0N3U
 	AlgCode []uint8
 }
 
@@ -588,7 +585,7 @@ func (e *EDNS0_N3U) copy() EDNS0 { return &EDNS0_N3U{e.Code, e.AlgCode} }
 
 // EDNS0_EXPIRE implements the EDNS0 option as described in RFC 7314.
 type EDNS0_EXPIRE struct {
-	Code   uint16 // Always EDNS0EXPIRE
+	Code   uint16 // always EDNS0EXPIRE
 	Expire uint32
 	Empty  bool // Empty is used to signal an empty Expire option in a backwards compatible way, it's not used on the wire.
 }
@@ -668,7 +665,7 @@ func (e *EDNS0_LOCAL) unpack(b []byte) error {
 // EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep
 // the TCP connection alive. See RFC 7828.
 type EDNS0_TCP_KEEPALIVE struct {
-	Code uint16 // Always EDNSTCPKEEPALIVE
+	Code uint16 // always EDNSTCPKEEPALIVE
 
 	// Timeout is an idle timeout value for the TCP connection, specified in
 	// units of 100 milliseconds, encoded in network byte order. If set to 0,
@@ -839,13 +836,12 @@ func (e *EDNS0_EDE) unpack(b []byte) error {
 	return nil
 }
 
-// The EDNS0_ESU option for ENUM Source-URI Extension
+// The EDNS0_ESU option for ENUM Source-URI Extension.
 type EDNS0_ESU struct {
-	Code uint16
+	Code uint16 // always EDNS0ESU
 	Uri  string
 }
 
-// Option implements the EDNS0 interface.
 func (e *EDNS0_ESU) Option() uint16 { return EDNS0ESU }
 func (e *EDNS0_ESU) String() string { return e.Uri }
 func (e *EDNS0_ESU) copy() EDNS0    { return &EDNS0_ESU{e.Code, e.Uri} }
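Aside (not part of the commit): the EDNS0 option types touched above are all attached to a query the same way — build an OPT RR and append options to it, as the NSID doc comment in the diff shows. A hedged sketch using the exported miekg/dns API; names and addresses are placeholders:

package main

import (
	"fmt"
	"net"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeA)

	opt := &dns.OPT{Hdr: dns.RR_Header{Name: ".", Rrtype: dns.TypeOPT}}
	opt.SetUDPSize(dns.DefaultMsgSize)

	// NSID option, mirroring the usage shown in the doc comment above.
	nsid := &dns.EDNS0_NSID{Code: dns.EDNS0NSID}
	opt.Option = append(opt.Option, nsid)

	// EDNS client-subnet option.
	subnet := &dns.EDNS0_SUBNET{
		Code:          dns.EDNS0SUBNET,
		Family:        1, // IPv4
		SourceNetmask: 24,
		Address:       net.ParseIP("192.0.2.0").To4(),
	}
	opt.Option = append(opt.Option, subnet)

	m.Extra = append(m.Extra, opt)
	fmt.Println(m.String())
}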
vendor/github.com/miekg/dns/listen_no_reuseport.go (generated, vendored)

@@ -3,9 +3,15 @@
 
 package dns
 
-import "net"
+import (
+	"fmt"
+	"net"
+)
 
-const supportsReusePort = false
+const (
+	supportsReusePort = false
+	supportsReuseAddr = false
+)
 
 func listenTCP(network, addr string, reuseport, reuseaddr bool) (net.Listener, error) {
 	if reuseport || reuseaddr {
@@ -15,8 +21,6 @@ func listenTCP(network, addr string, reuseport, reuseaddr bool) (net.Listener, e
 	return net.Listen(network, addr)
 }
 
-const supportsReuseAddr = false
-
 func listenUDP(network, addr string, reuseport, reuseaddr bool) (net.PacketConn, error) {
 	if reuseport || reuseaddr {
 		// TODO(tmthrgd): return an error?
@@ -24,3 +28,13 @@ func listenUDP(network, addr string, reuseport, reuseaddr bool) (net.PacketConn,
 
 	return net.ListenPacket(network, addr)
 }
+
+// this is just for test compatibility
+func checkReuseport(fd uintptr) (bool, error) {
+	return false, fmt.Errorf("not supported")
+}
+
+// this is just for test compatibility
+func checkReuseaddr(fd uintptr) (bool, error) {
+	return false, fmt.Errorf("not supported")
+}
vendor/github.com/miekg/dns/listen_reuseport.go (generated, vendored)

@@ -39,10 +39,40 @@ func reuseaddrControl(network, address string, c syscall.RawConn) error {
 	return opErr
 }
 
+func reuseaddrandportControl(network, address string, c syscall.RawConn) error {
+	err := reuseaddrControl(network, address, c)
+	if err != nil {
+		return err
+	}
+
+	return reuseportControl(network, address, c)
+}
+
+// this is just for test compatibility
+func checkReuseport(fd uintptr) (bool, error) {
+	v, err := unix.GetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT)
+	if err != nil {
+		return false, err
+	}
+
+	return v == 1, nil
+}
+
+// this is just for test compatibility
+func checkReuseaddr(fd uintptr) (bool, error) {
+	v, err := unix.GetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEADDR)
+	if err != nil {
+		return false, err
+	}
+
+	return v == 1, nil
+}
+
 func listenTCP(network, addr string, reuseport, reuseaddr bool) (net.Listener, error) {
 	var lc net.ListenConfig
 	switch {
 	case reuseaddr && reuseport:
+		lc.Control = reuseaddrandportControl
 	case reuseport:
 		lc.Control = reuseportControl
 	case reuseaddr:
@@ -56,6 +86,7 @@ func listenUDP(network, addr string, reuseport, reuseaddr bool) (net.PacketConn,
 	var lc net.ListenConfig
 	switch {
 	case reuseaddr && reuseport:
+		lc.Control = reuseaddrandportControl
 	case reuseport:
 		lc.Control = reuseportControl
 	case reuseaddr:
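Aside (not part of the commit): reuseaddrandportControl above simply chains the two setsockopt hooks into one net.ListenConfig.Control callback. A standalone, Linux-only sketch of the same idea (this is not the library's internal code):

//go:build linux

package main

import (
	"context"
	"fmt"
	"log"
	"net"
	"syscall"

	"golang.org/x/sys/unix"
)

// reusePortControl sets SO_REUSEADDR and SO_REUSEPORT on the raw socket
// before it is bound, the way the patched listenTCP/listenUDP wire it in.
func reusePortControl(network, address string, c syscall.RawConn) error {
	var opErr error
	err := c.Control(func(fd uintptr) {
		if opErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEADDR, 1); opErr != nil {
			return
		}
		opErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
	})
	if err != nil {
		return err
	}
	return opErr
}

func main() {
	lc := net.ListenConfig{Control: reusePortControl}
	pc, err := lc.ListenPacket(context.Background(), "udp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	defer pc.Close()
	fmt.Println("listening on", pc.LocalAddr())
}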
vendor/github.com/miekg/dns/server.go (1 change; generated, vendored)

@@ -226,6 +226,7 @@ type Server struct {
 	// If NotifyStartedFunc is set it is called once the server has started listening.
 	NotifyStartedFunc func()
 	// DecorateReader is optional, allows customization of the process that reads raw DNS messages.
+	// The decorated reader must not mutate the data read from the conn.
 	DecorateReader DecorateReader
 	// DecorateWriter is optional, allows customization of the process that writes raw DNS messages.
 	DecorateWriter DecorateWriter
vendor/github.com/miekg/dns/sig0.go (3 changes; generated, vendored)

@@ -7,7 +7,6 @@ import (
 	"crypto/rsa"
 	"encoding/binary"
 	"math/big"
-	"strings"
 	"time"
 )
 
@@ -151,7 +150,7 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error {
 	}
 	// If key has come from the DNS name compression might
 	// have mangled the case of the name
-	if !strings.EqualFold(signername, k.Header().Name) {
+	if !equal(signername, k.Header().Name) {
 		return &Error{err: "signer name doesn't match key name"}
 	}
 	sigend := offset
vendor/github.com/miekg/dns/version.go (2 changes; generated, vendored)

@@ -3,7 +3,7 @@ package dns
 import "fmt"
 
 // Version is current version of this library.
-var Version = v{1, 1, 62}
+var Version = v{1, 1, 63}
 
 // v holds the version of this library.
 type v struct {
vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go (4 changes; generated, vendored)

@@ -82,10 +82,6 @@ func New(colorMode ColorMode) Formatter {
 		return fmt.Sprintf("\x1b[38;5;%dm", colorCode)
 	}
 
-	if _, noColor := os.LookupEnv("GINKGO_NO_COLOR"); noColor {
-		colorMode = ColorModeNone
-	}
-
 	f := Formatter{
 		ColorMode: colorMode,
 		colors: map[string]string{
vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go (15 changes; generated, vendored)

@@ -2,8 +2,6 @@ package build
 
 import (
 	"fmt"
-	"os"
-	"path"
 
 	"github.com/onsi/ginkgo/v2/ginkgo/command"
 	"github.com/onsi/ginkgo/v2/ginkgo/internal"
@@ -55,18 +53,7 @@ func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.Go
 		if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
 			fmt.Println(suite.CompilationError.Error())
 		} else {
-			if len(goFlagsConfig.O) == 0 {
-				goFlagsConfig.O = path.Join(suite.Path, suite.PackageName+".test")
-			} else {
-				stat, err := os.Stat(goFlagsConfig.O)
-				if err != nil {
-					panic(err)
-				}
-				if stat.IsDir() {
-					goFlagsConfig.O += "/" + suite.PackageName + ".test"
-				}
-			}
-			fmt.Printf("Compiled %s\n", goFlagsConfig.O)
+			fmt.Printf("Compiled %s.test\n", suite.PackageName)
 		}
 	}
 }
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go (2 changes; generated, vendored)

@@ -7,7 +7,7 @@ import (
 	"os"
 	"text/template"
 
-	sprig "github.com/go-task/slim-sprig/v3"
+	sprig "github.com/go-task/slim-sprig"
 	"github.com/onsi/ginkgo/v2/ginkgo/command"
 	"github.com/onsi/ginkgo/v2/ginkgo/internal"
 	"github.com/onsi/ginkgo/v2/types"
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go (8 changes; generated, vendored)

@@ -10,7 +10,7 @@ import (
 	"strings"
 	"text/template"
 
-	sprig "github.com/go-task/slim-sprig/v3"
+	sprig "github.com/go-task/slim-sprig"
 	"github.com/onsi/ginkgo/v2/ginkgo/command"
 	"github.com/onsi/ginkgo/v2/ginkgo/internal"
 	"github.com/onsi/ginkgo/v2/types"
@@ -32,9 +32,6 @@ func BuildGenerateCommand() command.Command {
 		{Name: "template-data", KeyPath: "CustomTemplateData",
 			UsageArgument: "template-data-file",
 			Usage:         "If specified, generate will use the contents of the file passed as data to be rendered in the test file template"},
-		{Name: "tags", KeyPath: "Tags",
-			UsageArgument: "build-tags",
-			Usage:         "If specified, generate will create a test file that uses the given build tags (i.e. `--tags e2e,!unit` will add `//go:build e2e,!unit`)"},
 	},
 	&conf,
 	types.GinkgoFlagSections{},
@@ -62,7 +59,6 @@ You can also pass a <filename> of the form "file.go" and generate will emit "fil
 }
 
 type specData struct {
-	BuildTags         string
 	Package           string
 	Subject           string
 	PackageImportPath string
@@ -97,7 +93,6 @@ func generateTestFileForSubject(subject string, conf GeneratorsConfig) {
 	}
 
 	data := specData{
-		BuildTags:         getBuildTags(conf.Tags),
 		Package:           determinePackageName(packageName, conf.Internal),
 		Subject:           formattedName,
 		PackageImportPath: getPackageImportPath(),
@@ -174,7 +169,6 @@ func moduleName(modRoot string) string {
 	if err != nil {
 		return ""
 	}
-	defer modFile.Close()
 
 	mod := make([]byte, 128)
 	_, err = modFile.Read(mod)
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go (6 changes; generated, vendored)

@@ -1,7 +1,6 @@
 package generators
 
-var specText = `{{.BuildTags}}
-package {{.Package}}
+var specText = `package {{.Package}}
 
 import (
 	{{.GinkgoImport}}
@@ -15,8 +14,7 @@ var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() {
 })
 `
 
-var agoutiSpecText = `{{.BuildTags}}
-package {{.Package}}
+var agoutiSpecText = `package {{.Package}}
 
 import (
 	{{.GinkgoImport}}
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go (12 changes; generated, vendored)

@@ -1,7 +1,6 @@
 package generators
 
 import (
-	"fmt"
 	"go/build"
 	"os"
 	"path/filepath"
@@ -15,7 +14,6 @@ type GeneratorsConfig struct {
 	Agouti, NoDot, Internal bool
 	CustomTemplate          string
 	CustomTemplateData      string
-	Tags                    string
 }
 
 func getPackageAndFormattedName() (string, string, string) {
@@ -64,13 +62,3 @@ func determinePackageName(name string, internal bool) string {
 
 	return name + "_test"
 }
-
-// getBuildTags returns the resultant string to be added.
-// If the input string is not empty, then returns a `//go:build {}` string,
-// otherwise returns an empty string.
-func getBuildTags(tags string) string {
-	if tags != "" {
-		return fmt.Sprintf("//go:build %s\n", tags)
-	}
-	return ""
-}
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go (14 changes; generated, vendored)

@@ -25,18 +25,6 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite
 		return suite
 	}
 
-	if len(goFlagsConfig.O) > 0 {
-		userDefinedPath, err := filepath.Abs(goFlagsConfig.O)
-		if err != nil {
-			suite.State = TestSuiteStateFailedToCompile
-			suite.CompilationError = fmt.Errorf("Failed to compute compilation target path %s:\n%s", goFlagsConfig.O, err.Error())
-			return suite
-		}
-		path = userDefinedPath
-	}
-
-	goFlagsConfig.O = path
-
 	ginkgoInvocationPath, _ := os.Getwd()
 	ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath)
 	packagePath := suite.AbsPath()
@@ -46,7 +34,7 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite
 		suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error())
 		return suite
 	}
-	args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, "./", pathToInvocationPath)
+	args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./", pathToInvocationPath)
 	if err != nil {
 		suite.State = TestSuiteStateFailedToCompile
 		suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error())
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go (129 changes; generated, vendored)

@@ -1,129 +0,0 @@
-// Copyright (c) 2015, Wade Simmons
-// All rights reserved.
-
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-
-// 1. Redistributions of source code must retain the above copyright notice, this
-//    list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright notice,
-//    this list of conditions and the following disclaimer in the documentation
-//    and/or other materials provided with the distribution.
-
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Package gocovmerge takes the results from multiple `go test -coverprofile`
-// runs and merges them into one profile
-
-// this file was originally taken from the gocovmerge project
-// see also: https://go.shabbyrobe.org/gocovmerge
-package internal
-
-import (
-	"fmt"
-	"io"
-	"sort"
-
-	"golang.org/x/tools/cover"
-)
-
-func AddCoverProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile {
-	i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName })
-	if i < len(profiles) && profiles[i].FileName == p.FileName {
-		MergeCoverProfiles(profiles[i], p)
-	} else {
-		profiles = append(profiles, nil)
-		copy(profiles[i+1:], profiles[i:])
-		profiles[i] = p
-	}
-	return profiles
-}
-
-func DumpCoverProfiles(profiles []*cover.Profile, out io.Writer) error {
-	if len(profiles) == 0 {
-		return nil
-	}
-	if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil {
-		return err
-	}
-	for _, p := range profiles {
-		for _, b := range p.Blocks {
-			if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-func MergeCoverProfiles(into *cover.Profile, merge *cover.Profile) error {
-	if into.Mode != merge.Mode {
-		return fmt.Errorf("cannot merge profiles with different modes")
-	}
-	// Since the blocks are sorted, we can keep track of where the last block
-	// was inserted and only look at the blocks after that as targets for merge
-	startIndex := 0
-	for _, b := range merge.Blocks {
-		var err error
-		startIndex, err = mergeProfileBlock(into, b, startIndex)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) {
-	sortFunc := func(i int) bool {
-		pi := p.Blocks[i+startIndex]
-		return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol)
-	}
-
-	i := 0
-	if sortFunc(i) != true {
-		i = sort.Search(len(p.Blocks)-startIndex, sortFunc)
-	}
-
-	i += startIndex
-	if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol {
-		if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol {
-			return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb)
-		}
-		switch p.Mode {
-		case "set":
-			p.Blocks[i].Count |= pb.Count
-		case "count", "atomic":
-			p.Blocks[i].Count += pb.Count
-		default:
-			return i, fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode)
-		}
-
-	} else {
-		if i > 0 {
-			pa := p.Blocks[i-1]
-			if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) {
-				return i, fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb)
-			}
-		}
-		if i < len(p.Blocks)-1 {
-			pa := p.Blocks[i+1]
-			if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) {
-				return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb)
-			}
-		}
-		p.Blocks = append(p.Blocks, cover.ProfileBlock{})
-		copy(p.Blocks[i+1:], p.Blocks[i:])
-		p.Blocks[i] = pb
-	}
-
-	return i + 1, nil
-}
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go (42 changes; generated, vendored)

@@ -1,6 +1,7 @@
 package internal
 
 import (
+	"bytes"
 	"fmt"
 	"os"
 	"os/exec"
@@ -11,7 +12,6 @@ import (
 	"github.com/google/pprof/profile"
 	"github.com/onsi/ginkgo/v2/reporters"
 	"github.com/onsi/ginkgo/v2/types"
-	"golang.org/x/tools/cover"
 )
 
 func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string {
@@ -144,27 +144,38 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC
 	return messages, nil
 }
 
-// loads each profile, merges them, deletes them, stores them in destination
+//loads each profile, combines them, deletes them, stores them in destination
 func MergeAndCleanupCoverProfiles(profiles []string, destination string) error {
-	var merged []*cover.Profile
-	for _, file := range profiles {
-		parsedProfiles, err := cover.ParseProfiles(file)
+	combined := &bytes.Buffer{}
+	modeRegex := regexp.MustCompile(`^mode: .*\n`)
+	for i, profile := range profiles {
+		contents, err := os.ReadFile(profile)
 		if err != nil {
-			return err
+			return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error())
 		}
-		os.Remove(file)
-		for _, p := range parsedProfiles {
-			merged = AddCoverProfile(merged, p)
+		os.Remove(profile)
+
+		// remove the cover mode line from every file
+		// except the first one
+		if i > 0 {
+			contents = modeRegex.ReplaceAll(contents, []byte{})
 		}
-	}
-	dst, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
-	if err != nil {
-		return err
-	}
-	defer dst.Close()
-	err = DumpCoverProfiles(merged, dst)
+
+		_, err = combined.Write(contents)
+
+		// Add a newline to the end of every file if missing.
+		if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' {
+			_, err = combined.Write([]byte("\n"))
+		}
+
+		if err != nil {
+			return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error())
+		}
+	}
+
+	err := os.WriteFile(destination, combined.Bytes(), 0666)
 	if err != nil {
-		return err
+		return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error())
 	}
 	return nil
 }
@@ -173,7 +184,7 @@ func GetCoverageFromCoverProfile(profile string) (float64, error) {
 	cmd := exec.Command("go", "tool", "cover", "-func", profile)
 	output, err := cmd.CombinedOutput()
 	if err != nil {
-		return 0, fmt.Errorf("Could not process Coverprofile %s: %s - %s", profile, err.Error(), string(output))
+		return 0, fmt.Errorf("Could not process Coverprofile %s: %s", profile, err.Error())
 	}
 	re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`)
 	matches := re.FindStringSubmatch(string(output))
@@ -197,7 +208,6 @@ func MergeProfiles(profilePaths []string, destination string) error {
 			return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error())
 		}
 		prof, err := profile.Parse(proFile)
-		_ = proFile.Close()
 		if err != nil {
 			return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error())
 		}
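Aside (not part of the commit): the reverted MergeAndCleanupCoverProfiles above concatenates cover profiles textually instead of merging blocks, which assumes every input was produced with the same -covermode. A standalone sketch of that concatenation approach, with placeholder file names:

package main

import (
	"bytes"
	"fmt"
	"os"
	"regexp"
)

// combineCoverProfiles concatenates Go cover profiles: the "mode:" header of
// the first file is kept and stripped from the rest. Illustrative only.
func combineCoverProfiles(paths []string, destination string) error {
	combined := &bytes.Buffer{}
	modeRegex := regexp.MustCompile(`^mode: .*\n`)
	for i, path := range paths {
		contents, err := os.ReadFile(path)
		if err != nil {
			return fmt.Errorf("unable to read coverage file %s: %w", path, err)
		}
		if i > 0 {
			contents = modeRegex.ReplaceAll(contents, []byte{})
		}
		combined.Write(contents)
		if len(contents) > 0 && contents[len(contents)-1] != '\n' {
			combined.WriteString("\n") // ensure each file ends with a newline
		}
	}
	return os.WriteFile(destination, combined.Bytes(), 0o666)
}

func main() {
	if err := combineCoverProfiles([]string{"a.coverprofile", "b.coverprofile"}, "combined.coverprofile"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}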
9
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go
generated
vendored
9
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go
generated
vendored
|
@ -7,7 +7,6 @@ import (
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"regexp"
|
"regexp"
|
||||||
"runtime"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/onsi/ginkgo/v2/types"
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
@ -193,7 +192,7 @@ func precompiledTestSuite(path string) (TestSuite, error) {
|
||||||
return TestSuite{}, errors.New("this is not a .test binary")
|
return TestSuite{}, errors.New("this is not a .test binary")
|
||||||
}
|
}
|
||||||
|
|
||||||
if filepath.Ext(path) == ".test" && runtime.GOOS != "windows" && info.Mode()&0111 == 0 {
|
if filepath.Ext(path) == ".test" && info.Mode()&0111 == 0 {
|
||||||
return TestSuite{}, errors.New("this is not executable")
|
return TestSuite{}, errors.New("this is not executable")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -226,7 +225,7 @@ func suitesInDir(dir string, recurse bool) TestSuites {
|
||||||
files, _ := os.ReadDir(dir)
|
files, _ := os.ReadDir(dir)
|
||||||
re := regexp.MustCompile(`^[^._].*_test\.go$`)
|
re := regexp.MustCompile(`^[^._].*_test\.go$`)
|
||||||
for _, file := range files {
|
for _, file := range files {
|
||||||
if !file.IsDir() && re.MatchString(file.Name()) {
|
if !file.IsDir() && re.Match([]byte(file.Name())) {
|
||||||
suite := TestSuite{
|
suite := TestSuite{
|
||||||
Path: relPath(dir),
|
Path: relPath(dir),
|
||||||
PackageName: packageNameForSuite(dir),
|
PackageName: packageNameForSuite(dir),
|
||||||
|
@ -241,7 +240,7 @@ func suitesInDir(dir string, recurse bool) TestSuites {
|
||||||
if recurse {
|
if recurse {
|
||||||
re = regexp.MustCompile(`^[._]`)
|
re = regexp.MustCompile(`^[._]`)
|
||||||
for _, file := range files {
|
for _, file := range files {
|
||||||
if file.IsDir() && !re.MatchString(file.Name()) {
|
if file.IsDir() && !re.Match([]byte(file.Name())) {
|
||||||
suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...)
|
suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -272,7 +271,7 @@ func filesHaveGinkgoSuite(dir string, files []os.DirEntry) bool {
|
||||||
reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`)
|
reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`)
|
||||||
|
|
||||||
for _, file := range files {
|
for _, file := range files {
|
||||||
if !file.IsDir() && reTestFile.MatchString(file.Name()) {
|
if !file.IsDir() && reTestFile.Match([]byte(file.Name())) {
|
||||||
contents, _ := os.ReadFile(dir + "/" + file.Name())
|
contents, _ := os.ReadFile(dir + "/" + file.Name())
|
||||||
if reGinkgo.Match(contents) {
|
if reGinkgo.Match(contents) {
|
||||||
return true
|
return true
|
||||||
|
|
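Several hunks in this file, and in the watch and types packages further down, swap regexp's MatchString for Match on a converted byte slice. The two calls are interchangeable for this purpose; a small self-contained sketch (not taken from the diff) using the same file-name pattern:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        re := regexp.MustCompile(`^[^._].*_test\.go$`)
        name := "suite_test.go"

        // MatchString works on a string, Match on a byte slice;
        // both report whether the pattern matches the input.
        fmt.Println(re.MatchString(name))   // true
        fmt.Println(re.Match([]byte(name))) // true
    }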
vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go: 7 changes (generated, vendored)
@@ -1,11 +1,10 @@
 package outline
 
 import (
+"github.com/onsi/ginkgo/v2/types"
 "go/ast"
 "go/token"
 "strconv"
-
-"github.com/onsi/ginkgo/v2/types"
 )
 
 const (
@@ -245,7 +244,9 @@ func labelFromCallExpr(ce *ast.CallExpr) []string {
 }
 if id.Name == "Label" {
 ls := extractLabels(expr)
-labels = append(labels, ls...)
+for _, label := range ls {
+labels = append(labels, label)
+}
 }
 }
 }
vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go: 9 changes (generated, vendored)
@@ -28,7 +28,14 @@ func packageNameForImport(f *ast.File, path string) *string {
 }
 name := spec.Name.String()
 if name == "<nil>" {
-name = "ginkgo"
+// If the package name is not explicitly specified,
+// make an educated guess. This is not guaranteed to be correct.
+lastSlash := strings.LastIndex(path, "/")
+if lastSlash == -1 {
+name = path
+} else {
+name = path[lastSlash+1:]
+}
 }
 if name == "." {
 name = ""
vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go: 26 changes (generated, vendored)
@@ -1,13 +1,10 @@
 package outline
 
 import (
-"bytes"
-"encoding/csv"
 "encoding/json"
 "fmt"
 "go/ast"
 "go/token"
-"strconv"
 "strings"
 
 "golang.org/x/tools/go/ast/inspector"
@@ -87,11 +84,9 @@ func (o *outline) String() string {
 // StringIndent returns a CSV-formated outline, but every line is indented by
 // one 'width' of spaces for every level of nesting.
 func (o *outline) StringIndent(width int) string {
-var b bytes.Buffer
+var b strings.Builder
 b.WriteString("Name,Text,Start,End,Spec,Focused,Pending,Labels\n")
 
-csvWriter := csv.NewWriter(&b)
-
 currentIndent := 0
 pre := func(n *ginkgoNode) {
 b.WriteString(fmt.Sprintf("%*s", currentIndent, ""))
@@ -101,22 +96,8 @@ func (o *outline) StringIndent(width int) string {
 } else {
 labels = strings.Join(n.Labels, ", ")
 }
-
-row := []string{
-n.Name,
-n.Text,
-strconv.Itoa(n.Start),
-strconv.Itoa(n.End),
-strconv.FormatBool(n.Spec),
-strconv.FormatBool(n.Focused),
-strconv.FormatBool(n.Pending),
-labels,
-}
-csvWriter.Write(row)
-
-// Ensure we write to `b' before the next `b.WriteString()', which might be adding indentation
-csvWriter.Flush()
-
+//enclosing labels in a double quoted comma separate listed so that when inmported into a CSV app the Labels column has comma separate strings
+b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t,\"%s\"\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending, labels))
 currentIndent += width
 }
 post := func(n *ginkgoNode) {
@@ -125,6 +106,5 @@ func (o *outline) StringIndent(width int) string {
 for _, n := range o.Nodes {
 n.Walk(pre, post)
 }
-
 return b.String()
 }
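One side of the StringIndent hunks builds each outline row with encoding/csv, the other formats the row by hand and only wraps the labels column in quotes. A short sketch, separate from the reporter itself, of why csv.Writer is the more robust choice when a field can contain commas or quotes:

    package main

    import (
        "encoding/csv"
        "fmt"
        "strings"
    )

    func main() {
        var b strings.Builder
        w := csv.NewWriter(&b)

        // encoding/csv escapes the embedded comma and quotes automatically.
        w.Write([]string{"It", `says "hello, world"`, "1", "2"})
        w.Flush()
        fmt.Print(b.String()) // It,"says ""hello, world""",1,2

        // Mirrors the hand-rolled approach: wraps the field in quotes
        // but does not escape quotes inside it.
        fmt.Printf("%s,\"%s\",%d,%d\n", "It", `says "hello, world"`, 1, 2)
    }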
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go: 2 changes (generated, vendored)
@@ -78,7 +78,7 @@ func (d Dependencies) resolveAndAdd(deps []string, depth int) {
 if err != nil {
 continue
 }
-if !pkg.Goroot && (!ginkgoAndGomegaFilter.MatchString(pkg.Dir) || ginkgoIntegrationTestFilter.MatchString(pkg.Dir)) {
+if !pkg.Goroot && (!ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) || ginkgoIntegrationTestFilter.Match([]byte(pkg.Dir))) {
 d.addDepIfNotPresent(pkg.Dir, depth)
 }
 }
 }
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go: 13 changes (generated, vendored)
@@ -4,7 +4,6 @@ import (
 "fmt"
 "os"
 "regexp"
-"strings"
 "time"
 )
 
@@ -80,11 +79,7 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti
 continue
 }
 
-if isHiddenFile(info) {
-continue
-}
-
-if goTestRegExp.MatchString(info.Name()) {
+if goTestRegExp.Match([]byte(info.Name())) {
 testHash += p.hashForFileInfo(info)
 if info.ModTime().After(testModifiedTime) {
 testModifiedTime = info.ModTime()
@@ -92,7 +87,7 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti
 continue
 }
 
-if p.watchRegExp.MatchString(info.Name()) {
+if p.watchRegExp.Match([]byte(info.Name())) {
 codeHash += p.hashForFileInfo(info)
 if info.ModTime().After(codeModifiedTime) {
 codeModifiedTime = info.ModTime()
@@ -108,10 +103,6 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti
 return
 }
 
-func isHiddenFile(info os.FileInfo) bool {
-return strings.HasPrefix(info.Name(), ".") || strings.HasPrefix(info.Name(), "_")
-}
-
 func (p *PackageHash) hashForFileInfo(info os.FileInfo) string {
 return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano())
 }
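The isHiddenFile helper removed here only checks for the leading "." or "_" that Go tooling uses to mark files it should ignore. A tiny standalone sketch of the same check (the isHidden name and example file names are illustrative):

    package main

    import (
        "fmt"
        "strings"
    )

    // isHidden mirrors the removed helper: files whose names start with
    // "." or "_" are skipped by the Go toolchain.
    func isHidden(name string) bool {
        return strings.HasPrefix(name, ".") || strings.HasPrefix(name, "_")
    }

    func main() {
        for _, n := range []string{"package_hash.go", ".hidden.go", "_ignored.go"} {
            fmt.Println(n, isHidden(n))
        }
    }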
vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go: 49 changes (generated, vendored)
@@ -182,31 +182,10 @@ func (r *DefaultReporter) WillRun(report types.SpecReport) {
 r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false)))
 }
 
-func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) {
-r.emitBlock("\n")
-if r.conf.GithubOutput {
-r.emitBlock(r.fi(1, "::group::%s", sectionName))
-} else {
-r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName))
-}
-fn()
-if r.conf.GithubOutput {
-r.emitBlock(r.fi(1, "::endgroup::"))
-} else {
-r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName))
-}
-
-}
-
 func (r *DefaultReporter) DidRun(report types.SpecReport) {
 v := r.conf.Verbosity()
 inParallel := report.RunningInParallel
 
-//should we completely omit this spec?
-if report.State.Is(types.SpecStateSkipped) && r.conf.SilenceSkips {
-return
-}
-
 header := r.specDenoter
 if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
 header = fmt.Sprintf("[%s]", report.LeafNodeType)
@@ -283,12 +262,9 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) {
 }
 }
 
-// If we have no content to show, just emit the header and return
+// If we have no content to show, jsut emit the header and return
 if !reportHasContent {
 r.emit(r.f(highlightColor + header + "{{/}}"))
-if r.conf.ForceNewlines {
-r.emit("\n")
-}
 return
 }
 
@@ -307,23 +283,26 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) {
 
 //Emit Stdout/Stderr Output
 if showSeparateStdSection {
-r.wrapTextBlock("Captured StdOut/StdErr Output", func() {
+r.emitBlock("\n")
+r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}"))
 r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr))
-})
+r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}"))
 }
 
 if showSeparateVisibilityAlwaysReportsSection {
-r.wrapTextBlock("Report Entries", func() {
+r.emitBlock("\n")
+r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}"))
 for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) {
 r.emitReportEntry(1, entry)
 }
-})
+r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}"))
 }
 
 if showTimeline {
-r.wrapTextBlock("Timeline", func() {
+r.emitBlock("\n")
+r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}"))
 r.emitTimeline(1, report, timeline)
-})
+r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}"))
 }
 
 // Emit Failure Message
@@ -426,15 +405,7 @@ func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, f
 func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) {
 highlightColor := r.highlightColorForState(state)
 r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message))
-if r.conf.GithubOutput {
-level := "error"
-if state.Is(types.SpecStateSkipped) {
-level = "notice"
-}
-r.emitBlock(r.fi(indent, "::%s file=%s,line=%d::%s %s", level, failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
-} else {
 r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
-}
 if failure.ForwardedPanic != "" {
 r.emitBlock("\n")
 r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic))
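The removed GithubOutput branches print GitHub Actions workflow commands instead of the colored terminal blocks: ::group::/::endgroup:: fold a section of the log, and ::error file=...,line=...:: attaches an annotation to a source location. A minimal sketch of that output format, independent of ginkgo (the file name, line number and message are placeholders):

    package main

    import "fmt"

    func main() {
        // Fold a block of log output in the GitHub Actions UI.
        fmt.Println("::group::Captured StdOut/StdErr Output")
        fmt.Println("... captured output ...")
        fmt.Println("::endgroup::")

        // Attach an error annotation to a file and line.
        fmt.Printf("::%s file=%s,line=%d::%s\n", "error", "suite_test.go", 42, "spec failed")
    }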
vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go: 19 changes (generated, vendored)
@@ -4,21 +4,16 @@ import (
 "encoding/json"
 "fmt"
 "os"
-"path"
 
 "github.com/onsi/ginkgo/v2/types"
 )
 
-// GenerateJSONReport produces a JSON-formatted report at the passed in destination
+//GenerateJSONReport produces a JSON-formatted report at the passed in destination
 func GenerateJSONReport(report types.Report, destination string) error {
-if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
-return err
-}
 f, err := os.Create(destination)
 if err != nil {
 return err
 }
-defer f.Close()
 enc := json.NewEncoder(f)
 enc.SetIndent("", " ")
 err = enc.Encode([]types.Report{
@@ -27,11 +22,11 @@ func GenerateJSONReport(report types.Report, destination string) error {
 if err != nil {
 return err
 }
-return nil
+return f.Close()
 }
 
-// MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources
-// It skips over reports that fail to decode but reports on them via the returned messages []string
+//MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources
+//It skips over reports that fail to decode but reports on them via the returned messages []string
 func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, error) {
 messages := []string{}
 allReports := []types.Report{}
@@ -51,19 +46,15 @@ func MergeAndCleanupJSONReports(sources []string, destination string) ([]string,
 allReports = append(allReports, reports...)
 }
 
-if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
-return messages, err
-}
 f, err := os.Create(destination)
 if err != nil {
 return messages, err
 }
-defer f.Close()
 enc := json.NewEncoder(f)
 enc.SetIndent("", " ")
 err = enc.Encode(allReports)
 if err != nil {
 return messages, err
 }
-return messages, nil
+return messages, f.Close()
 }
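The report writers above differ in whether they defer f.Close() and return nil or return f.Close() directly; for a file that has just been written, returning the Close error is what lets a late write failure reach the caller. A small sketch of that pattern with an illustrative payload and file name:

    package main

    import (
        "encoding/json"
        "fmt"
        "os"
    )

    // writeJSON propagates the error from Close so that a failure while
    // finishing the file is not silently dropped.
    func writeJSON(path string, v interface{}) error {
        f, err := os.Create(path)
        if err != nil {
            return err
        }
        enc := json.NewEncoder(f)
        enc.SetIndent("", " ")
        if err := enc.Encode(v); err != nil {
            f.Close()
            return err
        }
        return f.Close()
    }

    func main() {
        if err := writeJSON("report.json", map[string]string{"suite": "example"}); err != nil {
            fmt.Println(err)
        }
    }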
vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go: 21 changes (generated, vendored)
@@ -14,8 +14,6 @@ import (
 "encoding/xml"
 "fmt"
 "os"
-"path"
-"regexp"
 "strings"
 
 "github.com/onsi/ginkgo/v2/config"
@@ -105,8 +103,6 @@ type JUnitProperty struct {
 Value string `xml:"value,attr"`
 }
 
-var ownerRE = regexp.MustCompile(`(?i)^owner:(.*)$`)
-
 type JUnitTestCase struct {
 // Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()"
 Name string `xml:"name,attr"`
@@ -116,8 +112,6 @@ type JUnitTestCase struct {
 Status string `xml:"status,attr"`
 // Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime
 Time float64 `xml:"time,attr"`
-// Owner is the owner the spec - is set if a label matching Label("owner:X") is provided. The last matching label is used as the owner, thereby allowing specs to override owners specified in container nodes.
-Owner string `xml:"owner,attr,omitempty"`
 //Skipped is populated with a message if the test was skipped or pending
 Skipped *JUnitSkipped `xml:"skipped,omitempty"`
 //Error is populated if the test panicked or was interrupted
@@ -177,7 +171,6 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit
 {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")},
 {"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")},
 {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)},
-{"FailOnEmpty", fmt.Sprintf("%t", report.SuiteConfig.FailOnEmpty)},
 {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)},
 {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)},
 {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)},
@@ -201,12 +194,6 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit
 if len(labels) > 0 && !config.OmitSpecLabels {
 name = name + " [" + strings.Join(labels, ", ") + "]"
 }
-owner := ""
-for _, label := range labels {
-if matches := ownerRE.FindStringSubmatch(label); len(matches) == 2 {
-owner = matches[1]
-}
-}
 name = strings.TrimSpace(name)
 
 test := JUnitTestCase{
@@ -214,7 +201,6 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit
 Classname: report.SuiteDescription,
 Status: spec.State.String(),
 Time: spec.RunTime.Seconds(),
-Owner: owner,
 }
 if !spec.State.Is(config.OmitTimelinesForSpecState) {
 test.SystemErr = systemErrForUnstructuredReporters(spec)
@@ -299,9 +285,6 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit
 TestSuites: []JUnitTestSuite{suite},
 }
 
-if err := os.MkdirAll(path.Dir(dst), 0770); err != nil {
-return err
-}
 f, err := os.Create(dst)
 if err != nil {
 return err
@@ -325,7 +308,6 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error)
 continue
 }
 err = xml.NewDecoder(f).Decode(&report)
-_ = f.Close()
 if err != nil {
 messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error()))
 continue
@@ -340,9 +322,6 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error)
 mergedReport.TestSuites = append(mergedReport.TestSuites, report.TestSuites...)
 }
 
-if err := os.MkdirAll(path.Dir(dst), 0770); err != nil {
-return messages, err
-}
 f, err := os.Create(dst)
 if err != nil {
 return messages, err
vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go: 4 changes (generated, vendored)
@@ -11,7 +11,6 @@ package reporters
 import (
 "fmt"
 "os"
-"path"
 "strings"
 
 "github.com/onsi/ginkgo/v2/types"
@@ -28,9 +27,6 @@ func tcEscape(s string) string {
 }
 
 func GenerateTeamcityReport(report types.Report, dst string) error {
-if err := os.MkdirAll(path.Dir(dst), 0770); err != nil {
-return err
-}
 f, err := os.Create(dst)
 if err != nil {
 return err
vendor/github.com/onsi/ginkgo/v2/types/code_location.go: 2 changes (generated, vendored)
@@ -149,7 +149,7 @@ func PruneStack(fullStackTrace string, skip int) string {
 re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
 for i := 0; i < len(stack)/2; i++ {
 // We filter out based on the source code file name.
-if !re.MatchString(stack[i*2+1]) {
+if !re.Match([]byte(stack[i*2+1])) {
 prunedStack = append(prunedStack, stack[i*2])
 prunedStack = append(prunedStack, stack[i*2+1])
 }
vendor/github.com/onsi/ginkgo/v2/types/config.go: 29 changes (generated, vendored)
@@ -25,10 +25,8 @@ type SuiteConfig struct {
 SkipFiles []string
 LabelFilter string
 FailOnPending bool
-FailOnEmpty bool
 FailFast bool
 FlakeAttempts int
-MustPassRepeatedly int
 DryRun bool
 PollProgressAfter time.Duration
 PollProgressInterval time.Duration
@@ -90,9 +88,6 @@ type ReporterConfig struct {
 VeryVerbose bool
 FullTrace bool
 ShowNodeEvents bool
-GithubOutput bool
-SilenceSkips bool
-ForceNewlines bool
 
 JSONReport string
 JUnitReport string
@@ -202,7 +197,6 @@ type GoFlagsConfig struct {
 A bool
 ASMFlags string
 BuildMode string
-BuildVCS bool
 Compiler string
 GCCGoFlags string
 GCFlags string
@@ -220,7 +214,6 @@ type GoFlagsConfig struct {
 ToolExec string
 Work bool
 X bool
-O string
 }
 
 func NewDefaultGoFlagsConfig() GoFlagsConfig {
@@ -270,7 +263,7 @@ var FlagSections = GinkgoFlagSections{
 // SuiteConfigFlags provides flags for the Ginkgo test process, and CLI
 var SuiteConfigFlags = GinkgoFlags{
 {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo",
-Usage: "The seed used to randomize the spec suite.", AlwaysExport: true},
+Usage: "The seed used to randomize the spec suite."},
 {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags",
 Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."},
 
@@ -280,8 +273,6 @@ var SuiteConfigFlags = GinkgoFlags{
 Usage: "If set, ginkgo will stop running a test suite after a failure occurs."},
 {KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags",
 Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."},
-{KeyPath: "S.FailOnEmpty", Name: "fail-on-empty", SectionKey: "failure",
-Usage: "If set, ginkgo will mark the test suite as failed if no specs are run."},
 
 {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags",
 Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."},
@@ -328,7 +319,7 @@ var ParallelConfigFlags = GinkgoFlags{
 // ReporterConfigFlags provides flags for the Ginkgo test process, and CLI
 var ReporterConfigFlags = GinkgoFlags{
 {KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags",
-Usage: "If set, suppress color output in default reporter. You can also set the environment variable GINKGO_NO_COLOR=TRUE"},
+Usage: "If set, suppress color output in default reporter."},
 {KeyPath: "R.Verbose", Name: "v", SectionKey: "output",
 Usage: "If set, emits more output including GinkgoWriter contents."},
 {KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output",
@@ -339,12 +330,6 @@ var ReporterConfigFlags = GinkgoFlags{
 Usage: "If set, default reporter prints out the full stack trace when a failure occurs"},
 {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output",
 Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"},
-{KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output",
-Usage: "If set, default reporter prints easier to manage output in Github Actions."},
-{KeyPath: "R.SilenceSkips", Name: "silence-skips", SectionKey: "output",
-Usage: "If set, default reporter will not print out skipped tests."},
-{KeyPath: "R.ForceNewlines", Name: "force-newlines", SectionKey: "output",
-Usage: "If set, default reporter will ensure a newline appears after each test."},
 
 {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output",
 Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."},
@@ -513,7 +498,7 @@ var GinkgoCLIWatchFlags = GinkgoFlags{
 // GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI
 var GoBuildFlags = GinkgoFlags{
 {KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis",
-Usage: "enable data race detection. Supported on linux/amd64, linux/ppc64le, linux/arm64, linux/s390x, freebsd/amd64, netbsd/amd64, darwin/amd64, darwin/arm64, and windows/amd64."},
+Usage: "enable data race detection. Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, linux/ppc64le and linux/arm64 (only for 48-bit VMA)."},
 {KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis",
 Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`},
 {KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis",
@@ -529,8 +514,6 @@ var GoBuildFlags = GinkgoFlags{
 Usage: "arguments to pass on each go tool asm invocation."},
 {KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build",
 Usage: "build mode to use. See 'go help buildmode' for more."},
-{KeyPath: "Go.BuildVCS", Name: "buildvcs", SectionKey: "go-build",
-Usage: "adds version control information."},
 {KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build",
 Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."},
 {KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
@@ -565,8 +548,6 @@ var GoBuildFlags = GinkgoFlags{
 Usage: "print the name of the temporary work directory and do not delete it when exiting."},
 {KeyPath: "Go.X", Name: "x", SectionKey: "go-build",
 Usage: "print the commands."},
-{KeyPath: "Go.O", Name: "o", SectionKey: "go-build",
-Usage: "output binary path (including name)."},
 }
 
 // GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI
@@ -620,7 +601,7 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo
 }
 
 // GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test
-func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild string, pathToInvocationPath string) ([]string, error) {
+func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string, pathToInvocationPath string) ([]string, error) {
 // if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure
 // the built test binary can generate a coverprofile
 if goFlagsConfig.CoverProfile != "" {
@@ -643,7 +624,7 @@ func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild strin
 goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",")
 }
 
-args := []string{"test", "-c", packageToBuild}
+args := []string{"test", "-c", "-o", destination, packageToBuild}
 goArgs, err := GenerateFlagArgs(
 GoBuildFlags,
 map[string]interface{}{
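The changed GenerateGoTestCompileArgs signature threads an explicit output path into the go test -c invocation via -o rather than relying on the default binary name. A sketch of the resulting command line, with placeholder destination and package values:

    package main

    import (
        "fmt"
        "os/exec"
    )

    func main() {
        destination := "/tmp/pkg.test" // placeholder output path
        packageToBuild := "./..."      // placeholder package pattern

        args := []string{"test", "-c", "-o", destination, packageToBuild}
        cmd := exec.Command("go", args...)

        // Prints the resolved go binary followed by: test -c -o /tmp/pkg.test ./...
        fmt.Println(cmd.String())
    }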
vendor/github.com/onsi/ginkgo/v2/types/errors.go: 13 changes (generated, vendored)
@@ -453,8 +453,8 @@ func (g ginkgoErrors) InvalidEntryDescription(cl CodeLocation) error {
 
 func (g ginkgoErrors) MissingParametersForTableFunction(cl CodeLocation) error {
 return GinkgoError{
-Heading: "No parameters have been passed to the Table Function",
-Message: "The Table Function expected at least 1 parameter",
+Heading: fmt.Sprintf("No parameters have been passed to the Table Function"),
+Message: fmt.Sprintf("The Table Function expected at least 1 parameter"),
 CodeLocation: cl,
 DocLink: "table-specs",
 }
@@ -505,15 +505,6 @@ func (g ginkgoErrors) IncorrectVariadicParameterTypeToTableFunction(expected, ac
 }
 }
 
-func (g ginkgoErrors) ContextsCannotBeUsedInSubtreeTables(cl CodeLocation) error {
-return GinkgoError{
-Heading: "Contexts cannot be used in subtree tables",
-Message: "You've defined a subtree body function that accepts a context but did not provide one in the table entry. Ginkgo SpecContexts can only be passed in to subject and setup nodes - so if you are trying to implement a spec timeout you should request a context in the It function within your subtree body function, not in the subtree body function itself.",
-CodeLocation: cl,
-DocLink: "table-specs",
-}
-}
-
 /* Parallel Synchronization errors */
 
 func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error {
vendor/github.com/onsi/ginkgo/v2/types/flags.go: 13 changes (generated, vendored)
@@ -25,7 +25,6 @@ type GinkgoFlag struct {
 DeprecatedVersion string
 
 ExportAs string
-AlwaysExport bool
 }
 
 type GinkgoFlags []GinkgoFlag
@@ -432,7 +431,7 @@ func (ssv stringSliceVar) Set(s string) error {
 return nil
 }
 
-// given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured.
+//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured.
 func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) {
 result := []string{}
 for _, flag := range flags {
@@ -452,19 +451,19 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error)
 iface := value.Interface()
 switch value.Type() {
 case reflect.TypeOf(string("")):
-if iface.(string) != "" || flag.AlwaysExport {
+if iface.(string) != "" {
 result = append(result, fmt.Sprintf("--%s=%s", name, iface))
 }
 case reflect.TypeOf(int64(0)):
-if iface.(int64) != 0 || flag.AlwaysExport {
+if iface.(int64) != 0 {
 result = append(result, fmt.Sprintf("--%s=%d", name, iface))
 }
 case reflect.TypeOf(float64(0)):
-if iface.(float64) != 0 || flag.AlwaysExport {
+if iface.(float64) != 0 {
 result = append(result, fmt.Sprintf("--%s=%f", name, iface))
 }
 case reflect.TypeOf(int(0)):
-if iface.(int) != 0 || flag.AlwaysExport {
+if iface.(int) != 0 {
 result = append(result, fmt.Sprintf("--%s=%d", name, iface))
 }
 case reflect.TypeOf(bool(true)):
@@ -472,7 +471,7 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error)
 result = append(result, fmt.Sprintf("--%s", name))
 }
 case reflect.TypeOf(time.Duration(0)):
-if iface.(time.Duration) != time.Duration(0) || flag.AlwaysExport {
+if iface.(time.Duration) != time.Duration(0) {
 result = append(result, fmt.Sprintf("--%s=%s", name, iface))
 }
vendor/github.com/onsi/ginkgo/v2/types/label_filter.go: 229 changes (generated, vendored)
@@ -45,83 +45,6 @@ func orAction(a, b LabelFilter) LabelFilter {
 return func(labels []string) bool { return a(labels) || b(labels) }
 }
 
-func labelSetFor(key string, labels []string) map[string]bool {
-key = strings.ToLower(strings.TrimSpace(key))
-out := map[string]bool{}
-for _, label := range labels {
-components := strings.SplitN(label, ":", 2)
-if len(components) < 2 {
-continue
-}
-if key == strings.ToLower(strings.TrimSpace(components[0])) {
-out[strings.ToLower(strings.TrimSpace(components[1]))] = true
-}
-}
-
-return out
-}
-
-func isEmptyLabelSetAction(key string) LabelFilter {
-return func(labels []string) bool {
-return len(labelSetFor(key, labels)) == 0
-}
-}
-
-func containsAnyLabelSetAction(key string, expectedValues []string) LabelFilter {
-return func(labels []string) bool {
-set := labelSetFor(key, labels)
-for _, value := range expectedValues {
-if set[value] {
-return true
-}
-}
-return false
-}
-}
-
-func containsAllLabelSetAction(key string, expectedValues []string) LabelFilter {
-return func(labels []string) bool {
-set := labelSetFor(key, labels)
-for _, value := range expectedValues {
-if !set[value] {
-return false
-}
-}
-return true
-}
-}
-
-func consistsOfLabelSetAction(key string, expectedValues []string) LabelFilter {
-return func(labels []string) bool {
-set := labelSetFor(key, labels)
-if len(set) != len(expectedValues) {
-return false
-}
-for _, value := range expectedValues {
-if !set[value] {
-return false
-}
-}
-return true
-}
-}
-
-func isSubsetOfLabelSetAction(key string, expectedValues []string) LabelFilter {
-expectedSet := map[string]bool{}
-for _, value := range expectedValues {
-expectedSet[value] = true
-}
-return func(labels []string) bool {
-set := labelSetFor(key, labels)
-for value := range set {
-if !expectedSet[value] {
-return false
-}
-}
-return true
-}
-}
-
 type lfToken uint
 
 const (
@@ -135,9 +58,6 @@ const (
 lfTokenOr
 lfTokenRegexp
 lfTokenLabel
-lfTokenSetKey
-lfTokenSetOperation
-lfTokenSetArgument
 lfTokenEOF
 )
 
@@ -151,8 +71,6 @@ func (l lfToken) Precedence() int {
 return 2
 case lfTokenNot:
 return 3
-case lfTokenSetOperation:
-return 4
 }
 return -1
 }
@@ -175,12 +93,6 @@ func (l lfToken) String() string {
 return "/regexp/"
 case lfTokenLabel:
 return "label"
-case lfTokenSetKey:
-return "set_key"
-case lfTokenSetOperation:
-return "set_operation"
-case lfTokenSetArgument:
-return "set_argument"
 case lfTokenEOF:
 return "EOF"
 }
@@ -236,35 +148,6 @@ func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) {
 return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err))
 }
 return matchLabelRegexAction(re), nil
-case lfTokenSetOperation:
-tokenSetOperation := strings.ToLower(tn.value)
-if tokenSetOperation == "isempty" {
-return isEmptyLabelSetAction(tn.leftNode.value), nil
-}
-if tn.rightNode == nil {
-return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Set operation '%s' is missing an argument.", tn.value))
-}
-
-rawValues := strings.Split(tn.rightNode.value, ",")
-values := make([]string, len(rawValues))
-for i := range rawValues {
-values[i] = strings.ToLower(strings.TrimSpace(rawValues[i]))
-if strings.ContainsAny(values[i], "&|!,()/") {
-return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, fmt.Sprintf("Invalid label value '%s' in set operation argument.", values[i]))
-} else if values[i] == "" {
-return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, "Empty label value in set operation argument.")
-}
-}
-switch tokenSetOperation {
-case "containsany":
-return containsAnyLabelSetAction(tn.leftNode.value, values), nil
-case "containsall":
-return containsAllLabelSetAction(tn.leftNode.value, values), nil
-case "consistsof":
-return consistsOfLabelSetAction(tn.leftNode.value, values), nil
-case "issubsetof":
-return isSubsetOfLabelSetAction(tn.leftNode.value, values), nil
-}
 }
 
 if tn.rightNode == nil {
@@ -320,17 +203,7 @@ func (tn *treeNode) toString(indent int) string {
 return out
 }
 
-var validSetOperations = map[string]string{
-"containsany": "containsAny",
-"containsall": "containsAll",
-"consistsof": "consistsOf",
-"issubsetof": "isSubsetOf",
-"isempty": "isEmpty",
-}
-
 func tokenize(input string) func() (*treeNode, error) {
-lastToken := lfTokenInvalid
-lastValue := ""
 runes, i := []rune(input), 0
 
 peekIs := func(r rune) bool {
@@ -360,53 +233,6 @@ func tokenize(input string) func() (*treeNode, error) {
 }
 
 node := &treeNode{location: i}
-defer func() {
-lastToken = node.token
-lastValue = node.value
-}()
-
-if lastToken == lfTokenSetKey {
-//we should get a valid set operation next
-value, n := consumeUntil(" )")
-if validSetOperations[strings.ToLower(value)] == "" {
-return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, fmt.Sprintf("Invalid set operation '%s'.", value))
-}
-i += n
-node.token, node.value = lfTokenSetOperation, value
-return node, nil
-}
-if lastToken == lfTokenSetOperation {
-//we should get an argument next, if we aren't isempty
-var arg = ""
-origI := i
-if runes[i] == '{' {
-i += 1
-value, n := consumeUntil("}")
-if i+n >= len(runes) {
-return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-1, "Missing closing '}' in set operation argument?")
-}
-i += n + 1
-arg = value
-} else {
-value, n := consumeUntil("&|!,()/")
-i += n
-arg = strings.TrimSpace(value)
-}
-if strings.ToLower(lastValue) == "isempty" && arg != "" {
-return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("isEmpty does not take arguments, was passed '%s'.", arg))
-}
-if arg == "" && strings.ToLower(lastValue) != "isempty" {
-if i < len(runes) && runes[i] == '/' {
-return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, "Set operations do not support regular expressions.")
-} else {
-return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("Set operation '%s' requires an argument.", lastValue))
-}
-}
-// note that we sent an empty SetArgument token if we are isempty
-node.token, node.value = lfTokenSetArgument, arg
-return node, nil
-}
-
 switch runes[i] {
 case '&':
 if !peekIs('&') {
@@ -438,38 +264,8 @@ func tokenize(input string) func() (*treeNode, error) {
 i += n + 1
 node.token, node.value = lfTokenRegexp, value
 default:
-value, n := consumeUntil("&|!,()/:")
+value, n := consumeUntil("&|!,()/")
 i += n
-value = strings.TrimSpace(value)
-
-//are we the beginning of a set operation?
-if i < len(runes) && runes[i] == ':' {
-if peekIs(' ') {
-if value == "" {
-return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set key.")
-}
-i += 1
-//we are the beginning of a set operation
-node.token, node.value = lfTokenSetKey, value
-return node, nil
-}
-additionalValue, n := consumeUntil("&|!,()/")
-additionalValue = strings.TrimSpace(additionalValue)
-if additionalValue == ":" {
-return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set operation.")
-}
-i += n
-value += additionalValue
-}
-
-valueToCheckForSetOperation := strings.ToLower(value)
-for setOperation := range validSetOperations {
-idx := strings.Index(valueToCheckForSetOperation, " "+setOperation)
-if idx > 0 {
-return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-n+idx+1, fmt.Sprintf("Looks like you are using the set operator '%s' but did not provide a set key. Did you forget the ':'?", validSetOperations[setOperation]))
-}
-}
-
 node.token, node.value = lfTokenLabel, strings.TrimSpace(value)
 }
 return node, nil
@@ -511,7 +307,7 @@ LOOP:
 switch node.token {
 case lfTokenEOF:
 break LOOP
-case lfTokenLabel, lfTokenRegexp, lfTokenSetKey:
+case lfTokenLabel, lfTokenRegexp:
 if current.rightNode != nil {
 return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. You need an operator between them.")
 }
@@ -530,18 +326,6 @@ LOOP:
 node.setLeftNode(nodeToStealFrom.rightNode)
 nodeToStealFrom.setRightNode(node)
 current = node
-case lfTokenSetOperation:
-if current.rightNode == nil {
-return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Set operation '%s' missing left hand operand.", node.value))
-}
-node.setLeftNode(current.rightNode)
-current.setRightNode(node)
-current = node
-case lfTokenSetArgument:
-if current.rightNode != nil {
-return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unexpected set argument '%s'.", node.token))
-}
-current.setRightNode(node)
 case lfTokenCloseGroup:
 firstUnmatchedOpenNode := current.firstUnmatchedOpenNode()
 if firstUnmatchedOpenNode == nil {
@@ -570,14 +354,5 @@ func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) {
 if strings.ContainsAny(out, "&|!,()/") {
 return "", GinkgoErrors.InvalidLabel(label, cl)
 }
-if out[0] == ':' {
-return "", GinkgoErrors.InvalidLabel(label, cl)
-}
-if strings.Contains(out, ":") {
-components := strings.SplitN(out, ":", 2)
-if len(components) < 2 || components[1] == "" {
-return "", GinkgoErrors.InvalidLabel(label, cl)
-}
-}
 return out, nil
 }
14 vendor/github.com/onsi/ginkgo/v2/types/types.go generated vendored
@@ -3,21 +3,13 @@ package types
 import (
 "encoding/json"
 "fmt"
-"os"
 "sort"
 "strings"
 "time"
 )

 const GINKGO_FOCUS_EXIT_CODE = 197
-var GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999"
-
-func init() {
-if os.Getenv("GINKGO_TIME_FORMAT") != "" {
-GINKGO_TIME_FORMAT = os.Getenv("GINKGO_TIME_FORMAT")
-}
-}
+const GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999"

 // Report captures information about a Ginkgo test run
 type Report struct {
@@ -105,7 +97,9 @@ func (report Report) Add(other Report) Report {
 report.RunTime = report.EndTime.Sub(report.StartTime)

 reports := make(SpecReports, len(report.SpecReports)+len(other.SpecReports))
-copy(reports, report.SpecReports)
+for i := range report.SpecReports {
+reports[i] = report.SpecReports[i]
+}
 offset := len(report.SpecReports)
 for i := range other.SpecReports {
 reports[i+offset] = other.SpecReports[i]
2 vendor/github.com/onsi/ginkgo/v2/types/version.go generated vendored
@@ -1,3 +1,3 @@
 package types

-const VERSION = "2.22.2"
+const VERSION = "2.9.5"
7 vendor/github.com/quic-go/quic-go/.golangci.yml generated vendored
@@ -16,9 +16,9 @@ linters:
 disable-all: true
 enable:
 - asciicheck
+- copyloopvar
 - depguard
 - exhaustive
-- exportloopref
 - goimports
 - gofmt # redundant, since gofmt *should* be a no-op after gofumpt
 - gofumpt
@@ -44,3 +44,8 @@ issues:
 linters:
 - exhaustive
 - prealloc
+- unparam
+- path: _test\.go
+  text: "SA1029:"
+  linters:
+  - staticcheck
2 vendor/github.com/quic-go/quic-go/README.md generated vendored
@@ -10,6 +10,7 @@
 quic-go is an implementation of the QUIC protocol ([RFC 9000](https://datatracker.ietf.org/doc/html/rfc9000), [RFC 9001](https://datatracker.ietf.org/doc/html/rfc9001), [RFC 9002](https://datatracker.ietf.org/doc/html/rfc9002)) in Go. It has support for HTTP/3 ([RFC 9114](https://datatracker.ietf.org/doc/html/rfc9114)), including QPACK ([RFC 9204](https://datatracker.ietf.org/doc/html/rfc9204)) and HTTP Datagrams ([RFC 9297](https://datatracker.ietf.org/doc/html/rfc9297)).

 In addition to these base RFCs, it also implements the following RFCs:

 * Unreliable Datagram Extension ([RFC 9221](https://datatracker.ietf.org/doc/html/rfc9221))
 * Datagram Packetization Layer Path MTU Discovery (DPLPMTUD, [RFC 8899](https://datatracker.ietf.org/doc/html/rfc8899))
 * QUIC Version 2 ([RFC 9369](https://datatracker.ietf.org/doc/html/rfc9369))
@@ -33,6 +34,7 @@ Detailed documentation can be found on [quic-go.net](https://quic-go.net/docs/).
 | [Hysteria](https://github.com/apernet/hysteria) | A powerful, lightning fast and censorship resistant proxy |  |
 | [Mercure](https://github.com/dunglas/mercure) | An open, easy, fast, reliable and battery-efficient solution for real-time communications |  |
 | [OONI Probe](https://github.com/ooni/probe-cli) | Next generation OONI Probe. Library and CLI tool. |  |
+| [reverst](https://github.com/flipt-io/reverst) | Reverse Tunnels in Go over HTTP/3 and QUIC |  |
 | [RoadRunner](https://github.com/roadrunner-server/roadrunner) | High-performance PHP application server, process manager written in Go and powered with plugins |  |
 | [syncthing](https://github.com/syncthing/syncthing/) | Open Source Continuous File Synchronization |  |
 | [traefik](https://github.com/traefik/traefik) | The Cloud Native Application Proxy |  |
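Note: for context on how a consumer drives this vendored library, a minimal client sketch against the quic-go API of this vendored version is shown below. The "doq" ALPN string, the address, and the InsecureSkipVerify setting are illustrative assumptions for a standalone demo, not values taken from this repository.

```go
package main

import (
	"context"
	"crypto/tls"
	"fmt"
	"time"

	"github.com/quic-go/quic-go"
)

func main() {
	// Placeholder TLS config: a real client would verify the server certificate.
	tlsConf := &tls.Config{
		InsecureSkipVerify: true,            // assumption: demo only
		NextProtos:         []string{"doq"}, // assumption: DNS-over-QUIC ALPN
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// DialAddr establishes a QUIC connection; the address is a placeholder.
	conn, err := quic.DialAddr(ctx, "example.net:853", tlsConf, &quic.Config{})
	if err != nil {
		panic(err)
	}
	defer conn.CloseWithError(0, "done")

	// Open a bidirectional stream once the handshake has completed.
	str, err := conn.OpenStreamSync(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("opened stream", str.StreamID())
	str.Close()
}
```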
147 vendor/github.com/quic-go/quic-go/client.go generated vendored
@@ -7,38 +7,8 @@ import (
 "net"

 "github.com/quic-go/quic-go/internal/protocol"
-"github.com/quic-go/quic-go/internal/utils"
-"github.com/quic-go/quic-go/logging"
 )

-type client struct {
-sendConn sendConn
-
-use0RTT bool
-
-packetHandlers packetHandlerManager
-onClose func()
-
-tlsConf *tls.Config
-config *Config
-
-connIDGenerator ConnectionIDGenerator
-srcConnID protocol.ConnectionID
-destConnID protocol.ConnectionID
-
-initialPacketNumber protocol.PacketNumber
-hasNegotiatedVersion bool
-version protocol.Version
-
-handshakeChan chan struct{}
-
-conn quicConn
-
-tracer *logging.ConnectionTracer
-tracingID ConnectionTracingID
-logger utils.Logger
-}
-
 // make it possible to mock connection ID for initial generation in the tests
 var generateConnectionIDForInitial = protocol.GenerateConnectionIDForInitial

@@ -132,120 +102,3 @@ func setupTransport(c net.PacketConn, tlsConf *tls.Config, createdPacketConn boo
 isSingleUse: true,
 }, nil
 }
-
-func dial(
-ctx context.Context,
-conn sendConn,
-connIDGenerator ConnectionIDGenerator,
-packetHandlers packetHandlerManager,
-tlsConf *tls.Config,
-config *Config,
-onClose func(),
-use0RTT bool,
-) (quicConn, error) {
-c, err := newClient(conn, connIDGenerator, config, tlsConf, onClose, use0RTT)
-if err != nil {
-return nil, err
-}
-c.packetHandlers = packetHandlers
-
-c.tracingID = nextConnTracingID()
-if c.config.Tracer != nil {
-c.tracer = c.config.Tracer(context.WithValue(ctx, ConnectionTracingKey, c.tracingID), protocol.PerspectiveClient, c.destConnID)
-}
-if c.tracer != nil && c.tracer.StartedConnection != nil {
-c.tracer.StartedConnection(c.sendConn.LocalAddr(), c.sendConn.RemoteAddr(), c.srcConnID, c.destConnID)
-}
-if err := c.dial(ctx); err != nil {
-return nil, err
-}
-return c.conn, nil
-}
-
-func newClient(sendConn sendConn, connIDGenerator ConnectionIDGenerator, config *Config, tlsConf *tls.Config, onClose func(), use0RTT bool) (*client, error) {
-srcConnID, err := connIDGenerator.GenerateConnectionID()
-if err != nil {
-return nil, err
-}
-destConnID, err := generateConnectionIDForInitial()
-if err != nil {
-return nil, err
-}
-c := &client{
-connIDGenerator: connIDGenerator,
-srcConnID: srcConnID,
-destConnID: destConnID,
-sendConn: sendConn,
-use0RTT: use0RTT,
-onClose: onClose,
-tlsConf: tlsConf,
-config: config,
-version: config.Versions[0],
-handshakeChan: make(chan struct{}),
-logger: utils.DefaultLogger.WithPrefix("client"),
-}
-return c, nil
-}
-
-func (c *client) dial(ctx context.Context) error {
-c.logger.Infof("Starting new connection to %s (%s -> %s), source connection ID %s, destination connection ID %s, version %s", c.tlsConf.ServerName, c.sendConn.LocalAddr(), c.sendConn.RemoteAddr(), c.srcConnID, c.destConnID, c.version)
-
-c.conn = newClientConnection(
-context.WithValue(context.WithoutCancel(ctx), ConnectionTracingKey, c.tracingID),
-c.sendConn,
-c.packetHandlers,
-c.destConnID,
-c.srcConnID,
-c.connIDGenerator,
-c.config,
-c.tlsConf,
-c.initialPacketNumber,
-c.use0RTT,
-c.hasNegotiatedVersion,
-c.tracer,
-c.logger,
-c.version,
-)
-c.packetHandlers.Add(c.srcConnID, c.conn)
-
-errorChan := make(chan error, 1)
-recreateChan := make(chan errCloseForRecreating)
-go func() {
-err := c.conn.run()
-var recreateErr *errCloseForRecreating
-if errors.As(err, &recreateErr) {
-recreateChan <- *recreateErr
-return
-}
-if c.onClose != nil {
-c.onClose()
-}
-errorChan <- err // returns as soon as the connection is closed
-}()
-
-// only set when we're using 0-RTT
-// Otherwise, earlyConnChan will be nil. Receiving from a nil chan blocks forever.
-var earlyConnChan <-chan struct{}
-if c.use0RTT {
-earlyConnChan = c.conn.earlyConnReady()
-}
-
-select {
-case <-ctx.Done():
-c.conn.destroy(nil)
-return context.Cause(ctx)
-case err := <-errorChan:
-return err
-case recreateErr := <-recreateChan:
-c.initialPacketNumber = recreateErr.nextPacketNumber
-c.version = recreateErr.nextVersion
-c.hasNegotiatedVersion = true
-return c.dial(ctx)
-case <-earlyConnChan:
-// ready to send 0-RTT data
-return nil
-case <-c.conn.HandshakeComplete():
-// handshake successfully completed
-return nil
-}
-}
2 vendor/github.com/quic-go/quic-go/codecov.yml generated vendored
@@ -6,6 +6,8 @@ coverage:
 - internal/handshake/cipher_suite.go
 - internal/utils/linkedlist/linkedlist.go
 - internal/testdata
+- logging/connection_tracer_multiplexer.go
+- logging/tracer_multiplexer.go
 - testutils/
 - fuzzing/
 - metrics/
8 vendor/github.com/quic-go/quic-go/conn_id_generator.go generated vendored
@@ -16,7 +16,7 @@ type connIDGenerator struct {
 initialClientDestConnID *protocol.ConnectionID // nil for the client

 addConnectionID func(protocol.ConnectionID)
-getStatelessResetToken func(protocol.ConnectionID) protocol.StatelessResetToken
+statelessResetter *statelessResetter
 removeConnectionID func(protocol.ConnectionID)
 retireConnectionID func(protocol.ConnectionID)
 replaceWithClosed func([]protocol.ConnectionID, []byte)
@@ -27,7 +27,7 @@ func newConnIDGenerator(
 initialConnectionID protocol.ConnectionID,
 initialClientDestConnID *protocol.ConnectionID, // nil for the client
 addConnectionID func(protocol.ConnectionID),
-getStatelessResetToken func(protocol.ConnectionID) protocol.StatelessResetToken,
+statelessResetter *statelessResetter,
 removeConnectionID func(protocol.ConnectionID),
 retireConnectionID func(protocol.ConnectionID),
 replaceWithClosed func([]protocol.ConnectionID, []byte),
@@ -38,7 +38,7 @@ func newConnIDGenerator(
 generator: generator,
 activeSrcConnIDs: make(map[uint64]protocol.ConnectionID),
 addConnectionID: addConnectionID,
-getStatelessResetToken: getStatelessResetToken,
+statelessResetter: statelessResetter,
 removeConnectionID: removeConnectionID,
 retireConnectionID: retireConnectionID,
 replaceWithClosed: replaceWithClosed,
@@ -104,7 +104,7 @@ func (m *connIDGenerator) issueNewConnID() error {
 m.queueControlFrame(&wire.NewConnectionIDFrame{
 SequenceNumber: m.highestSeq + 1,
 ConnectionID: connID,
-StatelessResetToken: m.getStatelessResetToken(connID),
+StatelessResetToken: m.statelessResetter.GetStatelessResetToken(connID),
 })
 m.highestSeq++
 return nil
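Note: the change above replaces the per-connection getStatelessResetToken callback with a shared statelessResetter, so the token is derived from the connection ID itself. A minimal sketch of that general approach (RFC 9000 suggests generating stateless reset tokens with a keyed pseudorandom function of the connection ID) is shown below; the HMAC-SHA256 construction and key handling are illustrative assumptions, not quic-go's actual implementation.

```go
package main

import (
	"crypto/hmac"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

// statelessResetter derives a 16-byte stateless reset token from a connection ID,
// so no per-connection state is needed to generate or recognise tokens.
type statelessResetter struct {
	key [32]byte // static key shared by all connections of a transport (assumption)
}

func newStatelessResetter() (*statelessResetter, error) {
	var r statelessResetter
	if _, err := rand.Read(r.key[:]); err != nil {
		return nil, err
	}
	return &r, nil
}

// GetStatelessResetToken returns HMAC-SHA256(key, connID) truncated to 16 bytes.
func (r *statelessResetter) GetStatelessResetToken(connID []byte) [16]byte {
	mac := hmac.New(sha256.New, r.key[:])
	mac.Write(connID)
	var token [16]byte
	copy(token[:], mac.Sum(nil))
	return token
}

func main() {
	r, err := newStatelessResetter()
	if err != nil {
		panic(err)
	}
	token := r.GetStatelessResetToken([]byte{0xde, 0xad, 0xbe, 0xef})
	fmt.Printf("stateless reset token: %x\n", token[:])
}
```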
22 vendor/github.com/quic-go/quic-go/conn_id_manager.go generated vendored
@@ -35,6 +35,8 @@ type connIDManager struct {
 addStatelessResetToken func(protocol.StatelessResetToken)
 removeStatelessResetToken func(protocol.StatelessResetToken)
 queueControlFrame func(wire.Frame)
+
+closed bool
 }

 func newConnIDManager(
@@ -66,6 +68,12 @@ func (h *connIDManager) Add(f *wire.NewConnectionIDFrame) error {
 }

 func (h *connIDManager) add(f *wire.NewConnectionIDFrame) error {
+if h.activeConnectionID.Len() == 0 {
+return &qerr.TransportError{
+ErrorCode: qerr.ProtocolViolation,
+ErrorMessage: "received NEW_CONNECTION_ID frame but zero-length connection IDs are in use",
+}
+}
 // If the NEW_CONNECTION_ID frame is reordered, such that its sequence number is smaller than the currently active
 // connection ID or if it was already retired, send the RETIRE_CONNECTION_ID frame immediately.
 if f.SequenceNumber < h.activeSequenceNumber || f.SequenceNumber < h.highestRetired {
@@ -142,6 +150,7 @@ func (h *connIDManager) addConnectionID(seq uint64, connID protocol.ConnectionID
 }

 func (h *connIDManager) updateConnectionID() {
+h.assertNotClosed()
 h.queueControlFrame(&wire.RetireConnectionIDFrame{
 SequenceNumber: h.activeSequenceNumber,
 })
@@ -160,6 +169,7 @@ func (h *connIDManager) updateConnectionID() {
 }

 func (h *connIDManager) Close() {
+h.closed = true
 if h.activeStatelessResetToken != nil {
 h.removeStatelessResetToken(*h.activeStatelessResetToken)
 }
@@ -176,6 +186,7 @@ func (h *connIDManager) ChangeInitialConnID(newConnID protocol.ConnectionID) {

 // is called when the server provides a stateless reset token in the transport parameters
 func (h *connIDManager) SetStatelessResetToken(token protocol.StatelessResetToken) {
+h.assertNotClosed()
 if h.activeSequenceNumber != 0 {
 panic("expected first connection ID to have sequence number 0")
 }
@@ -203,6 +214,7 @@ func (h *connIDManager) shouldUpdateConnID() bool {
 }

 func (h *connIDManager) Get() protocol.ConnectionID {
+h.assertNotClosed()
 if h.shouldUpdateConnID() {
 h.updateConnectionID()
 }
@@ -212,3 +224,13 @@ func (h *connIDManager) Get() protocol.ConnectionID {
 func (h *connIDManager) SetHandshakeComplete() {
 h.handshakeComplete = true
 }
+
+// Using the connIDManager after it has been closed can have disastrous effects:
+// If the connection ID is rotated, a new entry would be inserted into the packet handler map,
+// leading to a memory leak of the connection struct.
+// See https://github.com/quic-go/quic-go/pull/4852 for more details.
+func (h *connIDManager) assertNotClosed() {
+if h.closed {
+panic("connection ID manager is closed")
+}
+}
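Note: the closed flag and assertNotClosed guard added above exist to catch use of the connection ID manager after Close, which could re-register a rotated connection ID and leak the connection struct (see the comment referencing quic-go PR 4852). A generic sketch of that guard pattern, with made-up names unrelated to quic-go, is shown below.

```go
package main

import "fmt"

// rotator hands out identifiers and must not be used after Close,
// because each rotation registers state with an external map.
type rotator struct {
	closed  bool
	current int
	onIssue func(id int) // registers the id somewhere else
}

func (r *rotator) Get() int {
	r.assertNotClosed()
	return r.current
}

func (r *rotator) Rotate() {
	r.assertNotClosed()
	r.current++
	r.onIssue(r.current) // would leak an entry if called after Close
}

func (r *rotator) Close() { r.closed = true }

// assertNotClosed makes misuse fail loudly instead of silently leaking state.
func (r *rotator) assertNotClosed() {
	if r.closed {
		panic("rotator used after Close")
	}
}

func main() {
	registered := map[int]bool{}
	r := &rotator{onIssue: func(id int) { registered[id] = true }}
	r.Rotate()
	fmt.Println("registered:", registered)
	r.Close()
	// r.Rotate() // would panic: rotator used after Close
}
```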
230 vendor/github.com/quic-go/quic-go/connection.go generated vendored
@@ -85,7 +85,6 @@ func (p *receivedPacket) Clone() *receivedPacket {

 type connRunner interface {
 Add(protocol.ConnectionID, packetHandler) bool
-GetStatelessResetToken(protocol.ConnectionID) protocol.StatelessResetToken
 Retire(protocol.ConnectionID)
 Remove(protocol.ConnectionID)
 ReplaceWithClosed([]protocol.ConnectionID, []byte)
@@ -225,7 +224,7 @@ var newConnection = func(
 destConnID protocol.ConnectionID,
 srcConnID protocol.ConnectionID,
 connIDGenerator ConnectionIDGenerator,
-statelessResetToken protocol.StatelessResetToken,
+statelessResetter *statelessResetter,
 conf *Config,
 tlsConf *tls.Config,
 tokenGenerator *handshake.TokenGenerator,
@@ -263,7 +262,7 @@ var newConnection = func(
 srcConnID,
 &clientDestConnID,
 func(connID protocol.ConnectionID) { runner.Add(connID, s) },
-runner.GetStatelessResetToken,
+statelessResetter,
 runner.Remove,
 runner.Retire,
 runner.ReplaceWithClosed,
@@ -282,6 +281,7 @@ var newConnection = func(
 s.logger,
 )
 s.maxPayloadSizeEstimate.Store(uint32(estimateMaxPayloadSize(protocol.ByteCount(s.config.InitialPacketSize))))
+statelessResetToken := statelessResetter.GetStatelessResetToken(srcConnID)
 params := &wire.TransportParameters{
 InitialMaxStreamDataBidiLocal: protocol.ByteCount(s.config.InitialStreamReceiveWindow),
 InitialMaxStreamDataBidiRemote: protocol.ByteCount(s.config.InitialStreamReceiveWindow),
@@ -340,6 +340,7 @@ var newClientConnection = func(
 destConnID protocol.ConnectionID,
 srcConnID protocol.ConnectionID,
 connIDGenerator ConnectionIDGenerator,
+statelessResetter *statelessResetter,
 conf *Config,
 tlsConf *tls.Config,
 initialPacketNumber protocol.PacketNumber,
@@ -372,7 +373,7 @@ var newClientConnection = func(
 srcConnID,
 nil,
 func(connID protocol.ConnectionID) { runner.Add(connID, s) },
-runner.GetStatelessResetToken,
+statelessResetter,
 runner.Remove,
 runner.Retire,
 runner.ReplaceWithClosed,
@@ -477,7 +478,7 @@ func (s *connection) preSetup() {
 uint64(s.config.MaxIncomingUniStreams),
 s.perspective,
 )
-s.framer = newFramer()
+s.framer = newFramer(s.connFlowController)
 s.receivedPackets = make(chan receivedPacket, protocol.MaxConnUnprocessedPackets)
 s.closeChan = make(chan closeError, 1)
 s.sendingScheduled = make(chan struct{}, 1)
@@ -496,12 +497,28 @@ func (s *connection) run() error {
 var closeErr closeError
 defer func() { s.ctxCancel(closeErr.err) }()

+defer func() {
+// Drain queued packets that will never be processed.
+for {
+select {
+case p, ok := <-s.receivedPackets:
+if !ok {
+return
+}
+p.buffer.Decrement()
+p.buffer.MaybeRelease()
+default:
+return
+}
+}
+}()
+
 s.timer = *newTimer()

 if err := s.cryptoStreamHandler.StartHandshake(s.ctx); err != nil {
 return err
 }
-if err := s.handleHandshakeEvents(); err != nil {
+if err := s.handleHandshakeEvents(time.Now()); err != nil {
 return err
 }
 go func() {
@@ -602,7 +619,7 @@ runLoop:
 if timeout := s.sentPacketHandler.GetLossDetectionTimeout(); !timeout.IsZero() && timeout.Before(now) {
 // This could cause packets to be retransmitted.
 // Check it before trying to send packets.
-if err := s.sentPacketHandler.OnLossDetectionTimeout(); err != nil {
+if err := s.sentPacketHandler.OnLossDetectionTimeout(now); err != nil {
 s.closeLocal(err)
 }
 }
@@ -727,7 +744,7 @@ func (s *connection) idleTimeoutStartTime() time.Time {
 return startTime
 }

-func (s *connection) handleHandshakeComplete() error {
+func (s *connection) handleHandshakeComplete(now time.Time) error {
 defer close(s.handshakeCompleteChan)
 // Once the handshake completes, we have derived 1-RTT keys.
 // There's no point in queueing undecryptable packets for later decryption anymore.
@@ -748,7 +765,7 @@ func (s *connection) handleHandshakeComplete() error {
 }

 // All these only apply to the server side.
-if err := s.handleHandshakeConfirmed(); err != nil {
+if err := s.handleHandshakeConfirmed(now); err != nil {
 return err
 }

@@ -771,23 +788,22 @@ func (s *connection) handleHandshakeComplete() error {
 return nil
 }

-func (s *connection) handleHandshakeConfirmed() error {
-if err := s.dropEncryptionLevel(protocol.EncryptionHandshake); err != nil {
+func (s *connection) handleHandshakeConfirmed(now time.Time) error {
+if err := s.dropEncryptionLevel(protocol.EncryptionHandshake, now); err != nil {
 return err
 }

 s.handshakeConfirmed = true
-s.sentPacketHandler.SetHandshakeConfirmed()
 s.cryptoStreamHandler.SetHandshakeConfirmed()

 if !s.config.DisablePathMTUDiscovery && s.conn.capabilities().DF {
-s.mtuDiscoverer.Start()
+s.mtuDiscoverer.Start(now)
 }
 return nil
 }

 func (s *connection) handlePacketImpl(rp receivedPacket) bool {
-s.sentPacketHandler.ReceivedBytes(rp.Size())
+s.sentPacketHandler.ReceivedBytes(rp.Size(), rp.rcvTime)

 if wire.IsVersionNegotiationPacket(rp.data) {
 s.handleVersionNegotiationPacket(rp)
@@ -958,7 +974,7 @@ func (s *connection) handleLongHeaderPacket(p receivedPacket, hdr *wire.Header)
 // drop 0-RTT packets, if we are a client
 if s.perspective == protocol.PerspectiveClient && hdr.Type == protocol.PacketType0RTT {
 if s.tracer != nil && s.tracer.DroppedPacket != nil {
-s.tracer.DroppedPacket(logging.PacketType0RTT, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropKeyUnavailable)
+s.tracer.DroppedPacket(logging.PacketType0RTT, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropUnexpectedPacket)
 }
 return false
 }
@@ -1068,6 +1084,15 @@ func (s *connection) handleRetryPacket(hdr *wire.Header, data []byte, rcvTime ti
 return false
 }

+newDestConnID := hdr.SrcConnectionID
+s.receivedRetry = true
+s.sentPacketHandler.ResetForRetry(rcvTime)
+s.handshakeDestConnID = newDestConnID
+s.retrySrcConnID = &newDestConnID
+s.cryptoStreamHandler.ChangeConnectionID(newDestConnID)
+s.packer.SetToken(hdr.Token)
+s.connIDManager.ChangeInitialConnID(newDestConnID)
+
 if s.logger.Debug() {
 s.logger.Debugf("<- Received Retry:")
 (&wire.ExtendedHeader{Header: *hdr}).Log(s.logger)
@@ -1076,17 +1101,7 @@ func (s *connection) handleRetryPacket(hdr *wire.Header, data []byte, rcvTime ti
 if s.tracer != nil && s.tracer.ReceivedRetry != nil {
 s.tracer.ReceivedRetry(hdr)
 }
-newDestConnID := hdr.SrcConnectionID
-s.receivedRetry = true
-if err := s.sentPacketHandler.ResetForRetry(rcvTime); err != nil {
-s.closeLocal(err)
-return false
-}
-s.handshakeDestConnID = newDestConnID
-s.retrySrcConnID = &newDestConnID
-s.cryptoStreamHandler.ChangeConnectionID(newDestConnID)
-s.packer.SetToken(hdr.Token)
-s.connIDManager.ChangeInitialConnID(newDestConnID)
 s.scheduleSending()
 return true
 }
@@ -1195,7 +1210,7 @@ func (s *connection) handleUnpackedLongHeaderPacket(
 !s.droppedInitialKeys {
 // On the server side, Initial keys are dropped as soon as the first Handshake packet is received.
 // See Section 4.9.1 of RFC 9001.
-if err := s.dropEncryptionLevel(protocol.EncryptionInitial); err != nil {
+if err := s.dropEncryptionLevel(protocol.EncryptionInitial, rcvTime); err != nil {
 return err
 }
 }
@@ -1210,7 +1225,7 @@ func (s *connection) handleUnpackedLongHeaderPacket(
 s.tracer.ReceivedLongHeaderPacket(packet.hdr, packetSize, ecn, frames)
 }
 }
-isAckEliciting, err := s.handleFrames(packet.data, packet.hdr.DestConnectionID, packet.encryptionLevel, log)
+isAckEliciting, err := s.handleFrames(packet.data, packet.hdr.DestConnectionID, packet.encryptionLevel, log, rcvTime)
 if err != nil {
 return err
 }
@@ -1229,7 +1244,7 @@ func (s *connection) handleUnpackedShortHeaderPacket(
 s.firstAckElicitingPacketAfterIdleSentTime = time.Time{}
 s.keepAlivePingSent = false

-isAckEliciting, err := s.handleFrames(data, destConnID, protocol.Encryption1RTT, log)
+isAckEliciting, err := s.handleFrames(data, destConnID, protocol.Encryption1RTT, log, rcvTime)
 if err != nil {
 return err
 }
@@ -1241,6 +1256,7 @@ func (s *connection) handleFrames(
 destConnID protocol.ConnectionID,
 encLevel protocol.EncryptionLevel,
 log func([]logging.Frame),
+rcvTime time.Time,
 ) (isAckEliciting bool, _ error) {
 // Only used for tracing.
 // If we're not tracing, this slice will always remain empty.
@@ -1270,7 +1286,7 @@ func (s *connection) handleFrames(
 if handleErr != nil {
 continue
 }
-if err := s.handleFrame(frame, encLevel, destConnID); err != nil {
+if err := s.handleFrame(frame, encLevel, destConnID, rcvTime); err != nil {
 if log == nil {
 return false, err
 }
@@ -1291,7 +1307,7 @@ func (s *connection) handleFrames(
 // We receive a Handshake packet that contains the CRYPTO frame that allows us to complete the handshake,
 // and an ACK serialized after that CRYPTO frame. In this case, we still want to process the ACK frame.
 if !handshakeWasComplete && s.handshakeComplete {
-if err := s.handleHandshakeComplete(); err != nil {
+if err := s.handleHandshakeComplete(rcvTime); err != nil {
 return false, err
 }
 }
@@ -1299,20 +1315,25 @@ func (s *connection) handleFrames(
 return
 }

-func (s *connection) handleFrame(f wire.Frame, encLevel protocol.EncryptionLevel, destConnID protocol.ConnectionID) error {
+func (s *connection) handleFrame(
+f wire.Frame,
+encLevel protocol.EncryptionLevel,
+destConnID protocol.ConnectionID,
+rcvTime time.Time,
+) error {
 var err error
 wire.LogFrame(s.logger, f, false)
 switch frame := f.(type) {
 case *wire.CryptoFrame:
-err = s.handleCryptoFrame(frame, encLevel)
+err = s.handleCryptoFrame(frame, encLevel, rcvTime)
 case *wire.StreamFrame:
-err = s.handleStreamFrame(frame)
+err = s.handleStreamFrame(frame, rcvTime)
 case *wire.AckFrame:
-err = s.handleAckFrame(frame, encLevel)
+err = s.handleAckFrame(frame, encLevel, rcvTime)
 case *wire.ConnectionCloseFrame:
 s.handleConnectionCloseFrame(frame)
 case *wire.ResetStreamFrame:
-err = s.handleResetStreamFrame(frame)
+err = s.handleResetStreamFrame(frame, rcvTime)
 case *wire.MaxDataFrame:
 s.handleMaxDataFrame(frame)
 case *wire.MaxStreamDataFrame:
@@ -1321,6 +1342,7 @@ func (s *connection) handleFrame(f wire.Frame, encLevel protocol.EncryptionLevel
 s.handleMaxStreamsFrame(frame)
 case *wire.DataBlockedFrame:
 case *wire.StreamDataBlockedFrame:
+err = s.handleStreamDataBlockedFrame(frame)
 case *wire.StreamsBlockedFrame:
 case *wire.StopSendingFrame:
 err = s.handleStopSendingFrame(frame)
@@ -1329,7 +1351,10 @@ func (s *connection) handleFrame(f wire.Frame, encLevel protocol.EncryptionLevel
 s.handlePathChallengeFrame(frame)
 case *wire.PathResponseFrame:
 // since we don't send PATH_CHALLENGEs, we don't expect PATH_RESPONSEs
-err = errors.New("unexpected PATH_RESPONSE frame")
+err = &qerr.TransportError{
+ErrorCode: qerr.ProtocolViolation,
+ErrorMessage: "unexpected PATH_RESPONSE frame",
+}
 case *wire.NewTokenFrame:
 err = s.handleNewTokenFrame(frame)
 case *wire.NewConnectionIDFrame:
@@ -1337,7 +1362,7 @@ func (s *connection) handleFrame(f wire.Frame, encLevel protocol.EncryptionLevel
 case *wire.RetireConnectionIDFrame:
 err = s.handleRetireConnectionIDFrame(frame, destConnID)
 case *wire.HandshakeDoneFrame:
-err = s.handleHandshakeDoneFrame()
+err = s.handleHandshakeDoneFrame(rcvTime)
 case *wire.DatagramFrame:
 err = s.handleDatagramFrame(frame)
 default:
@@ -1376,7 +1401,7 @@ func (s *connection) handleConnectionCloseFrame(frame *wire.ConnectionCloseFrame
 })
 }

-func (s *connection) handleCryptoFrame(frame *wire.CryptoFrame, encLevel protocol.EncryptionLevel) error {
+func (s *connection) handleCryptoFrame(frame *wire.CryptoFrame, encLevel protocol.EncryptionLevel, rcvTime time.Time) error {
 if err := s.cryptoStreamManager.HandleCryptoFrame(frame, encLevel); err != nil {
 return err
 }
@@ -1389,10 +1414,10 @@ func (s *connection) handleCryptoFrame(frame *wire.CryptoFrame, encLevel protoco
 return err
 }
 }
-return s.handleHandshakeEvents()
+return s.handleHandshakeEvents(rcvTime)
 }

-func (s *connection) handleHandshakeEvents() error {
+func (s *connection) handleHandshakeEvents(now time.Time) error {
 for {
 ev := s.cryptoStreamHandler.NextEvent()
 var err error
@@ -1413,7 +1438,7 @@ func (s *connection) handleHandshakeEvents() error {
 s.undecryptablePacketsToProcess = s.undecryptablePackets
 s.undecryptablePackets = nil
 case handshake.EventDiscard0RTTKeys:
-err = s.dropEncryptionLevel(protocol.Encryption0RTT)
+err = s.dropEncryptionLevel(protocol.Encryption0RTT, now)
 case handshake.EventWriteInitialData:
 _, err = s.initialStream.Write(ev.Data)
 case handshake.EventWriteHandshakeData:
@@ -1425,17 +1450,15 @@ func (s *connection) handleHandshakeEvents() error {
 }
 }

-func (s *connection) handleStreamFrame(frame *wire.StreamFrame) error {
+func (s *connection) handleStreamFrame(frame *wire.StreamFrame, rcvTime time.Time) error {
 str, err := s.streamsMap.GetOrOpenReceiveStream(frame.StreamID)
 if err != nil {
 return err
 }
-if str == nil {
-// Stream is closed and already garbage collected
-// ignore this StreamFrame
+if str == nil { // stream was already closed and garbage collected
 return nil
 }
-return str.handleStreamFrame(frame)
+return str.handleStreamFrame(frame, rcvTime)
 }

 func (s *connection) handleMaxDataFrame(frame *wire.MaxDataFrame) {
@@ -1455,11 +1478,18 @@ func (s *connection) handleMaxStreamDataFrame(frame *wire.MaxStreamDataFrame) er
 return nil
 }

+func (s *connection) handleStreamDataBlockedFrame(frame *wire.StreamDataBlockedFrame) error {
+// We don't need to do anything in response to a STREAM_DATA_BLOCKED frame,
+// but we need to make sure that the stream ID is valid.
+_, err := s.streamsMap.GetOrOpenReceiveStream(frame.StreamID)
+return err
+}
+
 func (s *connection) handleMaxStreamsFrame(frame *wire.MaxStreamsFrame) {
 s.streamsMap.HandleMaxStreamsFrame(frame)
 }

-func (s *connection) handleResetStreamFrame(frame *wire.ResetStreamFrame) error {
+func (s *connection) handleResetStreamFrame(frame *wire.ResetStreamFrame, rcvTime time.Time) error {
 str, err := s.streamsMap.GetOrOpenReceiveStream(frame.StreamID)
 if err != nil {
 return err
@@ -1468,7 +1498,7 @@ func (s *connection) handleResetStreamFrame(frame *wire.ResetStreamFrame) error
 // stream is closed and already garbage collected
 return nil
 }
-return str.handleResetStreamFrame(frame)
+return str.handleResetStreamFrame(frame, rcvTime)
 }

 func (s *connection) handleStopSendingFrame(frame *wire.StopSendingFrame) error {
@@ -1509,7 +1539,7 @@ func (s *connection) handleRetireConnectionIDFrame(f *wire.RetireConnectionIDFra
 return s.connIDGenerator.Retire(f.SequenceNumber, destConnID)
 }

-func (s *connection) handleHandshakeDoneFrame() error {
+func (s *connection) handleHandshakeDoneFrame(rcvTime time.Time) error {
 if s.perspective == protocol.PerspectiveServer {
 return &qerr.TransportError{
 ErrorCode: qerr.ProtocolViolation,
@@ -1517,12 +1547,12 @@ func (s *connection) handleHandshakeDoneFrame() error {
 }
 }
 if !s.handshakeConfirmed {
-return s.handleHandshakeConfirmed()
+return s.handleHandshakeConfirmed(rcvTime)
 }
 return nil
 }

-func (s *connection) handleAckFrame(frame *wire.AckFrame, encLevel protocol.EncryptionLevel) error {
+func (s *connection) handleAckFrame(frame *wire.AckFrame, encLevel protocol.EncryptionLevel, rcvTime time.Time) error {
 acked1RTTPacket, err := s.sentPacketHandler.ReceivedAck(frame, encLevel, s.lastPacketReceivedTime)
 if err != nil {
 return err
@@ -1534,7 +1564,7 @@ func (s *connection) handleAckFrame(frame *wire.AckFrame, encLevel protocol.Encr
 // This is only possible if the ACK was sent in a 1-RTT packet.
 // This is an optimization over simply waiting for a HANDSHAKE_DONE frame, see section 4.1.2 of RFC 9001.
 if s.perspective == protocol.PerspectiveClient && !s.handshakeConfirmed {
-if err := s.handleHandshakeConfirmed(); err != nil {
+if err := s.handleHandshakeConfirmed(rcvTime); err != nil {
 return err
 }
 }
@@ -1627,6 +1657,8 @@ func (s *connection) handleCloseError(closeErr *closeError) {
 errors.As(e, &recreateErr),
 errors.As(e, &applicationErr),
 errors.As(e, &transportErr):
+case closeErr.immediate:
+e = closeErr.err
 default:
 e = &qerr.TransportError{
 ErrorCode: qerr.InternalError,
@@ -1635,11 +1667,16 @@ func (s *connection) handleCloseError(closeErr *closeError) {
 }

 s.streamsMap.CloseWithError(e)
-s.connIDManager.Close()
 if s.datagramQueue != nil {
 s.datagramQueue.CloseWithError(e)
 }
+
+// In rare instances, the connection ID manager might switch to a new connection ID
+// when sending the CONNECTION_CLOSE frame.
+// The connection ID manager removes the active stateless reset token from the packet
+// handler map when it is closed, so we need to make sure that this happens last.
+defer s.connIDManager.Close()
+
 if s.tracer != nil && s.tracer.ClosedConnection != nil && !errors.As(e, &recreateErr) {
 s.tracer.ClosedConnection(e)
 }
@@ -1666,11 +1703,11 @@ func (s *connection) handleCloseError(closeErr *closeError) {
 s.connIDGenerator.ReplaceWithClosed(connClosePacket)
 }

-func (s *connection) dropEncryptionLevel(encLevel protocol.EncryptionLevel) error {
+func (s *connection) dropEncryptionLevel(encLevel protocol.EncryptionLevel, now time.Time) error {
 if s.tracer != nil && s.tracer.DroppedEncryptionLevel != nil {
 s.tracer.DroppedEncryptionLevel(encLevel)
 }
-s.sentPacketHandler.DropPackets(encLevel)
+s.sentPacketHandler.DropPackets(encLevel, now)
 s.receivedPacketHandler.DropPackets(encLevel)
 //nolint:exhaustive // only Initial and 0-RTT need special treatment
 switch encLevel {
@@ -1772,7 +1809,7 @@ func (s *connection) applyTransportParameters() {
 if params.MaxIdleTimeout > 0 {
 s.idleTimeout = min(s.idleTimeout, params.MaxIdleTimeout)
 }
-s.keepAliveInterval = min(s.config.KeepAlivePeriod, min(s.idleTimeout/2, protocol.MaxKeepAliveInterval))
+s.keepAliveInterval = min(s.config.KeepAlivePeriod, s.idleTimeout/2)
 s.streamsMap.UpdateLimits(params)
 s.frameParser.SetAckDelayExponent(params.AckDelayExponent)
 s.connFlowController.UpdateSendWindow(params.InitialMaxData)
@@ -1822,28 +1859,10 @@ func (s *connection) triggerSending(now time.Time) error {
 case ackhandler.SendAck:
 // We can at most send a single ACK only packet.
 // There will only be a new ACK after receiving new packets.
-// SendAck is only returned when we're congestion limited, so we don't need to set the pacinggs timer.
+// SendAck is only returned when we're congestion limited, so we don't need to set the pacing timer.
 return s.maybeSendAckOnlyPacket(now)
-case ackhandler.SendPTOInitial:
-if err := s.sendProbePacket(protocol.EncryptionInitial, now); err != nil {
-return err
-}
-if s.sendQueue.WouldBlock() {
-s.scheduleSending()
-return nil
-}
-return s.triggerSending(now)
-case ackhandler.SendPTOHandshake:
-if err := s.sendProbePacket(protocol.EncryptionHandshake, now); err != nil {
-return err
-}
-if s.sendQueue.WouldBlock() {
-s.scheduleSending()
-return nil
-}
-return s.triggerSending(now)
-case ackhandler.SendPTOAppData:
-if err := s.sendProbePacket(protocol.Encryption1RTT, now); err != nil {
+case ackhandler.SendPTOInitial, ackhandler.SendPTOHandshake, ackhandler.SendPTOAppData:
+if err := s.sendProbePacket(sendMode, now); err != nil {
 return err
 }
 if s.sendQueue.WouldBlock() {
@@ -1862,7 +1881,7 @@ func (s *connection) sendPackets(now time.Time) error {
 // Performance-wise, this doesn't matter, since we only send a very small (<10) number of
 // MTU probe packets per connection.
 if s.handshakeConfirmed && s.mtuDiscoverer != nil && s.mtuDiscoverer.ShouldSendProbe(now) {
-ping, size := s.mtuDiscoverer.GetPing()
+ping, size := s.mtuDiscoverer.GetPing(now)
 p, buf, err := s.packer.PackMTUProbePacket(ping, size, s.version)
 if err != nil {
 return err
@@ -1871,15 +1890,12 @@ func (s *connection) sendPackets(now time.Time) error {
 s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.StreamFrames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, ecn, buf.Len(), false)
 s.registerPackedShortHeaderPacket(p, ecn, now)
 s.sendQueue.Send(buf, 0, ecn)
-// This is kind of a hack. We need to trigger sending again somehow.
-s.pacingDeadline = deadlineSendImmediately
+// There's (likely) more data to send. Loop around again.
+s.scheduleSending()
 return nil
 }

-if isBlocked, offset := s.connFlowController.IsNewlyBlocked(); isBlocked {
-s.framer.QueueControlFrame(&wire.DataBlockedFrame{MaximumData: offset})
-}
-if offset := s.connFlowController.GetWindowUpdate(); offset > 0 {
+if offset := s.connFlowController.GetWindowUpdate(now); offset > 0 {
 s.framer.QueueControlFrame(&wire.MaxDataFrame{MaximumData: offset})
 }
 if cf := s.cryptoStreamManager.GetPostHandshakeData(protocol.MaxPostHandshakeCryptoFrameSize); cf != nil {
@@ -1887,7 +1903,7 @@ func (s *connection) sendPackets(now time.Time) error {
 }

 if !s.handshakeConfirmed {
-packet, err := s.packer.PackCoalescedPacket(false, s.maxPacketSize(), s.version)
+packet, err := s.packer.PackCoalescedPacket(false, s.maxPacketSize(), now, s.version)
 if err != nil || packet == nil {
 return err
 }
@@ -1999,6 +2015,7 @@ func (s *connection) sendPacketsWithGSO(now time.Time) error {
 return nil
 }

+ecn = nextECN
 buf = getLargePacketBuffer()
 }
 }
@@ -2014,7 +2031,7 @@ func (s *connection) resetPacingDeadline() {
 func (s *connection) maybeSendAckOnlyPacket(now time.Time) error {
 if !s.handshakeConfirmed {
 ecn := s.sentPacketHandler.ECNMode(false)
-packet, err := s.packer.PackCoalescedPacket(true, s.maxPacketSize(), s.version)
+packet, err := s.packer.PackCoalescedPacket(true, s.maxPacketSize(), now, s.version)
 if err != nil {
 return err
 }
@@ -2025,7 +2042,7 @@ func (s *connection) maybeSendAckOnlyPacket(now time.Time) error {
 }

 ecn := s.sentPacketHandler.ECNMode(true)
-p, buf, err := s.packer.PackAckOnlyPacket(s.maxPacketSize(), s.version)
+p, buf, err := s.packer.PackAckOnlyPacket(s.maxPacketSize(), now, s.version)
 if err != nil {
 if err == errNothingToPack {
 return nil
@@ -2038,7 +2055,19 @@ func (s *connection) maybeSendAckOnlyPacket(now time.Time) error {
 return nil
 }

-func (s *connection) sendProbePacket(encLevel protocol.EncryptionLevel, now time.Time) error {
+func (s *connection) sendProbePacket(sendMode ackhandler.SendMode, now time.Time) error {
+var encLevel protocol.EncryptionLevel
+//nolint:exhaustive // We only need to handle the PTO send modes here.
+switch sendMode {
+case ackhandler.SendPTOInitial:
+encLevel = protocol.EncryptionInitial
+case ackhandler.SendPTOHandshake:
+encLevel = protocol.EncryptionHandshake
+case ackhandler.SendPTOAppData:
+encLevel = protocol.Encryption1RTT
+default:
+return fmt.Errorf("connection BUG: unexpected send mode: %d", sendMode)
+}
 // Queue probe packets until we actually send out a packet,
 // or until there are no more packets to queue.
 var packet *coalescedPacket
@@ -2047,7 +2076,7 @@ func (s *connection) sendProbePacket(encLevel protocol.EncryptionLevel, now time
 break
 }
 var err error
-packet, err = s.packer.MaybePackProbePacket(encLevel, s.maxPacketSize(), s.version)
+packet, err = s.packer.MaybePackProbePacket(encLevel, s.maxPacketSize(), now, s.version)
 if err != nil {
 return err
 }
@@ -2058,7 +2087,7 @@ func (s *connection) sendProbePacket(encLevel protocol.EncryptionLevel, now time
 if packet == nil {
 s.retransmissionQueue.AddPing(encLevel)
 var err error
-packet, err = s.packer.MaybePackProbePacket(encLevel, s.maxPacketSize(), s.version)
+packet, err = s.packer.MaybePackProbePacket(encLevel, s.maxPacketSize(), now, s.version)
 if err != nil {
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -2073,7 +2102,7 @@ func (s *connection) sendProbePacket(encLevel protocol.EncryptionLevel, now time
|
||||||
// If there was nothing to pack, the returned size is 0.
|
// If there was nothing to pack, the returned size is 0.
|
||||||
func (s *connection) appendOneShortHeaderPacket(buf *packetBuffer, maxSize protocol.ByteCount, ecn protocol.ECN, now time.Time) (protocol.ByteCount, error) {
|
func (s *connection) appendOneShortHeaderPacket(buf *packetBuffer, maxSize protocol.ByteCount, ecn protocol.ECN, now time.Time) (protocol.ByteCount, error) {
|
||||||
startLen := buf.Len()
|
startLen := buf.Len()
|
||||||
p, err := s.packer.AppendPacket(buf, maxSize, s.version)
|
p, err := s.packer.AppendPacket(buf, maxSize, now, s.version)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
@ -2111,7 +2140,7 @@ func (s *connection) sendPackedCoalescedPacket(packet *coalescedPacket, ecn prot
|
||||||
!s.droppedInitialKeys {
|
!s.droppedInitialKeys {
|
||||||
// On the client side, Initial keys are dropped as soon as the first Handshake packet is sent.
|
// On the client side, Initial keys are dropped as soon as the first Handshake packet is sent.
|
||||||
// See Section 4.9.1 of RFC 9001.
|
// See Section 4.9.1 of RFC 9001.
|
||||||
if err := s.dropEncryptionLevel(protocol.EncryptionInitial); err != nil {
|
if err := s.dropEncryptionLevel(protocol.EncryptionInitial, now); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -2251,6 +2280,8 @@ func (s *connection) queueControlFrame(f wire.Frame) {
|
||||||
s.scheduleSending()
|
s.scheduleSending()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *connection) onHasConnectionData() { s.scheduleSending() }
|
||||||
|
|
||||||
func (s *connection) onHasStreamData(id protocol.StreamID, str sendStreamI) {
|
func (s *connection) onHasStreamData(id protocol.StreamID, str sendStreamI) {
|
||||||
s.framer.AddActiveStream(id, str)
|
s.framer.AddActiveStream(id, str)
|
||||||
s.scheduleSending()
|
s.scheduleSending()
|
||||||
|
@ -2300,17 +2331,8 @@ func (s *connection) ReceiveDatagram(ctx context.Context) ([]byte, error) {
|
||||||
return s.datagramQueue.Receive(ctx)
|
return s.datagramQueue.Receive(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *connection) LocalAddr() net.Addr {
|
func (s *connection) LocalAddr() net.Addr { return s.conn.LocalAddr() }
|
||||||
return s.conn.LocalAddr()
|
func (s *connection) RemoteAddr() net.Addr { return s.conn.RemoteAddr() }
|
||||||
}
|
|
||||||
|
|
||||||
func (s *connection) RemoteAddr() net.Addr {
|
|
||||||
return s.conn.RemoteAddr()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *connection) GetVersion() protocol.Version {
|
|
||||||
return s.version
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *connection) NextConnection(ctx context.Context) (Connection, error) {
|
func (s *connection) NextConnection(ctx context.Context) (Connection, error) {
|
||||||
// The handshake might fail after the server rejected 0-RTT.
|
// The handshake might fail after the server rejected 0-RTT.
|
||||||
|
|
7  vendor/github.com/quic-go/quic-go/connection_logging.go  (generated, vendored)
@@ -125,12 +125,7 @@ func (s *connection) logShortHeaderPacket(
 ack = toLoggingAckFrame(ackFrame)
 }
 s.tracer.SentShortHeaderPacket(
-&logging.ShortHeader{
-DestConnectionID: destConnID,
-PacketNumber: pn,
-PacketNumberLen: pnLen,
-KeyPhase: kp,
-},
+&logging.ShortHeader{DestConnectionID: destConnID, PacketNumber: pn, PacketNumberLen: pnLen, KeyPhase: kp},
 size,
 ecn,
 ack,
8  vendor/github.com/quic-go/quic-go/errors.go  (generated, vendored)
@@ -50,8 +50,8 @@ type StreamError struct {
 }

 func (e *StreamError) Is(target error) bool {
-_, ok := target.(*StreamError)
-return ok
+t, ok := target.(*StreamError)
+return ok && e.StreamID == t.StreamID && e.ErrorCode == t.ErrorCode && e.Remote == t.Remote
 }

 func (e *StreamError) Error() string {
@@ -68,8 +68,8 @@ type DatagramTooLargeError struct {
 }

 func (e *DatagramTooLargeError) Is(target error) bool {
-_, ok := target.(*DatagramTooLargeError)
-return ok
+t, ok := target.(*DatagramTooLargeError)
+return ok && e.MaxDatagramPayloadSize == t.MaxDatagramPayloadSize
 }

 func (e *DatagramTooLargeError) Error() string { return "DATAGRAM frame too large" }
121  vendor/github.com/quic-go/quic-go/framer.go  (generated, vendored)
@@ -3,8 +3,10 @@ package quic
 import (
 "slices"
 "sync"
+"time"

 "github.com/quic-go/quic-go/internal/ackhandler"
+"github.com/quic-go/quic-go/internal/flowcontrol"
 "github.com/quic-go/quic-go/internal/protocol"
 "github.com/quic-go/quic-go/internal/utils/ringbuffer"
 "github.com/quic-go/quic-go/internal/wire"
@@ -21,7 +23,7 @@ const (
 const maxStreamControlFrameSize = 25

 type streamControlFrameGetter interface {
-getControlFrame() (_ ackhandler.Frame, ok, hasMore bool)
+getControlFrame(time.Time) (_ ackhandler.Frame, ok, hasMore bool)
 }

 type framer struct {
@@ -34,13 +36,15 @@ type framer struct {
 controlFrameMutex sync.Mutex
 controlFrames []wire.Frame
 pathResponses []*wire.PathResponseFrame
+connFlowController flowcontrol.ConnectionFlowController
 queuedTooManyControlFrames bool
 }

-func newFramer() *framer {
+func newFramer(connFlowController flowcontrol.ConnectionFlowController) *framer {
 return &framer{
 activeStreams: make(map[protocol.StreamID]sendStreamI),
 streamsWithControlFrames: make(map[protocol.StreamID]streamControlFrameGetter),
+connFlowController: connFlowController,
 }
 }

@@ -78,10 +82,80 @@ func (f *framer) QueueControlFrame(frame wire.Frame) {
 f.controlFrames = append(f.controlFrames, frame)
 }

-func (f *framer) AppendControlFrames(frames []ackhandler.Frame, maxLen protocol.ByteCount, v protocol.Version) ([]ackhandler.Frame, protocol.ByteCount) {
+func (f *framer) Append(
+frames []ackhandler.Frame,
+streamFrames []ackhandler.StreamFrame,
+maxLen protocol.ByteCount,
+now time.Time,
+v protocol.Version,
+) ([]ackhandler.Frame, []ackhandler.StreamFrame, protocol.ByteCount) {
 f.controlFrameMutex.Lock()
-defer f.controlFrameMutex.Unlock()
+frames, controlFrameLen := f.appendControlFrames(frames, maxLen, now, v)
+maxLen -= controlFrameLen
+
+var lastFrame ackhandler.StreamFrame
+var streamFrameLen protocol.ByteCount
+f.mutex.Lock()
+// pop STREAM frames, until less than 128 bytes are left in the packet
+numActiveStreams := f.streamQueue.Len()
+for i := 0; i < numActiveStreams; i++ {
+if protocol.MinStreamFrameSize > maxLen {
+break
+}
+sf, blocked := f.getNextStreamFrame(maxLen, v)
+if sf.Frame != nil {
+streamFrames = append(streamFrames, sf)
+maxLen -= sf.Frame.Length(v)
+lastFrame = sf
+streamFrameLen += sf.Frame.Length(v)
+}
+// If the stream just became blocked on stream flow control, attempt to pack the
+// STREAM_DATA_BLOCKED into the same packet.
+if blocked != nil {
+l := blocked.Length(v)
+// In case it doesn't fit, queue it for the next packet.
+if maxLen < l {
+f.controlFrames = append(f.controlFrames, blocked)
+break
+}
+frames = append(frames, ackhandler.Frame{Frame: blocked})
+maxLen -= l
+controlFrameLen += l
+}
+}
+
+// The only way to become blocked on connection-level flow control is by sending STREAM frames.
+if isBlocked, offset := f.connFlowController.IsNewlyBlocked(); isBlocked {
+blocked := &wire.DataBlockedFrame{MaximumData: offset}
+l := blocked.Length(v)
+// In case it doesn't fit, queue it for the next packet.
+if maxLen >= l {
+frames = append(frames, ackhandler.Frame{Frame: blocked})
+controlFrameLen += l
+} else {
+f.controlFrames = append(f.controlFrames, blocked)
+}
+}
+
+f.mutex.Unlock()
+f.controlFrameMutex.Unlock()
+
+if lastFrame.Frame != nil {
+// account for the smaller size of the last STREAM frame
+streamFrameLen -= lastFrame.Frame.Length(v)
+lastFrame.Frame.DataLenPresent = false
+streamFrameLen += lastFrame.Frame.Length(v)
+}
+
+return frames, streamFrames, controlFrameLen + streamFrameLen
+}
+
+func (f *framer) appendControlFrames(
+frames []ackhandler.Frame,
+maxLen protocol.ByteCount,
+now time.Time,
+v protocol.Version,
+) ([]ackhandler.Frame, protocol.ByteCount) {
 var length protocol.ByteCount
 // add a PATH_RESPONSE first, but only pack a single PATH_RESPONSE per packet
 if len(f.pathResponses) > 0 {
@@ -101,7 +175,7 @@ func (f *framer) AppendControlFrames(frames []ackhandler.Frame, maxLen protocol.
 if remainingLen <= maxStreamControlFrameSize {
 break
 }
-fr, ok, hasMore := str.getControlFrame()
+fr, ok, hasMore := str.getControlFrame(now)
 if !hasMore {
 delete(f.streamsWithControlFrames, id)
 }
@@ -163,56 +237,33 @@ func (f *framer) RemoveActiveStream(id protocol.StreamID) {
 delete(f.activeStreams, id)
 // We don't delete the stream from the streamQueue,
 // since we'd have to iterate over the ringbuffer.
-// Instead, we check if the stream is still in activeStreams in AppendStreamFrames.
+// Instead, we check if the stream is still in activeStreams when appending STREAM frames.
 f.mutex.Unlock()
 }

-func (f *framer) AppendStreamFrames(frames []ackhandler.StreamFrame, maxLen protocol.ByteCount, v protocol.Version) ([]ackhandler.StreamFrame, protocol.ByteCount) {
-startLen := len(frames)
-var length protocol.ByteCount
-f.mutex.Lock()
-// pop STREAM frames, until less than 128 bytes are left in the packet
-numActiveStreams := f.streamQueue.Len()
-for i := 0; i < numActiveStreams; i++ {
-if protocol.MinStreamFrameSize+length > maxLen {
-break
-}
+func (f *framer) getNextStreamFrame(maxLen protocol.ByteCount, v protocol.Version) (ackhandler.StreamFrame, *wire.StreamDataBlockedFrame) {
 id := f.streamQueue.PopFront()
 // This should never return an error. Better check it anyway.
 // The stream will only be in the streamQueue, if it enqueued itself there.
 str, ok := f.activeStreams[id]
 // The stream might have been removed after being enqueued.
 if !ok {
-continue
+return ackhandler.StreamFrame{}, nil
 }
-remainingLen := maxLen - length
 // For the last STREAM frame, we'll remove the DataLen field later.
 // Therefore, we can pretend to have more bytes available when popping
 // the STREAM frame (which will always have the DataLen set).
-remainingLen += protocol.ByteCount(quicvarint.Len(uint64(remainingLen)))
-frame, ok, hasMoreData := str.popStreamFrame(remainingLen, v)
+maxLen += protocol.ByteCount(quicvarint.Len(uint64(maxLen)))
+frame, blocked, hasMoreData := str.popStreamFrame(maxLen, v)
 if hasMoreData { // put the stream back in the queue (at the end)
 f.streamQueue.PushBack(id)
 } else { // no more data to send. Stream is not active
 delete(f.activeStreams, id)
 }
-// The frame can be "nil"
+// Note that the frame.Frame can be nil:
 // * if the stream was canceled after it said it had data
 // * the remaining size doesn't allow us to add another STREAM frame
-if !ok {
-continue
-}
-frames = append(frames, frame)
-length += frame.Frame.Length(v)
-}
-f.mutex.Unlock()
-if len(frames) > startLen {
-l := frames[len(frames)-1].Frame.Length(v)
-// account for the smaller size of the last STREAM frame
-frames[len(frames)-1].Frame.DataLenPresent = false
-length += frames[len(frames)-1].Frame.Length(v) - l
-}
-return frames, length
+return frame, blocked
 }

 func (f *framer) Handle0RTTRejection() {
9  vendor/github.com/quic-go/quic-go/http3/body.go  (generated, vendored)
@@ -4,6 +4,7 @@ import (
 "context"
 "errors"
 "io"
+"sync"

 "github.com/quic-go/quic-go"
 )
@@ -96,7 +97,7 @@ type hijackableBody struct {
 // The channel is closed when the user is done with this response:
 // either when Read() errors, or when Close() is called.
 reqDone chan<- struct{}
-reqDoneClosed bool
+reqDoneOnce sync.Once
 }

 var _ io.ReadCloser = &hijackableBody{}
@@ -117,13 +118,11 @@ func (r *hijackableBody) Read(b []byte) (int, error) {
 }

 func (r *hijackableBody) requestDone() {
-if r.reqDoneClosed || r.reqDone == nil {
-return
-}
 if r.reqDone != nil {
+r.reqDoneOnce.Do(func() {
 close(r.reqDone)
+})
 }
-r.reqDoneClosed = true
 }

 func (r *hijackableBody) Close() error {
15  vendor/github.com/quic-go/quic-go/http3/client.go  (generated, vendored)
@@ -294,10 +294,13 @@ func (c *ClientConn) sendRequestBody(str Stream, body io.ReadCloser, contentLeng
 }

 func (c *ClientConn) doRequest(req *http.Request, str *requestStream) (*http.Response, error) {
+trace := httptrace.ContextClientTrace(req.Context())
 if err := str.SendRequestHeader(req); err != nil {
+traceWroteRequest(trace, err)
 return nil, err
 }
 if req.Body == nil {
+traceWroteRequest(trace, nil)
 str.Close()
 } else {
 // send the request body asynchronously
@@ -308,7 +311,9 @@ func (c *ClientConn) doRequest(req *http.Request, str *requestStream) (*http.Res
 if req.ContentLength > 0 {
 contentLength = req.ContentLength
 }
-if err := c.sendRequestBody(str, req.Body, contentLength); err != nil {
+err := c.sendRequestBody(str, req.Body, contentLength)
+traceWroteRequest(trace, err)
+if err != nil {
 if c.logger != nil {
 c.logger.Debug("error writing request", "error", err)
 }
@@ -318,7 +323,6 @@ func (c *ClientConn) doRequest(req *http.Request, str *requestStream) (*http.Res
 }

 // copy from net/http: support 1xx responses
-trace := httptrace.ContextClientTrace(req.Context())
 num1xx := 0 // number of informational 1xx headers received
 const max1xxResponses = 5 // arbitrary bound on number of informational responses

@@ -338,10 +342,9 @@ func (c *ClientConn) doRequest(req *http.Request, str *requestStream) (*http.Res
 if num1xx > max1xxResponses {
 return nil, errors.New("http: too many 1xx informational responses")
 }
-if trace != nil && trace.Got1xxResponse != nil {
-if err := trace.Got1xxResponse(resCode, textproto.MIMEHeader(res.Header)); err != nil {
-return nil, err
-}
+traceGot1xxResponse(trace, resCode, textproto.MIMEHeader(res.Header))
+if resCode == 100 {
+traceGot100Continue(trace)
 }
 continue
 }
4  vendor/github.com/quic-go/quic-go/http3/conn.go  (generated, vendored)
@@ -7,6 +7,7 @@ import (
 "log/slog"
 "net"
 "net/http"
+"net/http/httptrace"
 "sync"
 "sync/atomic"
 "time"
@@ -123,7 +124,8 @@ func (c *connection) openRequestStream(
 rsp.Trailer = hdr
 return nil
 })
-return newRequestStream(hstr, requestWriter, reqDone, c.decoder, disableCompression, maxHeaderBytes, rsp), nil
+trace := httptrace.ContextClientTrace(ctx)
+return newRequestStream(hstr, requestWriter, reqDone, c.decoder, disableCompression, maxHeaderBytes, rsp, trace), nil
 }

 func (c *connection) decodeTrailers(r io.Reader, l, maxHeaderBytes uint64) (http.Header, error) {
5  vendor/github.com/quic-go/quic-go/http3/error.go  (generated, vendored)
@@ -33,6 +33,11 @@ func (e *Error) Error() string {
 return s
 }

+func (e *Error) Is(target error) bool {
+t, ok := target.(*Error)
+return ok && e.ErrorCode == t.ErrorCode && e.Remote == t.Remote
+}
+
 func maybeReplaceError(err error) error {
 if err == nil {
 return nil
26  vendor/github.com/quic-go/quic-go/http3/http_stream.go  (generated, vendored)
@@ -6,6 +6,7 @@ import (
 "fmt"
 "io"
 "net/http"
+"net/http/httptrace"

 "github.com/quic-go/quic-go"
 "github.com/quic-go/quic-go/internal/protocol"
@@ -147,10 +148,12 @@ type requestStream struct {
 reqDone chan<- struct{}
 disableCompression bool
 response *http.Response
+trace *httptrace.ClientTrace

 sentRequest bool
 requestedGzip bool
 isConnect bool
+firstByte bool
 }

 var _ RequestStream = &requestStream{}
@@ -163,6 +166,7 @@ func newRequestStream(
 disableCompression bool,
 maxHeaderBytes uint64,
 rsp *http.Response,
+trace *httptrace.ClientTrace,
 ) *requestStream {
 return &requestStream{
 stream: str,
@@ -172,6 +176,7 @@ func newRequestStream(
 disableCompression: disableCompression,
 maxHeaderBytes: maxHeaderBytes,
 response: rsp,
+trace: trace,
 }
 }

@@ -197,8 +202,12 @@ func (s *requestStream) SendRequestHeader(req *http.Request) error {

 func (s *requestStream) ReadResponse() (*http.Response, error) {
 fp := &frameParser{
-r: s.Stream,
 conn: s.conn,
+r: &tracingReader{
+Reader: s.Stream,
+first: &s.firstByte,
+trace: s.trace,
+},
 }
 frame, err := fp.ParseNext()
 if err != nil {
@@ -268,3 +277,18 @@ func (s *stream) ReceiveDatagram(ctx context.Context) ([]byte, error) {
 // TODO: reject if datagrams are not negotiated (yet)
 return s.datagrams.Receive(ctx)
 }
+
+type tracingReader struct {
+io.Reader
+first *bool
+trace *httptrace.ClientTrace
+}
+
+func (r *tracingReader) Read(b []byte) (int, error) {
+n, err := r.Reader.Read(b)
+if n > 0 && r.first != nil && !*r.first {
+traceGotFirstResponseByte(r.trace)
+*r.first = true
+}
+return n, err
+}
48  vendor/github.com/quic-go/quic-go/http3/ip_addr.go  (generated, vendored, new file)
@@ -0,0 +1,48 @@
+package http3
+
+import (
+"net"
+"strings"
+)
+
+// An addrList represents a list of network endpoint addresses.
+// Copy from [net.addrList] and change type from [net.Addr] to [net.IPAddr]
+type addrList []net.IPAddr
+
+// isIPv4 reports whether addr contains an IPv4 address.
+func isIPv4(addr net.IPAddr) bool {
+return addr.IP.To4() != nil
+}
+
+// isNotIPv4 reports whether addr does not contain an IPv4 address.
+func isNotIPv4(addr net.IPAddr) bool { return !isIPv4(addr) }
+
+// forResolve returns the most appropriate address in address for
+// a call to ResolveTCPAddr, ResolveUDPAddr, or ResolveIPAddr.
+// IPv4 is preferred, unless addr contains an IPv6 literal.
+func (addrs addrList) forResolve(network, addr string) net.IPAddr {
+var want6 bool
+switch network {
+case "ip":
+// IPv6 literal (addr does NOT contain a port)
+want6 = strings.ContainsRune(addr, ':')
+case "tcp", "udp":
+// IPv6 literal. (addr contains a port, so look for '[')
+want6 = strings.ContainsRune(addr, '[')
+}
+if want6 {
+return addrs.first(isNotIPv4)
+}
+return addrs.first(isIPv4)
+}
+
+// first returns the first address which satisfies strategy, or if
+// none do, then the first address of any kind.
+func (addrs addrList) first(strategy func(net.IPAddr) bool) net.IPAddr {
+for _, addr := range addrs {
+if strategy(addr) {
+return addr
+}
+}
+return addrs[0]
+}
4  vendor/github.com/quic-go/quic-go/http3/mockgen.go  (generated, vendored)
@@ -2,7 +2,7 @@

 package http3

-//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package http3 -destination mock_singleroundtripper_test.go github.com/quic-go/quic-go/http3 SingleRoundTripper"
-type SingleRoundTripper = singleRoundTripper
+//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -mock_names=TestClientConnInterface=MockClientConn -package http3 -destination mock_clientconn_test.go github.com/quic-go/quic-go/http3 TestClientConnInterface"
+type TestClientConnInterface = clientConn

 //go:generate sh -c "go run go.uber.org/mock/mockgen -typed -package http3 -destination mock_quic_early_listener_test.go github.com/quic-go/quic-go/http3 QUICEarlyListener"
17  vendor/github.com/quic-go/quic-go/http3/request_writer.go  (generated, vendored)
@@ -7,6 +7,7 @@ import (
 "io"
 "net"
 "net/http"
+"net/http/httptrace"
 "strconv"
 "strings"
 "sync"
@@ -42,8 +43,12 @@ func (w *requestWriter) WriteRequestHeader(str quic.Stream, req *http.Request, g
 if err := w.writeHeaders(buf, req, gzip); err != nil {
 return err
 }
-_, err := str.Write(buf.Bytes())
+if _, err := str.Write(buf.Bytes()); err != nil {
 return err
+}
+trace := httptrace.ContextClientTrace(req.Context())
+traceWroteHeaders(trace)
+return nil
 }

 func (w *requestWriter) writeHeaders(wr io.Writer, req *http.Request, gzip bool) error {
@@ -198,16 +203,16 @@ func (w *requestWriter) encodeHeaders(req *http.Request, addGzipHeader bool, tra
 // return errRequestHeaderListSize
 // }

-// trace := httptrace.ContextClientTrace(req.Context())
-// traceHeaders := traceHasWroteHeaderField(trace)
+trace := httptrace.ContextClientTrace(req.Context())
+traceHeaders := traceHasWroteHeaderField(trace)

 // Header list size is ok. Write the headers.
 enumerateHeaders(func(name, value string) {
 name = strings.ToLower(name)
 w.encoder.WriteField(qpack.HeaderField{Name: name, Value: value})
-// if traceHeaders {
-// traceWroteHeaderField(trace, name, value)
-// }
+if traceHeaders {
+traceWroteHeaderField(trace, name, value)
+}
 })

 return nil
105  vendor/github.com/quic-go/quic-go/http3/trace.go  (generated, vendored, new file)
@@ -0,0 +1,105 @@
+package http3
+
+import (
+"crypto/tls"
+"net"
+"net/http/httptrace"
+"net/textproto"
+"time"
+
+"github.com/quic-go/quic-go"
+)
+
+func traceGetConn(trace *httptrace.ClientTrace, hostPort string) {
+if trace != nil && trace.GetConn != nil {
+trace.GetConn(hostPort)
+}
+}
+
+// fakeConn is a wrapper for quic.EarlyConnection
+// because the quic connection does not implement net.Conn.
+type fakeConn struct {
+conn quic.EarlyConnection
+}
+
+func (c *fakeConn) Close() error { panic("connection operation prohibited") }
+func (c *fakeConn) Read(p []byte) (int, error) { panic("connection operation prohibited") }
+func (c *fakeConn) Write(p []byte) (int, error) { panic("connection operation prohibited") }
+func (c *fakeConn) SetDeadline(t time.Time) error { panic("connection operation prohibited") }
+func (c *fakeConn) SetReadDeadline(t time.Time) error { panic("connection operation prohibited") }
+func (c *fakeConn) SetWriteDeadline(t time.Time) error { panic("connection operation prohibited") }
+func (c *fakeConn) RemoteAddr() net.Addr { return c.conn.RemoteAddr() }
+func (c *fakeConn) LocalAddr() net.Addr { return c.conn.LocalAddr() }
+
+func traceGotConn(trace *httptrace.ClientTrace, conn quic.EarlyConnection, reused bool) {
+if trace != nil && trace.GotConn != nil {
+trace.GotConn(httptrace.GotConnInfo{
+Conn: &fakeConn{conn: conn},
+Reused: reused,
+})
+}
+}
+
+func traceGotFirstResponseByte(trace *httptrace.ClientTrace) {
+if trace != nil && trace.GotFirstResponseByte != nil {
+trace.GotFirstResponseByte()
+}
+}
+
+func traceGot1xxResponse(trace *httptrace.ClientTrace, code int, header textproto.MIMEHeader) {
+if trace != nil && trace.Got1xxResponse != nil {
+trace.Got1xxResponse(code, header)
+}
+}
+
+func traceGot100Continue(trace *httptrace.ClientTrace) {
+if trace != nil && trace.Got100Continue != nil {
+trace.Got100Continue()
+}
+}
+
+func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
+return trace != nil && trace.WroteHeaderField != nil
+}
+
+func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
+if trace != nil && trace.WroteHeaderField != nil {
+trace.WroteHeaderField(k, []string{v})
+}
+}
+
+func traceWroteHeaders(trace *httptrace.ClientTrace) {
+if trace != nil && trace.WroteHeaders != nil {
+trace.WroteHeaders()
+}
+}
+
+func traceWroteRequest(trace *httptrace.ClientTrace, err error) {
+if trace != nil && trace.WroteRequest != nil {
+trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
+}
+}
+
+func traceConnectStart(trace *httptrace.ClientTrace, network, addr string) {
+if trace != nil && trace.ConnectStart != nil {
+trace.ConnectStart(network, addr)
+}
+}
+
+func traceConnectDone(trace *httptrace.ClientTrace, network, addr string, err error) {
+if trace != nil && trace.ConnectDone != nil {
+trace.ConnectDone(network, addr, err)
+}
+}
+
+func traceTLSHandshakeStart(trace *httptrace.ClientTrace) {
+if trace != nil && trace.TLSHandshakeStart != nil {
+trace.TLSHandshakeStart()
+}
+}
+
+func traceTLSHandshakeDone(trace *httptrace.ClientTrace, state tls.ConnectionState, err error) {
+if trace != nil && trace.TLSHandshakeDone != nil {
+trace.TLSHandshakeDone(state, err)
+}
+}
112  vendor/github.com/quic-go/quic-go/http3/transport.go  (generated, vendored)
@@ -9,6 +9,7 @@ import (
 "log/slog"
 "net"
 "net/http"
+"net/http/httptrace"
 "strings"
 "sync"
 "sync/atomic"
@@ -36,7 +37,7 @@ type RoundTripOpt struct {
 OnlyCachedConn bool
 }

-type singleRoundTripper interface {
+type clientConn interface {
 OpenRequestStream(context.Context) (RequestStream, error)
 RoundTrip(*http.Request) (*http.Response, error)
 }
@@ -46,7 +47,7 @@ type roundTripperWithCount struct {
 dialing chan struct{} // closed as soon as quic.Dial(Early) returned
 dialErr error
 conn quic.EarlyConnection
-rt singleRoundTripper
+clientConn clientConn

 useCount atomic.Int64
 }
@@ -106,7 +107,7 @@ type Transport struct {
 initOnce sync.Once
 initErr error

-newClient func(quic.EarlyConnection) singleRoundTripper
+newClientConn func(quic.EarlyConnection) clientConn

 clients map[string]*roundTripperWithCount
 transport *quic.Transport
@@ -124,8 +125,8 @@ type RoundTripper = Transport
 var ErrNoCachedConn = errors.New("http3: no cached connection was available")

 func (t *Transport) init() error {
-if t.newClient == nil {
-t.newClient = func(conn quic.EarlyConnection) singleRoundTripper {
+if t.newClientConn == nil {
+t.newClientConn = func(conn quic.EarlyConnection) clientConn {
 return newClientConn(
 conn,
 t.EnableDatagrams,
@@ -160,27 +161,37 @@ func (t *Transport) init() error {

 // RoundTripOpt is like RoundTrip, but takes options.
 func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
+rsp, err := t.roundTripOpt(req, opt)
+if err != nil {
+if req.Body != nil {
+req.Body.Close()
+}
+return nil, err
+}
+return rsp, nil
+}
+
+func (t *Transport) roundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
 t.initOnce.Do(func() { t.initErr = t.init() })
 if t.initErr != nil {
 return nil, t.initErr
 }

 if req.URL == nil {
-closeRequestBody(req)
 return nil, errors.New("http3: nil Request.URL")
 }
 if req.URL.Scheme != "https" {
-closeRequestBody(req)
 return nil, fmt.Errorf("http3: unsupported protocol scheme: %s", req.URL.Scheme)
 }
 if req.URL.Host == "" {
-closeRequestBody(req)
 return nil, errors.New("http3: no Host in request URL")
 }
 if req.Header == nil {
-closeRequestBody(req)
 return nil, errors.New("http3: nil Request.Header")
 }
+if req.Method != "" && !validMethod(req.Method) {
+return nil, fmt.Errorf("http3: invalid method %q", req.Method)
+}
 for k, vv := range req.Header {
 if !httpguts.ValidHeaderFieldName(k) {
 return nil, fmt.Errorf("http3: invalid http header field name %q", k)
@@ -192,12 +203,9 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
 }
 }

-if req.Method != "" && !validMethod(req.Method) {
-closeRequestBody(req)
-return nil, fmt.Errorf("http3: invalid method %q", req.Method)
-}
+trace := httptrace.ContextClientTrace(req.Context())

 hostname := authorityAddr(hostnameFromURL(req.URL))
+traceGetConn(trace, hostname)
 cl, isReused, err := t.getClient(req.Context(), hostname, opt.OnlyCachedConn)
 if err != nil {
 return nil, err
@@ -213,23 +221,36 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
 t.removeClient(hostname)
 return nil, cl.dialErr
 }
+traceGotConn(trace, cl.conn, isReused)
 defer cl.useCount.Add(-1)
-rsp, err := cl.rt.RoundTrip(req)
+rsp, err := cl.clientConn.RoundTrip(req)
 if err != nil {
-// non-nil errors on roundtrip are likely due to a problem with the connection
-// so we remove the client from the cache so that subsequent trips reconnect
-// context cancelation is excluded as is does not signify a connection error
-if !errors.Is(err, context.Canceled) {
-t.removeClient(hostname)
+// request aborted due to context cancellation
+select {
+case <-req.Context().Done():
+return nil, err
+default:
 }
+
+// Retry the request on a new connection if:
+// 1. it was sent on a reused connection,
+// 2. this connection is now closed,
+// 3. and the error is a timeout error.
+select {
+case <-cl.conn.Context().Done():
+t.removeClient(hostname)
 if isReused {
-if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
+var nerr net.Error
+if errors.As(err, &nerr) && nerr.Timeout() {
 return t.RoundTripOpt(req, opt)
 }
 }
+return nil, err
+default:
+return nil, err
 }
-return rsp, err
+}
+return rsp, nil
 }

 // RoundTrip does a round trip.
@@ -264,7 +285,7 @@ func (t *Transport) getClient(ctx context.Context, hostname string, onlyCached b
 return
 }
 cl.conn = conn
-cl.rt = rt
+cl.clientConn = rt
 }()
 t.clients[hostname] = cl
 }
@@ -285,7 +306,7 @@ func (t *Transport) getClient(ctx context.Context, hostname string, onlyCached b
 return cl, isReused, nil
 }

-func (t *Transport) dial(ctx context.Context, hostname string) (quic.EarlyConnection, singleRoundTripper, error) {
+func (t *Transport) dial(ctx context.Context, hostname string) (quic.EarlyConnection, clientConn, error) {
 var tlsConf *tls.Config
 if t.TLSClientConfig == nil {
 tlsConf = &tls.Config{}
@@ -313,19 +334,48 @@ func (t *Transport) dial(ctx context.Context, hostname string) (quic.EarlyConnec
 t.transport = &quic.Transport{Conn: udpConn}
 }
 dial = func(ctx context.Context, addr string, tlsCfg *tls.Config, cfg *quic.Config) (quic.EarlyConnection, error) {
-udpAddr, err := net.ResolveUDPAddr("udp", addr)
+network := "udp"
+udpAddr, err := t.resolveUDPAddr(ctx, network, addr)
 if err != nil {
 return nil, err
 }
-return t.transport.DialEarly(ctx, udpAddr, tlsCfg, cfg)
+trace := httptrace.ContextClientTrace(ctx)
+traceConnectStart(trace, network, udpAddr.String())
+traceTLSHandshakeStart(trace)
+conn, err := t.transport.DialEarly(ctx, udpAddr, tlsCfg, cfg)
+var state tls.ConnectionState
+if conn != nil {
+state = conn.ConnectionState().TLS
+}
+traceTLSHandshakeDone(trace, state, err)
+traceConnectDone(trace, network, udpAddr.String(), err)
+return conn, err
 }
 }

 conn, err := dial(ctx, hostname, tlsConf, t.QUICConfig)
 if err != nil {
 return nil, nil, err
 }
-return conn, t.newClient(conn), nil
+return conn, t.newClientConn(conn), nil
+}
+
+func (t *Transport) resolveUDPAddr(ctx context.Context, network, addr string) (*net.UDPAddr, error) {
+host, portStr, err := net.SplitHostPort(addr)
+if err != nil {
+return nil, err
+}
+port, err := net.LookupPort(network, portStr)
+if err != nil {
+return nil, err
+}
+resolver := net.DefaultResolver
+ipAddrs, err := resolver.LookupIPAddr(ctx, host)
+if err != nil {
+return nil, err
+}
+addrs := addrList(ipAddrs)
+ip := addrs.forResolve(network, addr)
+return &net.UDPAddr{IP: ip.IP, Port: port, Zone: ip.Zone}, nil
 }

 func (t *Transport) removeClient(hostname string) {
@@ -378,12 +428,6 @@ func (t *Transport) Close() error {
 return nil
 }

-func closeRequestBody(req *http.Request) {
-if req.Body != nil {
-req.Body.Close()
-}
-}
-
 func validMethod(method string) bool {
 /*
 Method = "OPTIONS" ; Section 9.2
9  vendor/github.com/quic-go/quic-go/interface.go  (generated, vendored)
@@ -98,7 +98,6 @@ type ReceiveStream interface {
 // SetReadDeadline sets the deadline for future Read calls and
 // any currently-blocked Read call.
 // A zero value for t means Read will not time out.
-
 SetReadDeadline(t time.Time) error
 }

@@ -357,10 +356,10 @@ type ClientHelloInfo struct {
 type ConnectionState struct {
 // TLS contains information about the TLS connection state, incl. the tls.ConnectionState.
 TLS tls.ConnectionState
-// SupportsDatagrams says if support for QUIC datagrams (RFC 9221) was negotiated.
-// This requires both nodes to support and enable the datagram extensions (via Config.EnableDatagrams).
-// If datagram support was negotiated, datagrams can be sent and received using the
-// SendDatagram and ReceiveDatagram methods on the Connection.
+// SupportsDatagrams indicates whether the peer advertised support for QUIC datagrams (RFC 9221).
+// When true, datagrams can be sent using the Connection's SendDatagram method.
+// This is a unilateral declaration by the peer - receiving datagrams is only possible if
+// datagram support was enabled locally via Config.EnableDatagrams.
 SupportsDatagrams bool
 // Used0RTT says if 0-RTT resumption was used.
 Used0RTT bool
13  vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go  (generated, vendored)
@@ -14,10 +14,9 @@ type SentPacketHandler interface {
 // ReceivedAck processes an ACK frame.
 // It does not store a copy of the frame.
 ReceivedAck(f *wire.AckFrame, encLevel protocol.EncryptionLevel, rcvTime time.Time) (bool /* 1-RTT packet acked */, error)
-ReceivedBytes(protocol.ByteCount)
-DropPackets(protocol.EncryptionLevel)
-ResetForRetry(rcvTime time.Time) error
-SetHandshakeConfirmed()
+ReceivedBytes(_ protocol.ByteCount, rcvTime time.Time)
+DropPackets(_ protocol.EncryptionLevel, rcvTime time.Time)
+ResetForRetry(rcvTime time.Time)

 // The SendMode determines if and what kind of packets can be sent.
 SendMode(now time.Time) SendMode
@@ -34,12 +33,12 @@ type SentPacketHandler interface {
 PopPacketNumber(protocol.EncryptionLevel) protocol.PacketNumber

 GetLossDetectionTimeout() time.Time
-OnLossDetectionTimeout() error
+OnLossDetectionTimeout(now time.Time) error
 }

 type sentPacketTracker interface {
 GetLowestPacketNotConfirmedAcked() protocol.PacketNumber
-ReceivedPacket(protocol.EncryptionLevel)
+ReceivedPacket(_ protocol.EncryptionLevel, rcvTime time.Time)
 }

 // ReceivedPacketHandler handles ACKs needed to send for incoming packets
@@ -49,5 +48,5 @@ type ReceivedPacketHandler interface {
 DropPackets(protocol.EncryptionLevel)

 GetAlarmTimeout() time.Time
-GetAckFrame(encLevel protocol.EncryptionLevel, onlyIfQueued bool) *wire.AckFrame
+GetAckFrame(_ protocol.EncryptionLevel, now time.Time, onlyIfQueued bool) *wire.AckFrame
 }
6  vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_handler.go  (generated, vendored)
@@ -38,7 +38,7 @@ func (h *receivedPacketHandler) ReceivedPacket(
 rcvTime time.Time,
 ackEliciting bool,
 ) error {
-h.sentPackets.ReceivedPacket(encLevel)
+h.sentPackets.ReceivedPacket(encLevel, rcvTime)
 switch encLevel {
 case protocol.EncryptionInitial:
 return h.initialPackets.ReceivedPacket(pn, ecn, rcvTime, ackEliciting)
@@ -87,7 +87,7 @@ func (h *receivedPacketHandler) GetAlarmTimeout() time.Time {
 return h.appDataPackets.GetAlarmTimeout()
 }

-func (h *receivedPacketHandler) GetAckFrame(encLevel protocol.EncryptionLevel, onlyIfQueued bool) *wire.AckFrame {
+func (h *receivedPacketHandler) GetAckFrame(encLevel protocol.EncryptionLevel, now time.Time, onlyIfQueued bool) *wire.AckFrame {
 //nolint:exhaustive // 0-RTT packets can't contain ACK frames.
 switch encLevel {
 case protocol.EncryptionInitial:
@@ -101,7 +101,7 @@ func (h *receivedPacketHandler) GetAckFrame(encLevel protocol.EncryptionLevel, o
 }
 return nil
 case protocol.Encryption1RTT:
-return h.appDataPackets.GetAckFrame(onlyIfQueued)
+return h.appDataPackets.GetAckFrame(now, onlyIfQueued)
 default:
 // 0-RTT packets can't contain ACK frames
 return nil
3  vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_tracker.go  (generated, vendored)
@@ -196,8 +196,7 @@ func (h *appDataReceivedPacketTracker) shouldQueueACK(pn protocol.PacketNumber,
 return false
 }

-func (h *appDataReceivedPacketTracker) GetAckFrame(onlyIfQueued bool) *wire.AckFrame {
-now := time.Now()
+func (h *appDataReceivedPacketTracker) GetAckFrame(now time.Time, onlyIfQueued bool) *wire.AckFrame {
 if onlyIfQueued && !h.ackQueued {
 if h.ackAlarm.IsZero() || h.ackAlarm.After(now) {
 return nil
vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go (generated, vendored): 161 changed lines

@@ -53,6 +53,12 @@ func newPacketNumberSpace(initialPN protocol.PacketNumber, isAppData bool) *pack
     }
 }
 
+type alarmTimer struct {
+    Time            time.Time
+    TimerType       logging.TimerType
+    EncryptionLevel protocol.EncryptionLevel
+}
+
 type sentPacketHandler struct {
     initialPackets   *packetNumberSpace
     handshakePackets *packetNumberSpace
@@ -90,7 +96,7 @@ type sentPacketHandler struct {
     numProbesToSend int
 
     // The alarm timeout
-    alarm time.Time
+    alarm alarmTimer
 
     enableECN  bool
     ecnTracker ecnHandler
@@ -155,7 +161,7 @@ func (h *sentPacketHandler) removeFromBytesInFlight(p *packet) {
     }
 }
 
-func (h *sentPacketHandler) DropPackets(encLevel protocol.EncryptionLevel) {
+func (h *sentPacketHandler) DropPackets(encLevel protocol.EncryptionLevel, now time.Time) {
     // The server won't await address validation after the handshake is confirmed.
     // This applies even if we didn't receive an ACK for a Handshake packet.
     if h.perspective == protocol.PerspectiveClient && encLevel == protocol.EncryptionHandshake {
@@ -179,6 +185,9 @@ func (h *sentPacketHandler) DropPackets(encLevel protocol.EncryptionLevel) {
     case protocol.EncryptionInitial:
         h.initialPackets = nil
     case protocol.EncryptionHandshake:
+        // Dropping the handshake packet number space means that the handshake is confirmed,
+        // see section 4.9.2 of RFC 9001.
+        h.handshakeConfirmed = true
         h.handshakePackets = nil
     case protocol.Encryption0RTT:
         // This function is only called when 0-RTT is rejected,
@@ -202,21 +211,21 @@ func (h *sentPacketHandler) DropPackets(encLevel protocol.EncryptionLevel) {
     h.ptoCount = 0
     h.numProbesToSend = 0
     h.ptoMode = SendNone
-    h.setLossDetectionTimer()
+    h.setLossDetectionTimer(now)
 }
 
-func (h *sentPacketHandler) ReceivedBytes(n protocol.ByteCount) {
+func (h *sentPacketHandler) ReceivedBytes(n protocol.ByteCount, t time.Time) {
     wasAmplificationLimit := h.isAmplificationLimited()
     h.bytesReceived += n
     if wasAmplificationLimit && !h.isAmplificationLimited() {
-        h.setLossDetectionTimer()
+        h.setLossDetectionTimer(t)
     }
 }
 
-func (h *sentPacketHandler) ReceivedPacket(l protocol.EncryptionLevel) {
+func (h *sentPacketHandler) ReceivedPacket(l protocol.EncryptionLevel, t time.Time) {
     if h.perspective == protocol.PerspectiveServer && l == protocol.EncryptionHandshake && !h.peerAddressValidated {
         h.peerAddressValidated = true
-        h.setLossDetectionTimer()
+        h.setLossDetectionTimer(t)
     }
 }
 
@@ -269,7 +278,7 @@ func (h *sentPacketHandler) SentPacket(
     if !isAckEliciting {
         pnSpace.history.SentNonAckElicitingPacket(pn)
         if !h.peerCompletedAddressValidation {
-            h.setLossDetectionTimer()
+            h.setLossDetectionTimer(t)
         }
         return
     }
@@ -289,7 +298,7 @@ func (h *sentPacketHandler) SentPacket(
     if h.tracer != nil && h.tracer.UpdatedMetrics != nil {
         h.tracer.UpdatedMetrics(h.rttStats, h.congestion.GetCongestionWindow(), h.bytesInFlight, h.packetsInFlight())
     }
-    h.setLossDetectionTimer()
+    h.setLossDetectionTimer(t)
 }
 
 func (h *sentPacketHandler) getPacketNumberSpace(encLevel protocol.EncryptionLevel) *packetNumberSpace {
@@ -322,7 +331,7 @@ func (h *sentPacketHandler) ReceivedAck(ack *wire.AckFrame, encLevel protocol.En
         h.peerCompletedAddressValidation = true
         h.logger.Debugf("Peer doesn't await address validation any longer.")
         // Make sure that the timer is reset, even if this ACK doesn't acknowledge any (ack-eliciting) packets.
-        h.setLossDetectionTimer()
+        h.setLossDetectionTimer(rcvTime)
     }
 
     priorInFlight := h.bytesInFlight
@@ -338,7 +347,7 @@ func (h *sentPacketHandler) ReceivedAck(ack *wire.AckFrame, encLevel protocol.En
         if encLevel == protocol.Encryption1RTT {
             ackDelay = min(ack.DelayTime, h.rttStats.MaxAckDelay())
         }
-        h.rttStats.UpdateRTT(rcvTime.Sub(p.SendTime), ackDelay, rcvTime)
+        h.rttStats.UpdateRTT(rcvTime.Sub(p.SendTime), ackDelay)
         if h.logger.Debug() {
             h.logger.Debugf("\tupdated RTT: %s (σ: %s)", h.rttStats.SmoothedRTT(), h.rttStats.MeanDeviation())
         }
@@ -387,7 +396,7 @@ func (h *sentPacketHandler) ReceivedAck(ack *wire.AckFrame, encLevel protocol.En
         h.tracer.UpdatedMetrics(h.rttStats, h.congestion.GetCongestionWindow(), h.bytesInFlight, h.packetsInFlight())
     }
 
-    h.setLossDetectionTimer()
+    h.setLossDetectionTimer(rcvTime)
     return acked1RTTPacket, nil
 }
 
@@ -498,14 +507,14 @@ func (h *sentPacketHandler) getScaledPTO(includeMaxAckDelay bool) time.Duration
 }
 
 // same logic as getLossTimeAndSpace, but for lastAckElicitingPacketTime instead of lossTime
-func (h *sentPacketHandler) getPTOTimeAndSpace() (pto time.Time, encLevel protocol.EncryptionLevel, ok bool) {
+func (h *sentPacketHandler) getPTOTimeAndSpace(now time.Time) (pto time.Time, encLevel protocol.EncryptionLevel, ok bool) {
     // We only send application data probe packets once the handshake is confirmed,
     // because before that, we don't have the keys to decrypt ACKs sent in 1-RTT packets.
     if !h.handshakeConfirmed && !h.hasOutstandingCryptoPackets() {
         if h.peerCompletedAddressValidation {
             return
         }
-        t := time.Now().Add(h.getScaledPTO(false))
+        t := now.Add(h.getScaledPTO(false))
         if h.initialPackets != nil {
             return t, protocol.EncryptionInitial, true
         }
@@ -545,61 +554,53 @@ func (h *sentPacketHandler) hasOutstandingCryptoPackets() bool {
     return false
 }
 
-func (h *sentPacketHandler) hasOutstandingPackets() bool {
-    return h.appDataPackets.history.HasOutstandingPackets() || h.hasOutstandingCryptoPackets()
+func (h *sentPacketHandler) setLossDetectionTimer(now time.Time) {
+    oldAlarm := h.alarm // only needed in case tracing is enabled
+    newAlarm := h.lossDetectionTime(now)
+    h.alarm = newAlarm
+
+    if newAlarm.Time.IsZero() && !oldAlarm.Time.IsZero() {
+        h.logger.Debugf("Canceling loss detection timer.")
+        if h.tracer != nil && h.tracer.LossTimerCanceled != nil {
+            h.tracer.LossTimerCanceled()
+        }
+    }
+
+    if h.tracer != nil && h.tracer.SetLossTimer != nil && newAlarm != oldAlarm {
+        h.tracer.SetLossTimer(newAlarm.TimerType, newAlarm.EncryptionLevel, newAlarm.Time)
+    }
 }
 
-func (h *sentPacketHandler) setLossDetectionTimer() {
-    oldAlarm := h.alarm // only needed in case tracing is enabled
+func (h *sentPacketHandler) lossDetectionTime(now time.Time) alarmTimer {
+    // cancel the alarm if no packets are outstanding
+    if h.peerCompletedAddressValidation &&
+        !h.hasOutstandingCryptoPackets() && !h.appDataPackets.history.HasOutstandingPackets() {
+        return alarmTimer{}
+    }
+
+    // cancel the alarm if amplification limited
+    if h.isAmplificationLimited() {
+        return alarmTimer{}
+    }
+
+    // early retransmit timer or time loss detection
     lossTime, encLevel := h.getLossTimeAndSpace()
     if !lossTime.IsZero() {
-        // Early retransmit timer or time loss detection.
-        h.alarm = lossTime
-        if h.tracer != nil && h.tracer.SetLossTimer != nil && h.alarm != oldAlarm {
-            h.tracer.SetLossTimer(logging.TimerTypeACK, encLevel, h.alarm)
+        return alarmTimer{
+            Time:            lossTime,
+            TimerType:       logging.TimerTypeACK,
+            EncryptionLevel: encLevel,
         }
-        return
     }
 
-    // Cancel the alarm if amplification limited.
-    if h.isAmplificationLimited() {
-        h.alarm = time.Time{}
-        if !oldAlarm.IsZero() {
-            h.logger.Debugf("Canceling loss detection timer. Amplification limited.")
-            if h.tracer != nil && h.tracer.LossTimerCanceled != nil {
-                h.tracer.LossTimerCanceled()
-            }
-        }
-        return
-    }
-
-    // Cancel the alarm if no packets are outstanding
-    if !h.hasOutstandingPackets() && h.peerCompletedAddressValidation {
-        h.alarm = time.Time{}
-        if !oldAlarm.IsZero() {
-            h.logger.Debugf("Canceling loss detection timer. No packets in flight.")
-            if h.tracer != nil && h.tracer.LossTimerCanceled != nil {
-                h.tracer.LossTimerCanceled()
-            }
-        }
-        return
-    }
-
-    // PTO alarm
-    ptoTime, encLevel, ok := h.getPTOTimeAndSpace()
+    ptoTime, encLevel, ok := h.getPTOTimeAndSpace(now)
     if !ok {
-        if !oldAlarm.IsZero() {
-            h.alarm = time.Time{}
-            h.logger.Debugf("Canceling loss detection timer. No PTO needed..")
-            if h.tracer != nil && h.tracer.LossTimerCanceled != nil {
-                h.tracer.LossTimerCanceled()
-            }
-        }
-        return
+        return alarmTimer{}
     }
-    h.alarm = ptoTime
-    if h.tracer != nil && h.tracer.SetLossTimer != nil && h.alarm != oldAlarm {
-        h.tracer.SetLossTimer(logging.TimerTypePTO, encLevel, h.alarm)
+    return alarmTimer{
+        Time:            ptoTime,
+        TimerType:       logging.TimerTypePTO,
+        EncryptionLevel: encLevel,
     }
 }
@@ -623,7 +624,7 @@ func (h *sentPacketHandler) detectLostPackets(now time.Time, encLevel protocol.E
         }
 
         var packetLost bool
-        if p.SendTime.Before(lostSendTime) {
+        if !p.SendTime.After(lostSendTime) {
             packetLost = true
             if !p.skippedPacket {
                 if h.logger.Debug() {
@@ -669,8 +670,8 @@ func (h *sentPacketHandler) detectLostPackets(now time.Time, encLevel protocol.E
     })
 }
 
-func (h *sentPacketHandler) OnLossDetectionTimeout() error {
-    defer h.setLossDetectionTimer()
+func (h *sentPacketHandler) OnLossDetectionTimeout(now time.Time) error {
+    defer h.setLossDetectionTimer(now)
     earliestLossTime, encLevel := h.getLossTimeAndSpace()
     if !earliestLossTime.IsZero() {
         if h.logger.Debug() {
@@ -680,13 +681,13 @@ func (h *sentPacketHandler) OnLossDetectionTimeout() error {
             h.tracer.LossTimerExpired(logging.TimerTypeACK, encLevel)
         }
         // Early retransmit or time loss detection
-        return h.detectLostPackets(time.Now(), encLevel)
+        return h.detectLostPackets(now, encLevel)
     }
 
     // PTO
-    // When all outstanding are acknowledged, the alarm is canceled in
-    // setLossDetectionTimer. This doesn't reset the timer in the session though.
-    // When OnAlarm is called, we therefore need to make sure that there are
+    // When all outstanding are acknowledged, the alarm is canceled in setLossDetectionTimer.
+    // However, there's no way to reset the timer in the connection.
+    // When OnLossDetectionTimeout is called, we therefore need to make sure that there are
     // actually packets outstanding.
     if h.bytesInFlight == 0 && !h.peerCompletedAddressValidation {
         h.ptoCount++
@@ -701,7 +702,7 @@ func (h *sentPacketHandler) OnLossDetectionTimeout() error {
         return nil
     }
 
-    _, encLevel, ok := h.getPTOTimeAndSpace()
+    _, encLevel, ok := h.getPTOTimeAndSpace(now)
     if !ok {
         return nil
     }
@@ -739,7 +740,7 @@ func (h *sentPacketHandler) OnLossDetectionTimeout() error {
 }
 
 func (h *sentPacketHandler) GetLossDetectionTimeout() time.Time {
-    return h.alarm
+    return h.alarm.Time
 }
 
 func (h *sentPacketHandler) ECNMode(isShortHeaderPacket bool) protocol.ECN {
@@ -864,7 +865,7 @@ func (h *sentPacketHandler) queueFramesForRetransmission(p *packet) {
     p.Frames = nil
 }
 
-func (h *sentPacketHandler) ResetForRetry(now time.Time) error {
+func (h *sentPacketHandler) ResetForRetry(now time.Time) {
     h.bytesInFlight = 0
     var firstPacketSendTime time.Time
     h.initialPackets.history.Iterate(func(p *packet) (bool, error) {
@@ -890,7 +891,7 @@ func (h *sentPacketHandler) ResetForRetry(now time.Time) error {
     // Otherwise, we don't know which Initial the Retry was sent in response to.
     if h.ptoCount == 0 {
         // Don't set the RTT to a value lower than 5ms here.
-        h.rttStats.UpdateRTT(max(minRTTAfterRetry, now.Sub(firstPacketSendTime)), 0, now)
+        h.rttStats.UpdateRTT(max(minRTTAfterRetry, now.Sub(firstPacketSendTime)), 0)
         if h.logger.Debug() {
             h.logger.Debugf("\tupdated RTT: %s (σ: %s)", h.rttStats.SmoothedRTT(), h.rttStats.MeanDeviation())
         }
@@ -901,28 +902,14 @@ func (h *sentPacketHandler) ResetForRetry(now time.Time) error {
     h.initialPackets = newPacketNumberSpace(h.initialPackets.pns.Peek(), false)
     h.appDataPackets = newPacketNumberSpace(h.appDataPackets.pns.Peek(), true)
     oldAlarm := h.alarm
-    h.alarm = time.Time{}
+    h.alarm = alarmTimer{}
     if h.tracer != nil {
         if h.tracer.UpdatedPTOCount != nil {
             h.tracer.UpdatedPTOCount(0)
         }
-        if !oldAlarm.IsZero() && h.tracer.LossTimerCanceled != nil {
+        if !oldAlarm.Time.IsZero() && h.tracer.LossTimerCanceled != nil {
             h.tracer.LossTimerCanceled()
         }
     }
     h.ptoCount = 0
-    return nil
-}
-
-func (h *sentPacketHandler) SetHandshakeConfirmed() {
-    if h.initialPackets != nil {
-        panic("didn't drop initial correctly")
-    }
-    if h.handshakePackets != nil {
-        panic("didn't drop handshake correctly")
-    }
-    h.handshakeConfirmed = true
-    // We don't send PTOs for application data packets before the handshake completes.
-    // Make sure the timer is armed now, if necessary.
-    h.setLossDetectionTimer()
 }
vendor/github.com/quic-go/quic-go/internal/flowcontrol/base_flow_controller.go (generated, vendored): 16 changed lines

@@ -36,7 +36,7 @@ type baseFlowController struct {
 // For every offset, it only returns true once.
 // If it is blocked, the offset is returned.
 func (c *baseFlowController) IsNewlyBlocked() (bool, protocol.ByteCount) {
-    if c.sendWindowSize() != 0 || c.sendWindow == c.lastBlockedAt {
+    if c.SendWindowSize() != 0 || c.sendWindow == c.lastBlockedAt {
         return false, 0
     }
     c.lastBlockedAt = c.sendWindow
@@ -56,7 +56,7 @@ func (c *baseFlowController) UpdateSendWindow(offset protocol.ByteCount) (update
     return false
 }
 
-func (c *baseFlowController) sendWindowSize() protocol.ByteCount {
+func (c *baseFlowController) SendWindowSize() protocol.ByteCount {
     // this only happens during connection establishment, when data is sent before we receive the peer's transport parameters
     if c.bytesSent > c.sendWindow {
         return 0
@@ -66,11 +66,6 @@ func (c *baseFlowController) sendWindowSize() protocol.ByteCount {
 
 // needs to be called with locked mutex
 func (c *baseFlowController) addBytesRead(n protocol.ByteCount) {
-    // pretend we sent a WindowUpdate when reading the first byte
-    // this way auto-tuning of the window size already works for the first WindowUpdate
-    if c.bytesRead == 0 {
-        c.startNewAutoTuningEpoch(time.Now())
-    }
     c.bytesRead += n
 }
 
@@ -82,19 +77,19 @@ func (c *baseFlowController) hasWindowUpdate() bool {
 
 // getWindowUpdate updates the receive window, if necessary
 // it returns the new offset
-func (c *baseFlowController) getWindowUpdate() protocol.ByteCount {
+func (c *baseFlowController) getWindowUpdate(now time.Time) protocol.ByteCount {
     if !c.hasWindowUpdate() {
         return 0
     }
 
-    c.maybeAdjustWindowSize()
+    c.maybeAdjustWindowSize(now)
     c.receiveWindow = c.bytesRead + c.receiveWindowSize
     return c.receiveWindow
 }
 
 // maybeAdjustWindowSize increases the receiveWindowSize if we're sending updates too often.
 // For details about auto-tuning, see https://docs.google.com/document/d/1SExkMmGiz8VYzV3s9E35JQlJ73vhzCekKkDi85F1qCE/edit?usp=sharing.
-func (c *baseFlowController) maybeAdjustWindowSize() {
+func (c *baseFlowController) maybeAdjustWindowSize(now time.Time) {
     bytesReadInEpoch := c.bytesRead - c.epochStartOffset
     // don't do anything if less than half the window has been consumed
     if bytesReadInEpoch <= c.receiveWindowSize/2 {
@@ -106,7 +101,6 @@ func (c *baseFlowController) maybeAdjustWindowSize() {
     }
 
     fraction := float64(bytesReadInEpoch) / float64(c.receiveWindowSize)
-    now := time.Now()
     if now.Sub(c.epochStartTime) < time.Duration(4*fraction*float64(rtt)) {
         // window is consumed too fast, try to increase the window size
         newSize := min(2*c.receiveWindowSize, c.maxReceiveWindowSize)
vendor/github.com/quic-go/quic-go/internal/flowcontrol/connection_flow_controller.go (generated, vendored): 43 changed lines

@@ -24,7 +24,7 @@ func NewConnectionFlowController(
     allowWindowIncrease func(size protocol.ByteCount) bool,
     rttStats *utils.RTTStats,
     logger utils.Logger,
-) ConnectionFlowController {
+) *connectionFlowController {
     return &connectionFlowController{
         baseFlowController: baseFlowController{
             rttStats: rttStats,
@@ -37,16 +37,17 @@ func NewConnectionFlowController(
     }
 }
 
-func (c *connectionFlowController) SendWindowSize() protocol.ByteCount {
-    return c.baseFlowController.sendWindowSize()
-}
-
 // IncrementHighestReceived adds an increment to the highestReceived value
-func (c *connectionFlowController) IncrementHighestReceived(increment protocol.ByteCount) error {
+func (c *connectionFlowController) IncrementHighestReceived(increment protocol.ByteCount, now time.Time) error {
     c.mutex.Lock()
     defer c.mutex.Unlock()
 
+    // If this is the first frame received on this connection, start flow-control auto-tuning.
+    if c.highestReceived == 0 {
+        c.startNewAutoTuningEpoch(now)
+    }
     c.highestReceived += increment
 
     if c.checkFlowControlViolation() {
         return &qerr.TransportError{
             ErrorCode: qerr.FlowControlError,
@@ -56,40 +57,47 @@ func (c *connectionFlowController) IncrementHighestReceived(increment protocol.B
     return nil
 }
 
-func (c *connectionFlowController) AddBytesRead(n protocol.ByteCount) {
+func (c *connectionFlowController) AddBytesRead(n protocol.ByteCount) (hasWindowUpdate bool) {
     c.mutex.Lock()
+    defer c.mutex.Unlock()
+
     c.baseFlowController.addBytesRead(n)
-    c.mutex.Unlock()
+    return c.baseFlowController.hasWindowUpdate()
 }
 
-func (c *connectionFlowController) GetWindowUpdate() protocol.ByteCount {
+func (c *connectionFlowController) GetWindowUpdate(now time.Time) protocol.ByteCount {
     c.mutex.Lock()
+    defer c.mutex.Unlock()
+
     oldWindowSize := c.receiveWindowSize
-    offset := c.baseFlowController.getWindowUpdate()
+    offset := c.baseFlowController.getWindowUpdate(now)
     if c.logger.Debug() && oldWindowSize < c.receiveWindowSize {
         c.logger.Debugf("Increasing receive flow control window for the connection to %d kB", c.receiveWindowSize/(1<<10))
     }
-    c.mutex.Unlock()
     return offset
 }
 
 // EnsureMinimumWindowSize sets a minimum window size
 // it should make sure that the connection-level window is increased when a stream-level window grows
-func (c *connectionFlowController) EnsureMinimumWindowSize(inc protocol.ByteCount) {
+func (c *connectionFlowController) EnsureMinimumWindowSize(inc protocol.ByteCount, now time.Time) {
     c.mutex.Lock()
-    if inc > c.receiveWindowSize {
-        c.logger.Debugf("Increasing receive flow control window for the connection to %d kB, in response to stream flow control window increase", c.receiveWindowSize/(1<<10))
+    defer c.mutex.Unlock()
+
+    if inc <= c.receiveWindowSize {
+        return
+    }
     newSize := min(inc, c.maxReceiveWindowSize)
     if delta := newSize - c.receiveWindowSize; delta > 0 && c.allowWindowIncrease(delta) {
         c.receiveWindowSize = newSize
+        if c.logger.Debug() {
+            c.logger.Debugf("Increasing receive flow control window for the connection to %d, in response to stream flow control window increase", newSize)
+        }
     }
-    c.startNewAutoTuningEpoch(time.Now())
-    }
-    c.mutex.Unlock()
+    c.startNewAutoTuningEpoch(now)
 }
 
 // Reset rests the flow controller. This happens when 0-RTT is rejected.
-// All stream data is invalidated, it's if we had never opened a stream and never sent any data.
+// All stream data is invalidated, it's as if we had never opened a stream and never sent any data.
 // At that point, we only have sent stream data, but we didn't have the keys to open 1-RTT keys yet.
 func (c *connectionFlowController) Reset() error {
     c.mutex.Lock()
@@ -100,5 +108,6 @@ func (c *connectionFlowController) Reset() error {
     }
     c.bytesSent = 0
     c.lastBlockedAt = 0
+    c.sendWindow = 0
     return nil
 }
vendor/github.com/quic-go/quic-go/internal/flowcontrol/interface.go (generated, vendored): 18 changed lines

@@ -1,6 +1,10 @@
 package flowcontrol
 
-import "github.com/quic-go/quic-go/internal/protocol"
+import (
+    "time"
+
+    "github.com/quic-go/quic-go/internal/protocol"
+)
 
 type flowController interface {
     // for sending
@@ -8,17 +12,17 @@ type flowController interface {
     UpdateSendWindow(protocol.ByteCount) (updated bool)
     AddBytesSent(protocol.ByteCount)
     // for receiving
-    GetWindowUpdate() protocol.ByteCount // returns 0 if no update is necessary
+    GetWindowUpdate(time.Time) protocol.ByteCount // returns 0 if no update is necessary
 }
 
 // A StreamFlowController is a flow controller for a QUIC stream.
 type StreamFlowController interface {
     flowController
-    AddBytesRead(protocol.ByteCount) (shouldQueueWindowUpdate bool)
+    AddBytesRead(protocol.ByteCount) (hasStreamWindowUpdate, hasConnWindowUpdate bool)
     // UpdateHighestReceived is called when a new highest offset is received
     // final has to be to true if this is the final offset of the stream,
     // as contained in a STREAM frame with FIN bit, and the RESET_STREAM frame
-    UpdateHighestReceived(offset protocol.ByteCount, final bool) error
+    UpdateHighestReceived(offset protocol.ByteCount, final bool, now time.Time) error
     // Abandon is called when reading from the stream is aborted early,
     // and there won't be any further calls to AddBytesRead.
     Abandon()
@@ -28,7 +32,7 @@ type StreamFlowController interface {
 // The ConnectionFlowController is the flow controller for the connection.
 type ConnectionFlowController interface {
     flowController
-    AddBytesRead(protocol.ByteCount)
+    AddBytesRead(protocol.ByteCount) (hasWindowUpdate bool)
     Reset() error
     IsNewlyBlocked() (bool, protocol.ByteCount)
 }
@@ -37,7 +41,7 @@ type connectionFlowControllerI interface {
     ConnectionFlowController
     // The following two methods are not supposed to be called from outside this packet, but are needed internally
     // for sending
-    EnsureMinimumWindowSize(protocol.ByteCount)
+    EnsureMinimumWindowSize(protocol.ByteCount, time.Time)
     // for receiving
-    IncrementHighestReceived(protocol.ByteCount) error
+    IncrementHighestReceived(protocol.ByteCount, time.Time) error
 }
vendor/github.com/quic-go/quic-go/internal/flowcontrol/stream_flow_controller.go (generated, vendored): 35 changed lines

@@ -2,6 +2,7 @@ package flowcontrol
 
 import (
     "fmt"
+    "time"
 
     "github.com/quic-go/quic-go/internal/protocol"
     "github.com/quic-go/quic-go/internal/qerr"
@@ -45,7 +46,7 @@ func NewStreamFlowController(
 }
 
 // UpdateHighestReceived updates the highestReceived value, if the offset is higher.
-func (c *streamFlowController) UpdateHighestReceived(offset protocol.ByteCount, final bool) error {
+func (c *streamFlowController) UpdateHighestReceived(offset protocol.ByteCount, final bool, now time.Time) error {
     // If the final offset for this stream is already known, check for consistency.
     if c.receivedFinalOffset {
         // If we receive another final offset, check that it's the same.
@@ -70,9 +71,8 @@ func (c *streamFlowController) UpdateHighestReceived(offset protocol.ByteCount,
     if offset == c.highestReceived {
         return nil
     }
-    // A higher offset was received before.
-    // This can happen due to reordering.
-    if offset <= c.highestReceived {
+    // A higher offset was received before. This can happen due to reordering.
+    if offset < c.highestReceived {
         if final {
             return &qerr.TransportError{
                 ErrorCode: qerr.FinalSizeError,
@@ -82,23 +82,28 @@ func (c *streamFlowController) UpdateHighestReceived(offset protocol.ByteCount,
         return nil
     }
 
+    // If this is the first frame received for this stream, start flow-control auto-tuning.
+    if c.highestReceived == 0 {
+        c.startNewAutoTuningEpoch(now)
+    }
     increment := offset - c.highestReceived
     c.highestReceived = offset
 
     if c.checkFlowControlViolation() {
         return &qerr.TransportError{
             ErrorCode:    qerr.FlowControlError,
             ErrorMessage: fmt.Sprintf("received %d bytes on stream %d, allowed %d bytes", offset, c.streamID, c.receiveWindow),
         }
     }
-    return c.connection.IncrementHighestReceived(increment)
+    return c.connection.IncrementHighestReceived(increment, now)
 }
 
-func (c *streamFlowController) AddBytesRead(n protocol.ByteCount) (shouldQueueWindowUpdate bool) {
+func (c *streamFlowController) AddBytesRead(n protocol.ByteCount) (hasStreamWindowUpdate, hasConnWindowUpdate bool) {
     c.mutex.Lock()
     c.baseFlowController.addBytesRead(n)
-    shouldQueueWindowUpdate = c.shouldQueueWindowUpdate()
+    hasStreamWindowUpdate = c.shouldQueueWindowUpdate()
     c.mutex.Unlock()
-    c.connection.AddBytesRead(n)
+    hasConnWindowUpdate = c.connection.AddBytesRead(n)
     return
 }
 
@@ -118,7 +123,7 @@ func (c *streamFlowController) AddBytesSent(n protocol.ByteCount) {
 }
 
 func (c *streamFlowController) SendWindowSize() protocol.ByteCount {
-    return min(c.baseFlowController.sendWindowSize(), c.connection.SendWindowSize())
+    return min(c.baseFlowController.SendWindowSize(), c.connection.SendWindowSize())
 }
 
 func (c *streamFlowController) IsNewlyBlocked() bool {
@@ -130,20 +135,20 @@ func (c *streamFlowController) shouldQueueWindowUpdate() bool {
     return !c.receivedFinalOffset && c.hasWindowUpdate()
 }
 
-func (c *streamFlowController) GetWindowUpdate() protocol.ByteCount {
+func (c *streamFlowController) GetWindowUpdate(now time.Time) protocol.ByteCount {
     // If we already received the final offset for this stream, the peer won't need any additional flow control credit.
     if c.receivedFinalOffset {
         return 0
     }
 
-    // Don't use defer for unlocking the mutex here, GetWindowUpdate() is called frequently and defer shows up in the profiler
     c.mutex.Lock()
+    defer c.mutex.Unlock()
+
     oldWindowSize := c.receiveWindowSize
-    offset := c.baseFlowController.getWindowUpdate()
+    offset := c.baseFlowController.getWindowUpdate(now)
     if c.receiveWindowSize > oldWindowSize { // auto-tuning enlarged the window size
-        c.logger.Debugf("Increasing receive flow control window for stream %d to %d kB", c.streamID, c.receiveWindowSize/(1<<10))
-        c.connection.EnsureMinimumWindowSize(protocol.ByteCount(float64(c.receiveWindowSize) * protocol.ConnectionFlowControlMultiplier))
+        c.logger.Debugf("Increasing receive flow control window for stream %d to %d", c.streamID, c.receiveWindowSize)
+        c.connection.EnsureMinimumWindowSize(protocol.ByteCount(float64(c.receiveWindowSize)*protocol.ConnectionFlowControlMultiplier), now)
     }
-    c.mutex.Unlock()
     return offset
 }
vendor/github.com/quic-go/quic-go/internal/handshake/retry.go (generated, vendored): 13 changed lines

@@ -10,16 +10,13 @@ import (
     "github.com/quic-go/quic-go/internal/protocol"
 )
 
+// Instead of using an init function, the AEADs are created lazily.
+// For more details see https://github.com/quic-go/quic-go/issues/4894.
 var (
     retryAEADv1 cipher.AEAD // used for QUIC v1 (RFC 9000)
     retryAEADv2 cipher.AEAD // used for QUIC v2 (RFC 9369)
 )
 
-func init() {
-    retryAEADv1 = initAEAD([16]byte{0xbe, 0x0c, 0x69, 0x0b, 0x9f, 0x66, 0x57, 0x5a, 0x1d, 0x76, 0x6b, 0x54, 0xe3, 0x68, 0xc8, 0x4e})
-    retryAEADv2 = initAEAD([16]byte{0x8f, 0xb4, 0xb0, 0x1b, 0x56, 0xac, 0x48, 0xe2, 0x60, 0xfb, 0xcb, 0xce, 0xad, 0x7c, 0xcc, 0x92})
-}
-
 func initAEAD(key [16]byte) cipher.AEAD {
     aes, err := aes.NewCipher(key[:])
     if err != nil {
@@ -52,8 +49,14 @@ func GetRetryIntegrityTag(retry []byte, origDestConnID protocol.ConnectionID, ve
     var tag [16]byte
     var sealed []byte
     if version == protocol.Version2 {
+        if retryAEADv2 == nil {
+            retryAEADv2 = initAEAD([16]byte{0x8f, 0xb4, 0xb0, 0x1b, 0x56, 0xac, 0x48, 0xe2, 0x60, 0xfb, 0xcb, 0xce, 0xad, 0x7c, 0xcc, 0x92})
+        }
         sealed = retryAEADv2.Seal(tag[:0], retryNonceV2[:], nil, retryBuf.Bytes())
     } else {
+        if retryAEADv1 == nil {
+            retryAEADv1 = initAEAD([16]byte{0xbe, 0x0c, 0x69, 0x0b, 0x9f, 0x66, 0x57, 0x5a, 0x1d, 0x76, 0x6b, 0x54, 0xe3, 0x68, 0xc8, 0x4e})
+        }
         sealed = retryAEADv1.Seal(tag[:0], retryNonceV1[:], nil, retryBuf.Bytes())
     }
     if len(sealed) != 16 {
vendor/github.com/quic-go/quic-go/internal/protocol/params.go (generated, vendored): 4 changed lines

@@ -102,10 +102,6 @@ const DefaultIdleTimeout = 30 * time.Second
 // DefaultHandshakeIdleTimeout is the default idle timeout used before handshake completion.
 const DefaultHandshakeIdleTimeout = 5 * time.Second
 
-// MaxKeepAliveInterval is the maximum time until we send a packet to keep a connection alive.
-// It should be shorter than the time that NATs clear their mapping.
-const MaxKeepAliveInterval = 20 * time.Second
-
 // RetiredConnectionIDDeleteTimeout is the time we keep closed connections around in order to retransmit the CONNECTION_CLOSE.
 // after this time all information about the old connection will be deleted
 const RetiredConnectionIDDeleteTimeout = 5 * time.Second
vendor/github.com/quic-go/quic-go/internal/qerr/errors.go (generated, vendored): 39 changed lines

@@ -48,21 +48,16 @@ func (e *TransportError) Error() string {
     return str + ": " + msg
 }
 
-func (e *TransportError) Is(target error) bool {
-    return target == net.ErrClosed
-}
-
-func (e *TransportError) Unwrap() error {
-    return e.error
+func (e *TransportError) Unwrap() []error { return []error{net.ErrClosed, e.error} }
+
+func (e *TransportError) Is(target error) bool {
+    t, ok := target.(*TransportError)
+    return ok && e.ErrorCode == t.ErrorCode && e.FrameType == t.FrameType && e.Remote == t.Remote
 }
 
 // An ApplicationErrorCode is an application-defined error code.
 type ApplicationErrorCode uint64
 
-func (e *ApplicationError) Is(target error) bool {
-    return target == net.ErrClosed
-}
-
 // A StreamErrorCode is an error code used to cancel streams.
 type StreamErrorCode uint64
 
@@ -81,6 +76,13 @@ func (e *ApplicationError) Error() string {
     return fmt.Sprintf("Application error %#x (%s): %s", e.ErrorCode, getRole(e.Remote), e.ErrorMessage)
 }
 
+func (e *ApplicationError) Unwrap() error { return net.ErrClosed }
+
+func (e *ApplicationError) Is(target error) bool {
+    t, ok := target.(*ApplicationError)
+    return ok && e.ErrorCode == t.ErrorCode && e.Remote == t.Remote
+}
+
 type IdleTimeoutError struct{}
 
 var _ error = &IdleTimeoutError{}
@@ -88,7 +90,7 @@ var _ error = &IdleTimeoutError{}
 func (e *IdleTimeoutError) Timeout() bool   { return true }
 func (e *IdleTimeoutError) Temporary() bool { return false }
 func (e *IdleTimeoutError) Error() string   { return "timeout: no recent network activity" }
-func (e *IdleTimeoutError) Is(target error) bool { return target == net.ErrClosed }
+func (e *IdleTimeoutError) Unwrap() error { return net.ErrClosed }
 
 type HandshakeTimeoutError struct{}
 
@@ -97,7 +99,7 @@ var _ error = &HandshakeTimeoutError{}
 func (e *HandshakeTimeoutError) Timeout() bool   { return true }
 func (e *HandshakeTimeoutError) Temporary() bool { return false }
 func (e *HandshakeTimeoutError) Error() string   { return "timeout: handshake did not complete in time" }
-func (e *HandshakeTimeoutError) Is(target error) bool { return target == net.ErrClosed }
+func (e *HandshakeTimeoutError) Unwrap() error { return net.ErrClosed }
 
 // A VersionNegotiationError occurs when the client and the server can't agree on a QUIC version.
 type VersionNegotiationError struct {
@@ -109,25 +111,18 @@ func (e *VersionNegotiationError) Error() string {
     return fmt.Sprintf("no compatible QUIC version found (we support %s, server offered %s)", e.Ours, e.Theirs)
 }
 
-func (e *VersionNegotiationError) Is(target error) bool {
-    return target == net.ErrClosed
-}
+func (e *VersionNegotiationError) Unwrap() error { return net.ErrClosed }
 
 // A StatelessResetError occurs when we receive a stateless reset.
-type StatelessResetError struct {
-    Token protocol.StatelessResetToken
-}
+type StatelessResetError struct{}
 
 var _ net.Error = &StatelessResetError{}
 
 func (e *StatelessResetError) Error() string {
-    return fmt.Sprintf("received a stateless reset with token %x", e.Token)
-}
-
-func (e *StatelessResetError) Is(target error) bool {
-    return target == net.ErrClosed
+    return "received a stateless reset"
 }
 
+func (e *StatelessResetError) Unwrap() error   { return net.ErrClosed }
 func (e *StatelessResetError) Timeout() bool   { return false }
 func (e *StatelessResetError) Temporary() bool { return true }
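The error types above no longer match net.ErrClosed through custom Is methods; they expose it via Unwrap instead, while Is now compares two errors of the same type by their fields. A minimal sketch of the resulting errors.Is behaviour, assuming the exported TransportError alias and FlowControlError code in the top-level quic package (the snippet is illustrative, not part of this diff):

package main

import (
    "errors"
    "fmt"
    "net"

    "github.com/quic-go/quic-go"
)

func main() {
    // a transport error as a connection might surface it (hypothetical value)
    var err error = &quic.TransportError{ErrorCode: quic.FlowControlError, Remote: true}

    // still treated as a closed connection, now via Unwrap() []error
    fmt.Println(errors.Is(err, net.ErrClosed)) // true

    // two transport errors now match when error code, frame type and origin agree
    fmt.Println(errors.Is(err, &quic.TransportError{ErrorCode: quic.FlowControlError, Remote: true})) // true
}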
vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go (generated, vendored): 2 changed lines

@@ -58,7 +58,7 @@ func (r *RTTStats) PTO(includeMaxAckDelay bool) time.Duration {
 }
 
 // UpdateRTT updates the RTT based on a new sample.
-func (r *RTTStats) UpdateRTT(sendDelta, ackDelay time.Duration, now time.Time) {
+func (r *RTTStats) UpdateRTT(sendDelta, ackDelay time.Duration) {
     if sendDelta <= 0 {
         return
     }
Some files were not shown because too many files have changed in this diff.