Mirror of https://github.com/DNSCrypt/dnscrypt-proxy.git (synced 2025-03-04 02:14:40 +01:00)

Commit 699a6a1ebc (parent efe8b7824c): go get -u ./...; go mod tidy
224 changed files with 12493 additions and 4330 deletions
go.mod (22 lines changed)

@ -21,32 +21,32 @@ require (
 	github.com/opencoff/go-sieve v0.2.1
 	github.com/powerman/check v1.8.0
 	github.com/quic-go/quic-go v0.48.2
-	golang.org/x/crypto v0.31.0
+	golang.org/x/crypto v0.32.0
 	golang.org/x/net v0.33.0
-	golang.org/x/sys v0.28.0
+	golang.org/x/sys v0.29.0
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1
 )

 require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
+	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
-	github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect
+	github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
 	github.com/hashicorp/go-syslog v1.0.0 // indirect
-	github.com/hashicorp/golang-lru v0.5.0 // indirect
-	github.com/onsi/ginkgo/v2 v2.9.5 // indirect
+	github.com/hashicorp/golang-lru v1.0.2 // indirect
+	github.com/onsi/ginkgo/v2 v2.22.2 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/powerman/deepequal v0.1.0 // indirect
 	github.com/quic-go/qpack v0.5.1 // indirect
 	github.com/smartystreets/goconvey v1.8.1 // indirect
-	go.uber.org/mock v0.4.0 // indirect
-	golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
-	golang.org/x/mod v0.18.0 // indirect
+	go.uber.org/mock v0.5.0 // indirect
+	golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 // indirect
+	golang.org/x/mod v0.22.0 // indirect
 	golang.org/x/sync v0.10.0 // indirect
 	golang.org/x/text v0.21.0 // indirect
-	golang.org/x/tools v0.22.0 // indirect
+	golang.org/x/tools v0.28.0 // indirect
 	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
 	google.golang.org/grpc v1.56.3 // indirect
-	google.golang.org/protobuf v1.34.2 // indirect
+	google.golang.org/protobuf v1.36.1 // indirect
 )
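The dependency bumps above come straight from the commit title (go get -u ./... followed by go mod tidy). As a side note, the module versions a built binary actually resolved can be inspected at runtime; the following is a minimal, hedged sketch using only the standard library, not code from this repository.

package main

import (
	"fmt"
	"runtime/debug"
)

// Print the dependency versions embedded in the binary's build info.
func main() {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no build info (binary was not built from a module)")
		return
	}
	for _, dep := range info.Deps {
		fmt.Println(dep.Path, dep.Version)
	}
}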
go.sum (61 lines changed)

@ -2,28 +2,24 @@ github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0
|
|||
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
|
||||
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185 h1:3T8ZyTDp5QxTx3NU48JVb2u+75xc040fofcBaN+6jPA=
|
||||
github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185/go.mod h1:cFRxtTwTOJkz2x3rQUNCYKWC93yP1VKjR8NUhqFxZNU=
|
||||
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
|
||||
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
|
||||
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
|
||||
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
||||
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
|
||||
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
|
||||
|
@ -32,11 +28,11 @@ github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwM
|
|||
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
||||
github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
|
||||
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb h1:PGufWXXDq9yaev6xX1YQauaO1MV90e6Mpoq1I7Lz/VM=
|
||||
github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/jedisct1/dlog v0.0.0-20241212093805-3c5fd791b405 h1:6j/0utSiy3KhZSpFJgobk+ME1BIwXeq9jepJaDLW3Yg=
|
||||
github.com/jedisct1/dlog v0.0.0-20241212093805-3c5fd791b405/go.mod h1:OO1HpQNlMCMaPdHPuI00fhChZQZ8npbVTTjMvJUxUqQ=
|
||||
github.com/jedisct1/go-clocksmith v0.0.0-20230211133011-392c1afea73e h1:tzG4EjKgHIqKVkLIAC4pXTIapuM2BR05uXokEEysAXA=
|
||||
|
@ -57,10 +53,10 @@ github.com/kardianos/service v1.2.2 h1:ZvePhAHfvo0A7Mftk/tEzqEZ7Q4lgnR8sGz4xu1YX
|
|||
github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
|
||||
github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ=
|
||||
github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ=
|
||||
github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
|
||||
github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
|
||||
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
|
||||
github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
|
||||
github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
|
||||
github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
|
||||
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
|
||||
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
|
||||
github.com/opencoff/go-sieve v0.2.1 h1:5Pv6rd3zRquNmXcYHFndjVoolTgcv0ua2XTdMQ+gw0M=
|
||||
github.com/opencoff/go-sieve v0.2.1/go.mod h1:CndxLpW4R8fDq04XfBSCOZ+qWwDCcxjfUJbr0GPqWHY=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
|
@ -79,33 +75,30 @@ github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGB
|
|||
github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec=
|
||||
github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY=
|
||||
github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
|
||||
go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
|
||||
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
|
||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
|
||||
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
|
||||
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
|
||||
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
|
||||
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
|
||||
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
|
||||
golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 h1:9kj3STMvgqy3YA4VQXBrN7925ICMxD5wzMRcgA30588=
|
||||
golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c=
|
||||
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
|
||||
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
|
||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
||||
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
|
||||
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
|
||||
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
|
||||
golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
|
||||
golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
|
||||
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
|
||||
|
@ -113,11 +106,9 @@ google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc=
|
|||
google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
|
||||
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
|
||||
google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
|
|
@ -1,5 +1,24 @@
|
|||
# Changelog
|
||||
|
||||
## Release 3.2.3 (2022-11-29)
|
||||
|
||||
### Changed
|
||||
|
||||
- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi)
|
||||
- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero)
|
||||
- #353: Updated masterminds/semver which included bug fixes
|
||||
- #354: Updated golang.org/x/crypto which included bug fixes
|
||||
|
||||
## Release 3.2.2 (2021-02-04)
|
||||
|
||||
This is a re-release of 3.2.1 to satisfy something with the Go module system.
|
||||
|
||||
## Release 3.2.1 (2021-02-04)
|
||||
|
||||
### Changed
|
||||
|
||||
- Upgraded `Masterminds/goutils` to `v1.1.1`. see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr)
|
||||
|
||||
## Release 3.2.0 (2020-12-14)
|
||||
|
||||
### Added
|
|
@ -1,4 +1,4 @@
|
|||
# Slim-Sprig: Template functions for Go templates [](https://godoc.org/github.com/go-task/slim-sprig) [](https://goreportcard.com/report/github.com/go-task/slim-sprig)
|
||||
# Slim-Sprig: Template functions for Go templates [](https://pkg.go.dev/github.com/go-task/slim-sprig/v3)
|
||||
|
||||
Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with
|
||||
all functions that depend on external (non standard library) or crypto packages
|
|
@ -1,6 +1,6 @@
|
|||
# https://taskfile.dev
|
||||
|
||||
version: '2'
|
||||
version: '3'
|
||||
|
||||
tasks:
|
||||
default:
|
vendor/github.com/google/pprof/profile/encode.go (generated, vendored): 97 lines changed
|
@ -17,6 +17,7 @@ package profile
|
|||
import (
|
||||
"errors"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func (p *Profile) decoder() []decoder {
|
||||
|
@ -121,6 +122,7 @@ func (p *Profile) preEncode() {
|
|||
}
|
||||
|
||||
p.defaultSampleTypeX = addString(strings, p.DefaultSampleType)
|
||||
p.docURLX = addString(strings, p.DocURL)
|
||||
|
||||
p.stringTable = make([]string, len(strings))
|
||||
for s, i := range strings {
|
||||
|
@ -155,6 +157,7 @@ func (p *Profile) encode(b *buffer) {
|
|||
encodeInt64Opt(b, 12, p.Period)
|
||||
encodeInt64s(b, 13, p.commentX)
|
||||
encodeInt64(b, 14, p.defaultSampleTypeX)
|
||||
encodeInt64Opt(b, 15, p.docURLX)
|
||||
}
|
||||
|
||||
var profileDecoder = []decoder{
|
||||
|
@ -183,12 +186,13 @@ var profileDecoder = []decoder{
|
|||
// repeated Location location = 4
|
||||
func(b *buffer, m message) error {
|
||||
x := new(Location)
|
||||
x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer
|
||||
x.Line = b.tmpLines[:0] // Use shared space temporarily
|
||||
pp := m.(*Profile)
|
||||
pp.Location = append(pp.Location, x)
|
||||
err := decodeMessage(b, x)
|
||||
var tmp []Line
|
||||
x.Line = append(tmp, x.Line...) // Shrink to allocated size
|
||||
b.tmpLines = x.Line[:0]
|
||||
// Copy to shrink size and detach from shared space.
|
||||
x.Line = append([]Line(nil), x.Line...)
|
||||
return err
|
||||
},
|
||||
// repeated Function function = 5
|
||||
|
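The Location decoder above stops allocating a fresh Line slice for every location: it borrows the buffer's shared tmpLines scratch space while decodeMessage runs, then copies the result with append([]Line(nil), ...) so the stored slice no longer aliases the scratch buffer. A minimal, hedged sketch of that reuse-then-detach pattern; the names are illustrative, not the vendored API.

package main

import "fmt"

func main() {
	var scratch []int // plays the role of buffer.tmpLines

	decodeRecord := func(values []int) []int {
		out := scratch[:0]           // borrow shared space temporarily
		out = append(out, values...) // stand-in for decodeMessage filling it
		scratch = out[:0]            // hand the grown capacity back for reuse
		// Copy to shrink to the exact size and detach from shared space.
		return append([]int(nil), out...)
	}

	a := decodeRecord([]int{1, 2, 3})
	b := decodeRecord([]int{4, 5})
	fmt.Println(a, b) // [1 2 3] [4 5]; decoding b did not clobber a
}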
@ -235,6 +239,8 @@ var profileDecoder = []decoder{
|
|||
func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) },
|
||||
// int64 defaultSampleType = 14
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) },
|
||||
// string doc_link = 15;
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).docURLX) },
|
||||
}
|
||||
|
||||
// postDecode takes the unexported fields populated by decode (with
|
||||
|
@ -252,6 +258,14 @@ func (p *Profile) postDecode() error {
|
|||
} else {
|
||||
mappings[m.ID] = m
|
||||
}
|
||||
|
||||
// If this a main linux kernel mapping with a relocation symbol suffix
|
||||
// ("[kernel.kallsyms]_text"), extract said suffix.
|
||||
// It is fairly hacky to handle at this level, but the alternatives appear even worse.
|
||||
const prefix = "[kernel.kallsyms]"
|
||||
if strings.HasPrefix(m.File, prefix) {
|
||||
m.KernelRelocationSymbol = m.File[len(prefix):]
|
||||
}
|
||||
}
|
||||
|
||||
functions := make(map[uint64]*Function, len(p.Function))
|
||||
|
@ -298,41 +312,52 @@ func (p *Profile) postDecode() error {
|
|||
st.Unit, err = getString(p.stringTable, &st.unitX, err)
|
||||
}
|
||||
|
||||
// Pre-allocate space for all locations.
|
||||
numLocations := 0
|
||||
for _, s := range p.Sample {
|
||||
labels := make(map[string][]string, len(s.labelX))
|
||||
numLabels := make(map[string][]int64, len(s.labelX))
|
||||
numUnits := make(map[string][]string, len(s.labelX))
|
||||
for _, l := range s.labelX {
|
||||
var key, value string
|
||||
key, err = getString(p.stringTable, &l.keyX, err)
|
||||
if l.strX != 0 {
|
||||
value, err = getString(p.stringTable, &l.strX, err)
|
||||
labels[key] = append(labels[key], value)
|
||||
} else if l.numX != 0 || l.unitX != 0 {
|
||||
numValues := numLabels[key]
|
||||
units := numUnits[key]
|
||||
if l.unitX != 0 {
|
||||
var unit string
|
||||
unit, err = getString(p.stringTable, &l.unitX, err)
|
||||
units = padStringArray(units, len(numValues))
|
||||
numUnits[key] = append(units, unit)
|
||||
}
|
||||
numLabels[key] = append(numLabels[key], l.numX)
|
||||
}
|
||||
}
|
||||
if len(labels) > 0 {
|
||||
s.Label = labels
|
||||
}
|
||||
if len(numLabels) > 0 {
|
||||
s.NumLabel = numLabels
|
||||
for key, units := range numUnits {
|
||||
if len(units) > 0 {
|
||||
numUnits[key] = padStringArray(units, len(numLabels[key]))
|
||||
numLocations += len(s.locationIDX)
|
||||
}
|
||||
locBuffer := make([]*Location, numLocations)
|
||||
|
||||
for _, s := range p.Sample {
|
||||
if len(s.labelX) > 0 {
|
||||
labels := make(map[string][]string, len(s.labelX))
|
||||
numLabels := make(map[string][]int64, len(s.labelX))
|
||||
numUnits := make(map[string][]string, len(s.labelX))
|
||||
for _, l := range s.labelX {
|
||||
var key, value string
|
||||
key, err = getString(p.stringTable, &l.keyX, err)
|
||||
if l.strX != 0 {
|
||||
value, err = getString(p.stringTable, &l.strX, err)
|
||||
labels[key] = append(labels[key], value)
|
||||
} else if l.numX != 0 || l.unitX != 0 {
|
||||
numValues := numLabels[key]
|
||||
units := numUnits[key]
|
||||
if l.unitX != 0 {
|
||||
var unit string
|
||||
unit, err = getString(p.stringTable, &l.unitX, err)
|
||||
units = padStringArray(units, len(numValues))
|
||||
numUnits[key] = append(units, unit)
|
||||
}
|
||||
numLabels[key] = append(numLabels[key], l.numX)
|
||||
}
|
||||
}
|
||||
s.NumUnit = numUnits
|
||||
if len(labels) > 0 {
|
||||
s.Label = labels
|
||||
}
|
||||
if len(numLabels) > 0 {
|
||||
s.NumLabel = numLabels
|
||||
for key, units := range numUnits {
|
||||
if len(units) > 0 {
|
||||
numUnits[key] = padStringArray(units, len(numLabels[key]))
|
||||
}
|
||||
}
|
||||
s.NumUnit = numUnits
|
||||
}
|
||||
}
|
||||
s.Location = make([]*Location, len(s.locationIDX))
|
||||
|
||||
s.Location = locBuffer[:len(s.locationIDX)]
|
||||
locBuffer = locBuffer[len(s.locationIDX):]
|
||||
for i, lid := range s.locationIDX {
|
||||
if lid < uint64(len(locationIds)) {
|
||||
s.Location[i] = locationIds[lid]
|
||||
|
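The rewritten postDecode above first counts every sample's locations and then carves per-sample slices out of one shared locBuffer instead of calling make once per sample. A small, hedged sketch of that single-backing-slice pattern, with plain ints standing in for *Location.

package main

import "fmt"

func main() {
	sampleLocCounts := []int{3, 1, 2} // stand-in for len(s.locationIDX) per sample
	total := 0
	for _, n := range sampleLocCounts {
		total += n
	}
	backing := make([]int, total) // one allocation covers every sample
	views := make([][]int, 0, len(sampleLocCounts))
	for _, n := range sampleLocCounts {
		views = append(views, backing[:n]) // per-sample view into the backing slice
		backing = backing[n:]
	}
	for i, v := range views {
		fmt.Println("sample", i, "holds", len(v), "location slots")
	}
}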
@ -363,6 +388,7 @@ func (p *Profile) postDecode() error {
|
|||
|
||||
p.commentX = nil
|
||||
p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err)
|
||||
p.DocURL, err = getString(p.stringTable, &p.docURLX, err)
|
||||
p.stringTable = nil
|
||||
return err
|
||||
}
|
||||
|
@ -509,6 +535,7 @@ func (p *Line) decoder() []decoder {
|
|||
func (p *Line) encode(b *buffer) {
|
||||
encodeUint64Opt(b, 1, p.functionIDX)
|
||||
encodeInt64Opt(b, 2, p.Line)
|
||||
encodeInt64Opt(b, 3, p.Column)
|
||||
}
|
||||
|
||||
var lineDecoder = []decoder{
|
||||
|
@ -517,6 +544,8 @@ var lineDecoder = []decoder{
|
|||
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
|
||||
// optional int64 line = 2
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
|
||||
// optional int64 column = 3
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Column) },
|
||||
}
|
||||
|
||||
func (p *Function) decoder() []decoder {
|
||||
|
|
vendor/github.com/google/pprof/profile/filter.go (generated, vendored): 4 lines changed
|
@ -22,6 +22,10 @@ import "regexp"
|
|||
// samples where at least one frame matches focus but none match ignore.
|
||||
// Returns true is the corresponding regexp matched at least one sample.
|
||||
func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) {
|
||||
if focus == nil && ignore == nil && hide == nil && show == nil {
|
||||
fm = true // Missing focus implies a match
|
||||
return
|
||||
}
|
||||
focusOrIgnore := make(map[uint64]bool)
|
||||
hidden := make(map[uint64]bool)
|
||||
for _, l := range p.Location {
|
||||
|
|
vendor/github.com/google/pprof/profile/legacy_java_profile.go (generated, vendored): 4 lines changed
|
@ -56,7 +56,7 @@ func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte
|
|||
}
|
||||
|
||||
// Strip out addresses for better merge.
|
||||
if err = p.Aggregate(true, true, true, true, false); err != nil {
|
||||
if err = p.Aggregate(true, true, true, true, false, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
@ -99,7 +99,7 @@ func parseJavaProfile(b []byte) (*Profile, error) {
|
|||
}
|
||||
|
||||
// Strip out addresses for better merge.
|
||||
if err = p.Aggregate(true, true, true, true, false); err != nil {
|
||||
if err = p.Aggregate(true, true, true, true, false, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
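Both call sites above gain a sixth boolean because Profile.Aggregate (changed later in this commit, in profile.go) now takes a separate columnnumber flag in the fifth position. A hedged usage sketch; the sample type here is a placeholder.

package main

import (
	"fmt"

	"github.com/google/pprof/profile"
)

func main() {
	p := &profile.Profile{
		SampleType: []*profile.ValueType{{Type: "cpu", Unit: "nanoseconds"}},
	}
	// inlineFrame, function, filename, linenumber, columnnumber, address
	if err := p.Aggregate(true, true, true, true, false, false); err != nil {
		fmt.Println("aggregate failed:", err)
		return
	}
	fmt.Println("aggregated", len(p.Location), "locations")
}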
|
vendor/github.com/google/pprof/profile/legacy_profile.go (generated, vendored): 31 lines changed
|
@ -295,11 +295,12 @@ func get64b(b []byte) (uint64, []byte) {
|
|||
//
|
||||
// The general format for profilez samples is a sequence of words in
|
||||
// binary format. The first words are a header with the following data:
|
||||
// 1st word -- 0
|
||||
// 2nd word -- 3
|
||||
// 3rd word -- 0 if a c++ application, 1 if a java application.
|
||||
// 4th word -- Sampling period (in microseconds).
|
||||
// 5th word -- Padding.
|
||||
//
|
||||
// 1st word -- 0
|
||||
// 2nd word -- 3
|
||||
// 3rd word -- 0 if a c++ application, 1 if a java application.
|
||||
// 4th word -- Sampling period (in microseconds).
|
||||
// 5th word -- Padding.
|
||||
func parseCPU(b []byte) (*Profile, error) {
|
||||
var parse func([]byte) (uint64, []byte)
|
||||
var n1, n2, n3, n4, n5 uint64
|
||||
|
@ -403,15 +404,18 @@ func cleanupDuplicateLocations(p *Profile) {
|
|||
//
|
||||
// profilez samples are a repeated sequence of stack frames of the
|
||||
// form:
|
||||
// 1st word -- The number of times this stack was encountered.
|
||||
// 2nd word -- The size of the stack (StackSize).
|
||||
// 3rd word -- The first address on the stack.
|
||||
// ...
|
||||
// StackSize + 2 -- The last address on the stack
|
||||
//
|
||||
// 1st word -- The number of times this stack was encountered.
|
||||
// 2nd word -- The size of the stack (StackSize).
|
||||
// 3rd word -- The first address on the stack.
|
||||
// ...
|
||||
// StackSize + 2 -- The last address on the stack
|
||||
//
|
||||
// The last stack trace is of the form:
|
||||
// 1st word -- 0
|
||||
// 2nd word -- 1
|
||||
// 3rd word -- 0
|
||||
//
|
||||
// 1st word -- 0
|
||||
// 2nd word -- 1
|
||||
// 3rd word -- 0
|
||||
//
|
||||
// Addresses from stack traces may point to the next instruction after
|
||||
// each call. Optionally adjust by -1 to land somewhere on the actual
|
||||
|
@ -861,7 +865,6 @@ func parseThread(b []byte) (*Profile, error) {
|
|||
// Recognize each thread and populate profile samples.
|
||||
for !isMemoryMapSentinel(line) {
|
||||
if strings.HasPrefix(line, "---- no stack trace for") {
|
||||
line = ""
|
||||
break
|
||||
}
|
||||
if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
|
||||
|
|
vendor/github.com/google/pprof/profile/merge.go (generated, vendored): 287 lines changed
|
@ -15,6 +15,7 @@
|
|||
package profile
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
@ -58,7 +59,7 @@ func Merge(srcs []*Profile) (*Profile, error) {
|
|||
|
||||
for _, src := range srcs {
|
||||
// Clear the profile-specific hash tables
|
||||
pm.locationsByID = make(map[uint64]*Location, len(src.Location))
|
||||
pm.locationsByID = makeLocationIDMap(len(src.Location))
|
||||
pm.functionsByID = make(map[uint64]*Function, len(src.Function))
|
||||
pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))
|
||||
|
||||
|
@ -136,7 +137,7 @@ type profileMerger struct {
|
|||
p *Profile
|
||||
|
||||
// Memoization tables within a profile.
|
||||
locationsByID map[uint64]*Location
|
||||
locationsByID locationIDMap
|
||||
functionsByID map[uint64]*Function
|
||||
mappingsByID map[uint64]mapInfo
|
||||
|
||||
|
@ -153,6 +154,16 @@ type mapInfo struct {
|
|||
}
|
||||
|
||||
func (pm *profileMerger) mapSample(src *Sample) *Sample {
|
||||
// Check memoization table
|
||||
k := pm.sampleKey(src)
|
||||
if ss, ok := pm.samples[k]; ok {
|
||||
for i, v := range src.Value {
|
||||
ss.Value[i] += v
|
||||
}
|
||||
return ss
|
||||
}
|
||||
|
||||
// Make new sample.
|
||||
s := &Sample{
|
||||
Location: make([]*Location, len(src.Location)),
|
||||
Value: make([]int64, len(src.Value)),
|
||||
|
@ -177,52 +188,98 @@ func (pm *profileMerger) mapSample(src *Sample) *Sample {
|
|||
s.NumLabel[k] = vv
|
||||
s.NumUnit[k] = uu
|
||||
}
|
||||
// Check memoization table. Must be done on the remapped location to
|
||||
// account for the remapped mapping. Add current values to the
|
||||
// existing sample.
|
||||
k := s.key()
|
||||
if ss, ok := pm.samples[k]; ok {
|
||||
for i, v := range src.Value {
|
||||
ss.Value[i] += v
|
||||
}
|
||||
return ss
|
||||
}
|
||||
copy(s.Value, src.Value)
|
||||
pm.samples[k] = s
|
||||
pm.p.Sample = append(pm.p.Sample, s)
|
||||
return s
|
||||
}
|
||||
|
||||
// key generates sampleKey to be used as a key for maps.
|
||||
func (sample *Sample) key() sampleKey {
|
||||
ids := make([]string, len(sample.Location))
|
||||
for i, l := range sample.Location {
|
||||
ids[i] = strconv.FormatUint(l.ID, 16)
|
||||
func (pm *profileMerger) sampleKey(sample *Sample) sampleKey {
|
||||
// Accumulate contents into a string.
|
||||
var buf strings.Builder
|
||||
buf.Grow(64) // Heuristic to avoid extra allocs
|
||||
|
||||
// encode a number
|
||||
putNumber := func(v uint64) {
|
||||
var num [binary.MaxVarintLen64]byte
|
||||
n := binary.PutUvarint(num[:], v)
|
||||
buf.Write(num[:n])
|
||||
}
|
||||
|
||||
labels := make([]string, 0, len(sample.Label))
|
||||
for k, v := range sample.Label {
|
||||
labels = append(labels, fmt.Sprintf("%q%q", k, v))
|
||||
// encode a string prefixed with its length.
|
||||
putDelimitedString := func(s string) {
|
||||
putNumber(uint64(len(s)))
|
||||
buf.WriteString(s)
|
||||
}
|
||||
sort.Strings(labels)
|
||||
|
||||
numlabels := make([]string, 0, len(sample.NumLabel))
|
||||
for k, v := range sample.NumLabel {
|
||||
numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k]))
|
||||
for _, l := range sample.Location {
|
||||
// Get the location in the merged profile, which may have a different ID.
|
||||
if loc := pm.mapLocation(l); loc != nil {
|
||||
putNumber(loc.ID)
|
||||
}
|
||||
}
|
||||
sort.Strings(numlabels)
|
||||
putNumber(0) // Delimiter
|
||||
|
||||
return sampleKey{
|
||||
strings.Join(ids, "|"),
|
||||
strings.Join(labels, ""),
|
||||
strings.Join(numlabels, ""),
|
||||
for _, l := range sortedKeys1(sample.Label) {
|
||||
putDelimitedString(l)
|
||||
values := sample.Label[l]
|
||||
putNumber(uint64(len(values)))
|
||||
for _, v := range values {
|
||||
putDelimitedString(v)
|
||||
}
|
||||
}
|
||||
|
||||
for _, l := range sortedKeys2(sample.NumLabel) {
|
||||
putDelimitedString(l)
|
||||
values := sample.NumLabel[l]
|
||||
putNumber(uint64(len(values)))
|
||||
for _, v := range values {
|
||||
putNumber(uint64(v))
|
||||
}
|
||||
units := sample.NumUnit[l]
|
||||
putNumber(uint64(len(units)))
|
||||
for _, v := range units {
|
||||
putDelimitedString(v)
|
||||
}
|
||||
}
|
||||
|
||||
return sampleKey(buf.String())
|
||||
}
|
||||
|
||||
type sampleKey struct {
|
||||
locations string
|
||||
labels string
|
||||
numlabels string
|
||||
type sampleKey string
|
||||
|
||||
// sortedKeys1 returns the sorted keys found in a string->[]string map.
|
||||
//
|
||||
// Note: this is currently non-generic since github pprof runs golint,
|
||||
// which does not support generics. When that issue is fixed, it can
|
||||
// be merged with sortedKeys2 and made into a generic function.
|
||||
func sortedKeys1(m map[string][]string) []string {
|
||||
if len(m) == 0 {
|
||||
return nil
|
||||
}
|
||||
keys := make([]string, 0, len(m))
|
||||
for k := range m {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
return keys
|
||||
}
|
||||
|
||||
// sortedKeys2 returns the sorted keys found in a string->[]int64 map.
|
||||
//
|
||||
// Note: this is currently non-generic since github pprof runs golint,
|
||||
// which does not support generics. When that issue is fixed, it can
|
||||
// be merged with sortedKeys1 and made into a generic function.
|
||||
func sortedKeys2(m map[string][]int64) []string {
|
||||
if len(m) == 0 {
|
||||
return nil
|
||||
}
|
||||
keys := make([]string, 0, len(m))
|
||||
for k := range m {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
return keys
|
||||
}
|
||||
|
||||
func (pm *profileMerger) mapLocation(src *Location) *Location {
|
||||
|
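The merger above replaces the old three-string sampleKey struct with a single string assembled by varint-encoding the remapped location IDs and length-prefixing the sorted label keys and values, which avoids the fmt.Sprintf and strings.Join allocations of the previous key. A simplified, hedged sketch of that encoding idea; it handles single-valued labels only and the names are illustrative.

package main

import (
	"encoding/binary"
	"fmt"
	"sort"
	"strings"
)

// buildKey serializes IDs and labels into one string using varints and
// length prefixes, so different field boundaries can never collide.
func buildKey(ids []uint64, labels map[string]string) string {
	var buf strings.Builder
	putNumber := func(v uint64) {
		var num [binary.MaxVarintLen64]byte
		n := binary.PutUvarint(num[:], v)
		buf.Write(num[:n])
	}
	putString := func(s string) { // length-prefixed, so "ab"+"c" != "a"+"bc"
		putNumber(uint64(len(s)))
		buf.WriteString(s)
	}
	for _, id := range ids {
		putNumber(id)
	}
	putNumber(0) // delimiter between the ID section and the label section
	keys := make([]string, 0, len(labels))
	for k := range labels {
		keys = append(keys, k)
	}
	sort.Strings(keys) // deterministic order, as sortedKeys1/sortedKeys2 do above
	for _, k := range keys {
		putString(k)
		putString(labels[k])
	}
	return buf.String()
}

func main() {
	k1 := buildKey([]uint64{1, 2, 3}, map[string]string{"pid": "42"})
	k2 := buildKey([]uint64{1, 2, 3}, map[string]string{"pid": "42"})
	fmt.Println(k1 == k2) // true: equal content yields equal keys
}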
@ -230,7 +287,7 @@ func (pm *profileMerger) mapLocation(src *Location) *Location {
|
|||
return nil
|
||||
}
|
||||
|
||||
if l, ok := pm.locationsByID[src.ID]; ok {
|
||||
if l := pm.locationsByID.get(src.ID); l != nil {
|
||||
return l
|
||||
}
|
||||
|
||||
|
@ -249,10 +306,10 @@ func (pm *profileMerger) mapLocation(src *Location) *Location {
|
|||
// account for the remapped mapping ID.
|
||||
k := l.key()
|
||||
if ll, ok := pm.locations[k]; ok {
|
||||
pm.locationsByID[src.ID] = ll
|
||||
pm.locationsByID.set(src.ID, ll)
|
||||
return ll
|
||||
}
|
||||
pm.locationsByID[src.ID] = l
|
||||
pm.locationsByID.set(src.ID, l)
|
||||
pm.locations[k] = l
|
||||
pm.p.Location = append(pm.p.Location, l)
|
||||
return l
|
||||
|
@ -269,12 +326,13 @@ func (l *Location) key() locationKey {
|
|||
key.addr -= l.Mapping.Start
|
||||
key.mappingID = l.Mapping.ID
|
||||
}
|
||||
lines := make([]string, len(l.Line)*2)
|
||||
lines := make([]string, len(l.Line)*3)
|
||||
for i, line := range l.Line {
|
||||
if line.Function != nil {
|
||||
lines[i*2] = strconv.FormatUint(line.Function.ID, 16)
|
||||
}
|
||||
lines[i*2+1] = strconv.FormatInt(line.Line, 16)
|
||||
lines[i*2+2] = strconv.FormatInt(line.Column, 16)
|
||||
}
|
||||
key.lines = strings.Join(lines, "|")
|
||||
return key
|
||||
|
@ -303,16 +361,17 @@ func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
|
|||
return mi
|
||||
}
|
||||
m := &Mapping{
|
||||
ID: uint64(len(pm.p.Mapping) + 1),
|
||||
Start: src.Start,
|
||||
Limit: src.Limit,
|
||||
Offset: src.Offset,
|
||||
File: src.File,
|
||||
BuildID: src.BuildID,
|
||||
HasFunctions: src.HasFunctions,
|
||||
HasFilenames: src.HasFilenames,
|
||||
HasLineNumbers: src.HasLineNumbers,
|
||||
HasInlineFrames: src.HasInlineFrames,
|
||||
ID: uint64(len(pm.p.Mapping) + 1),
|
||||
Start: src.Start,
|
||||
Limit: src.Limit,
|
||||
Offset: src.Offset,
|
||||
File: src.File,
|
||||
KernelRelocationSymbol: src.KernelRelocationSymbol,
|
||||
BuildID: src.BuildID,
|
||||
HasFunctions: src.HasFunctions,
|
||||
HasFilenames: src.HasFilenames,
|
||||
HasLineNumbers: src.HasLineNumbers,
|
||||
HasInlineFrames: src.HasInlineFrames,
|
||||
}
|
||||
pm.p.Mapping = append(pm.p.Mapping, m)
|
||||
|
||||
|
@ -360,6 +419,7 @@ func (pm *profileMerger) mapLine(src Line) Line {
|
|||
ln := Line{
|
||||
Function: pm.mapFunction(src.Function),
|
||||
Line: src.Line,
|
||||
Column: src.Column,
|
||||
}
|
||||
return ln
|
||||
}
|
||||
|
@ -416,6 +476,7 @@ func combineHeaders(srcs []*Profile) (*Profile, error) {
|
|||
var timeNanos, durationNanos, period int64
|
||||
var comments []string
|
||||
seenComments := map[string]bool{}
|
||||
var docURL string
|
||||
var defaultSampleType string
|
||||
for _, s := range srcs {
|
||||
if timeNanos == 0 || s.TimeNanos < timeNanos {
|
||||
|
@ -434,6 +495,9 @@ func combineHeaders(srcs []*Profile) (*Profile, error) {
|
|||
if defaultSampleType == "" {
|
||||
defaultSampleType = s.DefaultSampleType
|
||||
}
|
||||
if docURL == "" {
|
||||
docURL = s.DocURL
|
||||
}
|
||||
}
|
||||
|
||||
p := &Profile{
|
||||
|
@ -449,6 +513,7 @@ func combineHeaders(srcs []*Profile) (*Profile, error) {
|
|||
|
||||
Comments: comments,
|
||||
DefaultSampleType: defaultSampleType,
|
||||
DocURL: docURL,
|
||||
}
|
||||
copy(p.SampleType, srcs[0].SampleType)
|
||||
return p, nil
|
||||
|
@ -479,3 +544,131 @@ func (p *Profile) compatible(pb *Profile) error {
|
|||
func equalValueType(st1, st2 *ValueType) bool {
|
||||
return st1.Type == st2.Type && st1.Unit == st2.Unit
|
||||
}
|
||||
|
||||
// locationIDMap is like a map[uint64]*Location, but provides efficiency for
|
||||
// ids that are densely numbered, which is often the case.
|
||||
type locationIDMap struct {
|
||||
dense []*Location // indexed by id for id < len(dense)
|
||||
sparse map[uint64]*Location // indexed by id for id >= len(dense)
|
||||
}
|
||||
|
||||
func makeLocationIDMap(n int) locationIDMap {
|
||||
return locationIDMap{
|
||||
dense: make([]*Location, n),
|
||||
sparse: map[uint64]*Location{},
|
||||
}
|
||||
}
|
||||
|
||||
func (lm locationIDMap) get(id uint64) *Location {
|
||||
if id < uint64(len(lm.dense)) {
|
||||
return lm.dense[int(id)]
|
||||
}
|
||||
return lm.sparse[id]
|
||||
}
|
||||
|
||||
func (lm locationIDMap) set(id uint64, loc *Location) {
|
||||
if id < uint64(len(lm.dense)) {
|
||||
lm.dense[id] = loc
|
||||
return
|
||||
}
|
||||
lm.sparse[id] = loc
|
||||
}
|
||||
|
||||
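locationIDMap above serves lookups for densely numbered IDs from a preallocated slice and falls back to a map only for IDs beyond that bound. A hedged sketch of the same dense-plus-sparse idea, written with type parameters purely for illustration; the vendored code itself stays non-generic.

package main

import "fmt"

type idMap[T any] struct {
	dense  []*T             // indexed by id for id < len(dense)
	sparse map[uint64]*T    // everything else
}

func newIDMap[T any](n int) idMap[T] {
	return idMap[T]{dense: make([]*T, n), sparse: map[uint64]*T{}}
}

func (m idMap[T]) get(id uint64) *T {
	if id < uint64(len(m.dense)) {
		return m.dense[id]
	}
	return m.sparse[id]
}

func (m idMap[T]) set(id uint64, v *T) {
	if id < uint64(len(m.dense)) {
		m.dense[id] = v
		return
	}
	m.sparse[id] = v
}

func main() {
	type location struct{ addr uint64 }
	lm := newIDMap[location](4)
	lm.set(1, &location{addr: 0x1000})  // dense path
	lm.set(99, &location{addr: 0x2000}) // sparse path
	fmt.Println(lm.get(1).addr, lm.get(99).addr)
}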
// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It
|
||||
// keeps sample types that appear in all profiles only and drops/reorders the
|
||||
// sample types as necessary.
|
||||
//
|
||||
// In the case of sample types order is not the same for given profiles the
|
||||
// order is derived from the first profile.
|
||||
//
|
||||
// Profiles are modified in-place.
|
||||
//
|
||||
// It returns an error if the sample type's intersection is empty.
|
||||
func CompatibilizeSampleTypes(ps []*Profile) error {
|
||||
sTypes := commonSampleTypes(ps)
|
||||
if len(sTypes) == 0 {
|
||||
return fmt.Errorf("profiles have empty common sample type list")
|
||||
}
|
||||
for _, p := range ps {
|
||||
if err := compatibilizeSampleTypes(p, sTypes); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// commonSampleTypes returns sample types that appear in all profiles in the
|
||||
// order how they ordered in the first profile.
|
||||
func commonSampleTypes(ps []*Profile) []string {
|
||||
if len(ps) == 0 {
|
||||
return nil
|
||||
}
|
||||
sTypes := map[string]int{}
|
||||
for _, p := range ps {
|
||||
for _, st := range p.SampleType {
|
||||
sTypes[st.Type]++
|
||||
}
|
||||
}
|
||||
var res []string
|
||||
for _, st := range ps[0].SampleType {
|
||||
if sTypes[st.Type] == len(ps) {
|
||||
res = append(res, st.Type)
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// compatibilizeSampleTypes drops sample types that are not present in sTypes
|
||||
// list and reorder them if needed.
|
||||
//
|
||||
// It sets DefaultSampleType to sType[0] if it is not in sType list.
|
||||
//
|
||||
// It assumes that all sample types from the sTypes list are present in the
|
||||
// given profile otherwise it returns an error.
|
||||
func compatibilizeSampleTypes(p *Profile, sTypes []string) error {
|
||||
if len(sTypes) == 0 {
|
||||
return fmt.Errorf("sample type list is empty")
|
||||
}
|
||||
defaultSampleType := sTypes[0]
|
||||
reMap, needToModify := make([]int, len(sTypes)), false
|
||||
for i, st := range sTypes {
|
||||
if st == p.DefaultSampleType {
|
||||
defaultSampleType = p.DefaultSampleType
|
||||
}
|
||||
idx := searchValueType(p.SampleType, st)
|
||||
if idx < 0 {
|
||||
return fmt.Errorf("%q sample type is not found in profile", st)
|
||||
}
|
||||
reMap[i] = idx
|
||||
if idx != i {
|
||||
needToModify = true
|
||||
}
|
||||
}
|
||||
if !needToModify && len(sTypes) == len(p.SampleType) {
|
||||
return nil
|
||||
}
|
||||
p.DefaultSampleType = defaultSampleType
|
||||
oldSampleTypes := p.SampleType
|
||||
p.SampleType = make([]*ValueType, len(sTypes))
|
||||
for i, idx := range reMap {
|
||||
p.SampleType[i] = oldSampleTypes[idx]
|
||||
}
|
||||
values := make([]int64, len(sTypes))
|
||||
for _, s := range p.Sample {
|
||||
for i, idx := range reMap {
|
||||
values[i] = s.Value[idx]
|
||||
}
|
||||
s.Value = s.Value[:len(values)]
|
||||
copy(s.Value, values)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func searchValueType(vts []*ValueType, s string) int {
|
||||
for i, vt := range vts {
|
||||
if vt.Type == s {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
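CompatibilizeSampleTypes is a new exported entry point: it intersects the sample types of several profiles, keeping the first profile's order, so that they can be merged or diffed. A hedged usage sketch; the file names are placeholders.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/google/pprof/profile"
)

func main() {
	var profs []*profile.Profile
	for _, name := range []string{"a.pb.gz", "b.pb.gz"} {
		f, err := os.Open(name)
		if err != nil {
			log.Fatal(err)
		}
		p, err := profile.Parse(f)
		f.Close()
		if err != nil {
			log.Fatal(err)
		}
		profs = append(profs, p)
	}
	// Keep only the sample types common to every profile, in the order of
	// the first profile; fails if the intersection is empty.
	if err := profile.CompatibilizeSampleTypes(profs); err != nil {
		log.Fatal(err)
	}
	merged, err := profile.Merge(profs)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(merged.String())
}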
|
vendor/github.com/google/pprof/profile/profile.go (generated, vendored): 84 lines changed
|
@ -21,7 +21,6 @@ import (
|
|||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
|
@ -40,6 +39,7 @@ type Profile struct {
|
|||
Location []*Location
|
||||
Function []*Function
|
||||
Comments []string
|
||||
DocURL string
|
||||
|
||||
DropFrames string
|
||||
KeepFrames string
|
||||
|
@ -54,6 +54,7 @@ type Profile struct {
|
|||
encodeMu sync.Mutex
|
||||
|
||||
commentX []int64
|
||||
docURLX int64
|
||||
dropFramesX int64
|
||||
keepFramesX int64
|
||||
stringTable []string
|
||||
|
@ -73,9 +74,23 @@ type ValueType struct {
|
|||
type Sample struct {
|
||||
Location []*Location
|
||||
Value []int64
|
||||
Label map[string][]string
|
||||
// Label is a per-label-key map to values for string labels.
|
||||
//
|
||||
// In general, having multiple values for the given label key is strongly
|
||||
// discouraged - see docs for the sample label field in profile.proto. The
|
||||
// main reason this unlikely state is tracked here is to make the
|
||||
// decoding->encoding roundtrip not lossy. But we expect that the value
|
||||
// slices present in this map are always of length 1.
|
||||
Label map[string][]string
|
||||
// NumLabel is a per-label-key map to values for numeric labels. See a note
|
||||
// above on handling multiple values for a label.
|
||||
NumLabel map[string][]int64
|
||||
NumUnit map[string][]string
|
||||
// NumUnit is a per-label-key map to the unit names of corresponding numeric
|
||||
// label values. The unit info may be missing even if the label is in
|
||||
// NumLabel, see the docs in profile.proto for details. When the value is
|
||||
// slice is present and not nil, its length must be equal to the length of
|
||||
// the corresponding value slice in NumLabel.
|
||||
NumUnit map[string][]string
|
||||
|
||||
locationIDX []uint64
|
||||
labelX []label
|
||||
|
@ -106,6 +121,15 @@ type Mapping struct {
|
|||
|
||||
fileX int64
|
||||
buildIDX int64
|
||||
|
||||
// Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File.
|
||||
// For linux kernel mappings generated by some tools, correct symbolization depends
|
||||
// on knowing which of the two possible relocation symbols was used for `Start`.
|
||||
// This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext").
|
||||
//
|
||||
// Note, this public field is not persisted in the proto. For the purposes of
|
||||
// copying / merging / hashing profiles, it is considered subsumed by `File`.
|
||||
KernelRelocationSymbol string
|
||||
}
|
||||
|
||||
// Location corresponds to Profile.Location
|
||||
|
@ -123,6 +147,7 @@ type Location struct {
|
|||
type Line struct {
|
||||
Function *Function
|
||||
Line int64
|
||||
Column int64
|
||||
|
||||
functionIDX uint64
|
||||
}
|
||||
|
@ -144,7 +169,7 @@ type Function struct {
|
|||
// may be a gzip-compressed encoded protobuf or one of many legacy
|
||||
// profile formats which may be unsupported in the future.
|
||||
func Parse(r io.Reader) (*Profile, error) {
|
||||
data, err := ioutil.ReadAll(r)
|
||||
data, err := io.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -159,7 +184,7 @@ func ParseData(data []byte) (*Profile, error) {
|
|||
if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b {
|
||||
gz, err := gzip.NewReader(bytes.NewBuffer(data))
|
||||
if err == nil {
|
||||
data, err = ioutil.ReadAll(gz)
|
||||
data, err = io.ReadAll(gz)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("decompressing profile: %v", err)
|
||||
|
@ -414,7 +439,7 @@ func (p *Profile) CheckValid() error {
|
|||
// Aggregate merges the locations in the profile into equivalence
|
||||
// classes preserving the request attributes. It also updates the
|
||||
// samples to point to the merged locations.
|
||||
func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error {
|
||||
func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnnumber, address bool) error {
|
||||
for _, m := range p.Mapping {
|
||||
m.HasInlineFrames = m.HasInlineFrames && inlineFrame
|
||||
m.HasFunctions = m.HasFunctions && function
|
||||
|
@ -436,7 +461,7 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address
|
|||
}
|
||||
|
||||
// Aggregate locations
|
||||
if !inlineFrame || !address || !linenumber {
|
||||
if !inlineFrame || !address || !linenumber || !columnnumber {
|
||||
for _, l := range p.Location {
|
||||
if !inlineFrame && len(l.Line) > 1 {
|
||||
l.Line = l.Line[len(l.Line)-1:]
|
||||
|
@ -444,6 +469,12 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address
|
|||
if !linenumber {
|
||||
for i := range l.Line {
|
||||
l.Line[i].Line = 0
|
||||
l.Line[i].Column = 0
|
||||
}
|
||||
}
|
||||
if !columnnumber {
|
||||
for i := range l.Line {
|
||||
l.Line[i].Column = 0
|
||||
}
|
||||
}
|
||||
if !address {
|
||||
|
@ -526,6 +557,9 @@ func (p *Profile) String() string {
|
|||
for _, c := range p.Comments {
|
||||
ss = append(ss, "Comment: "+c)
|
||||
}
|
||||
if url := p.DocURL; url != "" {
|
||||
ss = append(ss, fmt.Sprintf("Doc: %s", url))
|
||||
}
|
||||
if pt := p.PeriodType; pt != nil {
|
||||
ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit))
|
||||
}
|
||||
|
@ -605,10 +639,11 @@ func (l *Location) string() string {
|
|||
for li := range l.Line {
|
||||
lnStr := "??"
|
||||
if fn := l.Line[li].Function; fn != nil {
|
||||
lnStr = fmt.Sprintf("%s %s:%d s=%d",
|
||||
lnStr = fmt.Sprintf("%s %s:%d:%d s=%d",
|
||||
fn.Name,
|
||||
fn.Filename,
|
||||
l.Line[li].Line,
|
||||
l.Line[li].Column,
|
||||
fn.StartLine)
|
||||
if fn.Name != fn.SystemName {
|
||||
lnStr = lnStr + "(" + fn.SystemName + ")"
|
||||
|
@ -707,6 +742,35 @@ func (s *Sample) HasLabel(key, value string) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// SetNumLabel sets the specified key to the specified value for all samples in the
|
||||
// profile. "unit" is a slice that describes the units that each corresponding member
|
||||
// of "values" is measured in (e.g. bytes or seconds). If there is no relevant
|
||||
// unit for a given value, that member of "unit" should be the empty string.
|
||||
// "unit" must either have the same length as "value", or be nil.
|
||||
func (p *Profile) SetNumLabel(key string, value []int64, unit []string) {
|
||||
for _, sample := range p.Sample {
|
||||
if sample.NumLabel == nil {
|
||||
sample.NumLabel = map[string][]int64{key: value}
|
||||
} else {
|
||||
sample.NumLabel[key] = value
|
||||
}
|
||||
if sample.NumUnit == nil {
|
||||
sample.NumUnit = map[string][]string{key: unit}
|
||||
} else {
|
||||
sample.NumUnit[key] = unit
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveNumLabel removes all numerical labels associated with the specified key for all
|
||||
// samples in the profile.
|
||||
func (p *Profile) RemoveNumLabel(key string) {
|
||||
for _, sample := range p.Sample {
|
||||
delete(sample.NumLabel, key)
|
||||
delete(sample.NumUnit, key)
|
||||
}
|
||||
}
|
||||
|
||||
// DiffBaseSample returns true if a sample belongs to the diff base and false
|
||||
// otherwise.
|
||||
func (s *Sample) DiffBaseSample() bool {
|
||||
|
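SetNumLabel and RemoveNumLabel above apply or strip a numeric label, together with its unit strings, across every sample in a profile. A hedged usage sketch; the label key, value, and unit are placeholders.

package main

import (
	"fmt"

	"github.com/google/pprof/profile"
)

func main() {
	p := &profile.Profile{
		SampleType: []*profile.ValueType{{Type: "samples", Unit: "count"}},
		Sample:     []*profile.Sample{{Value: []int64{1}}},
	}
	// One value per sample; "bytes" describes the unit of that single value.
	p.SetNumLabel("allocation_size", []int64{4096}, []string{"bytes"})
	fmt.Println(p.Sample[0].NumLabel["allocation_size"]) // [4096]

	p.RemoveNumLabel("allocation_size")
	fmt.Println(len(p.Sample[0].NumLabel)) // 0
}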
@ -785,10 +849,10 @@ func (p *Profile) HasFileLines() bool {
|
|||
|
||||
// Unsymbolizable returns true if a mapping points to a binary for which
|
||||
// locations can't be symbolized in principle, at least now. Examples are
|
||||
// "[vdso]", [vsyscall]" and some others, see the code.
|
||||
// "[vdso]", "[vsyscall]" and some others, see the code.
|
||||
func (m *Mapping) Unsymbolizable() bool {
|
||||
name := filepath.Base(m.File)
|
||||
return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/")
|
||||
return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon"
|
||||
}
|
||||
|
||||
// Copy makes a fully independent copy of a profile.
|
||||
|
|
vendor/github.com/google/pprof/profile/proto.go (generated, vendored): 19 lines changed
|
@ -39,11 +39,12 @@ import (
|
|||
)
|
||||
|
||||
type buffer struct {
|
||||
field int // field tag
|
||||
typ int // proto wire type code for field
|
||||
u64 uint64
|
||||
data []byte
|
||||
tmp [16]byte
|
||||
field int // field tag
|
||||
typ int // proto wire type code for field
|
||||
u64 uint64
|
||||
data []byte
|
||||
tmp [16]byte
|
||||
tmpLines []Line // temporary storage used while decoding "repeated Line".
|
||||
}
|
||||
|
||||
type decoder func(*buffer, message) error
|
||||
|
@ -286,7 +287,6 @@ func decodeInt64s(b *buffer, x *[]int64) error {
|
|||
if b.typ == 2 {
|
||||
// Packed encoding
|
||||
data := b.data
|
||||
tmp := make([]int64, 0, len(data)) // Maximally sized
|
||||
for len(data) > 0 {
|
||||
var u uint64
|
||||
var err error
|
||||
|
@ -294,9 +294,8 @@ func decodeInt64s(b *buffer, x *[]int64) error {
|
|||
if u, data, err = decodeVarint(data); err != nil {
|
||||
return err
|
||||
}
|
||||
tmp = append(tmp, int64(u))
|
||||
*x = append(*x, int64(u))
|
||||
}
|
||||
*x = append(*x, tmp...)
|
||||
return nil
|
||||
}
|
||||
var i int64
|
||||
|
@ -319,7 +318,6 @@ func decodeUint64s(b *buffer, x *[]uint64) error {
|
|||
if b.typ == 2 {
|
||||
data := b.data
|
||||
// Packed encoding
|
||||
tmp := make([]uint64, 0, len(data)) // Maximally sized
|
||||
for len(data) > 0 {
|
||||
var u uint64
|
||||
var err error
|
||||
|
@ -327,9 +325,8 @@ func decodeUint64s(b *buffer, x *[]uint64) error {
|
|||
if u, data, err = decodeVarint(data); err != nil {
|
||||
return err
|
||||
}
|
||||
tmp = append(tmp, u)
|
||||
*x = append(*x, u)
|
||||
}
|
||||
*x = append(*x, tmp...)
|
||||
return nil
|
||||
}
|
||||
var u uint64
|
||||
|
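The packed-varint decoders above now append each decoded value directly into the destination slice instead of filling a maximally sized temporary slice and copying it at the end. A self-contained, hedged sketch of packed varint decoding in that style, using encoding/binary rather than the vendored decodeVarint helper.

package main

import (
	"encoding/binary"
	"fmt"
)

// decodePacked appends decoded values straight into *x, mirroring the
// simplification above.
func decodePacked(data []byte, x *[]int64) error {
	for len(data) > 0 {
		u, n := binary.Uvarint(data)
		if n <= 0 {
			return fmt.Errorf("bad varint")
		}
		data = data[n:]
		*x = append(*x, int64(u))
	}
	return nil
}

func main() {
	// 1, 2, 300 encoded as unsigned varints.
	data := []byte{0x01, 0x02, 0xac, 0x02}
	var out []int64
	if err := decodePacked(data, &out); err != nil {
		panic(err)
	}
	fmt.Println(out) // [1 2 300]
}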
|
vendor/github.com/google/pprof/profile/prune.go (generated, vendored): 26 lines changed
|
@ -62,15 +62,31 @@ func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
|
|||
prune := make(map[uint64]bool)
|
||||
pruneBeneath := make(map[uint64]bool)
|
||||
|
||||
// simplifyFunc can be expensive, so cache results.
|
||||
// Note that the same function name can be encountered many times due
|
||||
// different lines and addresses in the same function.
|
||||
pruneCache := map[string]bool{} // Map from function to whether or not to prune
|
||||
pruneFromHere := func(s string) bool {
|
||||
if r, ok := pruneCache[s]; ok {
|
||||
return r
|
||||
}
|
||||
funcName := simplifyFunc(s)
|
||||
if dropRx.MatchString(funcName) {
|
||||
if keepRx == nil || !keepRx.MatchString(funcName) {
|
||||
pruneCache[s] = true
|
||||
return true
|
||||
}
|
||||
}
|
||||
pruneCache[s] = false
|
||||
return false
|
||||
}
|
||||
|
||||
for _, loc := range p.Location {
|
||||
var i int
|
||||
for i = len(loc.Line) - 1; i >= 0; i-- {
|
||||
if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
|
||||
funcName := simplifyFunc(fn.Name)
|
||||
if dropRx.MatchString(funcName) {
|
||||
if keepRx == nil || !keepRx.MatchString(funcName) {
|
||||
break
|
||||
}
|
||||
if pruneFromHere(fn.Name) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
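Prune above memoizes the drop/keep decision per function name because simplifyFunc plus two regexp matches is relatively expensive, and the same name recurs across many lines and addresses. A hedged sketch of that memoized-predicate pattern; the regexp is a placeholder.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	dropRx := regexp.MustCompile(`^runtime\.`)
	cache := map[string]bool{} // function name -> whether to prune

	shouldPrune := func(name string) bool {
		if r, ok := cache[name]; ok {
			return r
		}
		r := dropRx.MatchString(name) // stand-in for the expensive decision
		cache[name] = r
		return r
	}

	for _, n := range []string{"runtime.mallocgc", "main.work", "runtime.mallocgc"} {
		fmt.Println(n, shouldPrune(n)) // the second "runtime.mallocgc" hits the cache
	}
}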
|
vendor/github.com/hashicorp/golang-lru/LICENSE (generated, vendored): 2 lines changed
|
@ -1,3 +1,5 @@
|
|||
Copyright (c) 2014 HashiCorp, Inc.
|
||||
|
||||
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
|
vendor/github.com/hashicorp/golang-lru/simplelru/lru.go (generated, vendored): 22 lines changed
|
@ -25,7 +25,7 @@ type entry struct {
|
|||
// NewLRU constructs an LRU of the given size
|
||||
func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
|
||||
if size <= 0 {
|
||||
return nil, errors.New("Must provide a positive size")
|
||||
return nil, errors.New("must provide a positive size")
|
||||
}
|
||||
c := &LRU{
|
||||
size: size,
|
||||
|
@ -73,6 +73,9 @@ func (c *LRU) Add(key, value interface{}) (evicted bool) {
|
|||
func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
|
||||
if ent, ok := c.items[key]; ok {
|
||||
c.evictList.MoveToFront(ent)
|
||||
if ent.Value.(*entry) == nil {
|
||||
return nil, false
|
||||
}
|
||||
return ent.Value.(*entry).value, true
|
||||
}
|
||||
return
|
||||
|
@ -106,7 +109,7 @@ func (c *LRU) Remove(key interface{}) (present bool) {
|
|||
}
|
||||
|
||||
// RemoveOldest removes the oldest item from the cache.
|
||||
func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
|
||||
func (c *LRU) RemoveOldest() (key, value interface{}, ok bool) {
|
||||
ent := c.evictList.Back()
|
||||
if ent != nil {
|
||||
c.removeElement(ent)
|
||||
|
@ -117,7 +120,7 @@ func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
|
|||
}
|
||||
|
||||
// GetOldest returns the oldest entry
|
||||
func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
|
||||
func (c *LRU) GetOldest() (key, value interface{}, ok bool) {
|
||||
ent := c.evictList.Back()
|
||||
if ent != nil {
|
||||
kv := ent.Value.(*entry)
|
||||
|
@ -142,6 +145,19 @@ func (c *LRU) Len() int {
|
|||
return c.evictList.Len()
|
||||
}
|
||||
|
||||
// Resize changes the cache size.
|
||||
func (c *LRU) Resize(size int) (evicted int) {
|
||||
diff := c.Len() - size
|
||||
if diff < 0 {
|
||||
diff = 0
|
||||
}
|
||||
for i := 0; i < diff; i++ {
|
||||
c.removeOldest()
|
||||
}
|
||||
c.size = size
|
||||
return diff
|
||||
}
|
||||
|
||||
// removeOldest removes the oldest item from the cache.
|
||||
func (c *LRU) removeOldest() {
|
||||
ent := c.evictList.Back()
|
||||
|
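Resize above lets callers shrink or grow the LRU in place, evicting the oldest entries when shrinking and returning how many were dropped. A hedged usage sketch against the simplelru package.

package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	c, err := simplelru.NewLRU(4, func(key, value interface{}) {
		fmt.Println("evicted:", key)
	})
	if err != nil {
		panic(err)
	}
	for i := 0; i < 4; i++ {
		c.Add(i, i*i)
	}
	evicted := c.Resize(2) // keeps the two most recently used entries
	fmt.Println("evicted count:", evicted, "len:", c.Len())
}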
|
vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go (generated, vendored): 8 lines changed
|
@ -1,3 +1,4 @@
|
|||
// Package simplelru provides simple LRU implementation based on build-in container/list.
|
||||
package simplelru
|
||||
|
||||
// LRUCache is the interface for simple LRU cache.
|
||||
|
@ -10,7 +11,7 @@ type LRUCache interface {
|
|||
// updates the "recently used"-ness of the key. #value, isFound
|
||||
Get(key interface{}) (value interface{}, ok bool)
|
||||
|
||||
// Check if a key exsists in cache without updating the recent-ness.
|
||||
// Checks if a key exists in cache without updating the recent-ness.
|
||||
Contains(key interface{}) (ok bool)
|
||||
|
||||
// Returns key's value without updating the "recently used"-ness of the key.
|
||||
|
@ -31,6 +32,9 @@ type LRUCache interface {
|
|||
// Returns the number of items in the cache.
|
||||
Len() int
|
||||
|
||||
// Clear all cache entries
|
||||
// Clears all cache entries.
|
||||
Purge()
|
||||
|
||||
// Resizes cache, returning number evicted
|
||||
Resize(int) int
|
||||
}
|
||||
|
|
vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go (generated, vendored): 4 lines changed
|
@ -82,6 +82,10 @@ func New(colorMode ColorMode) Formatter {
|
|||
return fmt.Sprintf("\x1b[38;5;%dm", colorCode)
|
||||
}
|
||||
|
||||
if _, noColor := os.LookupEnv("GINKGO_NO_COLOR"); noColor {
|
||||
colorMode = ColorModeNone
|
||||
}
|
||||
|
||||
f := Formatter{
|
||||
ColorMode: colorMode,
|
||||
colors: map[string]string{
|
||||
|
|
vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go (generated, vendored): 15 lines changed
|
@ -2,6 +2,8 @@ package build
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/internal"
|
||||
|
@ -53,7 +55,18 @@ func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.Go
|
|||
if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
|
||||
fmt.Println(suite.CompilationError.Error())
|
||||
} else {
|
||||
fmt.Printf("Compiled %s.test\n", suite.PackageName)
|
||||
if len(goFlagsConfig.O) == 0 {
|
||||
goFlagsConfig.O = path.Join(suite.Path, suite.PackageName+".test")
|
||||
} else {
|
||||
stat, err := os.Stat(goFlagsConfig.O)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if stat.IsDir() {
|
||||
goFlagsConfig.O += "/" + suite.PackageName + ".test"
|
||||
}
|
||||
}
|
||||
fmt.Printf("Compiled %s\n", goFlagsConfig.O)
|
||||
}
|
||||
}
|
||||
|
||||
|
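The build command above now reports the real output path: it defaults to <suite path>/<package>.test and, when the user supplies an -o value that is an existing directory, appends <package>.test to it. A hedged sketch of just that path logic; the helper name and example arguments are illustrative.

package main

import (
	"fmt"
	"os"
	"path"
)

// resolveOutput mimics the -o handling above for a single suite.
func resolveOutput(o, suitePath, pkg string) (string, error) {
	if o == "" {
		return path.Join(suitePath, pkg+".test"), nil
	}
	stat, err := os.Stat(o)
	if err != nil {
		return "", err
	}
	if stat.IsDir() {
		return o + "/" + pkg + ".test", nil
	}
	return o, nil
}

func main() {
	out, err := resolveOutput("", "./dnscrypt-proxy", "dnscrypt_proxy")
	fmt.Println(out, err) // dnscrypt-proxy/dnscrypt_proxy.test <nil>
}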
|
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go (generated, vendored): 2 lines changed
|
@ -7,7 +7,7 @@ import (
|
|||
"os"
|
||||
"text/template"
|
||||
|
||||
sprig "github.com/go-task/slim-sprig"
|
||||
sprig "github.com/go-task/slim-sprig/v3"
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/internal"
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
|
|
8
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go
generated
vendored
|
@ -10,7 +10,7 @@ import (
|
|||
"strings"
|
||||
"text/template"
|
||||
|
||||
sprig "github.com/go-task/slim-sprig"
|
||||
sprig "github.com/go-task/slim-sprig/v3"
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/internal"
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
|
@ -32,6 +32,9 @@ func BuildGenerateCommand() command.Command {
|
|||
{Name: "template-data", KeyPath: "CustomTemplateData",
|
||||
UsageArgument: "template-data-file",
|
||||
Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the test file template"},
|
||||
{Name: "tags", KeyPath: "Tags",
|
||||
UsageArgument: "build-tags",
|
||||
Usage: "If specified, generate will create a test file that uses the given build tags (i.e. `--tags e2e,!unit` will add `//go:build e2e,!unit`)"},
|
||||
},
|
||||
&conf,
|
||||
types.GinkgoFlagSections{},
|
||||
|
@ -59,6 +62,7 @@ You can also pass a <filename> of the form "file.go" and generate will emit "fil
|
|||
}
|
||||
|
||||
type specData struct {
|
||||
BuildTags string
|
||||
Package string
|
||||
Subject string
|
||||
PackageImportPath string
|
||||
|
@ -93,6 +97,7 @@ func generateTestFileForSubject(subject string, conf GeneratorsConfig) {
|
|||
}
|
||||
|
||||
data := specData{
|
||||
BuildTags: getBuildTags(conf.Tags),
|
||||
Package: determinePackageName(packageName, conf.Internal),
|
||||
Subject: formattedName,
|
||||
PackageImportPath: getPackageImportPath(),
|
||||
|
@ -169,6 +174,7 @@ func moduleName(modRoot string) string {
|
|||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
defer modFile.Close()
|
||||
|
||||
mod := make([]byte, 128)
|
||||
_, err = modFile.Read(mod)
|
||||
|
|
6
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go
generated
vendored
|
@ -1,6 +1,7 @@
|
|||
package generators
|
||||
|
||||
var specText = `package {{.Package}}
|
||||
var specText = `{{.BuildTags}}
|
||||
package {{.Package}}
|
||||
|
||||
import (
|
||||
{{.GinkgoImport}}
|
||||
|
@ -14,7 +15,8 @@ var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() {
|
|||
})
|
||||
`
|
||||
|
||||
var agoutiSpecText = `package {{.Package}}
|
||||
var agoutiSpecText = `{{.BuildTags}}
|
||||
package {{.Package}}
|
||||
|
||||
import (
|
||||
{{.GinkgoImport}}
|
||||
|
|
12
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go
generated
vendored
|
@ -1,6 +1,7 @@
|
|||
package generators
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/build"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
@ -14,6 +15,7 @@ type GeneratorsConfig struct {
|
|||
Agouti, NoDot, Internal bool
|
||||
CustomTemplate string
|
||||
CustomTemplateData string
|
||||
Tags string
|
||||
}
|
||||
|
||||
func getPackageAndFormattedName() (string, string, string) {
|
||||
|
@ -62,3 +64,13 @@ func determinePackageName(name string, internal bool) string {
|
|||
|
||||
return name + "_test"
|
||||
}
|
||||
|
||||
// getBuildTags returns the resultant string to be added.
|
||||
// If the input string is not empty, then returns a `//go:build {}` string,
|
||||
// otherwise returns an empty string.
|
||||
func getBuildTags(tags string) string {
|
||||
if tags != "" {
|
||||
return fmt.Sprintf("//go:build %s\n", tags)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
|
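A self-contained sketch of the build-tag handling added above; getBuildTags is unexported in the generators package, so it is copied here for illustration only. Combined with the template change in generate_templates.go, running `ginkgo generate --tags e2e,!unit` should emit a spec file that begins with the corresponding //go:build line.

package main

import "fmt"

// copy of the helper shown above, for illustration only
func getBuildTags(tags string) string {
	if tags != "" {
		return fmt.Sprintf("//go:build %s\n", tags)
	}
	return ""
}

func main() {
	fmt.Print(getBuildTags("e2e,!unit")) // //go:build e2e,!unit
	fmt.Print(getBuildTags(""))          // empty string: no build-tag line is emitted
}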
14
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go
generated
vendored
|
@ -25,6 +25,18 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite
|
|||
return suite
|
||||
}
|
||||
|
||||
if len(goFlagsConfig.O) > 0 {
|
||||
userDefinedPath, err := filepath.Abs(goFlagsConfig.O)
|
||||
if err != nil {
|
||||
suite.State = TestSuiteStateFailedToCompile
|
||||
suite.CompilationError = fmt.Errorf("Failed to compute compilation target path %s:\n%s", goFlagsConfig.O, err.Error())
|
||||
return suite
|
||||
}
|
||||
path = userDefinedPath
|
||||
}
|
||||
|
||||
goFlagsConfig.O = path
|
||||
|
||||
ginkgoInvocationPath, _ := os.Getwd()
|
||||
ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath)
|
||||
packagePath := suite.AbsPath()
|
||||
|
@ -34,7 +46,7 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite
|
|||
suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error())
|
||||
return suite
|
||||
}
|
||||
args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./", pathToInvocationPath)
|
||||
args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, "./", pathToInvocationPath)
|
||||
if err != nil {
|
||||
suite.State = TestSuiteStateFailedToCompile
|
||||
suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error())
|
||||
|
|
129
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go
generated
vendored
Normal file
|
@ -0,0 +1,129 @@
|
|||
// Copyright (c) 2015, Wade Simmons
|
||||
// All rights reserved.
|
||||
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
|
||||
// 1. Redistributions of source code must retain the above copyright notice, this
|
||||
// list of conditions and the following disclaimer.
|
||||
// 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Package gocovmerge takes the results from multiple `go test -coverprofile`
|
||||
// runs and merges them into one profile
|
||||
|
||||
// this file was originally taken from the gocovmerge project
|
||||
// see also: https://go.shabbyrobe.org/gocovmerge
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
|
||||
"golang.org/x/tools/cover"
|
||||
)
|
||||
|
||||
func AddCoverProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile {
|
||||
i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName })
|
||||
if i < len(profiles) && profiles[i].FileName == p.FileName {
|
||||
MergeCoverProfiles(profiles[i], p)
|
||||
} else {
|
||||
profiles = append(profiles, nil)
|
||||
copy(profiles[i+1:], profiles[i:])
|
||||
profiles[i] = p
|
||||
}
|
||||
return profiles
|
||||
}
|
||||
|
||||
func DumpCoverProfiles(profiles []*cover.Profile, out io.Writer) error {
|
||||
if len(profiles) == 0 {
|
||||
return nil
|
||||
}
|
||||
if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, p := range profiles {
|
||||
for _, b := range p.Blocks {
|
||||
if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func MergeCoverProfiles(into *cover.Profile, merge *cover.Profile) error {
|
||||
if into.Mode != merge.Mode {
|
||||
return fmt.Errorf("cannot merge profiles with different modes")
|
||||
}
|
||||
// Since the blocks are sorted, we can keep track of where the last block
|
||||
// was inserted and only look at the blocks after that as targets for merge
|
||||
startIndex := 0
|
||||
for _, b := range merge.Blocks {
|
||||
var err error
|
||||
startIndex, err = mergeProfileBlock(into, b, startIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) {
|
||||
sortFunc := func(i int) bool {
|
||||
pi := p.Blocks[i+startIndex]
|
||||
return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol)
|
||||
}
|
||||
|
||||
i := 0
|
||||
if sortFunc(i) != true {
|
||||
i = sort.Search(len(p.Blocks)-startIndex, sortFunc)
|
||||
}
|
||||
|
||||
i += startIndex
|
||||
if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol {
|
||||
if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol {
|
||||
return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb)
|
||||
}
|
||||
switch p.Mode {
|
||||
case "set":
|
||||
p.Blocks[i].Count |= pb.Count
|
||||
case "count", "atomic":
|
||||
p.Blocks[i].Count += pb.Count
|
||||
default:
|
||||
return i, fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode)
|
||||
}
|
||||
|
||||
} else {
|
||||
if i > 0 {
|
||||
pa := p.Blocks[i-1]
|
||||
if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) {
|
||||
return i, fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb)
|
||||
}
|
||||
}
|
||||
if i < len(p.Blocks)-1 {
|
||||
pa := p.Blocks[i+1]
|
||||
if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) {
|
||||
return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb)
|
||||
}
|
||||
}
|
||||
p.Blocks = append(p.Blocks, cover.ProfileBlock{})
|
||||
copy(p.Blocks[i+1:], p.Blocks[i:])
|
||||
p.Blocks[i] = pb
|
||||
}
|
||||
|
||||
return i + 1, nil
|
||||
}
|
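A rough sketch of how these helpers are meant to be driven (assuming code in the same internal package, since the ginkgo CLI's internal packages are not importable from outside; the file names are illustrative). It mirrors the rewritten MergeAndCleanupCoverProfiles further down in this commit:

func mergeProfiles(paths []string, destination string) error {
	var merged []*cover.Profile
	for _, file := range paths {
		parsed, err := cover.ParseProfiles(file) // golang.org/x/tools/cover
		if err != nil {
			return err
		}
		for _, p := range parsed {
			merged = AddCoverProfile(merged, p)
		}
	}
	dst, err := os.Create(destination)
	if err != nil {
		return err
	}
	defer dst.Close()
	return DumpCoverProfiles(merged, dst)
}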
46
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
generated
vendored
|
@ -1,7 +1,6 @@
|
|||
package internal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
|
@ -12,6 +11,7 @@ import (
|
|||
"github.com/google/pprof/profile"
|
||||
"github.com/onsi/ginkgo/v2/reporters"
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
"golang.org/x/tools/cover"
|
||||
)
|
||||
|
||||
func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string {
|
||||
|
@ -144,38 +144,27 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC
|
|||
return messages, nil
|
||||
}
|
||||
|
||||
//loads each profile, combines them, deletes them, stores them in destination
|
||||
// loads each profile, merges them, deletes them, stores them in destination
|
||||
func MergeAndCleanupCoverProfiles(profiles []string, destination string) error {
|
||||
combined := &bytes.Buffer{}
|
||||
modeRegex := regexp.MustCompile(`^mode: .*\n`)
|
||||
for i, profile := range profiles {
|
||||
contents, err := os.ReadFile(profile)
|
||||
var merged []*cover.Profile
|
||||
for _, file := range profiles {
|
||||
parsedProfiles, err := cover.ParseProfiles(file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error())
|
||||
return err
|
||||
}
|
||||
os.Remove(profile)
|
||||
|
||||
// remove the cover mode line from every file
|
||||
// except the first one
|
||||
if i > 0 {
|
||||
contents = modeRegex.ReplaceAll(contents, []byte{})
|
||||
}
|
||||
|
||||
_, err = combined.Write(contents)
|
||||
|
||||
// Add a newline to the end of every file if missing.
|
||||
if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' {
|
||||
_, err = combined.Write([]byte("\n"))
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error())
|
||||
os.Remove(file)
|
||||
for _, p := range parsedProfiles {
|
||||
merged = AddCoverProfile(merged, p)
|
||||
}
|
||||
}
|
||||
|
||||
err := os.WriteFile(destination, combined.Bytes(), 0666)
|
||||
dst, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error())
|
||||
return err
|
||||
}
|
||||
defer dst.Close()
|
||||
err = DumpCoverProfiles(merged, dst)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -184,7 +173,7 @@ func GetCoverageFromCoverProfile(profile string) (float64, error) {
|
|||
cmd := exec.Command("go", "tool", "cover", "-func", profile)
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("Could not process Coverprofile %s: %s", profile, err.Error())
|
||||
return 0, fmt.Errorf("Could not process Coverprofile %s: %s - %s", profile, err.Error(), string(output))
|
||||
}
|
||||
re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`)
|
||||
matches := re.FindStringSubmatch(string(output))
|
||||
|
@ -208,6 +197,7 @@ func MergeProfiles(profilePaths []string, destination string) error {
|
|||
return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error())
|
||||
}
|
||||
prof, err := profile.Parse(proFile)
|
||||
_ = proFile.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error())
|
||||
}
|
||||
|
|
9
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go
generated
vendored
|
@ -7,6 +7,7 @@ import (
|
|||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
|
@ -192,7 +193,7 @@ func precompiledTestSuite(path string) (TestSuite, error) {
|
|||
return TestSuite{}, errors.New("this is not a .test binary")
|
||||
}
|
||||
|
||||
if filepath.Ext(path) == ".test" && info.Mode()&0111 == 0 {
|
||||
if filepath.Ext(path) == ".test" && runtime.GOOS != "windows" && info.Mode()&0111 == 0 {
|
||||
return TestSuite{}, errors.New("this is not executable")
|
||||
}
|
||||
|
||||
|
@ -225,7 +226,7 @@ func suitesInDir(dir string, recurse bool) TestSuites {
|
|||
files, _ := os.ReadDir(dir)
|
||||
re := regexp.MustCompile(`^[^._].*_test\.go$`)
|
||||
for _, file := range files {
|
||||
if !file.IsDir() && re.Match([]byte(file.Name())) {
|
||||
if !file.IsDir() && re.MatchString(file.Name()) {
|
||||
suite := TestSuite{
|
||||
Path: relPath(dir),
|
||||
PackageName: packageNameForSuite(dir),
|
||||
|
@ -240,7 +241,7 @@ func suitesInDir(dir string, recurse bool) TestSuites {
|
|||
if recurse {
|
||||
re = regexp.MustCompile(`^[._]`)
|
||||
for _, file := range files {
|
||||
if file.IsDir() && !re.Match([]byte(file.Name())) {
|
||||
if file.IsDir() && !re.MatchString(file.Name()) {
|
||||
suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...)
|
||||
}
|
||||
}
|
||||
|
@ -271,7 +272,7 @@ func filesHaveGinkgoSuite(dir string, files []os.DirEntry) bool {
|
|||
reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`)
|
||||
|
||||
for _, file := range files {
|
||||
if !file.IsDir() && reTestFile.Match([]byte(file.Name())) {
|
||||
if !file.IsDir() && reTestFile.MatchString(file.Name()) {
|
||||
contents, _ := os.ReadFile(dir + "/" + file.Name())
|
||||
if reGinkgo.Match(contents) {
|
||||
return true
|
||||
|
|
7
vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go
generated
vendored
|
@ -1,10 +1,11 @@
|
|||
package outline
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"strconv"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -244,9 +245,7 @@ func labelFromCallExpr(ce *ast.CallExpr) []string {
|
|||
}
|
||||
if id.Name == "Label" {
|
||||
ls := extractLabels(expr)
|
||||
for _, label := range ls {
|
||||
labels = append(labels, label)
|
||||
}
|
||||
labels = append(labels, ls...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
9
vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go
generated
vendored
|
@ -28,14 +28,7 @@ func packageNameForImport(f *ast.File, path string) *string {
|
|||
}
|
||||
name := spec.Name.String()
|
||||
if name == "<nil>" {
|
||||
// If the package name is not explicitly specified,
|
||||
// make an educated guess. This is not guaranteed to be correct.
|
||||
lastSlash := strings.LastIndex(path, "/")
|
||||
if lastSlash == -1 {
|
||||
name = path
|
||||
} else {
|
||||
name = path[lastSlash+1:]
|
||||
}
|
||||
name = "ginkgo"
|
||||
}
|
||||
if name == "." {
|
||||
name = ""
|
||||
|
|
26
vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go
generated
vendored
|
@ -1,10 +1,13 @@
|
|||
package outline
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/csv"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/go/ast/inspector"
|
||||
|
@ -84,9 +87,11 @@ func (o *outline) String() string {
|
|||
// StringIndent returns a CSV-formated outline, but every line is indented by
|
||||
// one 'width' of spaces for every level of nesting.
|
||||
func (o *outline) StringIndent(width int) string {
|
||||
var b strings.Builder
|
||||
var b bytes.Buffer
|
||||
b.WriteString("Name,Text,Start,End,Spec,Focused,Pending,Labels\n")
|
||||
|
||||
csvWriter := csv.NewWriter(&b)
|
||||
|
||||
currentIndent := 0
|
||||
pre := func(n *ginkgoNode) {
|
||||
b.WriteString(fmt.Sprintf("%*s", currentIndent, ""))
|
||||
|
@ -96,8 +101,22 @@ func (o *outline) StringIndent(width int) string {
|
|||
} else {
|
||||
labels = strings.Join(n.Labels, ", ")
|
||||
}
|
||||
//enclosing labels in a double quoted comma separate listed so that when inmported into a CSV app the Labels column has comma separate strings
|
||||
b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t,\"%s\"\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending, labels))
|
||||
|
||||
row := []string{
|
||||
n.Name,
|
||||
n.Text,
|
||||
strconv.Itoa(n.Start),
|
||||
strconv.Itoa(n.End),
|
||||
strconv.FormatBool(n.Spec),
|
||||
strconv.FormatBool(n.Focused),
|
||||
strconv.FormatBool(n.Pending),
|
||||
labels,
|
||||
}
|
||||
csvWriter.Write(row)
|
||||
|
||||
// Ensure we write to `b' before the next `b.WriteString()', which might be adding indentation
|
||||
csvWriter.Flush()
|
||||
|
||||
currentIndent += width
|
||||
}
|
||||
post := func(n *ginkgoNode) {
|
||||
|
@ -106,5 +125,6 @@ func (o *outline) StringIndent(width int) string {
|
|||
for _, n := range o.Nodes {
|
||||
n.Walk(pre, post)
|
||||
}
|
||||
|
||||
return b.String()
|
||||
}
|
||||
|
|
2
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go
generated
vendored
|
@ -78,7 +78,7 @@ func (d Dependencies) resolveAndAdd(deps []string, depth int) {
|
|||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if !pkg.Goroot && (!ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) || ginkgoIntegrationTestFilter.Match([]byte(pkg.Dir))) {
|
||||
if !pkg.Goroot && (!ginkgoAndGomegaFilter.MatchString(pkg.Dir) || ginkgoIntegrationTestFilter.MatchString(pkg.Dir)) {
|
||||
d.addDepIfNotPresent(pkg.Dir, depth)
|
||||
}
|
||||
}
|
||||
|
|
13
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go
generated
vendored
|
@ -4,6 +4,7 @@ import (
|
|||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
|
@ -79,7 +80,11 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti
|
|||
continue
|
||||
}
|
||||
|
||||
if goTestRegExp.Match([]byte(info.Name())) {
|
||||
if isHiddenFile(info) {
|
||||
continue
|
||||
}
|
||||
|
||||
if goTestRegExp.MatchString(info.Name()) {
|
||||
testHash += p.hashForFileInfo(info)
|
||||
if info.ModTime().After(testModifiedTime) {
|
||||
testModifiedTime = info.ModTime()
|
||||
|
@ -87,7 +92,7 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti
|
|||
continue
|
||||
}
|
||||
|
||||
if p.watchRegExp.Match([]byte(info.Name())) {
|
||||
if p.watchRegExp.MatchString(info.Name()) {
|
||||
codeHash += p.hashForFileInfo(info)
|
||||
if info.ModTime().After(codeModifiedTime) {
|
||||
codeModifiedTime = info.ModTime()
|
||||
|
@ -103,6 +108,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti
|
|||
return
|
||||
}
|
||||
|
||||
func isHiddenFile(info os.FileInfo) bool {
|
||||
return strings.HasPrefix(info.Name(), ".") || strings.HasPrefix(info.Name(), "_")
|
||||
}
|
||||
|
||||
func (p *PackageHash) hashForFileInfo(info os.FileInfo) string {
|
||||
return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano())
|
||||
}
|
||||
|
|
61
vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
generated
vendored
|
@ -182,10 +182,31 @@ func (r *DefaultReporter) WillRun(report types.SpecReport) {
|
|||
r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false)))
|
||||
}
|
||||
|
||||
func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) {
|
||||
r.emitBlock("\n")
|
||||
if r.conf.GithubOutput {
|
||||
r.emitBlock(r.fi(1, "::group::%s", sectionName))
|
||||
} else {
|
||||
r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName))
|
||||
}
|
||||
fn()
|
||||
if r.conf.GithubOutput {
|
||||
r.emitBlock(r.fi(1, "::endgroup::"))
|
||||
} else {
|
||||
r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (r *DefaultReporter) DidRun(report types.SpecReport) {
|
||||
v := r.conf.Verbosity()
|
||||
inParallel := report.RunningInParallel
|
||||
|
||||
//should we completely omit this spec?
|
||||
if report.State.Is(types.SpecStateSkipped) && r.conf.SilenceSkips {
|
||||
return
|
||||
}
|
||||
|
||||
header := r.specDenoter
|
||||
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
|
||||
header = fmt.Sprintf("[%s]", report.LeafNodeType)
|
||||
|
@ -262,9 +283,12 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) {
|
|||
}
|
||||
}
|
||||
|
||||
// If we have no content to show, jsut emit the header and return
|
||||
// If we have no content to show, just emit the header and return
|
||||
if !reportHasContent {
|
||||
r.emit(r.f(highlightColor + header + "{{/}}"))
|
||||
if r.conf.ForceNewlines {
|
||||
r.emit("\n")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -283,26 +307,23 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) {
|
|||
|
||||
//Emit Stdout/Stderr Output
|
||||
if showSeparateStdSection {
|
||||
r.emitBlock("\n")
|
||||
r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}"))
|
||||
r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr))
|
||||
r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}"))
|
||||
r.wrapTextBlock("Captured StdOut/StdErr Output", func() {
|
||||
r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr))
|
||||
})
|
||||
}
|
||||
|
||||
if showSeparateVisibilityAlwaysReportsSection {
|
||||
r.emitBlock("\n")
|
||||
r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}"))
|
||||
for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) {
|
||||
r.emitReportEntry(1, entry)
|
||||
}
|
||||
r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}"))
|
||||
r.wrapTextBlock("Report Entries", func() {
|
||||
for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) {
|
||||
r.emitReportEntry(1, entry)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
if showTimeline {
|
||||
r.emitBlock("\n")
|
||||
r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}"))
|
||||
r.emitTimeline(1, report, timeline)
|
||||
r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}"))
|
||||
r.wrapTextBlock("Timeline", func() {
|
||||
r.emitTimeline(1, report, timeline)
|
||||
})
|
||||
}
|
||||
|
||||
// Emit Failure Message
|
||||
|
@ -405,7 +426,15 @@ func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, f
|
|||
func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) {
|
||||
highlightColor := r.highlightColorForState(state)
|
||||
r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message))
|
||||
r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
|
||||
if r.conf.GithubOutput {
|
||||
level := "error"
|
||||
if state.Is(types.SpecStateSkipped) {
|
||||
level = "notice"
|
||||
}
|
||||
r.emitBlock(r.fi(indent, "::%s file=%s,line=%d::%s %s", level, failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
|
||||
} else {
|
||||
r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
|
||||
}
|
||||
if failure.ForwardedPanic != "" {
|
||||
r.emitBlock("\n")
|
||||
r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic))
|
||||
|
|
19
vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go
generated
vendored
|
@ -4,16 +4,21 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
//GenerateJSONReport produces a JSON-formatted report at the passed in destination
|
||||
// GenerateJSONReport produces a JSON-formatted report at the passed in destination
|
||||
func GenerateJSONReport(report types.Report, destination string) error {
|
||||
if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
|
||||
return err
|
||||
}
|
||||
f, err := os.Create(destination)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
enc := json.NewEncoder(f)
|
||||
enc.SetIndent("", " ")
|
||||
err = enc.Encode([]types.Report{
|
||||
|
@ -22,11 +27,11 @@ func GenerateJSONReport(report types.Report, destination string) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return f.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
//MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources
|
||||
//It skips over reports that fail to decode but reports on them via the returned messages []string
|
||||
// MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources
|
||||
// It skips over reports that fail to decode but reports on them via the returned messages []string
|
||||
func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, error) {
|
||||
messages := []string{}
|
||||
allReports := []types.Report{}
|
||||
|
@ -46,15 +51,19 @@ func MergeAndCleanupJSONReports(sources []string, destination string) ([]string,
|
|||
allReports = append(allReports, reports...)
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
|
||||
return messages, err
|
||||
}
|
||||
f, err := os.Create(destination)
|
||||
if err != nil {
|
||||
return messages, err
|
||||
}
|
||||
defer f.Close()
|
||||
enc := json.NewEncoder(f)
|
||||
enc.SetIndent("", " ")
|
||||
err = enc.Encode(allReports)
|
||||
if err != nil {
|
||||
return messages, err
|
||||
}
|
||||
return messages, f.Close()
|
||||
return messages, nil
|
||||
}
|
||||
|
|
21
vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
generated
vendored
|
@ -14,6 +14,8 @@ import (
|
|||
"encoding/xml"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/config"
|
||||
|
@ -103,6 +105,8 @@ type JUnitProperty struct {
|
|||
Value string `xml:"value,attr"`
|
||||
}
|
||||
|
||||
var ownerRE = regexp.MustCompile(`(?i)^owner:(.*)$`)
|
||||
|
||||
type JUnitTestCase struct {
|
||||
// Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()"
|
||||
Name string `xml:"name,attr"`
|
||||
|
@ -112,6 +116,8 @@ type JUnitTestCase struct {
|
|||
Status string `xml:"status,attr"`
|
||||
// Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime
|
||||
Time float64 `xml:"time,attr"`
|
||||
// Owner is the owner the spec - is set if a label matching Label("owner:X") is provided. The last matching label is used as the owner, thereby allowing specs to override owners specified in container nodes.
|
||||
Owner string `xml:"owner,attr,omitempty"`
|
||||
//Skipped is populated with a message if the test was skipped or pending
|
||||
Skipped *JUnitSkipped `xml:"skipped,omitempty"`
|
||||
//Error is populated if the test panicked or was interrupted
|
||||
|
@ -171,6 +177,7 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit
|
|||
{"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")},
|
||||
{"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")},
|
||||
{"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)},
|
||||
{"FailOnEmpty", fmt.Sprintf("%t", report.SuiteConfig.FailOnEmpty)},
|
||||
{"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)},
|
||||
{"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)},
|
||||
{"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)},
|
||||
|
@ -194,6 +201,12 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit
|
|||
if len(labels) > 0 && !config.OmitSpecLabels {
|
||||
name = name + " [" + strings.Join(labels, ", ") + "]"
|
||||
}
|
||||
owner := ""
|
||||
for _, label := range labels {
|
||||
if matches := ownerRE.FindStringSubmatch(label); len(matches) == 2 {
|
||||
owner = matches[1]
|
||||
}
|
||||
}
|
||||
name = strings.TrimSpace(name)
|
||||
|
||||
test := JUnitTestCase{
|
||||
|
@ -201,6 +214,7 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit
|
|||
Classname: report.SuiteDescription,
|
||||
Status: spec.State.String(),
|
||||
Time: spec.RunTime.Seconds(),
|
||||
Owner: owner,
|
||||
}
|
||||
if !spec.State.Is(config.OmitTimelinesForSpecState) {
|
||||
test.SystemErr = systemErrForUnstructuredReporters(spec)
|
||||
|
@ -285,6 +299,9 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit
|
|||
TestSuites: []JUnitTestSuite{suite},
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(path.Dir(dst), 0770); err != nil {
|
||||
return err
|
||||
}
|
||||
f, err := os.Create(dst)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -308,6 +325,7 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error)
|
|||
continue
|
||||
}
|
||||
err = xml.NewDecoder(f).Decode(&report)
|
||||
_ = f.Close()
|
||||
if err != nil {
|
||||
messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error()))
|
||||
continue
|
||||
|
@ -322,6 +340,9 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error)
|
|||
mergedReport.TestSuites = append(mergedReport.TestSuites, report.TestSuites...)
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(path.Dir(dst), 0770); err != nil {
|
||||
return messages, err
|
||||
}
|
||||
f, err := os.Create(dst)
|
||||
if err != nil {
|
||||
return messages, err
|
||||
|
|
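A sketch of how the new owner attribute is populated (the spec text and team names are invented; assumes the usual dot-import of ginkgo). Per the comment above, the last matching "owner:" label wins, so a spec can override the owner set on its container:

var _ = Describe("checkout", Label("owner:payments"), func() {
	It("charges the card", func() {
		// reported with owner="payments", inherited from the container
	})
	It("emails a receipt", Label("owner:notifications"), func() {
		// reported with owner="notifications": the spec-level label overrides the container's
	})
})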
4
vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go
generated
vendored
|
@ -11,6 +11,7 @@ package reporters
|
|||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
|
@ -27,6 +28,9 @@ func tcEscape(s string) string {
|
|||
}
|
||||
|
||||
func GenerateTeamcityReport(report types.Report, dst string) error {
|
||||
if err := os.MkdirAll(path.Dir(dst), 0770); err != nil {
|
||||
return err
|
||||
}
|
||||
f, err := os.Create(dst)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
2
vendor/github.com/onsi/ginkgo/v2/types/code_location.go
generated
vendored
@ -149,7 +149,7 @@ func PruneStack(fullStackTrace string, skip int) string {
	re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
	for i := 0; i < len(stack)/2; i++ {
		// We filter out based on the source code file name.
		if !re.Match([]byte(stack[i*2+1])) {
		if !re.MatchString(stack[i*2+1]) {
			prunedStack = append(prunedStack, stack[i*2])
			prunedStack = append(prunedStack, stack[i*2+1])
		}
29
vendor/github.com/onsi/ginkgo/v2/types/config.go
generated
vendored
|
@ -25,8 +25,10 @@ type SuiteConfig struct {
|
|||
SkipFiles []string
|
||||
LabelFilter string
|
||||
FailOnPending bool
|
||||
FailOnEmpty bool
|
||||
FailFast bool
|
||||
FlakeAttempts int
|
||||
MustPassRepeatedly int
|
||||
DryRun bool
|
||||
PollProgressAfter time.Duration
|
||||
PollProgressInterval time.Duration
|
||||
|
@ -88,6 +90,9 @@ type ReporterConfig struct {
|
|||
VeryVerbose bool
|
||||
FullTrace bool
|
||||
ShowNodeEvents bool
|
||||
GithubOutput bool
|
||||
SilenceSkips bool
|
||||
ForceNewlines bool
|
||||
|
||||
JSONReport string
|
||||
JUnitReport string
|
||||
|
@ -197,6 +202,7 @@ type GoFlagsConfig struct {
|
|||
A bool
|
||||
ASMFlags string
|
||||
BuildMode string
|
||||
BuildVCS bool
|
||||
Compiler string
|
||||
GCCGoFlags string
|
||||
GCFlags string
|
||||
|
@ -214,6 +220,7 @@ type GoFlagsConfig struct {
|
|||
ToolExec string
|
||||
Work bool
|
||||
X bool
|
||||
O string
|
||||
}
|
||||
|
||||
func NewDefaultGoFlagsConfig() GoFlagsConfig {
|
||||
|
@ -263,7 +270,7 @@ var FlagSections = GinkgoFlagSections{
|
|||
// SuiteConfigFlags provides flags for the Ginkgo test process, and CLI
|
||||
var SuiteConfigFlags = GinkgoFlags{
|
||||
{KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo",
|
||||
Usage: "The seed used to randomize the spec suite."},
|
||||
Usage: "The seed used to randomize the spec suite.", AlwaysExport: true},
|
||||
{KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags",
|
||||
Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."},
|
||||
|
||||
|
@ -273,6 +280,8 @@ var SuiteConfigFlags = GinkgoFlags{
|
|||
Usage: "If set, ginkgo will stop running a test suite after a failure occurs."},
|
||||
{KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags",
|
||||
Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."},
|
||||
{KeyPath: "S.FailOnEmpty", Name: "fail-on-empty", SectionKey: "failure",
|
||||
Usage: "If set, ginkgo will mark the test suite as failed if no specs are run."},
|
||||
|
||||
{KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags",
|
||||
Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."},
|
||||
|
@ -319,7 +328,7 @@ var ParallelConfigFlags = GinkgoFlags{
|
|||
// ReporterConfigFlags provides flags for the Ginkgo test process, and CLI
|
||||
var ReporterConfigFlags = GinkgoFlags{
|
||||
{KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags",
|
||||
Usage: "If set, suppress color output in default reporter."},
|
||||
Usage: "If set, suppress color output in default reporter. You can also set the environment variable GINKGO_NO_COLOR=TRUE"},
|
||||
{KeyPath: "R.Verbose", Name: "v", SectionKey: "output",
|
||||
Usage: "If set, emits more output including GinkgoWriter contents."},
|
||||
{KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output",
|
||||
|
@ -330,6 +339,12 @@ var ReporterConfigFlags = GinkgoFlags{
|
|||
Usage: "If set, default reporter prints out the full stack trace when a failure occurs"},
|
||||
{KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output",
|
||||
Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"},
|
||||
{KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output",
|
||||
Usage: "If set, default reporter prints easier to manage output in Github Actions."},
|
||||
{KeyPath: "R.SilenceSkips", Name: "silence-skips", SectionKey: "output",
|
||||
Usage: "If set, default reporter will not print out skipped tests."},
|
||||
{KeyPath: "R.ForceNewlines", Name: "force-newlines", SectionKey: "output",
|
||||
Usage: "If set, default reporter will ensure a newline appears after each test."},
|
||||
|
||||
{KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output",
|
||||
Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."},
|
||||
|
@ -498,7 +513,7 @@ var GinkgoCLIWatchFlags = GinkgoFlags{
|
|||
// GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI
|
||||
var GoBuildFlags = GinkgoFlags{
|
||||
{KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis",
|
||||
Usage: "enable data race detection. Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, linux/ppc64le and linux/arm64 (only for 48-bit VMA)."},
|
||||
Usage: "enable data race detection. Supported on linux/amd64, linux/ppc64le, linux/arm64, linux/s390x, freebsd/amd64, netbsd/amd64, darwin/amd64, darwin/arm64, and windows/amd64."},
|
||||
{KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis",
|
||||
Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`},
|
||||
{KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis",
|
||||
|
@ -514,6 +529,8 @@ var GoBuildFlags = GinkgoFlags{
|
|||
Usage: "arguments to pass on each go tool asm invocation."},
|
||||
{KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build",
|
||||
Usage: "build mode to use. See 'go help buildmode' for more."},
|
||||
{KeyPath: "Go.BuildVCS", Name: "buildvcs", SectionKey: "go-build",
|
||||
Usage: "adds version control information."},
|
||||
{KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build",
|
||||
Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."},
|
||||
{KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
|
||||
|
@ -548,6 +565,8 @@ var GoBuildFlags = GinkgoFlags{
|
|||
Usage: "print the name of the temporary work directory and do not delete it when exiting."},
|
||||
{KeyPath: "Go.X", Name: "x", SectionKey: "go-build",
|
||||
Usage: "print the commands."},
|
||||
{KeyPath: "Go.O", Name: "o", SectionKey: "go-build",
|
||||
Usage: "output binary path (including name)."},
|
||||
}
|
||||
|
||||
// GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI
|
||||
|
@ -601,7 +620,7 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo
|
|||
}
|
||||
|
||||
// GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test
|
||||
func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string, pathToInvocationPath string) ([]string, error) {
|
||||
func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild string, pathToInvocationPath string) ([]string, error) {
|
||||
// if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure
|
||||
// the built test binary can generate a coverprofile
|
||||
if goFlagsConfig.CoverProfile != "" {
|
||||
|
@ -624,7 +643,7 @@ func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string,
|
|||
goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",")
|
||||
}
|
||||
|
||||
args := []string{"test", "-c", "-o", destination, packageToBuild}
|
||||
args := []string{"test", "-c", packageToBuild}
|
||||
goArgs, err := GenerateFlagArgs(
|
||||
GoBuildFlags,
|
||||
map[string]interface{}{
|
||||
|
|
13
vendor/github.com/onsi/ginkgo/v2/types/errors.go
generated
vendored
|
@ -453,8 +453,8 @@ func (g ginkgoErrors) InvalidEntryDescription(cl CodeLocation) error {
|
|||
|
||||
func (g ginkgoErrors) MissingParametersForTableFunction(cl CodeLocation) error {
|
||||
return GinkgoError{
|
||||
Heading: fmt.Sprintf("No parameters have been passed to the Table Function"),
|
||||
Message: fmt.Sprintf("The Table Function expected at least 1 parameter"),
|
||||
Heading: "No parameters have been passed to the Table Function",
|
||||
Message: "The Table Function expected at least 1 parameter",
|
||||
CodeLocation: cl,
|
||||
DocLink: "table-specs",
|
||||
}
|
||||
|
@ -505,6 +505,15 @@ func (g ginkgoErrors) IncorrectVariadicParameterTypeToTableFunction(expected, ac
|
|||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) ContextsCannotBeUsedInSubtreeTables(cl CodeLocation) error {
|
||||
return GinkgoError{
|
||||
Heading: "Contexts cannot be used in subtree tables",
|
||||
Message: "You''ve defined a subtree body function that accepts a context but did not provide one in the table entry. Ginkgo SpecContexts can only be passed in to subject and setup nodes - so if you are trying to implement a spec timeout you should request a context in the It function within your subtree body function, not in the subtree body function itself.",
|
||||
CodeLocation: cl,
|
||||
DocLink: "table-specs",
|
||||
}
|
||||
}
|
||||
|
||||
/* Parallel Synchronization errors */
|
||||
|
||||
func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error {
|
||||
|
|
15
vendor/github.com/onsi/ginkgo/v2/types/flags.go
generated
vendored
|
@ -24,7 +24,8 @@ type GinkgoFlag struct {
|
|||
DeprecatedDocLink string
|
||||
DeprecatedVersion string
|
||||
|
||||
ExportAs string
|
||||
ExportAs string
|
||||
AlwaysExport bool
|
||||
}
|
||||
|
||||
type GinkgoFlags []GinkgoFlag
|
||||
|
@ -431,7 +432,7 @@ func (ssv stringSliceVar) Set(s string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured.
|
||||
// given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured.
|
||||
func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) {
|
||||
result := []string{}
|
||||
for _, flag := range flags {
|
||||
|
@ -451,19 +452,19 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error)
|
|||
iface := value.Interface()
|
||||
switch value.Type() {
|
||||
case reflect.TypeOf(string("")):
|
||||
if iface.(string) != "" {
|
||||
if iface.(string) != "" || flag.AlwaysExport {
|
||||
result = append(result, fmt.Sprintf("--%s=%s", name, iface))
|
||||
}
|
||||
case reflect.TypeOf(int64(0)):
|
||||
if iface.(int64) != 0 {
|
||||
if iface.(int64) != 0 || flag.AlwaysExport {
|
||||
result = append(result, fmt.Sprintf("--%s=%d", name, iface))
|
||||
}
|
||||
case reflect.TypeOf(float64(0)):
|
||||
if iface.(float64) != 0 {
|
||||
if iface.(float64) != 0 || flag.AlwaysExport {
|
||||
result = append(result, fmt.Sprintf("--%s=%f", name, iface))
|
||||
}
|
||||
case reflect.TypeOf(int(0)):
|
||||
if iface.(int) != 0 {
|
||||
if iface.(int) != 0 || flag.AlwaysExport {
|
||||
result = append(result, fmt.Sprintf("--%s=%d", name, iface))
|
||||
}
|
||||
case reflect.TypeOf(bool(true)):
|
||||
|
@ -471,7 +472,7 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error)
|
|||
result = append(result, fmt.Sprintf("--%s", name))
|
||||
}
|
||||
case reflect.TypeOf(time.Duration(0)):
|
||||
if iface.(time.Duration) != time.Duration(0) {
|
||||
if iface.(time.Duration) != time.Duration(0) || flag.AlwaysExport {
|
||||
result = append(result, fmt.Sprintf("--%s=%s", name, iface))
|
||||
}
|
||||
|
||||
|
|
229
vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
generated
vendored
|
@ -45,6 +45,83 @@ func orAction(a, b LabelFilter) LabelFilter {
|
|||
return func(labels []string) bool { return a(labels) || b(labels) }
|
||||
}
|
||||
|
||||
func labelSetFor(key string, labels []string) map[string]bool {
|
||||
key = strings.ToLower(strings.TrimSpace(key))
|
||||
out := map[string]bool{}
|
||||
for _, label := range labels {
|
||||
components := strings.SplitN(label, ":", 2)
|
||||
if len(components) < 2 {
|
||||
continue
|
||||
}
|
||||
if key == strings.ToLower(strings.TrimSpace(components[0])) {
|
||||
out[strings.ToLower(strings.TrimSpace(components[1]))] = true
|
||||
}
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
func isEmptyLabelSetAction(key string) LabelFilter {
|
||||
return func(labels []string) bool {
|
||||
return len(labelSetFor(key, labels)) == 0
|
||||
}
|
||||
}
|
||||
|
||||
func containsAnyLabelSetAction(key string, expectedValues []string) LabelFilter {
|
||||
return func(labels []string) bool {
|
||||
set := labelSetFor(key, labels)
|
||||
for _, value := range expectedValues {
|
||||
if set[value] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func containsAllLabelSetAction(key string, expectedValues []string) LabelFilter {
|
||||
return func(labels []string) bool {
|
||||
set := labelSetFor(key, labels)
|
||||
for _, value := range expectedValues {
|
||||
if !set[value] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func consistsOfLabelSetAction(key string, expectedValues []string) LabelFilter {
|
||||
return func(labels []string) bool {
|
||||
set := labelSetFor(key, labels)
|
||||
if len(set) != len(expectedValues) {
|
||||
return false
|
||||
}
|
||||
for _, value := range expectedValues {
|
||||
if !set[value] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func isSubsetOfLabelSetAction(key string, expectedValues []string) LabelFilter {
|
||||
expectedSet := map[string]bool{}
|
||||
for _, value := range expectedValues {
|
||||
expectedSet[value] = true
|
||||
}
|
||||
return func(labels []string) bool {
|
||||
set := labelSetFor(key, labels)
|
||||
for value := range set {
|
||||
if !expectedSet[value] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
type lfToken uint
|
||||
|
||||
const (
|
||||
|
@ -58,6 +135,9 @@ const (
|
|||
lfTokenOr
|
||||
lfTokenRegexp
|
||||
lfTokenLabel
|
||||
lfTokenSetKey
|
||||
lfTokenSetOperation
|
||||
lfTokenSetArgument
|
||||
lfTokenEOF
|
||||
)
|
||||
|
||||
|
@ -71,6 +151,8 @@ func (l lfToken) Precedence() int {
|
|||
return 2
|
||||
case lfTokenNot:
|
||||
return 3
|
||||
case lfTokenSetOperation:
|
||||
return 4
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
@ -93,6 +175,12 @@ func (l lfToken) String() string {
|
|||
return "/regexp/"
|
||||
case lfTokenLabel:
|
||||
return "label"
|
||||
case lfTokenSetKey:
|
||||
return "set_key"
|
||||
case lfTokenSetOperation:
|
||||
return "set_operation"
|
||||
case lfTokenSetArgument:
|
||||
return "set_argument"
|
||||
case lfTokenEOF:
|
||||
return "EOF"
|
||||
}
|
||||
|
@ -148,6 +236,35 @@ func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) {
|
|||
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err))
|
||||
}
|
||||
return matchLabelRegexAction(re), nil
|
||||
case lfTokenSetOperation:
|
||||
tokenSetOperation := strings.ToLower(tn.value)
|
||||
if tokenSetOperation == "isempty" {
|
||||
return isEmptyLabelSetAction(tn.leftNode.value), nil
|
||||
}
|
||||
if tn.rightNode == nil {
|
||||
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Set operation '%s' is missing an argument.", tn.value))
|
||||
}
|
||||
|
||||
rawValues := strings.Split(tn.rightNode.value, ",")
|
||||
values := make([]string, len(rawValues))
|
||||
for i := range rawValues {
|
||||
values[i] = strings.ToLower(strings.TrimSpace(rawValues[i]))
|
||||
if strings.ContainsAny(values[i], "&|!,()/") {
|
||||
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, fmt.Sprintf("Invalid label value '%s' in set operation argument.", values[i]))
|
||||
} else if values[i] == "" {
|
||||
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, "Empty label value in set operation argument.")
|
||||
}
|
||||
}
|
||||
switch tokenSetOperation {
|
||||
case "containsany":
|
||||
return containsAnyLabelSetAction(tn.leftNode.value, values), nil
|
||||
case "containsall":
|
||||
return containsAllLabelSetAction(tn.leftNode.value, values), nil
|
||||
case "consistsof":
|
||||
return consistsOfLabelSetAction(tn.leftNode.value, values), nil
|
||||
case "issubsetof":
|
||||
return isSubsetOfLabelSetAction(tn.leftNode.value, values), nil
|
||||
}
|
||||
}
|
||||
|
||||
if tn.rightNode == nil {
|
||||
|
@ -203,7 +320,17 @@ func (tn *treeNode) toString(indent int) string {
|
|||
return out
|
||||
}
|
||||
|
||||
var validSetOperations = map[string]string{
|
||||
"containsany": "containsAny",
|
||||
"containsall": "containsAll",
|
||||
"consistsof": "consistsOf",
|
||||
"issubsetof": "isSubsetOf",
|
||||
"isempty": "isEmpty",
|
||||
}
|
||||
|
||||
func tokenize(input string) func() (*treeNode, error) {
|
||||
lastToken := lfTokenInvalid
|
||||
lastValue := ""
|
||||
runes, i := []rune(input), 0
|
||||
|
||||
peekIs := func(r rune) bool {
|
||||
|
@ -233,6 +360,53 @@ func tokenize(input string) func() (*treeNode, error) {
|
|||
}
|
||||
|
||||
node := &treeNode{location: i}
|
||||
defer func() {
|
||||
lastToken = node.token
|
||||
lastValue = node.value
|
||||
}()
|
||||
|
||||
if lastToken == lfTokenSetKey {
|
||||
//we should get a valid set operation next
|
||||
value, n := consumeUntil(" )")
|
||||
if validSetOperations[strings.ToLower(value)] == "" {
|
||||
return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, fmt.Sprintf("Invalid set operation '%s'.", value))
|
||||
}
|
||||
i += n
|
||||
node.token, node.value = lfTokenSetOperation, value
|
||||
return node, nil
|
||||
}
|
||||
if lastToken == lfTokenSetOperation {
|
||||
//we should get an argument next, if we aren't isempty
|
||||
var arg = ""
|
||||
origI := i
|
||||
if runes[i] == '{' {
|
||||
i += 1
|
||||
value, n := consumeUntil("}")
|
||||
if i+n >= len(runes) {
|
||||
return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-1, "Missing closing '}' in set operation argument?")
|
||||
}
|
||||
i += n + 1
|
||||
arg = value
|
||||
} else {
|
||||
value, n := consumeUntil("&|!,()/")
|
||||
i += n
|
||||
arg = strings.TrimSpace(value)
|
||||
}
|
||||
if strings.ToLower(lastValue) == "isempty" && arg != "" {
|
||||
return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("isEmpty does not take arguments, was passed '%s'.", arg))
|
||||
}
|
||||
if arg == "" && strings.ToLower(lastValue) != "isempty" {
|
||||
if i < len(runes) && runes[i] == '/' {
|
||||
return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, "Set operations do not support regular expressions.")
|
||||
} else {
|
||||
return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("Set operation '%s' requires an argument.", lastValue))
|
||||
}
|
||||
}
|
||||
// note that we sent an empty SetArgument token if we are isempty
|
||||
node.token, node.value = lfTokenSetArgument, arg
|
||||
return node, nil
|
||||
}
|
||||
|
||||
switch runes[i] {
|
||||
case '&':
|
||||
if !peekIs('&') {
|
||||
|
@ -264,8 +438,38 @@ func tokenize(input string) func() (*treeNode, error) {
|
|||
i += n + 1
|
||||
node.token, node.value = lfTokenRegexp, value
|
||||
default:
|
||||
value, n := consumeUntil("&|!,()/")
|
||||
value, n := consumeUntil("&|!,()/:")
|
||||
i += n
|
||||
value = strings.TrimSpace(value)
|
||||
|
||||
//are we the beginning of a set operation?
|
||||
if i < len(runes) && runes[i] == ':' {
|
||||
if peekIs(' ') {
|
||||
if value == "" {
|
||||
return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set key.")
|
||||
}
|
||||
i += 1
|
||||
//we are the beginning of a set operation
|
||||
node.token, node.value = lfTokenSetKey, value
|
||||
return node, nil
|
||||
}
|
||||
additionalValue, n := consumeUntil("&|!,()/")
|
||||
additionalValue = strings.TrimSpace(additionalValue)
|
||||
if additionalValue == ":" {
|
||||
return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set operation.")
|
||||
}
|
||||
i += n
|
||||
value += additionalValue
|
||||
}
|
||||
|
||||
valueToCheckForSetOperation := strings.ToLower(value)
|
||||
for setOperation := range validSetOperations {
|
||||
idx := strings.Index(valueToCheckForSetOperation, " "+setOperation)
|
||||
if idx > 0 {
|
||||
return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-n+idx+1, fmt.Sprintf("Looks like you are using the set operator '%s' but did not provide a set key. Did you forget the ':'?", validSetOperations[setOperation]))
|
||||
}
|
||||
}
|
||||
|
||||
node.token, node.value = lfTokenLabel, strings.TrimSpace(value)
|
||||
}
|
||||
return node, nil
|
||||
|
@ -307,7 +511,7 @@ LOOP:
|
|||
switch node.token {
|
||||
case lfTokenEOF:
|
||||
break LOOP
|
||||
case lfTokenLabel, lfTokenRegexp:
|
||||
case lfTokenLabel, lfTokenRegexp, lfTokenSetKey:
|
||||
if current.rightNode != nil {
|
||||
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. You need an operator between them.")
|
||||
}
|
||||
|
@ -326,6 +530,18 @@ LOOP:
|
|||
node.setLeftNode(nodeToStealFrom.rightNode)
|
||||
nodeToStealFrom.setRightNode(node)
|
||||
current = node
|
||||
case lfTokenSetOperation:
|
||||
if current.rightNode == nil {
|
||||
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Set operation '%s' missing left hand operand.", node.value))
|
||||
}
|
||||
node.setLeftNode(current.rightNode)
|
||||
current.setRightNode(node)
|
||||
current = node
|
||||
case lfTokenSetArgument:
|
||||
if current.rightNode != nil {
|
||||
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unexpected set argument '%s'.", node.token))
|
||||
}
|
||||
current.setRightNode(node)
|
||||
case lfTokenCloseGroup:
|
||||
firstUnmatchedOpenNode := current.firstUnmatchedOpenNode()
|
||||
if firstUnmatchedOpenNode == nil {
|
||||
|
@ -354,5 +570,14 @@ func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) {
|
|||
if strings.ContainsAny(out, "&|!,()/") {
|
||||
return "", GinkgoErrors.InvalidLabel(label, cl)
|
||||
}
|
||||
if out[0] == ':' {
|
||||
return "", GinkgoErrors.InvalidLabel(label, cl)
|
||||
}
|
||||
if strings.Contains(out, ":") {
|
||||
components := strings.SplitN(out, ":", 2)
|
||||
if len(components) < 2 || components[1] == "" {
|
||||
return "", GinkgoErrors.InvalidLabel(label, cl)
|
||||
}
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
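For context on the label-filter parser changes above: this Ginkgo version accepts "key: value" labels and set operations over a key. A minimal usage sketch, illustrative only and not part of the vendored change; the operation names (containsAny, isEmpty) follow the parser above and the Ginkgo docs, and the spec/label names are made up:

package checkout_test

import (
	. "github.com/onsi/ginkgo/v2"
)

// Specs can carry "key: value" labels; the filter below applies a set
// operation to the "owner" key.
var _ = Describe("checkout", Label("owner: billing", "region: eu"), func() {
	It("charges the card", func() {})
})

// Example filters using the set-operation syntax tokenized above:
//   ginkgo --label-filter='owner: containsAny {billing, payments}'
//   ginkgo --label-filter='owner: isEmpty'   // specs with no "owner:" label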
|
14 vendor/github.com/onsi/ginkgo/v2/types/types.go (generated, vendored)
|
@ -3,13 +3,21 @@ package types
|
|||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const GINKGO_FOCUS_EXIT_CODE = 197
|
||||
const GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999"
|
||||
|
||||
var GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999"
|
||||
|
||||
func init() {
|
||||
if os.Getenv("GINKGO_TIME_FORMAT") != "" {
|
||||
GINKGO_TIME_FORMAT = os.Getenv("GINKGO_TIME_FORMAT")
|
||||
}
|
||||
}
|
||||
|
||||
// Report captures information about a Ginkgo test run
|
||||
type Report struct {
|
||||
|
@ -97,9 +105,7 @@ func (report Report) Add(other Report) Report {
|
|||
report.RunTime = report.EndTime.Sub(report.StartTime)
|
||||
|
||||
reports := make(SpecReports, len(report.SpecReports)+len(other.SpecReports))
|
||||
for i := range report.SpecReports {
|
||||
reports[i] = report.SpecReports[i]
|
||||
}
|
||||
copy(reports, report.SpecReports)
|
||||
offset := len(report.SpecReports)
|
||||
for i := range other.SpecReports {
|
||||
reports[i+offset] = other.SpecReports[i]
|
||||
|
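The types.go change above turns GINKGO_TIME_FORMAT into a variable that an environment variable of the same name overrides at package init. A minimal sketch of driving that from a wrapper process (the layout string is just an example):

package main

import (
	"os"
	"os/exec"
)

func main() {
	// The init() above reads the variable once at package load, so it must be
	// set in the environment of the test process, not after the suite starts.
	cmd := exec.Command("go", "test", "./...")
	cmd.Env = append(os.Environ(), "GINKGO_TIME_FORMAT=2006-01-02T15:04:05")
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	_ = cmd.Run()
}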
|
2 vendor/github.com/onsi/ginkgo/v2/types/version.go (generated, vendored)
|
@ -1,3 +1,3 @@
|
|||
package types
|
||||
|
||||
const VERSION = "2.9.5"
|
||||
const VERSION = "2.22.2"
|
||||
|
|
41 vendor/go.uber.org/mock/mockgen/deprecated.go (generated, vendored, new file)
|
@ -0,0 +1,41 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
const (
|
||||
deprecatedFlagProgOnly = "prog_only"
|
||||
deprecatedFlagExecOnly = "exec_only"
|
||||
)
|
||||
|
||||
var (
|
||||
_ = flag.Bool("prog_only", false, "DEPRECATED (reflect mode) Only generate the reflection program; write it to stdout and exit.")
|
||||
_ = flag.String("exec_only", "", "DEPRECATED (reflect mode) If set, execute this reflection program.")
|
||||
)
|
||||
|
||||
// notifyAboutDeprecatedFlags prints a warning message for a deprecated flags if they are set.
|
||||
func notifyAboutDeprecatedFlags() {
|
||||
const resetColorPostfix = "\033[0m"
|
||||
logger := initWarningLogger()
|
||||
|
||||
flag.Visit(func(f *flag.Flag) {
|
||||
switch f.Name {
|
||||
case deprecatedFlagProgOnly:
|
||||
logger.Println("The -prog_only flag is deprecated and has no effect.", resetColorPostfix)
|
||||
case deprecatedFlagExecOnly:
|
||||
logger.Println("The -exec_only flag is deprecated and has no effect.", resetColorPostfix)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func initWarningLogger() *log.Logger {
|
||||
const (
|
||||
yellowColor = "\033[33m"
|
||||
warningPrefix = yellowColor + "WARNING: "
|
||||
)
|
||||
|
||||
return log.New(os.Stdout, warningPrefix, log.Ldate|log.Ltime)
|
||||
}
|
|
@ -5,9 +5,6 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
|
@ -15,7 +12,6 @@ import (
|
|||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"strings"
|
||||
|
||||
"go.uber.org/mock/mockgen/model"
|
||||
)
|
||||
|
@ -67,29 +63,6 @@ func (p *fileParser) parseGenericType(pkg string, typ ast.Expr, tps map[string]m
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
func getIdentTypeParams(decl any) string {
|
||||
if decl == nil {
|
||||
return ""
|
||||
}
|
||||
ts, ok := decl.(*ast.TypeSpec)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
if ts.TypeParams == nil || len(ts.TypeParams.List) == 0 {
|
||||
return ""
|
||||
}
|
||||
var sb strings.Builder
|
||||
sb.WriteString("[")
|
||||
for i, v := range ts.TypeParams.List {
|
||||
if i != 0 {
|
||||
sb.WriteString(", ")
|
||||
}
|
||||
sb.WriteString(v.Names[0].Name)
|
||||
}
|
||||
sb.WriteString("]")
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
func (p *fileParser) parseGenericMethod(field *ast.Field, it *namedInterface, iface *model.Interface, pkg string, tps map[string]model.Type) ([]*model.Method, error) {
|
||||
var indices []ast.Expr
|
||||
var typ ast.Expr
|
41 vendor/go.uber.org/mock/mockgen/generic_notgo118.go (generated, vendored)
|
@ -1,41 +0,0 @@
|
|||
// Copyright 2022 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build !go1.18
|
||||
// +build !go1.18
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
|
||||
"go.uber.org/mock/mockgen/model"
|
||||
)
|
||||
|
||||
func getTypeSpecTypeParams(ts *ast.TypeSpec) []*ast.Field {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *fileParser) parseGenericType(pkg string, typ ast.Expr, tps map[string]model.Type) (model.Type, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func getIdentTypeParams(decl any) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (p *fileParser) parseGenericMethod(field *ast.Field, it *namedInterface, iface *model.Interface, pkg string, tps map[string]model.Type) ([]*model.Method, error) {
|
||||
return nil, fmt.Errorf("don't know how to mock method of type %T", field.Type)
|
||||
}
|
21 vendor/go.uber.org/mock/mockgen/gob.go (generated, vendored, new file)
|
@ -0,0 +1,21 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/gob"
|
||||
"os"
|
||||
|
||||
"go.uber.org/mock/mockgen/model"
|
||||
)
|
||||
|
||||
func gobMode(path string) (*model.Package, error) {
|
||||
in, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer in.Close()
|
||||
var pkg model.Package
|
||||
if err := gob.NewDecoder(in).Decode(&pkg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &pkg, nil
|
||||
}
|
75 vendor/go.uber.org/mock/mockgen/mockgen.go (generated, vendored)
|
@ -59,14 +59,17 @@ var (
|
|||
mockNames = flag.String("mock_names", "", "Comma-separated interfaceName=mockName pairs of explicit mock names to use. Mock names default to 'Mock'+ interfaceName suffix.")
|
||||
packageOut = flag.String("package", "", "Package of the generated code; defaults to the package of the input with a 'mock_' prefix.")
|
||||
selfPackage = flag.String("self_package", "", "The full package import path for the generated code. The purpose of this flag is to prevent import cycles in the generated code by trying to include its own package. This can happen if the mock's package is set to one of its inputs (usually the main one) and the output is stdio so mockgen cannot detect the final output package. Setting this flag will then tell mockgen which import to exclude.")
|
||||
writeCmdComment = flag.Bool("write_command_comment", true, "Writes the command used as a comment if true.")
|
||||
writePkgComment = flag.Bool("write_package_comment", true, "Writes package documentation comment (godoc) if true.")
|
||||
writeSourceComment = flag.Bool("write_source_comment", true, "Writes original file (source mode) or interface names (reflect mode) comment if true.")
|
||||
writeSourceComment = flag.Bool("write_source_comment", true, "Writes original file (source mode) or interface names (package mode) comment if true.")
|
||||
writeGenerateDirective = flag.Bool("write_generate_directive", false, "Add //go:generate directive to regenerate the mock")
|
||||
copyrightFile = flag.String("copyright_file", "", "Copyright file used to add copyright header")
|
||||
buildConstraint = flag.String("build_constraint", "", "If non-empty, added as //go:build <constraint>")
|
||||
typed = flag.Bool("typed", false, "Generate Type-safe 'Return', 'Do', 'DoAndReturn' function")
|
||||
imports = flag.String("imports", "", "(source mode) Comma-separated name=path pairs of explicit imports to use.")
|
||||
auxFiles = flag.String("aux_files", "", "(source mode) Comma-separated pkg=path pairs of auxiliary Go source files.")
|
||||
excludeInterfaces = flag.String("exclude_interfaces", "", "Comma-separated names of interfaces to be excluded")
|
||||
excludeInterfaces = flag.String("exclude_interfaces", "", "(source mode) Comma-separated names of interfaces to be excluded")
|
||||
modelGob = flag.String("model_gob", "", "Skip package/source loading entirely and use the gob encoded model.Package at the given path")
|
||||
|
||||
debugParser = flag.Bool("debug_parser", false, "Print out parser results only.")
|
||||
showVersion = flag.Bool("version", false, "Print version.")
|
||||
|
@ -76,6 +79,8 @@ func main() {
|
|||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
|
||||
notifyAboutDeprecatedFlags()
|
||||
|
||||
if *showVersion {
|
||||
printVersion()
|
||||
return
|
||||
|
@ -84,7 +89,9 @@ func main() {
|
|||
var pkg *model.Package
|
||||
var err error
|
||||
var packageName string
|
||||
if *source != "" {
|
||||
if *modelGob != "" {
|
||||
pkg, err = gobMode(*modelGob)
|
||||
} else if *source != "" {
|
||||
pkg, err = sourceMode(*source)
|
||||
} else {
|
||||
if flag.NArg() != 2 {
|
||||
|
@ -103,7 +110,8 @@ func main() {
|
|||
log.Fatalf("Parse package name failed: %v", err)
|
||||
}
|
||||
}
|
||||
pkg, err = reflectMode(packageName, interfaces)
|
||||
parser := packageModeParser{}
|
||||
pkg, err = parser.parsePackage(packageName, interfaces)
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("Loading input failed: %v", err)
|
||||
|
@ -116,7 +124,7 @@ func main() {
|
|||
|
||||
outputPackageName := *packageOut
|
||||
if outputPackageName == "" {
|
||||
// pkg.Name in reflect mode is the base name of the import path,
|
||||
// pkg.Name in package mode is the base name of the import path,
|
||||
// which might have characters that are illegal to have in package names.
|
||||
outputPackageName = "mock_" + sanitize(pkg.Name)
|
||||
}
|
||||
|
@ -142,7 +150,9 @@ func main() {
|
|||
}
|
||||
}
|
||||
|
||||
g := new(generator)
|
||||
g := &generator{
|
||||
buildConstraint: *buildConstraint,
|
||||
}
|
||||
if *source != "" {
|
||||
g.filename = *source
|
||||
} else {
|
||||
|
@ -225,20 +235,21 @@ func usage() {
|
|||
flag.PrintDefaults()
|
||||
}
|
||||
|
||||
const usageText = `mockgen has two modes of operation: source and reflect.
|
||||
const usageText = `mockgen has two modes of operation: source and package.
|
||||
|
||||
Source mode generates mock interfaces from a source file.
|
||||
It is enabled by using the -source flag. Other flags that
|
||||
may be useful in this mode are -imports and -aux_files.
|
||||
may be useful in this mode are -imports, -aux_files and -exclude_interfaces.
|
||||
Example:
|
||||
mockgen -source=foo.go [other options]
|
||||
|
||||
Reflect mode generates mock interfaces by building a program
|
||||
that uses reflection to understand interfaces. It is enabled
|
||||
by passing two non-flag arguments: an import path, and a
|
||||
Package mode works by specifying the package and interface names.
|
||||
It is enabled by passing two non-flag arguments: an import path, and a
|
||||
comma-separated list of symbols.
|
||||
You can use "." to refer to the current path's package.
|
||||
Example:
|
||||
mockgen database/sql/driver Conn,Driver
|
||||
mockgen . SomeInterface
|
||||
|
||||
`
|
||||
|
||||
|
@ -250,12 +261,13 @@ type generator struct {
|
|||
destination string // may be empty
|
||||
srcPackage, srcInterfaces string // may be empty
|
||||
copyrightHeader string
|
||||
buildConstraint string // may be empty
|
||||
|
||||
packageMap map[string]string // map from import path to package name
|
||||
}
|
||||
|
||||
func (g *generator) p(format string, args ...any) {
|
||||
fmt.Fprintf(&g.buf, g.indent+format+"\n", args...)
|
||||
_, _ = fmt.Fprintf(&g.buf, g.indent+format+"\n", args...)
|
||||
}
|
||||
|
||||
func (g *generator) in() {
|
||||
|
@ -305,6 +317,12 @@ func (g *generator) Generate(pkg *model.Package, outputPkgName string, outputPac
|
|||
g.p("")
|
||||
}
|
||||
|
||||
if g.buildConstraint != "" {
|
||||
g.p("//go:build %s", g.buildConstraint)
|
||||
// https://pkg.go.dev/cmd/go#hdr-Build_constraints:~:text=a%20build%20constraint%20should%20be%20followed%20by%20a%20blank%20line
|
||||
g.p("")
|
||||
}
|
||||
|
||||
g.p("// Code generated by MockGen. DO NOT EDIT.")
|
||||
if *writeSourceComment {
|
||||
if g.filename != "" {
|
||||
|
@ -313,16 +331,18 @@ func (g *generator) Generate(pkg *model.Package, outputPkgName string, outputPac
|
|||
g.p("// Source: %v (interfaces: %v)", g.srcPackage, g.srcInterfaces)
|
||||
}
|
||||
}
|
||||
g.p("//")
|
||||
g.p("// Generated by this command:")
|
||||
g.p("//")
|
||||
// only log the name of the executable, not the full path
|
||||
name := filepath.Base(os.Args[0])
|
||||
if runtime.GOOS == "windows" {
|
||||
name = strings.TrimSuffix(name, ".exe")
|
||||
if *writeCmdComment {
|
||||
g.p("//")
|
||||
g.p("// Generated by this command:")
|
||||
g.p("//")
|
||||
// only log the name of the executable, not the full path
|
||||
name := filepath.Base(os.Args[0])
|
||||
if runtime.GOOS == "windows" {
|
||||
name = strings.TrimSuffix(name, ".exe")
|
||||
}
|
||||
g.p("//\t%v", strings.Join(append([]string{name}, os.Args[1:]...), " "))
|
||||
g.p("//")
|
||||
}
|
||||
g.p("//\t%v", strings.Join(append([]string{name}, os.Args[1:]...), " "))
|
||||
g.p("//")
|
||||
|
||||
// Get all required imports, and generate unique names for them all.
|
||||
im := pkg.Imports()
|
||||
|
@ -392,11 +412,13 @@ func (g *generator) Generate(pkg *model.Package, outputPkgName string, outputPac
|
|||
localNames[pkgName] = true
|
||||
}
|
||||
|
||||
if *writePkgComment {
|
||||
// Ensure there's an empty line before the package to follow the recommendations:
|
||||
// https://github.com/golang/go/wiki/CodeReviewComments#package-comments
|
||||
g.p("")
|
||||
// Ensure there is an empty line between “generated by” block and
|
||||
// package documentation comments to follow the recommendations:
|
||||
// https://go.dev/wiki/CodeReviewComments#package-comments
|
||||
// That is, “generated by” should not be a package comment.
|
||||
g.p("")
|
||||
|
||||
if *writePkgComment {
|
||||
g.p("// Package %v is a generated GoMock package.", outputPkgName)
|
||||
}
|
||||
g.p("package %v", outputPkgName)
|
||||
|
@ -472,6 +494,7 @@ func (g *generator) GenerateMockInterface(intf *model.Interface, outputPackagePa
|
|||
g.in()
|
||||
g.p("ctrl *gomock.Controller")
|
||||
g.p("recorder *%vMockRecorder%v", mockType, shortTp)
|
||||
g.p("isgomock struct{}")
|
||||
g.out()
|
||||
g.p("}")
|
||||
g.p("")
|
||||
|
@ -816,7 +839,7 @@ func createPackageMap(importPaths []string) map[string]string {
|
|||
}
|
||||
pkgMap := make(map[string]string)
|
||||
b := bytes.NewBuffer(nil)
|
||||
args := []string{"list", "-json"}
|
||||
args := []string{"list", "-json=ImportPath,Name"}
|
||||
args = append(args, importPaths...)
|
||||
cmd := exec.Command("go", args...)
|
||||
cmd.Stdout = b
|
||||
|
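With reflect mode removed in this version of mockgen, the two-argument form now runs the package-mode parser added below in package_mode.go. A sketch of invoking it via go:generate, matching the usage text above; the Store interface and output file name are hypothetical:

package store

// Package mode: two non-flag arguments, an import path ("." = current package)
// and a comma-separated list of interface names.
//go:generate mockgen -typed -package=store -destination=mock_store_test.go . Store

type Store interface {
	Get(key string) (string, error)
	Put(key, value string) error
}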
|
358 vendor/go.uber.org/mock/mockgen/package_mode.go (generated, vendored, new file)
|
@ -0,0 +1,358 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/types"
|
||||
"strings"
|
||||
|
||||
"go.uber.org/mock/mockgen/model"
|
||||
"golang.org/x/tools/go/packages"
|
||||
)
|
||||
|
||||
var (
|
||||
buildFlags = flag.String("build_flags", "", "(package mode) Additional flags for go build.")
|
||||
)
|
||||
|
||||
type packageModeParser struct {
|
||||
pkgName string
|
||||
}
|
||||
|
||||
func (p *packageModeParser) parsePackage(packageName string, ifaces []string) (*model.Package, error) {
|
||||
p.pkgName = packageName
|
||||
|
||||
pkg, err := p.loadPackage(packageName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("load package: %w", err)
|
||||
}
|
||||
|
||||
interfaces, err := p.extractInterfacesFromPackage(pkg, ifaces)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("extract interfaces from package: %w", err)
|
||||
}
|
||||
|
||||
return &model.Package{
|
||||
Name: pkg.Types.Name(),
|
||||
PkgPath: packageName,
|
||||
Interfaces: interfaces,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *packageModeParser) loadPackage(packageName string) (*packages.Package, error) {
|
||||
var buildFlagsSet []string
|
||||
if *buildFlags != "" {
|
||||
buildFlagsSet = strings.Split(*buildFlags, " ")
|
||||
}
|
||||
|
||||
cfg := &packages.Config{
|
||||
Mode: packages.NeedDeps | packages.NeedImports | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedEmbedFiles,
|
||||
BuildFlags: buildFlagsSet,
|
||||
}
|
||||
pkgs, err := packages.Load(cfg, packageName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("load packages: %w", err)
|
||||
}
|
||||
|
||||
if len(pkgs) != 1 {
|
||||
return nil, fmt.Errorf("packages length must be 1: %d", len(pkgs))
|
||||
}
|
||||
|
||||
if len(pkgs[0].Errors) > 0 {
|
||||
errs := make([]error, len(pkgs[0].Errors))
|
||||
for i, err := range pkgs[0].Errors {
|
||||
errs[i] = err
|
||||
}
|
||||
|
||||
return nil, errors.Join(errs...)
|
||||
}
|
||||
|
||||
return pkgs[0], nil
|
||||
}
|
||||
|
||||
func (p *packageModeParser) extractInterfacesFromPackage(pkg *packages.Package, ifaces []string) ([]*model.Interface, error) {
|
||||
interfaces := make([]*model.Interface, len(ifaces))
|
||||
for i, iface := range ifaces {
|
||||
obj := pkg.Types.Scope().Lookup(iface)
|
||||
if obj == nil {
|
||||
return nil, fmt.Errorf("interface %s does not exist", iface)
|
||||
}
|
||||
|
||||
modelIface, err := p.parseInterface(obj)
|
||||
if err != nil {
|
||||
return nil, newParseTypeError("parse interface", obj.Name(), err)
|
||||
}
|
||||
|
||||
interfaces[i] = modelIface
|
||||
}
|
||||
|
||||
return interfaces, nil
|
||||
}
|
||||
|
||||
func (p *packageModeParser) parseInterface(obj types.Object) (*model.Interface, error) {
|
||||
named, ok := types.Unalias(obj.Type()).(*types.Named)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%s is not an interface. it is a %T", obj.Name(), obj.Type().Underlying())
|
||||
}
|
||||
|
||||
iface, ok := named.Underlying().(*types.Interface)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%s is not an interface. it is a %T", obj.Name(), obj.Type().Underlying())
|
||||
}
|
||||
|
||||
if p.isConstraint(iface) {
|
||||
return nil, fmt.Errorf("interface %s is a constraint", obj.Name())
|
||||
}
|
||||
|
||||
methods := make([]*model.Method, iface.NumMethods())
|
||||
for i := range iface.NumMethods() {
|
||||
method := iface.Method(i)
|
||||
typedMethod, ok := method.Type().(*types.Signature)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("method %s is not a signature", method.Name())
|
||||
}
|
||||
|
||||
modelFunc, err := p.parseFunc(typedMethod)
|
||||
if err != nil {
|
||||
return nil, newParseTypeError("parse method", typedMethod.String(), err)
|
||||
}
|
||||
|
||||
methods[i] = &model.Method{
|
||||
Name: method.Name(),
|
||||
In: modelFunc.In,
|
||||
Out: modelFunc.Out,
|
||||
Variadic: modelFunc.Variadic,
|
||||
}
|
||||
}
|
||||
|
||||
if named.TypeParams() == nil {
|
||||
return &model.Interface{Name: obj.Name(), Methods: methods}, nil
|
||||
}
|
||||
|
||||
typeParams := make([]*model.Parameter, named.TypeParams().Len())
|
||||
for i := range named.TypeParams().Len() {
|
||||
param := named.TypeParams().At(i)
|
||||
typeParam, err := p.parseConstraint(param)
|
||||
if err != nil {
|
||||
return nil, newParseTypeError("parse type parameter", param.String(), err)
|
||||
}
|
||||
|
||||
typeParams[i] = &model.Parameter{Name: param.Obj().Name(), Type: typeParam}
|
||||
}
|
||||
|
||||
return &model.Interface{Name: obj.Name(), Methods: methods, TypeParams: typeParams}, nil
|
||||
}
|
||||
|
||||
func (o *packageModeParser) isConstraint(t *types.Interface) bool {
|
||||
for i := range t.NumEmbeddeds() {
|
||||
embed := t.EmbeddedType(i)
|
||||
if _, ok := embed.Underlying().(*types.Interface); !ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *packageModeParser) parseType(t types.Type) (model.Type, error) {
|
||||
switch t := t.(type) {
|
||||
case *types.Array:
|
||||
elementType, err := p.parseType(t.Elem())
|
||||
if err != nil {
|
||||
return nil, newParseTypeError("parse array type", t.Elem().String(), err)
|
||||
}
|
||||
return &model.ArrayType{Len: int(t.Len()), Type: elementType}, nil
|
||||
case *types.Slice:
|
||||
elementType, err := p.parseType(t.Elem())
|
||||
if err != nil {
|
||||
return nil, newParseTypeError("parse slice type", t.Elem().String(), err)
|
||||
}
|
||||
|
||||
return &model.ArrayType{Len: -1, Type: elementType}, nil
|
||||
case *types.Chan:
|
||||
var dir model.ChanDir
|
||||
switch t.Dir() {
|
||||
case types.RecvOnly:
|
||||
dir = model.RecvDir
|
||||
case types.SendOnly:
|
||||
dir = model.SendDir
|
||||
}
|
||||
|
||||
chanType, err := p.parseType(t.Elem())
|
||||
if err != nil {
|
||||
return nil, newParseTypeError("parse chan type", t.Elem().String(), err)
|
||||
}
|
||||
|
||||
return &model.ChanType{Dir: dir, Type: chanType}, nil
|
||||
case *types.Signature:
|
||||
sig, err := p.parseFunc(t)
|
||||
if err != nil {
|
||||
return nil, newParseTypeError("parse signature", t.String(), err)
|
||||
}
|
||||
|
||||
return sig, nil
|
||||
case *types.Named, *types.Alias:
|
||||
object := t.(interface{ Obj() *types.TypeName })
|
||||
var pkg string
|
||||
if object.Obj().Pkg() != nil {
|
||||
pkg = object.Obj().Pkg().Path()
|
||||
}
|
||||
|
||||
// TypeArgs method not available for aliases in go1.22
|
||||
genericType, ok := t.(interface{ TypeArgs() *types.TypeList })
|
||||
if !ok || genericType.TypeArgs() == nil {
|
||||
return &model.NamedType{
|
||||
Package: pkg,
|
||||
Type: object.Obj().Name(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
typeParams := &model.TypeParametersType{TypeParameters: make([]model.Type, genericType.TypeArgs().Len())}
|
||||
for i := range genericType.TypeArgs().Len() {
|
||||
typeParam := genericType.TypeArgs().At(i)
|
||||
typedParam, err := p.parseType(typeParam)
|
||||
if err != nil {
|
||||
return nil, newParseTypeError("parse type parameter", typeParam.String(), err)
|
||||
}
|
||||
|
||||
typeParams.TypeParameters[i] = typedParam
|
||||
}
|
||||
|
||||
return &model.NamedType{
|
||||
Package: pkg,
|
||||
Type: object.Obj().Name(),
|
||||
TypeParams: typeParams,
|
||||
}, nil
|
||||
case *types.Interface:
|
||||
if t.Empty() {
|
||||
return model.PredeclaredType("any"), nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("cannot handle non-empty unnamed interfaces")
|
||||
case *types.Map:
|
||||
key, err := p.parseType(t.Key())
|
||||
if err != nil {
|
||||
return nil, newParseTypeError("parse map key", t.Key().String(), err)
|
||||
}
|
||||
value, err := p.parseType(t.Elem())
|
||||
if err != nil {
|
||||
return nil, newParseTypeError("parse map value", t.Elem().String(), err)
|
||||
}
|
||||
|
||||
return &model.MapType{Key: key, Value: value}, nil
|
||||
case *types.Pointer:
|
||||
valueType, err := p.parseType(t.Elem())
|
||||
if err != nil {
|
||||
return nil, newParseTypeError("parse pointer type", t.Elem().String(), err)
|
||||
}
|
||||
|
||||
return &model.PointerType{Type: valueType}, nil
|
||||
case *types.Struct:
|
||||
if t.NumFields() > 0 {
|
||||
return nil, fmt.Errorf("cannot handle non-empty unnamed structs")
|
||||
}
|
||||
|
||||
return model.PredeclaredType("struct{}"), nil
|
||||
case *types.Basic:
|
||||
return model.PredeclaredType(t.Name()), nil
|
||||
case *types.Tuple:
|
||||
panic("tuple field") // TODO
|
||||
case *types.TypeParam:
|
||||
return &model.NamedType{Type: t.Obj().Name()}, nil
|
||||
default:
|
||||
panic("unknown type") // TODO
|
||||
}
|
||||
}
|
||||
|
||||
func (p *packageModeParser) parseFunc(sig *types.Signature) (*model.FuncType, error) {
|
||||
var variadic *model.Parameter
|
||||
params := make([]*model.Parameter, 0, sig.Params().Len())
|
||||
for i := range sig.Params().Len() {
|
||||
param := sig.Params().At(i)
|
||||
|
||||
isVariadicParam := i == sig.Params().Len()-1 && sig.Variadic()
|
||||
parseType := param.Type()
|
||||
if isVariadicParam {
|
||||
sliceType, ok := param.Type().(*types.Slice)
|
||||
if !ok {
|
||||
return nil, newParseTypeError("variadic parameter is not a slice", param.String(), nil)
|
||||
}
|
||||
|
||||
parseType = sliceType.Elem()
|
||||
}
|
||||
|
||||
paramType, err := p.parseType(parseType)
|
||||
if err != nil {
|
||||
return nil, newParseTypeError("parse parameter type", parseType.String(), err)
|
||||
}
|
||||
|
||||
modelParameter := &model.Parameter{Type: paramType, Name: param.Name()}
|
||||
|
||||
if isVariadicParam {
|
||||
variadic = modelParameter
|
||||
} else {
|
||||
params = append(params, modelParameter)
|
||||
}
|
||||
}
|
||||
|
||||
if len(params) == 0 {
|
||||
params = nil
|
||||
}
|
||||
|
||||
results := make([]*model.Parameter, sig.Results().Len())
|
||||
for i := range sig.Results().Len() {
|
||||
result := sig.Results().At(i)
|
||||
|
||||
resultType, err := p.parseType(result.Type())
|
||||
if err != nil {
|
||||
return nil, newParseTypeError("parse result type", result.Type().String(), err)
|
||||
}
|
||||
|
||||
results[i] = &model.Parameter{Type: resultType, Name: result.Name()}
|
||||
}
|
||||
|
||||
if len(results) == 0 {
|
||||
results = nil
|
||||
}
|
||||
|
||||
return &model.FuncType{
|
||||
In: params,
|
||||
Out: results,
|
||||
Variadic: variadic,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *packageModeParser) parseConstraint(t *types.TypeParam) (model.Type, error) {
|
||||
if t == nil {
|
||||
return nil, fmt.Errorf("nil type param")
|
||||
}
|
||||
|
||||
typeParam, err := p.parseType(t.Constraint())
|
||||
if err != nil {
|
||||
return nil, newParseTypeError("parse constraint type", t.Constraint().String(), err)
|
||||
}
|
||||
|
||||
return typeParam, nil
|
||||
}
|
||||
|
||||
type parseTypeError struct {
|
||||
message string
|
||||
typeString string
|
||||
error error
|
||||
}
|
||||
|
||||
func newParseTypeError(message string, typeString string, error error) *parseTypeError {
|
||||
return &parseTypeError{typeString: typeString, error: error, message: message}
|
||||
}
|
||||
|
||||
func (p parseTypeError) Error() string {
|
||||
if p.error != nil {
|
||||
return fmt.Sprintf("%s: error parsing %s: %s", p.message, p.typeString, p.error)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s: error parsing type %s", p.message, p.typeString)
|
||||
}
|
||||
|
||||
func (p parseTypeError) Unwrap() error {
|
||||
return p.error
|
||||
}
|
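package_mode.go resolves interfaces through golang.org/x/tools/go/packages and go/types instead of compiling a reflection program. A standalone sketch of the same lookup, done by hand for io.Reader (illustrative; error handling trimmed to the essentials):

package main

import (
	"fmt"
	"go/types"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{Mode: packages.NeedTypes | packages.NeedTypesInfo}
	pkgs, err := packages.Load(cfg, "io")
	if err != nil || len(pkgs) != 1 {
		log.Fatalf("load io: %v", err)
	}
	obj := pkgs[0].Types.Scope().Lookup("Reader")
	if obj == nil {
		log.Fatal("io.Reader not found")
	}
	iface, ok := obj.Type().Underlying().(*types.Interface)
	if !ok {
		log.Fatal("io.Reader is not an interface")
	}
	for i := 0; i < iface.NumMethods(); i++ {
		m := iface.Method(i)
		fmt.Println(m.Name(), m.Type())
	}
}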
256 vendor/go.uber.org/mock/mockgen/reflect.go (generated, vendored)
|
@ -1,256 +0,0 @@
|
|||
// Copyright 2012 Google Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
// This file contains the model construction by reflection.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"go.uber.org/mock/mockgen/model"
|
||||
)
|
||||
|
||||
var (
|
||||
progOnly = flag.Bool("prog_only", false, "(reflect mode) Only generate the reflection program; write it to stdout and exit.")
|
||||
execOnly = flag.String("exec_only", "", "(reflect mode) If set, execute this reflection program.")
|
||||
buildFlags = flag.String("build_flags", "", "(reflect mode) Additional flags for go build.")
|
||||
)
|
||||
|
||||
// reflectMode generates mocks via reflection on an interface.
|
||||
func reflectMode(importPath string, symbols []string) (*model.Package, error) {
|
||||
if *execOnly != "" {
|
||||
return run(*execOnly)
|
||||
}
|
||||
|
||||
program, err := writeProgram(importPath, symbols)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if *progOnly {
|
||||
if _, err := os.Stdout.Write(program); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
wd, _ := os.Getwd()
|
||||
|
||||
// Try to run the reflection program in the current working directory.
|
||||
if p, err := runInDir(program, wd); err == nil {
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// Try to run the program in the same directory as the input package.
|
||||
if p, err := build.Import(importPath, wd, build.FindOnly); err == nil {
|
||||
dir := p.Dir
|
||||
if p, err := runInDir(program, dir); err == nil {
|
||||
return p, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Try to run it in a standard temp directory.
|
||||
return runInDir(program, "")
|
||||
}
|
||||
|
||||
func writeProgram(importPath string, symbols []string) ([]byte, error) {
|
||||
var program bytes.Buffer
|
||||
data := reflectData{
|
||||
ImportPath: importPath,
|
||||
Symbols: symbols,
|
||||
}
|
||||
if err := reflectProgram.Execute(&program, &data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return program.Bytes(), nil
|
||||
}
|
||||
|
||||
// run the given program and parse the output as a model.Package.
|
||||
func run(program string) (*model.Package, error) {
|
||||
f, err := os.CreateTemp("", "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
filename := f.Name()
|
||||
defer os.Remove(filename)
|
||||
if err := f.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Run the program.
|
||||
cmd := exec.Command(program, "-output", filename)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f, err = os.Open(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Process output.
|
||||
var pkg model.Package
|
||||
if err := gob.NewDecoder(f).Decode(&pkg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := f.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &pkg, nil
|
||||
}
|
||||
|
||||
// runInDir writes the given program into the given dir, runs it there, and
|
||||
// parses the output as a model.Package.
|
||||
func runInDir(program []byte, dir string) (*model.Package, error) {
|
||||
// We use TempDir instead of TempFile so we can control the filename.
|
||||
tmpDir, err := os.MkdirTemp(dir, "gomock_reflect_")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err := os.RemoveAll(tmpDir); err != nil {
|
||||
log.Printf("failed to remove temp directory: %s", err)
|
||||
}
|
||||
}()
|
||||
const progSource = "prog.go"
|
||||
var progBinary = "prog.bin"
|
||||
if runtime.GOOS == "windows" {
|
||||
// Windows won't execute a program unless it has a ".exe" suffix.
|
||||
progBinary += ".exe"
|
||||
}
|
||||
|
||||
if err := os.WriteFile(filepath.Join(tmpDir, progSource), program, 0600); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cmdArgs := []string{}
|
||||
cmdArgs = append(cmdArgs, "build")
|
||||
if *buildFlags != "" {
|
||||
cmdArgs = append(cmdArgs, strings.Split(*buildFlags, " ")...)
|
||||
}
|
||||
cmdArgs = append(cmdArgs, "-o", progBinary, progSource)
|
||||
|
||||
// Build the program.
|
||||
buf := bytes.NewBuffer(nil)
|
||||
cmd := exec.Command("go", cmdArgs...)
|
||||
cmd.Dir = tmpDir
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = io.MultiWriter(os.Stderr, buf)
|
||||
if err := cmd.Run(); err != nil {
|
||||
sErr := buf.String()
|
||||
if strings.Contains(sErr, `cannot find package "."`) &&
|
||||
strings.Contains(sErr, "go.uber.org/mock/mockgen/model") {
|
||||
fmt.Fprint(os.Stderr, "Please reference the steps in the README to fix this error:\n\thttps://go.uber.org/mock#reflect-vendoring-error.\n")
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return run(filepath.Join(tmpDir, progBinary))
|
||||
}
|
||||
|
||||
type reflectData struct {
|
||||
ImportPath string
|
||||
Symbols []string
|
||||
}
|
||||
|
||||
// This program reflects on an interface value, and prints the
|
||||
// gob encoding of a model.Package to standard output.
|
||||
// JSON doesn't work because of the model.Type interface.
|
||||
var reflectProgram = template.Must(template.New("program").Parse(`
|
||||
// Code generated by MockGen. DO NOT EDIT.
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/gob"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
|
||||
"go.uber.org/mock/mockgen/model"
|
||||
|
||||
pkg_ {{printf "%q" .ImportPath}}
|
||||
)
|
||||
|
||||
var output = flag.String("output", "", "The output file name, or empty to use stdout.")
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
its := []struct{
|
||||
sym string
|
||||
typ reflect.Type
|
||||
}{
|
||||
{{range .Symbols}}
|
||||
{ {{printf "%q" .}}, reflect.TypeOf((*pkg_.{{.}})(nil)).Elem()},
|
||||
{{end}}
|
||||
}
|
||||
pkg := &model.Package{
|
||||
// NOTE: This behaves contrary to documented behaviour if the
|
||||
// package name is not the final component of the import path.
|
||||
// The reflect package doesn't expose the package name, though.
|
||||
Name: path.Base({{printf "%q" .ImportPath}}),
|
||||
}
|
||||
|
||||
for _, it := range its {
|
||||
intf, err := model.InterfaceFromInterfaceType(it.typ)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Reflection: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
intf.Name = it.sym
|
||||
pkg.Interfaces = append(pkg.Interfaces, intf)
|
||||
}
|
||||
|
||||
outfile := os.Stdout
|
||||
if len(*output) != 0 {
|
||||
var err error
|
||||
outfile, err = os.Create(*output)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "failed to open output file %q", *output)
|
||||
}
|
||||
defer func() {
|
||||
if err := outfile.Close(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "failed to close output file %q", *output)
|
||||
os.Exit(1)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
if err := gob.NewEncoder(outfile).Encode(pkg); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "gob encode: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
`))
|
4 vendor/golang.org/x/exp/LICENSE (generated, vendored)
|
@ -1,4 +1,4 @@
|
|||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
Copyright 2009 The Go Authors.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
|
@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
|
|||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
* Neither the name of Google LLC nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
|
|
4 vendor/golang.org/x/mod/LICENSE (generated, vendored)
|
@ -1,4 +1,4 @@
|
|||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
Copyright 2009 The Go Authors.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
|
@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
|
|||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
* Neither the name of Google LLC nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
|
|
7 vendor/golang.org/x/mod/modfile/read.go (generated, vendored)
|
@ -226,8 +226,9 @@ func (x *FileSyntax) Cleanup() {
|
|||
continue
|
||||
}
|
||||
if ww == 1 && len(stmt.RParen.Comments.Before) == 0 {
|
||||
// Collapse block into single line.
|
||||
line := &Line{
|
||||
// Collapse block into single line but keep the Line reference used by the
|
||||
// parsed File structure.
|
||||
*stmt.Line[0] = Line{
|
||||
Comments: Comments{
|
||||
Before: commentsAdd(stmt.Before, stmt.Line[0].Before),
|
||||
Suffix: commentsAdd(stmt.Line[0].Suffix, stmt.Suffix),
|
||||
|
@ -235,7 +236,7 @@ func (x *FileSyntax) Cleanup() {
|
|||
},
|
||||
Token: stringsAdd(stmt.Token, stmt.Line[0].Token),
|
||||
}
|
||||
x.Stmt[w] = line
|
||||
x.Stmt[w] = stmt.Line[0]
|
||||
w++
|
||||
continue
|
||||
}
|
||||
|
|
80 vendor/golang.org/x/mod/modfile/rule.go (generated, vendored)
|
@ -43,6 +43,7 @@ type File struct {
|
|||
Exclude []*Exclude
|
||||
Replace []*Replace
|
||||
Retract []*Retract
|
||||
Tool []*Tool
|
||||
|
||||
Syntax *FileSyntax
|
||||
}
|
||||
|
@ -93,6 +94,12 @@ type Retract struct {
|
|||
Syntax *Line
|
||||
}
|
||||
|
||||
// A Tool is a single tool statement.
|
||||
type Tool struct {
|
||||
Path string
|
||||
Syntax *Line
|
||||
}
|
||||
|
||||
// A VersionInterval represents a range of versions with upper and lower bounds.
|
||||
// Intervals are closed: both bounds are included. When Low is equal to High,
|
||||
// the interval may refer to a single version ('v1.2.3') or an interval
|
||||
|
@ -297,7 +304,7 @@ func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (parse
|
|||
})
|
||||
}
|
||||
continue
|
||||
case "module", "godebug", "require", "exclude", "replace", "retract":
|
||||
case "module", "godebug", "require", "exclude", "replace", "retract", "tool":
|
||||
for _, l := range x.Line {
|
||||
f.add(&errs, x, l, x.Token[0], l.Token, fix, strict)
|
||||
}
|
||||
|
@ -509,6 +516,21 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a
|
|||
Syntax: line,
|
||||
}
|
||||
f.Retract = append(f.Retract, retract)
|
||||
|
||||
case "tool":
|
||||
if len(args) != 1 {
|
||||
errorf("tool directive expects exactly one argument")
|
||||
return
|
||||
}
|
||||
s, err := parseString(&args[0])
|
||||
if err != nil {
|
||||
errorf("invalid quoted string: %v", err)
|
||||
return
|
||||
}
|
||||
f.Tool = append(f.Tool, &Tool{
|
||||
Path: s,
|
||||
Syntax: line,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1567,6 +1589,36 @@ func (f *File) DropRetract(vi VersionInterval) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// AddTool adds a new tool directive with the given path.
|
||||
// It does nothing if the tool line already exists.
|
||||
func (f *File) AddTool(path string) error {
|
||||
for _, t := range f.Tool {
|
||||
if t.Path == path {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
f.Tool = append(f.Tool, &Tool{
|
||||
Path: path,
|
||||
Syntax: f.Syntax.addLine(nil, "tool", path),
|
||||
})
|
||||
|
||||
f.SortBlocks()
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveTool removes a tool directive with the given path.
|
||||
// It does nothing if no such tool directive exists.
|
||||
func (f *File) DropTool(path string) error {
|
||||
for _, t := range f.Tool {
|
||||
if t.Path == path {
|
||||
t.Syntax.markRemoved()
|
||||
*t = Tool{}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *File) SortBlocks() {
|
||||
f.removeDups() // otherwise sorting is unsafe
|
||||
|
||||
|
@ -1593,9 +1645,9 @@ func (f *File) SortBlocks() {
|
|||
}
|
||||
}
|
||||
|
||||
// removeDups removes duplicate exclude and replace directives.
|
||||
// removeDups removes duplicate exclude, replace and tool directives.
|
||||
//
|
||||
// Earlier exclude directives take priority.
|
||||
// Earlier exclude and tool directives take priority.
|
||||
//
|
||||
// Later replace directives take priority.
|
||||
//
|
||||
|
@ -1605,10 +1657,10 @@ func (f *File) SortBlocks() {
|
|||
// retract directives are not de-duplicated since comments are
|
||||
// meaningful, and versions may be retracted multiple times.
|
||||
func (f *File) removeDups() {
|
||||
removeDups(f.Syntax, &f.Exclude, &f.Replace)
|
||||
removeDups(f.Syntax, &f.Exclude, &f.Replace, &f.Tool)
|
||||
}
|
||||
|
||||
func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace) {
|
||||
func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace, tool *[]*Tool) {
|
||||
kill := make(map[*Line]bool)
|
||||
|
||||
// Remove duplicate excludes.
|
||||
|
@ -1649,6 +1701,24 @@ func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace) {
|
|||
}
|
||||
*replace = repl
|
||||
|
||||
if tool != nil {
|
||||
haveTool := make(map[string]bool)
|
||||
for _, t := range *tool {
|
||||
if haveTool[t.Path] {
|
||||
kill[t.Syntax] = true
|
||||
continue
|
||||
}
|
||||
haveTool[t.Path] = true
|
||||
}
|
||||
var newTool []*Tool
|
||||
for _, t := range *tool {
|
||||
if !kill[t.Syntax] {
|
||||
newTool = append(newTool, t)
|
||||
}
|
||||
}
|
||||
*tool = newTool
|
||||
}
|
||||
|
||||
// Duplicate require and retract directives are not removed.
|
||||
|
||||
// Drop killed statements from the syntax tree.
|
||||
|
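The rule.go changes above teach x/mod to parse, add, drop and de-duplicate the new "tool" directive. A small sketch of editing a go.mod programmatically with the updated API; the module path and tool path are made up:

package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/modfile"
)

func main() {
	src := []byte("module example.com/m\n\ngo 1.24\n")
	f, err := modfile.Parse("go.mod", src, nil)
	if err != nil {
		log.Fatal(err)
	}
	// AddTool is a no-op if the tool line already exists (see the diff above).
	if err := f.AddTool("golang.org/x/tools/cmd/stringer"); err != nil {
		log.Fatal(err)
	}
	out, err := f.Format()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out)) // now contains: tool golang.org/x/tools/cmd/stringer
}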
|
2 vendor/golang.org/x/mod/modfile/work.go (generated, vendored)
|
@ -331,5 +331,5 @@ func (f *WorkFile) SortBlocks() {
|
|||
// retract directives are not de-duplicated since comments are
|
||||
// meaningful, and versions may be retracted multiple times.
|
||||
func (f *WorkFile) removeDups() {
|
||||
removeDups(f.Syntax, nil, &f.Replace)
|
||||
removeDups(f.Syntax, nil, &f.Replace, nil)
|
||||
}
|
||||
|
|
2 vendor/golang.org/x/mod/module/module.go (generated, vendored)
|
@ -506,7 +506,6 @@ var badWindowsNames = []string{
|
|||
"PRN",
|
||||
"AUX",
|
||||
"NUL",
|
||||
"COM0",
|
||||
"COM1",
|
||||
"COM2",
|
||||
"COM3",
|
||||
|
@ -516,7 +515,6 @@ var badWindowsNames = []string{
|
|||
"COM7",
|
||||
"COM8",
|
||||
"COM9",
|
||||
"LPT0",
|
||||
"LPT1",
|
||||
"LPT2",
|
||||
"LPT3",
|
||||
|
|
12 vendor/golang.org/x/sys/unix/syscall_dragonfly.go (generated, vendored)
|
@ -246,6 +246,18 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
|
|||
return sendfile(outfd, infd, offset, count)
|
||||
}
|
||||
|
||||
func Dup3(oldfd, newfd, flags int) error {
|
||||
if oldfd == newfd || flags&^O_CLOEXEC != 0 {
|
||||
return EINVAL
|
||||
}
|
||||
how := F_DUP2FD
|
||||
if flags&O_CLOEXEC != 0 {
|
||||
how = F_DUP2FD_CLOEXEC
|
||||
}
|
||||
_, err := fcntl(oldfd, how, newfd)
|
||||
return err
|
||||
}
|
||||
|
||||
/*
|
||||
* Exposed directly
|
||||
*/
|
||||
|
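The Dup3 wrapper added above is built on F_DUP2FD/F_DUP2FD_CLOEXEC for DragonFly. A sketch of the call as it would be used on a platform that provides it (descriptor numbers are arbitrary):

//go:build dragonfly || linux

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Duplicate stderr (fd 2) onto fd 10 and mark the copy close-on-exec in one
	// step; per the wrapper above, Dup3 fails with EINVAL if oldfd == newfd.
	if err := unix.Dup3(2, 10, unix.O_CLOEXEC); err != nil {
		log.Fatalf("dup3: %v", err)
	}
}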
|
11 vendor/golang.org/x/sys/windows/dll_windows.go (generated, vendored)
|
@ -43,8 +43,8 @@ type DLL struct {
|
|||
// LoadDLL loads DLL file into memory.
|
||||
//
|
||||
// Warning: using LoadDLL without an absolute path name is subject to
|
||||
// DLL preloading attacks. To safely load a system DLL, use LazyDLL
|
||||
// with System set to true, or use LoadLibraryEx directly.
|
||||
// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL],
|
||||
// or use [LoadLibraryEx] directly.
|
||||
func LoadDLL(name string) (dll *DLL, err error) {
|
||||
namep, err := UTF16PtrFromString(name)
|
||||
if err != nil {
|
||||
|
@ -271,6 +271,9 @@ func (d *LazyDLL) NewProc(name string) *LazyProc {
|
|||
}
|
||||
|
||||
// NewLazyDLL creates new LazyDLL associated with DLL file.
|
||||
//
|
||||
// Warning: using NewLazyDLL without an absolute path name is subject to
|
||||
// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL].
|
||||
func NewLazyDLL(name string) *LazyDLL {
|
||||
return &LazyDLL{Name: name}
|
||||
}
|
||||
|
@ -410,7 +413,3 @@ func loadLibraryEx(name string, system bool) (*DLL, error) {
|
|||
}
|
||||
return &DLL{Name: name, Handle: h}, nil
|
||||
}
|
||||
|
||||
type errString string
|
||||
|
||||
func (s errString) Error() string { return string(s) }
|
||||
|
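The dll_windows.go doc changes steer callers toward NewLazySystemDLL when loading system libraries by base name. A sketch of that pattern; GetTickCount64 is just a convenient kernel32 export to call:

//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// NewLazySystemDLL restricts the search to the Windows system directory,
	// which avoids the DLL preloading attacks the updated comments warn about.
	kernel32 := windows.NewLazySystemDLL("kernel32.dll")
	getTickCount64 := kernel32.NewProc("GetTickCount64")
	ticks, _, _ := getTickCount64.Call() // Call returns a non-nil error by convention; ignored here
	fmt.Println("milliseconds since boot:", ticks)
}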
|
4 vendor/golang.org/x/tools/LICENSE (generated, vendored)
|
@ -1,4 +1,4 @@
|
|||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
Copyright 2009 The Go Authors.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
|
@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
|
|||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
* Neither the name of Google LLC nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
|
|
266 vendor/golang.org/x/tools/cover/profile.go (generated, vendored, new file)
|
@ -0,0 +1,266 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package cover provides support for parsing coverage profiles
|
||||
// generated by "go test -coverprofile=cover.out".
|
||||
package cover // import "golang.org/x/tools/cover"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Profile represents the profiling data for a specific file.
|
||||
type Profile struct {
|
||||
FileName string
|
||||
Mode string
|
||||
Blocks []ProfileBlock
|
||||
}
|
||||
|
||||
// ProfileBlock represents a single block of profiling data.
|
||||
type ProfileBlock struct {
|
||||
StartLine, StartCol int
|
||||
EndLine, EndCol int
|
||||
NumStmt, Count int
|
||||
}
|
||||
|
||||
type byFileName []*Profile
|
||||
|
||||
func (p byFileName) Len() int { return len(p) }
|
||||
func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName }
|
||||
func (p byFileName) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
// ParseProfiles parses profile data in the specified file and returns a
|
||||
// Profile for each source file described therein.
|
||||
func ParseProfiles(fileName string) ([]*Profile, error) {
|
||||
pf, err := os.Open(fileName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer pf.Close()
|
||||
return ParseProfilesFromReader(pf)
|
||||
}
|
||||
|
||||
// ParseProfilesFromReader parses profile data from the Reader and
|
||||
// returns a Profile for each source file described therein.
|
||||
func ParseProfilesFromReader(rd io.Reader) ([]*Profile, error) {
|
||||
// First line is "mode: foo", where foo is "set", "count", or "atomic".
|
||||
// Rest of file is in the format
|
||||
// encoding/base64/base64.go:34.44,37.40 3 1
|
||||
// where the fields are: name.go:line.column,line.column numberOfStatements count
|
||||
files := make(map[string]*Profile)
|
||||
s := bufio.NewScanner(rd)
|
||||
mode := ""
|
||||
for s.Scan() {
|
||||
line := s.Text()
|
||||
if mode == "" {
|
||||
const p = "mode: "
|
||||
if !strings.HasPrefix(line, p) || line == p {
|
||||
return nil, fmt.Errorf("bad mode line: %v", line)
|
||||
}
|
||||
mode = line[len(p):]
|
||||
continue
|
||||
}
|
||||
fn, b, err := parseLine(line)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("line %q doesn't match expected format: %v", line, err)
|
||||
}
|
||||
p := files[fn]
|
||||
if p == nil {
|
||||
p = &Profile{
|
||||
FileName: fn,
|
||||
Mode: mode,
|
||||
}
|
||||
files[fn] = p
|
||||
}
|
||||
p.Blocks = append(p.Blocks, b)
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, p := range files {
|
||||
sort.Sort(blocksByStart(p.Blocks))
|
||||
// Merge samples from the same location.
|
||||
j := 1
|
||||
for i := 1; i < len(p.Blocks); i++ {
|
||||
b := p.Blocks[i]
|
||||
last := p.Blocks[j-1]
|
||||
if b.StartLine == last.StartLine &&
|
||||
b.StartCol == last.StartCol &&
|
||||
b.EndLine == last.EndLine &&
|
||||
b.EndCol == last.EndCol {
|
||||
if b.NumStmt != last.NumStmt {
|
||||
return nil, fmt.Errorf("inconsistent NumStmt: changed from %d to %d", last.NumStmt, b.NumStmt)
|
||||
}
|
||||
if mode == "set" {
|
||||
p.Blocks[j-1].Count |= b.Count
|
||||
} else {
|
||||
p.Blocks[j-1].Count += b.Count
|
||||
}
|
||||
continue
|
||||
}
|
||||
p.Blocks[j] = b
|
||||
j++
|
||||
}
|
||||
p.Blocks = p.Blocks[:j]
|
||||
}
|
||||
// Generate a sorted slice.
|
||||
profiles := make([]*Profile, 0, len(files))
|
||||
for _, profile := range files {
|
||||
profiles = append(profiles, profile)
|
||||
}
|
||||
sort.Sort(byFileName(profiles))
|
||||
return profiles, nil
|
||||
}
|
||||
|
||||
// parseLine parses a line from a coverage file.
|
||||
// It is equivalent to the regex
|
||||
// ^(.+):([0-9]+)\.([0-9]+),([0-9]+)\.([0-9]+) ([0-9]+) ([0-9]+)$
|
||||
//
|
||||
// However, it is much faster: https://golang.org/cl/179377
|
||||
func parseLine(l string) (fileName string, block ProfileBlock, err error) {
|
||||
end := len(l)
|
||||
|
||||
b := ProfileBlock{}
|
||||
b.Count, end, err = seekBack(l, ' ', end, "Count")
|
||||
if err != nil {
|
||||
return "", b, err
|
||||
}
|
||||
b.NumStmt, end, err = seekBack(l, ' ', end, "NumStmt")
|
||||
if err != nil {
|
||||
return "", b, err
|
||||
}
|
||||
b.EndCol, end, err = seekBack(l, '.', end, "EndCol")
|
||||
if err != nil {
|
||||
return "", b, err
|
||||
}
|
||||
b.EndLine, end, err = seekBack(l, ',', end, "EndLine")
|
||||
if err != nil {
|
||||
return "", b, err
|
||||
}
|
||||
b.StartCol, end, err = seekBack(l, '.', end, "StartCol")
|
||||
if err != nil {
|
||||
return "", b, err
|
||||
}
|
||||
b.StartLine, end, err = seekBack(l, ':', end, "StartLine")
|
||||
if err != nil {
|
||||
return "", b, err
|
||||
}
|
||||
fn := l[0:end]
|
||||
if fn == "" {
|
||||
return "", b, errors.New("a FileName cannot be blank")
|
||||
}
|
||||
return fn, b, nil
|
||||
}
|
||||
|
||||
// seekBack searches backwards from end to find sep in l, then returns the
|
||||
// value between sep and end as an integer.
|
||||
// If seekBack fails, the returned error will reference what.
|
||||
func seekBack(l string, sep byte, end int, what string) (value int, nextSep int, err error) {
|
||||
// Since we're seeking backwards and we know only ASCII is legal for these values,
|
||||
// we can ignore the possibility of non-ASCII characters.
|
||||
for start := end - 1; start >= 0; start-- {
|
||||
if l[start] == sep {
|
||||
i, err := strconv.Atoi(l[start+1 : end])
|
||||
if err != nil {
|
||||
return 0, 0, fmt.Errorf("couldn't parse %q: %v", what, err)
|
||||
}
|
||||
if i < 0 {
|
||||
return 0, 0, fmt.Errorf("negative values are not allowed for %s, found %d", what, i)
|
||||
}
|
||||
return i, start, nil
|
||||
}
|
||||
}
|
||||
return 0, 0, fmt.Errorf("couldn't find a %s before %s", string(sep), what)
|
||||
}
|
||||
|
||||
type blocksByStart []ProfileBlock
|
||||
|
||||
func (b blocksByStart) Len() int { return len(b) }
|
||||
func (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||
func (b blocksByStart) Less(i, j int) bool {
|
||||
bi, bj := b[i], b[j]
|
||||
return bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol
|
||||
}
|
||||
|
||||
// Boundary represents the position in a source file of the beginning or end of a
|
||||
// block as reported by the coverage profile. In HTML mode, it will correspond to
|
||||
// the opening or closing of a <span> tag and will be used to colorize the source
|
||||
type Boundary struct {
|
||||
Offset int // Location as a byte offset in the source file.
|
||||
Start bool // Is this the start of a block?
|
||||
Count int // Event count from the cover profile.
|
||||
Norm float64 // Count normalized to [0..1].
|
||||
Index int // Order in input file.
|
||||
}
|
||||
|
||||
// Boundaries returns a Profile as a set of Boundary objects within the provided src.
|
||||
func (p *Profile) Boundaries(src []byte) (boundaries []Boundary) {
|
||||
// Find maximum count.
|
||||
max := 0
|
||||
for _, b := range p.Blocks {
|
||||
if b.Count > max {
|
||||
max = b.Count
|
||||
}
|
||||
}
|
||||
// Divisor for normalization.
|
||||
divisor := math.Log(float64(max))
|
||||
|
||||
// boundary returns a Boundary, populating the Norm field with a normalized Count.
|
||||
index := 0
|
||||
boundary := func(offset int, start bool, count int) Boundary {
|
||||
b := Boundary{Offset: offset, Start: start, Count: count, Index: index}
|
||||
index++
|
||||
if !start || count == 0 {
|
||||
return b
|
||||
}
|
||||
if max <= 1 {
|
||||
b.Norm = 0.8 // Profile is in "set" mode; we want a heat map. Use cov8 in the CSS.
|
||||
} else if count > 0 {
|
||||
b.Norm = math.Log(float64(count)) / divisor
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
line, col := 1, 2 // TODO: Why is this 2?
|
||||
for si, bi := 0, 0; si < len(src) && bi < len(p.Blocks); {
|
||||
b := p.Blocks[bi]
|
||||
if b.StartLine == line && b.StartCol == col {
|
||||
boundaries = append(boundaries, boundary(si, true, b.Count))
|
||||
}
|
||||
if b.EndLine == line && b.EndCol == col || line > b.EndLine {
|
||||
boundaries = append(boundaries, boundary(si, false, 0))
|
||||
bi++
|
||||
continue // Don't advance through src; maybe the next block starts here.
|
||||
}
|
||||
if src[si] == '\n' {
|
||||
line++
|
||||
col = 0
|
||||
}
|
||||
col++
|
||||
si++
|
||||
}
|
||||
sort.Sort(boundariesByPos(boundaries))
|
||||
return
|
||||
}
type boundariesByPos []Boundary

func (b boundariesByPos) Len() int      { return len(b) }
func (b boundariesByPos) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b boundariesByPos) Less(i, j int) bool {
	if b[i].Offset == b[j].Offset {
		// Boundaries at the same offset should be ordered according to
		// their original position.
		return b[i].Index < b[j].Index
	}
	return b[i].Offset < b[j].Offset
}
|
vendor/golang.org/x/tools/go/ast/astutil/enclosing.go (generated, vendored): 24 changed lines
|
@ -106,8 +106,21 @@ func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Nod
|
|||
|
||||
// Does augmented child strictly contain [start, end)?
|
||||
if augPos <= start && end <= augEnd {
|
||||
_, isToken := child.(tokenNode)
|
||||
return isToken || visit(child)
|
||||
if is[tokenNode](child) {
|
||||
return true
|
||||
}
|
||||
|
||||
// childrenOf elides the FuncType node beneath FuncDecl.
|
||||
// Add it back here for TypeParams, Params, Results,
|
||||
// all FieldLists). But we don't add it back for the "func" token
|
||||
// even though it is in the tree at FuncDecl.Type.Func.
|
||||
if decl, ok := node.(*ast.FuncDecl); ok {
|
||||
if fields, ok := child.(*ast.FieldList); ok && fields != decl.Recv {
|
||||
path = append(path, decl.Type)
|
||||
}
|
||||
}
|
||||
|
||||
return visit(child)
|
||||
}
|
||||
|
||||
// Does [start, end) overlap multiple children?
|
||||
|
@ -313,6 +326,8 @@ func childrenOf(n ast.Node) []ast.Node {
|
|||
//
|
||||
// As a workaround, we inline the case for FuncType
|
||||
// here and order things correctly.
|
||||
// We also need to insert the elided FuncType just
|
||||
// before the 'visit' recursion.
|
||||
//
|
||||
children = nil // discard ast.Walk(FuncDecl) info subtrees
|
||||
children = append(children, tok(n.Type.Func, len("func")))
|
||||
|
@ -632,3 +647,8 @@ func NodeDescription(n ast.Node) string {
|
|||
}
|
||||
panic(fmt.Sprintf("unexpected node type: %T", n))
|
||||
}
|
||||
|
||||
func is[T any](x any) bool {
|
||||
_, ok := x.(T)
|
||||
return ok
|
||||
}
|
||||
|
|
vendor/golang.org/x/tools/go/ast/astutil/imports.go (generated, vendored): 5 changed lines
|
@ -344,7 +344,12 @@ func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (r
|
|||
}
|
||||
|
||||
// UsesImport reports whether a given import is used.
|
||||
// The provided File must have been parsed with syntactic object resolution
|
||||
// (not using go/parser.SkipObjectResolution).
|
||||
func UsesImport(f *ast.File, path string) (used bool) {
|
||||
if f.Scope == nil {
|
||||
panic("file f was not parsed with syntactic object resolution")
|
||||
}
|
||||
spec := importSpec(f, path)
|
||||
if spec == nil {
|
||||
return
|
||||
|
|
vendor/golang.org/x/tools/go/ast/astutil/util.go (generated, vendored): 11 changed lines
|
@ -7,12 +7,5 @@ package astutil
|
|||
import "go/ast"
|
||||
|
||||
// Unparen returns e with any enclosing parentheses stripped.
|
||||
func Unparen(e ast.Expr) ast.Expr {
|
||||
for {
|
||||
p, ok := e.(*ast.ParenExpr)
|
||||
if !ok {
|
||||
return e
|
||||
}
|
||||
e = p.X
|
||||
}
|
||||
}
|
||||
// Deprecated: use [ast.Unparen].
|
||||
func Unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) }
|
||||
|
|
vendor/golang.org/x/tools/go/ast/inspector/inspector.go (generated, vendored): 13 changed lines
|
@ -73,6 +73,15 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
|
|||
// check, Preorder is almost twice as fast as Nodes. The two
|
||||
// features seem to contribute similar slowdowns (~1.4x each).
|
||||
|
||||
// This function is equivalent to the PreorderSeq call below,
|
||||
// but to avoid the additional dynamic call (which adds 13-35%
|
||||
// to the benchmarks), we expand it out.
|
||||
//
|
||||
// in.PreorderSeq(types...)(func(n ast.Node) bool {
|
||||
// f(n)
|
||||
// return true
|
||||
// })
|
||||
|
||||
mask := maskOf(types)
|
||||
for i := 0; i < len(in.events); {
|
||||
ev := in.events[i]
|
||||
|
@ -171,7 +180,9 @@ func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, s
|
|||
// traverse builds the table of events representing a traversal.
|
||||
func traverse(files []*ast.File) []event {
|
||||
// Preallocate approximate number of events
|
||||
// based on source file extent.
|
||||
// based on source file extent of the declarations.
|
||||
// (We use End-Pos not FileStart-FileEnd to neglect
|
||||
// the effect of long doc comments.)
|
||||
// This makes traverse faster by 4x (!).
|
||||
var extent int
|
||||
for _, f := range files {
|
||||
|
|
vendor/golang.org/x/tools/go/ast/inspector/iter.go (generated, vendored, new file): 85 added lines
|
@ -0,0 +1,85 @@
|
|||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.23
|
||||
|
||||
package inspector
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"iter"
|
||||
)
|
||||
|
||||
// PreorderSeq returns an iterator that visits all the
|
||||
// nodes of the files supplied to New in depth-first order.
|
||||
// It visits each node n before n's children.
|
||||
// The complete traversal sequence is determined by ast.Inspect.
|
||||
//
|
||||
// The types argument, if non-empty, enables type-based
|
||||
// filtering of events: only nodes whose type matches an
|
||||
// element of the types slice are included in the sequence.
|
||||
func (in *Inspector) PreorderSeq(types ...ast.Node) iter.Seq[ast.Node] {
|
||||
|
||||
// This implementation is identical to Preorder,
|
||||
// except that it supports breaking out of the loop.
|
||||
|
||||
return func(yield func(ast.Node) bool) {
|
||||
mask := maskOf(types)
|
||||
for i := 0; i < len(in.events); {
|
||||
ev := in.events[i]
|
||||
if ev.index > i {
|
||||
// push
|
||||
if ev.typ&mask != 0 {
|
||||
if !yield(ev.node) {
|
||||
break
|
||||
}
|
||||
}
|
||||
pop := ev.index
|
||||
if in.events[pop].typ&mask == 0 {
|
||||
// Subtrees do not contain types: skip them and pop.
|
||||
i = pop + 1
|
||||
continue
|
||||
}
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// All[N] returns an iterator over all the nodes of type N.
|
||||
// N must be a pointer-to-struct type that implements ast.Node.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// for call := range All[*ast.CallExpr](in) { ... }
|
||||
func All[N interface {
|
||||
*S
|
||||
ast.Node
|
||||
}, S any](in *Inspector) iter.Seq[N] {
|
||||
|
||||
// To avoid additional dynamic call overheads,
|
||||
// we duplicate rather than call the logic of PreorderSeq.
|
||||
|
||||
mask := typeOf((N)(nil))
|
||||
return func(yield func(N) bool) {
|
||||
for i := 0; i < len(in.events); {
|
||||
ev := in.events[i]
|
||||
if ev.index > i {
|
||||
// push
|
||||
if ev.typ&mask != 0 {
|
||||
if !yield(ev.node.(N)) {
|
||||
break
|
||||
}
|
||||
}
|
||||
pop := ev.index
|
||||
if in.events[pop].typ&mask == 0 {
|
||||
// Subtrees do not contain types: skip them and pop.
|
||||
i = pop + 1
|
||||
continue
|
||||
}
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
|
||||
}
|
vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go (generated, vendored): 117 changed lines
|
@ -2,22 +2,64 @@
|
|||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package gcexportdata provides functions for locating, reading, and
|
||||
// writing export data files containing type information produced by the
|
||||
// gc compiler. This package supports go1.7 export data format and all
|
||||
// later versions.
|
||||
// Package gcexportdata provides functions for reading and writing
|
||||
// export data, which is a serialized description of the API of a Go
|
||||
// package including the names, kinds, types, and locations of all
|
||||
// exported declarations.
|
||||
//
|
||||
// Although it might seem convenient for this package to live alongside
|
||||
// go/types in the standard library, this would cause version skew
|
||||
// problems for developer tools that use it, since they must be able to
|
||||
// consume the outputs of the gc compiler both before and after a Go
|
||||
// update such as from Go 1.7 to Go 1.8. Because this package lives in
|
||||
// golang.org/x/tools, sites can update their version of this repo some
|
||||
// time before the Go 1.8 release and rebuild and redeploy their
|
||||
// developer tools, which will then be able to consume both Go 1.7 and
|
||||
// Go 1.8 export data files, so they will work before and after the
|
||||
// Go update. (See discussion at https://golang.org/issue/15651.)
|
||||
package gcexportdata // import "golang.org/x/tools/go/gcexportdata"
|
||||
// The standard Go compiler (cmd/compile) writes an export data file
|
||||
// for each package it compiles, which it later reads when compiling
|
||||
// packages that import the earlier one. The compiler must thus
|
||||
// contain logic to both write and read export data.
|
||||
// (See the "Export" section in the cmd/compile/README file.)
|
||||
//
|
||||
// The [Read] function in this package can read files produced by the
|
||||
// compiler, producing [go/types] data structures. As a matter of
|
||||
// policy, Read supports export data files produced by only the last
|
||||
// two Go releases plus tip; see https://go.dev/issue/68898. The
|
||||
// export data files produced by the compiler contain additional
|
||||
// details related to generics, inlining, and other optimizations that
|
||||
// cannot be decoded by the [Read] function.
|
||||
//
|
||||
// In files written by the compiler, the export data is not at the
|
||||
// start of the file. Before calling Read, use [NewReader] to locate
|
||||
// the desired portion of the file.
|
||||
//
|
||||
// The [Write] function in this package encodes the exported API of a
|
||||
// Go package ([types.Package]) as a file. Such files can be later
|
||||
// decoded by Read, but cannot be consumed by the compiler.
|
||||
//
|
||||
// # Future changes
|
||||
//
|
||||
// Although Read supports the formats written by both Write and the
|
||||
// compiler, the two are quite different, and there is an open
|
||||
// proposal (https://go.dev/issue/69491) to separate these APIs.
|
||||
//
|
||||
// Under that proposal, this package would ultimately provide only the
|
||||
// Read operation for compiler export data, which must be defined in
|
||||
// this module (golang.org/x/tools), not in the standard library, to
|
||||
// avoid version skew for developer tools that need to read compiler
|
||||
// export data both before and after a Go release, such as from Go
|
||||
// 1.23 to Go 1.24. Because this package lives in the tools module,
|
||||
// clients can update their version of the module some time before the
|
||||
// Go 1.24 release and rebuild and redeploy their tools, which will
|
||||
// then be able to consume both Go 1.23 and Go 1.24 export data files,
|
||||
// so they will work before and after the Go update. (See discussion
|
||||
// at https://go.dev/issue/15651.)
|
||||
//
|
||||
// The operations to import and export [go/types] data structures
|
||||
// would be defined in the go/types package as Import and Export.
|
||||
// [Write] would (eventually) delegate to Export,
|
||||
// and [Read], when it detects a file produced by Export,
|
||||
// would delegate to Import.
|
||||
//
|
||||
// # Deprecations
|
||||
//
|
||||
// The [NewImporter] and [Find] functions are deprecated and should
|
||||
// not be used in new code. The [WriteBundle] and [ReadBundle]
|
||||
// functions are experimental, and there is an open proposal to
|
||||
// deprecate them (https://go.dev/issue/69573).
|
||||
package gcexportdata
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
|
@ -64,24 +106,18 @@ func Find(importPath, srcDir string) (filename, path string) {
|
|||
// additional trailing data beyond the end of the export data.
|
||||
func NewReader(r io.Reader) (io.Reader, error) {
|
||||
buf := bufio.NewReader(r)
|
||||
_, size, err := gcimporter.FindExportData(buf)
|
||||
size, err := gcimporter.FindExportData(buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if size >= 0 {
|
||||
// We were given an archive and found the __.PKGDEF in it.
|
||||
// This tells us the size of the export data, and we don't
|
||||
// need to return the entire file.
|
||||
return &io.LimitedReader{
|
||||
R: buf,
|
||||
N: size,
|
||||
}, nil
|
||||
} else {
|
||||
// We were given an object file. As such, we don't know how large
|
||||
// the export data is and must return the entire file.
|
||||
return buf, nil
|
||||
}
|
||||
// We were given an archive and found the __.PKGDEF in it.
|
||||
// This tells us the size of the export data, and we don't
|
||||
// need to return the entire file.
|
||||
return &io.LimitedReader{
|
||||
R: buf,
|
||||
N: size,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// readAll works the same way as io.ReadAll, but avoids allocations and copies
|
||||
|
@ -100,6 +136,11 @@ func readAll(r io.Reader) ([]byte, error) {
|
|||
// Read reads export data from in, decodes it, and returns type
|
||||
// information for the package.
|
||||
//
|
||||
// Read is capable of reading export data produced by [Write] at the
|
||||
// same source code version, or by the last two Go releases (plus tip)
|
||||
// of the standard Go compiler. Reading files from older compilers may
|
||||
// produce an error.
|
||||
//
|
||||
// The package path (effectively its linker symbol prefix) is
|
||||
// specified by path, since unlike the package name, this information
|
||||
// may not be recorded in the export data.
|
||||
|
@ -128,14 +169,26 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
|
|||
// (from "version"). Select appropriate importer.
|
||||
if len(data) > 0 {
|
||||
switch data[0] {
|
||||
case 'v', 'c', 'd': // binary, till go1.10
|
||||
case 'v', 'c', 'd':
|
||||
// binary, produced by cmd/compile till go1.10
|
||||
return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
|
||||
|
||||
case 'i': // indexed, till go1.19
|
||||
case 'i':
|
||||
// indexed, produced by cmd/compile till go1.19,
|
||||
// and also by [Write].
|
||||
//
|
||||
// If proposal #69491 is accepted, go/types
|
||||
// serialization will be implemented by
|
||||
// types.Export, to which Write would eventually
|
||||
// delegate (explicitly dropping any pretence at
|
||||
// inter-version Write-Read compatibility).
|
||||
// This [Read] function would delegate to types.Import
|
||||
// when it detects that the file was produced by Export.
|
||||
_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
|
||||
return pkg, err
|
||||
|
||||
case 'u': // unified, from go1.20
|
||||
case 'u':
|
||||
// unified, produced by cmd/compile since go1.20
|
||||
_, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path)
|
||||
return pkg, err
|
||||
|
||||
|
|
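A hedged sketch of the reading path described in the package comment above; the export-data path and import path are placeholders (in practice they would come from something like 'go list -export'), and error handling is reduced to panics.

package main

import (
	"fmt"
	"go/token"
	"go/types"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	const exportFile = "/path/to/pkg.a"   // placeholder: export data written by the gc compiler
	const pkgPath = "example.com/mod/pkg" // placeholder: the package's import path

	f, err := os.Open(exportFile)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Locate the export data inside the archive or object file...
	r, err := gcexportdata.NewReader(f)
	if err != nil {
		panic(err)
	}

	// ...then decode it into go/types form.
	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, pkgPath)
	if err != nil {
		panic(err)
	}
	fmt.Println(pkg.Path(), len(pkg.Scope().Names()), "top-level names")
}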
vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go (generated, vendored): 54 removed lines (file deleted)
|
@ -1,54 +0,0 @@
|
|||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package packagesdriver fetches type sizes for go/packages and go/analysis.
|
||||
package packagesdriver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/internal/gocommand"
|
||||
)
|
||||
|
||||
// TODO(adonovan): move back into go/packages.
|
||||
func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) {
|
||||
inv.Verb = "list"
|
||||
inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"}
|
||||
stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv)
|
||||
var goarch, compiler string
|
||||
if rawErr != nil {
|
||||
rawErrMsg := rawErr.Error()
|
||||
if strings.Contains(rawErrMsg, "cannot find main module") ||
|
||||
strings.Contains(rawErrMsg, "go.mod file not found") {
|
||||
// User's running outside of a module.
|
||||
// All bets are off. Get GOARCH and guess compiler is gc.
|
||||
// TODO(matloob): Is this a problem in practice?
|
||||
inv.Verb = "env"
|
||||
inv.Args = []string{"GOARCH"}
|
||||
envout, enverr := gocmdRunner.Run(ctx, inv)
|
||||
if enverr != nil {
|
||||
return "", "", enverr
|
||||
}
|
||||
goarch = strings.TrimSpace(envout.String())
|
||||
compiler = "gc"
|
||||
} else if friendlyErr != nil {
|
||||
return "", "", friendlyErr
|
||||
} else {
|
||||
// This should be unreachable, but be defensive
|
||||
// in case RunRaw's error results are inconsistent.
|
||||
return "", "", rawErr
|
||||
}
|
||||
} else {
|
||||
fields := strings.Fields(stdout.String())
|
||||
if len(fields) < 2 {
|
||||
return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\":\nstdout: <<%s>>\nstderr: <<%s>>",
|
||||
stdout.String(), stderr.String())
|
||||
}
|
||||
goarch = fields[0]
|
||||
compiler = fields[1]
|
||||
}
|
||||
return compiler, goarch, nil
|
||||
}
|
vendor/golang.org/x/tools/go/packages/doc.go (generated, vendored): 15 changed lines
|
@ -64,7 +64,7 @@ graph using the Imports fields.
|
|||
|
||||
The Load function can be configured by passing a pointer to a Config as
|
||||
the first argument. A nil Config is equivalent to the zero Config, which
|
||||
causes Load to run in LoadFiles mode, collecting minimal information.
|
||||
causes Load to run in [LoadFiles] mode, collecting minimal information.
|
||||
See the documentation for type Config for details.
|
||||
|
||||
As noted earlier, the Config.Mode controls the amount of detail
|
||||
|
@ -72,14 +72,14 @@ reported about the loaded packages. See the documentation for type LoadMode
|
|||
for details.
|
||||
|
||||
Most tools should pass their command-line arguments (after any flags)
|
||||
uninterpreted to [Load], so that it can interpret them
|
||||
uninterpreted to Load, so that it can interpret them
|
||||
according to the conventions of the underlying build system.
|
||||
|
||||
See the Example function for typical usage.
|
||||
|
||||
# The driver protocol
|
||||
|
||||
[Load] may be used to load Go packages even in Go projects that use
|
||||
Load may be used to load Go packages even in Go projects that use
|
||||
alternative build systems, by installing an appropriate "driver"
|
||||
program for the build system and specifying its location in the
|
||||
GOPACKAGESDRIVER environment variable.
|
||||
|
@ -97,6 +97,15 @@ JSON-encoded [DriverRequest] message providing additional information
|
|||
is written to the driver's standard input. The driver must write a
|
||||
JSON-encoded [DriverResponse] message to its standard output. (This
|
||||
message differs from the JSON schema produced by 'go list'.)
|
||||
|
||||
The value of the PWD environment variable seen by the driver process
|
||||
is the preferred name of its working directory. (The working directory
|
||||
may have other aliases due to symbolic links; see the comment on the
|
||||
Dir field of [exec.Cmd] for related information.)
|
||||
When the driver process emits in its response the name of a file
|
||||
that is a descendant of this directory, it must use an absolute path
|
||||
that has the value of PWD as a prefix, to ensure that the returned
|
||||
filenames satisfy the original query.
|
||||
*/
|
||||
package packages // import "golang.org/x/tools/go/packages"
|
||||
|
||||
|
|
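Tying the doc comment above together, a minimal sketch of a typical Load call; the "./..." pattern and the printed fields are illustrative only.

package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedImports,
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		panic(err) // Load itself failed (e.g. the driver could not run)
	}
	// Per-package problems land in each Package's Errors list.
	if packages.PrintErrors(pkgs) > 0 {
		return
	}
	for _, p := range pkgs {
		fmt.Println(p.PkgPath, len(p.GoFiles), "Go files")
	}
}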
vendor/golang.org/x/tools/go/packages/external.go (generated, vendored): 15 changed lines
|
@ -13,6 +13,7 @@ import (
|
|||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"slices"
|
||||
"strings"
|
||||
)
|
||||
|
||||
|
@ -79,10 +80,10 @@ type DriverResponse struct {
|
|||
|
||||
// driver is the type for functions that query the build system for the
|
||||
// packages named by the patterns.
|
||||
type driver func(cfg *Config, patterns ...string) (*DriverResponse, error)
|
||||
type driver func(cfg *Config, patterns []string) (*DriverResponse, error)
|
||||
|
||||
// findExternalDriver returns the file path of a tool that supplies
|
||||
// the build system package structure, or "" if not found."
|
||||
// the build system package structure, or "" if not found.
|
||||
// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its
|
||||
// value, otherwise it searches for a binary named gopackagesdriver on the PATH.
|
||||
func findExternalDriver(cfg *Config) driver {
|
||||
|
@ -103,7 +104,7 @@ func findExternalDriver(cfg *Config) driver {
|
|||
return nil
|
||||
}
|
||||
}
|
||||
return func(cfg *Config, words ...string) (*DriverResponse, error) {
|
||||
return func(cfg *Config, patterns []string) (*DriverResponse, error) {
|
||||
req, err := json.Marshal(DriverRequest{
|
||||
Mode: cfg.Mode,
|
||||
Env: cfg.Env,
|
||||
|
@ -117,7 +118,7 @@ func findExternalDriver(cfg *Config) driver {
|
|||
|
||||
buf := new(bytes.Buffer)
|
||||
stderr := new(bytes.Buffer)
|
||||
cmd := exec.CommandContext(cfg.Context, tool, words...)
|
||||
cmd := exec.CommandContext(cfg.Context, tool, patterns...)
|
||||
cmd.Dir = cfg.Dir
|
||||
// The cwd gets resolved to the real path. On Darwin, where
|
||||
// /tmp is a symlink, this breaks anything that expects the
|
||||
|
@ -131,7 +132,7 @@ func findExternalDriver(cfg *Config) driver {
|
|||
// command.
|
||||
//
|
||||
// (See similar trick in Invocation.run in ../../internal/gocommand/invoke.go)
|
||||
cmd.Env = append(slicesClip(cfg.Env), "PWD="+cfg.Dir)
|
||||
cmd.Env = append(slices.Clip(cfg.Env), "PWD="+cfg.Dir)
|
||||
cmd.Stdin = bytes.NewReader(req)
|
||||
cmd.Stdout = buf
|
||||
cmd.Stderr = stderr
|
||||
|
@ -150,7 +151,3 @@ func findExternalDriver(cfg *Config) driver {
|
|||
return &response, nil
|
||||
}
|
||||
}
|
||||
|
||||
// slicesClip removes unused capacity from the slice, returning s[:len(s):len(s)].
|
||||
// TODO(adonovan): use go1.21 slices.Clip.
|
||||
func slicesClip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] }
|
||||
|
|
vendor/golang.org/x/tools/go/packages/golist.go (generated, vendored): 85 changed lines
|
@ -21,7 +21,6 @@ import (
|
|||
"sync"
|
||||
"unicode"
|
||||
|
||||
"golang.org/x/tools/go/internal/packagesdriver"
|
||||
"golang.org/x/tools/internal/gocommand"
|
||||
"golang.org/x/tools/internal/packagesinternal"
|
||||
)
|
||||
|
@ -81,6 +80,12 @@ type golistState struct {
|
|||
cfg *Config
|
||||
ctx context.Context
|
||||
|
||||
runner *gocommand.Runner
|
||||
|
||||
// overlay is the JSON file that encodes the Config.Overlay
|
||||
// mapping, used by 'go list -overlay=...'.
|
||||
overlay string
|
||||
|
||||
envOnce sync.Once
|
||||
goEnvError error
|
||||
goEnv map[string]string
|
||||
|
@ -128,7 +133,10 @@ func (state *golistState) mustGetEnv() map[string]string {
|
|||
// goListDriver uses the go list command to interpret the patterns and produce
|
||||
// the build system package structure.
|
||||
// See driver for more details.
|
||||
func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) {
|
||||
//
|
||||
// overlay is the JSON file that encodes the cfg.Overlay
|
||||
// mapping, used by 'go list -overlay=...'
|
||||
func goListDriver(cfg *Config, runner *gocommand.Runner, overlay string, patterns []string) (_ *DriverResponse, err error) {
|
||||
// Make sure that any asynchronous go commands are killed when we return.
|
||||
parentCtx := cfg.Context
|
||||
if parentCtx == nil {
|
||||
|
@ -143,13 +151,15 @@ func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error
|
|||
cfg: cfg,
|
||||
ctx: ctx,
|
||||
vendorDirs: map[string]bool{},
|
||||
overlay: overlay,
|
||||
runner: runner,
|
||||
}
|
||||
|
||||
// Fill in response.Sizes asynchronously if necessary.
|
||||
if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 {
|
||||
if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
|
||||
errCh := make(chan error)
|
||||
go func() {
|
||||
compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner)
|
||||
compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), runner)
|
||||
response.dr.Compiler = compiler
|
||||
response.dr.Arch = arch
|
||||
errCh <- err
|
||||
|
@ -495,13 +505,14 @@ func (state *golistState) createDriverResponse(words ...string) (*DriverResponse
|
|||
pkg := &Package{
|
||||
Name: p.Name,
|
||||
ID: p.ImportPath,
|
||||
Dir: p.Dir,
|
||||
GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles),
|
||||
CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
|
||||
OtherFiles: absJoin(p.Dir, otherFiles(p)...),
|
||||
EmbedFiles: absJoin(p.Dir, p.EmbedFiles),
|
||||
EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns),
|
||||
IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles),
|
||||
forTest: p.ForTest,
|
||||
ForTest: p.ForTest,
|
||||
depsErrors: p.DepsErrors,
|
||||
Module: p.Module,
|
||||
}
|
||||
|
@ -682,7 +693,7 @@ func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool {
|
|||
// getGoVersion returns the effective minor version of the go command.
|
||||
func (state *golistState) getGoVersion() (int, error) {
|
||||
state.goVersionOnce.Do(func() {
|
||||
state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner)
|
||||
state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.runner)
|
||||
})
|
||||
return state.goVersion, state.goVersionError
|
||||
}
|
||||
|
@ -752,7 +763,7 @@ func jsonFlag(cfg *Config, goVersion int) string {
|
|||
}
|
||||
}
|
||||
addFields("Name", "ImportPath", "Error") // These fields are always needed
|
||||
if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 {
|
||||
if cfg.Mode&NeedFiles != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
|
||||
addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles",
|
||||
"CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles",
|
||||
"SwigFiles", "SwigCXXFiles", "SysoFiles")
|
||||
|
@ -760,7 +771,7 @@ func jsonFlag(cfg *Config, goVersion int) string {
|
|||
addFields("TestGoFiles", "XTestGoFiles")
|
||||
}
|
||||
}
|
||||
if cfg.Mode&NeedTypes != 0 {
|
||||
if cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
|
||||
// CompiledGoFiles seems to be required for the test case TestCgoNoSyntax,
|
||||
// even when -compiled isn't passed in.
|
||||
// TODO(#52435): Should we make the test ask for -compiled, or automatically
|
||||
|
@ -785,7 +796,7 @@ func jsonFlag(cfg *Config, goVersion int) string {
|
|||
// Request Dir in the unlikely case Export is not absolute.
|
||||
addFields("Dir", "Export")
|
||||
}
|
||||
if cfg.Mode&needInternalForTest != 0 {
|
||||
if cfg.Mode&NeedForTest != 0 {
|
||||
addFields("ForTest")
|
||||
}
|
||||
if cfg.Mode&needInternalDepsErrors != 0 {
|
||||
|
@ -841,7 +852,7 @@ func (state *golistState) cfgInvocation() gocommand.Invocation {
|
|||
Env: cfg.Env,
|
||||
Logf: cfg.Logf,
|
||||
WorkingDir: cfg.Dir,
|
||||
Overlay: cfg.goListOverlayFile,
|
||||
Overlay: state.overlay,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -852,11 +863,8 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer,
|
|||
inv := state.cfgInvocation()
|
||||
inv.Verb = verb
|
||||
inv.Args = args
|
||||
gocmdRunner := cfg.gocmdRunner
|
||||
if gocmdRunner == nil {
|
||||
gocmdRunner = &gocommand.Runner{}
|
||||
}
|
||||
stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv)
|
||||
|
||||
stdout, stderr, friendlyErr, err := state.runner.RunRaw(cfg.Context, inv)
|
||||
if err != nil {
|
||||
// Check for 'go' executable not being found.
|
||||
if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
|
||||
|
@ -880,6 +888,12 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer,
|
|||
return nil, friendlyErr
|
||||
}
|
||||
|
||||
// Return an error if 'go list' failed due to missing tools in
|
||||
// $GOROOT/pkg/tool/$GOOS_$GOARCH (#69606).
|
||||
if len(stderr.String()) > 0 && strings.Contains(stderr.String(), `go: no such tool`) {
|
||||
return nil, friendlyErr
|
||||
}
|
||||
|
||||
// Is there an error running the C compiler in cgo? This will be reported in the "Error" field
|
||||
// and should be suppressed by go list -e.
|
||||
//
|
||||
|
@ -1024,3 +1038,44 @@ func cmdDebugStr(cmd *exec.Cmd) string {
|
|||
}
|
||||
return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " "))
|
||||
}
|
||||
|
||||
// getSizesForArgs queries 'go list' for the appropriate
|
||||
// Compiler and GOARCH arguments to pass to [types.SizesFor].
|
||||
func getSizesForArgs(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) {
|
||||
inv.Verb = "list"
|
||||
inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"}
|
||||
stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv)
|
||||
var goarch, compiler string
|
||||
if rawErr != nil {
|
||||
rawErrMsg := rawErr.Error()
|
||||
if strings.Contains(rawErrMsg, "cannot find main module") ||
|
||||
strings.Contains(rawErrMsg, "go.mod file not found") {
|
||||
// User's running outside of a module.
|
||||
// All bets are off. Get GOARCH and guess compiler is gc.
|
||||
// TODO(matloob): Is this a problem in practice?
|
||||
inv.Verb = "env"
|
||||
inv.Args = []string{"GOARCH"}
|
||||
envout, enverr := gocmdRunner.Run(ctx, inv)
|
||||
if enverr != nil {
|
||||
return "", "", enverr
|
||||
}
|
||||
goarch = strings.TrimSpace(envout.String())
|
||||
compiler = "gc"
|
||||
} else if friendlyErr != nil {
|
||||
return "", "", friendlyErr
|
||||
} else {
|
||||
// This should be unreachable, but be defensive
|
||||
// in case RunRaw's error results are inconsistent.
|
||||
return "", "", rawErr
|
||||
}
|
||||
} else {
|
||||
fields := strings.Fields(stdout.String())
|
||||
if len(fields) < 2 {
|
||||
return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\":\nstdout: <<%s>>\nstderr: <<%s>>",
|
||||
stdout.String(), stderr.String())
|
||||
}
|
||||
goarch = fields[0]
|
||||
compiler = fields[1]
|
||||
}
|
||||
return compiler, goarch, nil
|
||||
}
|
||||
|
|
vendor/golang.org/x/tools/go/packages/loadmode_string.go (generated, vendored): 72 changed lines
|
@ -9,49 +9,47 @@ import (
|
|||
"strings"
|
||||
)
|
||||
|
||||
var allModes = []LoadMode{
|
||||
NeedName,
|
||||
NeedFiles,
|
||||
NeedCompiledGoFiles,
|
||||
NeedImports,
|
||||
NeedDeps,
|
||||
NeedExportFile,
|
||||
NeedTypes,
|
||||
NeedSyntax,
|
||||
NeedTypesInfo,
|
||||
NeedTypesSizes,
|
||||
var modes = [...]struct {
|
||||
mode LoadMode
|
||||
name string
|
||||
}{
|
||||
{NeedName, "NeedName"},
|
||||
{NeedFiles, "NeedFiles"},
|
||||
{NeedCompiledGoFiles, "NeedCompiledGoFiles"},
|
||||
{NeedImports, "NeedImports"},
|
||||
{NeedDeps, "NeedDeps"},
|
||||
{NeedExportFile, "NeedExportFile"},
|
||||
{NeedTypes, "NeedTypes"},
|
||||
{NeedSyntax, "NeedSyntax"},
|
||||
{NeedTypesInfo, "NeedTypesInfo"},
|
||||
{NeedTypesSizes, "NeedTypesSizes"},
|
||||
{NeedForTest, "NeedForTest"},
|
||||
{NeedModule, "NeedModule"},
|
||||
{NeedEmbedFiles, "NeedEmbedFiles"},
|
||||
{NeedEmbedPatterns, "NeedEmbedPatterns"},
|
||||
}
|
||||
|
||||
var modeStrings = []string{
|
||||
"NeedName",
|
||||
"NeedFiles",
|
||||
"NeedCompiledGoFiles",
|
||||
"NeedImports",
|
||||
"NeedDeps",
|
||||
"NeedExportFile",
|
||||
"NeedTypes",
|
||||
"NeedSyntax",
|
||||
"NeedTypesInfo",
|
||||
"NeedTypesSizes",
|
||||
}
|
||||
|
||||
func (mod LoadMode) String() string {
|
||||
m := mod
|
||||
if m == 0 {
|
||||
func (mode LoadMode) String() string {
|
||||
if mode == 0 {
|
||||
return "LoadMode(0)"
|
||||
}
|
||||
var out []string
|
||||
for i, x := range allModes {
|
||||
if x > m {
|
||||
break
|
||||
}
|
||||
if (m & x) != 0 {
|
||||
out = append(out, modeStrings[i])
|
||||
m = m ^ x
|
||||
// named bits
|
||||
for _, item := range modes {
|
||||
if (mode & item.mode) != 0 {
|
||||
mode ^= item.mode
|
||||
out = append(out, item.name)
|
||||
}
|
||||
}
|
||||
if m != 0 {
|
||||
out = append(out, "Unknown")
|
||||
// unnamed residue
|
||||
if mode != 0 {
|
||||
if out == nil {
|
||||
return fmt.Sprintf("LoadMode(%#x)", int(mode))
|
||||
}
|
||||
out = append(out, fmt.Sprintf("%#x", int(mode)))
|
||||
}
|
||||
return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|"))
|
||||
if len(out) == 1 {
|
||||
return out[0]
|
||||
}
|
||||
return "(" + strings.Join(out, "|") + ")"
|
||||
}
|
||||
|
|
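Given the rewritten String method above, a quick illustration of the strings it produces (assuming the updated x/tools version being vendored here):

package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	fmt.Println(packages.NeedName)                      // NeedName
	fmt.Println(packages.NeedName | packages.NeedFiles) // (NeedName|NeedFiles)
	fmt.Println(packages.LoadMode(0))                   // LoadMode(0)
}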
vendor/golang.org/x/tools/go/packages/packages.go (generated, vendored): 406 changed lines
|
@ -16,13 +16,13 @@ import (
|
|||
"go/scanner"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
@ -31,7 +31,6 @@ import (
|
|||
"golang.org/x/tools/internal/gocommand"
|
||||
"golang.org/x/tools/internal/packagesinternal"
|
||||
"golang.org/x/tools/internal/typesinternal"
|
||||
"golang.org/x/tools/internal/versions"
|
||||
)
|
||||
|
||||
// A LoadMode controls the amount of detail to return when loading.
|
||||
|
@ -44,20 +43,33 @@ import (
|
|||
// ID and Errors (if present) will always be filled.
|
||||
// [Load] may return more information than requested.
|
||||
//
|
||||
// The Mode flag is a union of several bits named NeedName,
|
||||
// NeedFiles, and so on, each of which determines whether
|
||||
// a given field of Package (Name, Files, etc) should be
|
||||
// populated.
|
||||
//
|
||||
// For convenience, we provide named constants for the most
|
||||
// common combinations of Need flags:
|
||||
//
|
||||
// [LoadFiles] lists of files in each package
|
||||
// [LoadImports] ... plus imports
|
||||
// [LoadTypes] ... plus type information
|
||||
// [LoadSyntax] ... plus type-annotated syntax
|
||||
// [LoadAllSyntax] ... for all dependencies
|
||||
//
|
||||
// Unfortunately there are a number of open bugs related to
|
||||
// interactions among the LoadMode bits:
|
||||
// - https://github.com/golang/go/issues/48226
|
||||
// - https://github.com/golang/go/issues/56633
|
||||
// - https://github.com/golang/go/issues/56677
|
||||
// - https://github.com/golang/go/issues/58726
|
||||
// - https://github.com/golang/go/issues/63517
|
||||
// - https://github.com/golang/go/issues/56633
|
||||
// - https://github.com/golang/go/issues/56677
|
||||
// - https://github.com/golang/go/issues/58726
|
||||
// - https://github.com/golang/go/issues/63517
|
||||
type LoadMode int
|
||||
|
||||
const (
|
||||
// NeedName adds Name and PkgPath.
|
||||
NeedName LoadMode = 1 << iota
|
||||
|
||||
// NeedFiles adds GoFiles and OtherFiles.
|
||||
// NeedFiles adds Dir, GoFiles, OtherFiles, and IgnoredFiles
|
||||
NeedFiles
|
||||
|
||||
// NeedCompiledGoFiles adds CompiledGoFiles.
|
||||
|
@ -76,10 +88,10 @@ const (
|
|||
// NeedTypes adds Types, Fset, and IllTyped.
|
||||
NeedTypes
|
||||
|
||||
// NeedSyntax adds Syntax.
|
||||
// NeedSyntax adds Syntax and Fset.
|
||||
NeedSyntax
|
||||
|
||||
// NeedTypesInfo adds TypesInfo.
|
||||
// NeedTypesInfo adds TypesInfo and Fset.
|
||||
NeedTypesInfo
|
||||
|
||||
// NeedTypesSizes adds TypesSizes.
|
||||
|
@ -88,9 +100,10 @@ const (
|
|||
// needInternalDepsErrors adds the internal deps errors field for use by gopls.
|
||||
needInternalDepsErrors
|
||||
|
||||
// needInternalForTest adds the internal forTest field.
|
||||
// NeedForTest adds ForTest.
|
||||
//
|
||||
// Tests must also be set on the context for this field to be populated.
|
||||
needInternalForTest
|
||||
NeedForTest
|
||||
|
||||
// typecheckCgo enables full support for type checking cgo. Requires Go 1.15+.
|
||||
// Modifies CompiledGoFiles and Types, and has no effect on its own.
|
||||
|
@ -104,27 +117,24 @@ const (
|
|||
|
||||
// NeedEmbedPatterns adds EmbedPatterns.
|
||||
NeedEmbedPatterns
|
||||
|
||||
// Be sure to update loadmode_string.go when adding new items!
|
||||
)
|
||||
|
||||
const (
|
||||
// Deprecated: LoadFiles exists for historical compatibility
|
||||
// and should not be used. Please directly specify the needed fields using the Need values.
|
||||
// LoadFiles loads the name and file names for the initial packages.
|
||||
LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles
|
||||
|
||||
// Deprecated: LoadImports exists for historical compatibility
|
||||
// and should not be used. Please directly specify the needed fields using the Need values.
|
||||
// LoadImports loads the name, file names, and import mapping for the initial packages.
|
||||
LoadImports = LoadFiles | NeedImports
|
||||
|
||||
// Deprecated: LoadTypes exists for historical compatibility
|
||||
// and should not be used. Please directly specify the needed fields using the Need values.
|
||||
// LoadTypes loads exported type information for the initial packages.
|
||||
LoadTypes = LoadImports | NeedTypes | NeedTypesSizes
|
||||
|
||||
// Deprecated: LoadSyntax exists for historical compatibility
|
||||
// and should not be used. Please directly specify the needed fields using the Need values.
|
||||
// LoadSyntax loads typed syntax for the initial packages.
|
||||
LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo
|
||||
|
||||
// Deprecated: LoadAllSyntax exists for historical compatibility
|
||||
// and should not be used. Please directly specify the needed fields using the Need values.
|
||||
// LoadAllSyntax loads typed syntax for the initial packages and all dependencies.
|
||||
LoadAllSyntax = LoadSyntax | NeedDeps
|
||||
|
||||
// Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile.
|
||||
|
@ -134,13 +144,7 @@ const (
|
|||
// A Config specifies details about how packages should be loaded.
|
||||
// The zero value is a valid configuration.
|
||||
//
|
||||
// Calls to Load do not modify this struct.
|
||||
//
|
||||
// TODO(adonovan): #67702: this is currently false: in fact,
|
||||
// calls to [Load] do not modify the public fields of this struct, but
|
||||
// may modify hidden fields, so concurrent calls to [Load] must not
|
||||
// use the same Config. But perhaps we should reestablish the
|
||||
// documented invariant.
|
||||
// Calls to [Load] do not modify this struct.
|
||||
type Config struct {
|
||||
// Mode controls the level of information returned for each package.
|
||||
Mode LoadMode
|
||||
|
@ -171,19 +175,10 @@ type Config struct {
|
|||
//
|
||||
Env []string
|
||||
|
||||
// gocmdRunner guards go command calls from concurrency errors.
|
||||
gocmdRunner *gocommand.Runner
|
||||
|
||||
// BuildFlags is a list of command-line flags to be passed through to
|
||||
// the build system's query tool.
|
||||
BuildFlags []string
|
||||
|
||||
// modFile will be used for -modfile in go command invocations.
|
||||
modFile string
|
||||
|
||||
// modFlag will be used for -modfile in go command invocations.
|
||||
modFlag string
|
||||
|
||||
// Fset provides source position information for syntax trees and types.
|
||||
// If Fset is nil, Load will use a new fileset, but preserve Fset's value.
|
||||
Fset *token.FileSet
|
||||
|
@ -230,21 +225,24 @@ type Config struct {
|
|||
// drivers may vary in their level of support for overlays.
|
||||
Overlay map[string][]byte
|
||||
|
||||
// goListOverlayFile is the JSON file that encodes the Overlay
|
||||
// mapping, used by 'go list -overlay=...'
|
||||
goListOverlayFile string
|
||||
// -- Hidden configuration fields only for use in x/tools --
|
||||
|
||||
// modFile will be used for -modfile in go command invocations.
|
||||
modFile string
|
||||
|
||||
// modFlag will be used for -modfile in go command invocations.
|
||||
modFlag string
|
||||
}
|
||||
|
||||
// Load loads and returns the Go packages named by the given patterns.
|
||||
//
|
||||
// Config specifies loading options;
|
||||
// nil behaves the same as an empty Config.
|
||||
// The cfg parameter specifies loading options; nil behaves the same as an empty [Config].
|
||||
//
|
||||
// The [Config.Mode] field is a set of bits that determine what kinds
|
||||
// of information should be computed and returned. Modes that require
|
||||
// more information tend to be slower. See [LoadMode] for details
|
||||
// and important caveats. Its zero value is equivalent to
|
||||
// NeedName | NeedFiles | NeedCompiledGoFiles.
|
||||
// [NeedName] | [NeedFiles] | [NeedCompiledGoFiles].
|
||||
//
|
||||
// Each call to Load returns a new set of [Package] instances.
|
||||
// The Packages and their Imports form a directed acyclic graph.
|
||||
|
@ -261,7 +259,7 @@ type Config struct {
|
|||
// Errors associated with a particular package are recorded in the
|
||||
// corresponding Package's Errors list, and do not cause Load to
|
||||
// return an error. Clients may need to handle such errors before
|
||||
// proceeding with further analysis. The PrintErrors function is
|
||||
// proceeding with further analysis. The [PrintErrors] function is
|
||||
// provided for convenient display of all errors.
|
||||
func Load(cfg *Config, patterns ...string) ([]*Package, error) {
|
||||
ld := newLoader(cfg)
|
||||
|
@ -324,21 +322,24 @@ func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, erro
|
|||
} else if !response.NotHandled {
|
||||
return response, true, nil
|
||||
}
|
||||
// (fall through)
|
||||
// not handled: fall through
|
||||
}
|
||||
|
||||
// go list fallback
|
||||
//
|
||||
|
||||
// Write overlays once, as there are many calls
|
||||
// to 'go list' (one per chunk plus others too).
|
||||
overlay, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay)
|
||||
overlayFile, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
defer cleanupOverlay()
|
||||
cfg.goListOverlayFile = overlay
|
||||
|
||||
response, err := callDriverOnChunks(goListDriver, cfg, chunks)
|
||||
var runner gocommand.Runner // (shared across many 'go list' calls)
|
||||
driver := func(cfg *Config, patterns []string) (*DriverResponse, error) {
|
||||
return goListDriver(cfg, &runner, overlayFile, patterns)
|
||||
}
|
||||
response, err := callDriverOnChunks(driver, cfg, chunks)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
@ -376,16 +377,14 @@ func splitIntoChunks(patterns []string, argMax int) ([][]string, error) {
|
|||
|
||||
func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) {
|
||||
if len(chunks) == 0 {
|
||||
return driver(cfg)
|
||||
return driver(cfg, nil)
|
||||
}
|
||||
responses := make([]*DriverResponse, len(chunks))
|
||||
errNotHandled := errors.New("driver returned NotHandled")
|
||||
var g errgroup.Group
|
||||
for i, chunk := range chunks {
|
||||
i := i
|
||||
chunk := chunk
|
||||
g.Go(func() (err error) {
|
||||
responses[i], err = driver(cfg, chunk...)
|
||||
responses[i], err = driver(cfg, chunk)
|
||||
if responses[i] != nil && responses[i].NotHandled {
|
||||
err = errNotHandled
|
||||
}
|
||||
|
@ -435,6 +434,12 @@ type Package struct {
|
|||
// PkgPath is the package path as used by the go/types package.
|
||||
PkgPath string
|
||||
|
||||
// Dir is the directory associated with the package, if it exists.
|
||||
//
|
||||
// For packages listed by the go command, this is the directory containing
|
||||
// the package files.
|
||||
Dir string
|
||||
|
||||
// Errors contains any errors encountered querying the metadata
|
||||
// of the package, or while parsing or type-checking its files.
|
||||
Errors []Error
|
||||
|
@ -522,8 +527,8 @@ type Package struct {
|
|||
|
||||
// -- internal --
|
||||
|
||||
// forTest is the package under test, if any.
|
||||
forTest string
|
||||
// ForTest is the package under test, if any.
|
||||
ForTest string
|
||||
|
||||
// depsErrors is the DepsErrors field from the go list response, if any.
|
||||
depsErrors []*packagesinternal.PackageError
|
||||
|
@ -552,9 +557,6 @@ type ModuleError struct {
|
|||
}
|
||||
|
||||
func init() {
|
||||
packagesinternal.GetForTest = func(p interface{}) string {
|
||||
return p.(*Package).forTest
|
||||
}
|
||||
packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError {
|
||||
return p.(*Package).depsErrors
|
||||
}
|
||||
|
@ -566,7 +568,6 @@ func init() {
|
|||
}
|
||||
packagesinternal.TypecheckCgo = int(typecheckCgo)
|
||||
packagesinternal.DepsErrors = int(needInternalDepsErrors)
|
||||
packagesinternal.ForTest = int(needInternalForTest)
|
||||
}
|
||||
|
||||
// An Error describes a problem with a package's metadata, syntax, or types.
|
||||
|
@ -682,18 +683,19 @@ func (p *Package) String() string { return p.ID }
|
|||
// loaderPackage augments Package with state used during the loading phase
|
||||
type loaderPackage struct {
|
||||
*Package
|
||||
importErrors map[string]error // maps each bad import to its error
|
||||
loadOnce sync.Once
|
||||
color uint8 // for cycle detection
|
||||
needsrc bool // load from source (Mode >= LoadTypes)
|
||||
needtypes bool // type information is either requested or depended on
|
||||
initial bool // package was matched by a pattern
|
||||
goVersion int // minor version number of go command on PATH
|
||||
importErrors map[string]error // maps each bad import to its error
|
||||
preds []*loaderPackage // packages that import this one
|
||||
unfinishedSuccs atomic.Int32 // number of direct imports not yet loaded
|
||||
color uint8 // for cycle detection
|
||||
needsrc bool // load from source (Mode >= LoadTypes)
|
||||
needtypes bool // type information is either requested or depended on
|
||||
initial bool // package was matched by a pattern
|
||||
goVersion int // minor version number of go command on PATH
|
||||
}
|
||||
|
||||
// loader holds the working state of a single call to load.
|
||||
type loader struct {
|
||||
pkgs map[string]*loaderPackage
|
||||
pkgs map[string]*loaderPackage // keyed by Package.ID
|
||||
Config
|
||||
sizes types.Sizes // non-nil if needed by mode
|
||||
parseCache map[string]*parseValue
|
||||
|
@ -739,9 +741,6 @@ func newLoader(cfg *Config) *loader {
|
|||
if ld.Config.Env == nil {
|
||||
ld.Config.Env = os.Environ()
|
||||
}
|
||||
if ld.Config.gocmdRunner == nil {
|
||||
ld.Config.gocmdRunner = &gocommand.Runner{}
|
||||
}
|
||||
if ld.Context == nil {
|
||||
ld.Context = context.Background()
|
||||
}
|
||||
|
@ -755,7 +754,7 @@ func newLoader(cfg *Config) *loader {
|
|||
ld.requestedMode = ld.Mode
|
||||
ld.Mode = impliedLoadMode(ld.Mode)
|
||||
|
||||
if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
|
||||
if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
|
||||
if ld.Fset == nil {
|
||||
ld.Fset = token.NewFileSet()
|
||||
}
|
||||
|
@ -764,6 +763,7 @@ func newLoader(cfg *Config) *loader {
|
|||
// because we load source if export data is missing.
|
||||
if ld.ParseFile == nil {
|
||||
ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
|
||||
// We implicitly promise to keep doing ast.Object resolution. :(
|
||||
const mode = parser.AllErrors | parser.ParseComments
|
||||
return parser.ParseFile(fset, filename, src, mode)
|
||||
}
|
||||
|
@ -795,7 +795,7 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
|
|||
exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe"
|
||||
// This package needs type information if the caller requested types and the package is
|
||||
// either a root, or it's a non-root and the user requested dependencies ...
|
||||
needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
|
||||
needtypes := (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
|
||||
// This package needs source if the call requested source (or types info, which implies source)
|
||||
// and the package is either a root, or it's a non-root and the user requested dependencies...
|
||||
needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) ||
|
||||
|
@ -820,9 +820,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
|
|||
}
|
||||
}
|
||||
|
||||
if ld.Mode&NeedImports != 0 {
|
||||
// Materialize the import graph.
|
||||
|
||||
// Materialize the import graph if it is needed (NeedImports),
|
||||
// or if we'll be using loadPackages (Need{Syntax|Types|TypesInfo}).
|
||||
var leaves []*loaderPackage // packages with no unfinished successors
|
||||
if ld.Mode&(NeedImports|NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
|
||||
const (
|
||||
white = 0 // new
|
||||
grey = 1 // in progress
|
||||
|
@ -841,63 +842,76 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
|
|||
// dependency on a package that does. These are the only packages
|
||||
// for which we load source code.
|
||||
var stack []*loaderPackage
|
||||
var visit func(lpkg *loaderPackage) bool
|
||||
visit = func(lpkg *loaderPackage) bool {
|
||||
switch lpkg.color {
|
||||
case black:
|
||||
return lpkg.needsrc
|
||||
case grey:
|
||||
var visit func(from, lpkg *loaderPackage) bool
|
||||
visit = func(from, lpkg *loaderPackage) bool {
|
||||
if lpkg.color == grey {
|
||||
panic("internal error: grey node")
|
||||
}
|
||||
lpkg.color = grey
|
||||
stack = append(stack, lpkg) // push
|
||||
stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports
|
||||
lpkg.Imports = make(map[string]*Package, len(stubs))
|
||||
for importPath, ipkg := range stubs {
|
||||
var importErr error
|
||||
imp := ld.pkgs[ipkg.ID]
|
||||
if imp == nil {
|
||||
// (includes package "C" when DisableCgo)
|
||||
importErr = fmt.Errorf("missing package: %q", ipkg.ID)
|
||||
} else if imp.color == grey {
|
||||
importErr = fmt.Errorf("import cycle: %s", stack)
|
||||
}
|
||||
if importErr != nil {
|
||||
if lpkg.importErrors == nil {
|
||||
lpkg.importErrors = make(map[string]error)
|
||||
if lpkg.color == white {
|
||||
lpkg.color = grey
|
||||
stack = append(stack, lpkg) // push
|
||||
stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports
|
||||
lpkg.Imports = make(map[string]*Package, len(stubs))
|
||||
for importPath, ipkg := range stubs {
|
||||
var importErr error
|
||||
imp := ld.pkgs[ipkg.ID]
|
||||
if imp == nil {
|
||||
// (includes package "C" when DisableCgo)
|
||||
importErr = fmt.Errorf("missing package: %q", ipkg.ID)
|
||||
} else if imp.color == grey {
|
||||
importErr = fmt.Errorf("import cycle: %s", stack)
|
||||
}
|
||||
lpkg.importErrors[importPath] = importErr
|
||||
continue
|
||||
if importErr != nil {
|
||||
if lpkg.importErrors == nil {
|
||||
lpkg.importErrors = make(map[string]error)
|
||||
}
|
||||
lpkg.importErrors[importPath] = importErr
|
||||
continue
|
||||
}
|
||||
|
||||
if visit(lpkg, imp) {
|
||||
lpkg.needsrc = true
|
||||
}
|
||||
lpkg.Imports[importPath] = imp.Package
|
||||
}
|
||||
|
||||
if visit(imp) {
|
||||
lpkg.needsrc = true
|
||||
// -- postorder --
|
||||
|
||||
// Complete type information is required for the
|
||||
// immediate dependencies of each source package.
|
||||
if lpkg.needsrc && ld.Mode&NeedTypes != 0 {
|
||||
for _, ipkg := range lpkg.Imports {
|
||||
ld.pkgs[ipkg.ID].needtypes = true
|
||||
}
|
||||
}
|
||||
lpkg.Imports[importPath] = imp.Package
|
||||
|
||||
// NeedTypeSizes causes TypeSizes to be set even
|
||||
// on packages for which types aren't needed.
|
||||
if ld.Mode&NeedTypesSizes != 0 {
|
||||
lpkg.TypesSizes = ld.sizes
|
||||
}
|
||||
|
||||
// Add packages with no imports directly to the queue of leaves.
|
||||
if len(lpkg.Imports) == 0 {
|
||||
leaves = append(leaves, lpkg)
|
||||
}
|
||||
|
||||
stack = stack[:len(stack)-1] // pop
|
||||
lpkg.color = black
|
||||
}
|
||||
|
||||
// Complete type information is required for the
|
||||
// immediate dependencies of each source package.
|
||||
if lpkg.needsrc && ld.Mode&NeedTypes != 0 {
|
||||
for _, ipkg := range lpkg.Imports {
|
||||
ld.pkgs[ipkg.ID].needtypes = true
|
||||
}
|
||||
// Add edge from predecessor.
|
||||
if from != nil {
|
||||
from.unfinishedSuccs.Add(+1) // incref
|
||||
lpkg.preds = append(lpkg.preds, from)
|
||||
}
|
||||
|
||||
// NeedTypeSizes causes TypeSizes to be set even
|
||||
// on packages for which types aren't needed.
|
||||
if ld.Mode&NeedTypesSizes != 0 {
|
||||
lpkg.TypesSizes = ld.sizes
|
||||
}
|
||||
stack = stack[:len(stack)-1] // pop
|
||||
lpkg.color = black
|
||||
|
||||
return lpkg.needsrc
|
||||
}
|
||||
|
||||
// For each initial package, create its import DAG.
|
||||
for _, lpkg := range initial {
|
||||
visit(lpkg)
|
||||
visit(nil, lpkg)
|
||||
}
|
||||
|
||||
} else {
|
||||
|
@ -910,16 +924,45 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
|
|||
|
||||
// Load type data and syntax if needed, starting at
|
||||
// the initial packages (roots of the import DAG).
|
||||
if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
|
||||
var wg sync.WaitGroup
|
||||
for _, lpkg := range initial {
|
||||
wg.Add(1)
|
||||
go func(lpkg *loaderPackage) {
|
||||
ld.loadRecursive(lpkg)
|
||||
wg.Done()
|
||||
}(lpkg)
|
||||
if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
|
||||
|
||||
// We avoid using g.SetLimit to limit concurrency as
|
||||
// it makes g.Go stop accepting work, which prevents
|
||||
// workers from enqeuing, and thus finishing, and thus
|
||||
// allowing the group to make progress: deadlock.
|
||||
//
|
||||
// Instead we use the ioLimit and cpuLimit semaphores.
|
||||
g, _ := errgroup.WithContext(ld.Context)
|
||||
|
||||
// enqueues adds a package to the type-checking queue.
|
||||
// It must have no unfinished successors.
|
||||
var enqueue func(*loaderPackage)
|
||||
enqueue = func(lpkg *loaderPackage) {
|
||||
g.Go(func() error {
|
||||
// Parse and type-check.
|
||||
ld.loadPackage(lpkg)
|
||||
|
||||
// Notify each waiting predecessor,
|
||||
// and enqueue it when it becomes a leaf.
|
||||
for _, pred := range lpkg.preds {
|
||||
if pred.unfinishedSuccs.Add(-1) == 0 { // decref
|
||||
enqueue(pred)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Load leaves first, adding new packages
|
||||
// to the queue as they become leaves.
|
||||
for _, leaf := range leaves {
|
||||
enqueue(leaf)
|
||||
}
|
||||
|
||||
if err := g.Wait(); err != nil {
|
||||
return nil, err // cancelled
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// If the context is done, return its error and
|
||||
|
@ -961,12 +1004,14 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
|
|||
}
|
||||
if ld.requestedMode&NeedTypes == 0 {
|
||||
ld.pkgs[i].Types = nil
|
||||
ld.pkgs[i].Fset = nil
|
||||
ld.pkgs[i].IllTyped = false
|
||||
}
|
||||
if ld.requestedMode&NeedSyntax == 0 {
|
||||
ld.pkgs[i].Syntax = nil
|
||||
}
|
||||
if ld.requestedMode&(NeedSyntax|NeedTypes|NeedTypesInfo) == 0 {
|
||||
ld.pkgs[i].Fset = nil
|
||||
}
|
||||
if ld.requestedMode&NeedTypesInfo == 0 {
|
||||
ld.pkgs[i].TypesInfo = nil
|
||||
}
|
||||
|
@ -981,31 +1026,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
|
|||
return result, nil
|
||||
}
|
||||
|
||||
// loadRecursive loads the specified package and its dependencies,
|
||||
// recursively, in parallel, in topological order.
|
||||
// It is atomic and idempotent.
|
||||
// Precondition: ld.Mode&NeedTypes.
|
||||
func (ld *loader) loadRecursive(lpkg *loaderPackage) {
|
||||
lpkg.loadOnce.Do(func() {
|
||||
// Load the direct dependencies, in parallel.
|
||||
var wg sync.WaitGroup
|
||||
for _, ipkg := range lpkg.Imports {
|
||||
imp := ld.pkgs[ipkg.ID]
|
||||
wg.Add(1)
|
||||
go func(imp *loaderPackage) {
|
||||
ld.loadRecursive(imp)
|
||||
wg.Done()
|
||||
}(imp)
|
||||
}
|
||||
wg.Wait()
|
||||
ld.loadPackage(lpkg)
|
||||
})
|
||||
}
|
||||
|
||||
// loadPackage loads the specified package.
|
||||
// loadPackage loads/parses/typechecks the specified package.
|
||||
// It must be called only once per Package,
|
||||
// after immediate dependencies are loaded.
|
||||
// Precondition: ld.Mode & NeedTypes.
|
||||
// Precondition: ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0.
|
||||
func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
||||
if lpkg.PkgPath == "unsafe" {
|
||||
// Fill in the blanks to avoid surprises.
|
||||
|
@ -1041,6 +1065,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
|||
if !lpkg.needtypes && !lpkg.needsrc {
|
||||
return
|
||||
}
|
||||
|
||||
// TODO(adonovan): this condition looks wrong:
|
||||
// I think it should be lpkg.needtypes && !lpkg.needsrc,
|
||||
// so that NeedSyntax without NeedTypes can be satisfied by export data.
|
||||
if !lpkg.needsrc {
|
||||
if err := ld.loadFromExportData(lpkg); err != nil {
|
||||
lpkg.Errors = append(lpkg.Errors, Error{
|
||||
|
@ -1146,7 +1174,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
|||
}
|
||||
|
||||
lpkg.Syntax = files
|
||||
if ld.Config.Mode&NeedTypes == 0 {
|
||||
if ld.Config.Mode&(NeedTypes|NeedTypesInfo) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -1157,16 +1185,20 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
|||
return
|
||||
}
|
||||
|
||||
lpkg.TypesInfo = &types.Info{
|
||||
Types: make(map[ast.Expr]types.TypeAndValue),
|
||||
Defs: make(map[*ast.Ident]types.Object),
|
||||
Uses: make(map[*ast.Ident]types.Object),
|
||||
Implicits: make(map[ast.Node]types.Object),
|
||||
Instances: make(map[*ast.Ident]types.Instance),
|
||||
Scopes: make(map[ast.Node]*types.Scope),
|
||||
Selections: make(map[*ast.SelectorExpr]*types.Selection),
|
||||
// Populate TypesInfo only if needed, as it
|
||||
// causes the type checker to work much harder.
|
||||
if ld.Config.Mode&NeedTypesInfo != 0 {
|
||||
lpkg.TypesInfo = &types.Info{
|
||||
Types: make(map[ast.Expr]types.TypeAndValue),
|
||||
Defs: make(map[*ast.Ident]types.Object),
|
||||
Uses: make(map[*ast.Ident]types.Object),
|
||||
Implicits: make(map[ast.Node]types.Object),
|
||||
Instances: make(map[*ast.Ident]types.Instance),
|
||||
Scopes: make(map[ast.Node]*types.Scope),
|
||||
Selections: make(map[*ast.SelectorExpr]*types.Selection),
|
||||
FileVersions: make(map[*ast.File]string),
|
||||
}
|
||||
}
|
||||
versions.InitFileVersions(lpkg.TypesInfo)
|
||||
lpkg.TypesSizes = ld.sizes
|
||||
|
||||
importer := importerFunc(func(path string) (*types.Package, error) {
|
||||
|
@ -1219,6 +1251,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
|||
}
|
||||
}
|
||||
|
||||
// Type-checking is CPU intensive.
|
||||
cpuLimit <- unit{} // acquire a token
|
||||
defer func() { <-cpuLimit }() // release a token
|
||||
|
||||
typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax)
|
||||
lpkg.importErrors = nil // no longer needed
|
||||
|
||||
|
@ -1283,8 +1319,11 @@ type importerFunc func(path string) (*types.Package, error)
|
|||
func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
|
||||
|
||||
// We use a counting semaphore to limit
|
||||
// the number of parallel I/O calls per process.
|
||||
var ioLimit = make(chan bool, 20)
|
||||
// the number of parallel I/O calls or CPU threads per process.
|
||||
var (
|
||||
ioLimit = make(chan unit, 20)
|
||||
cpuLimit = make(chan unit, runtime.GOMAXPROCS(0))
|
||||
)
|
||||
|
||||
func (ld *loader) parseFile(filename string) (*ast.File, error) {
|
||||
ld.parseCacheMu.Lock()
|
||||
|
@ -1301,20 +1340,28 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) {
|
|||
|
||||
var src []byte
|
||||
for f, contents := range ld.Config.Overlay {
|
||||
// TODO(adonovan): Inefficient for large overlays.
|
||||
// Do an exact name-based map lookup
|
||||
// (for nonexistent files) followed by a
|
||||
// FileID-based map lookup (for existing ones).
|
||||
if sameFile(f, filename) {
|
||||
src = contents
|
||||
break
|
||||
}
|
||||
}
|
||||
var err error
|
||||
if src == nil {
|
||||
ioLimit <- true // wait
|
||||
ioLimit <- unit{} // acquire a token
|
||||
src, err = os.ReadFile(filename)
|
||||
<-ioLimit // signal
|
||||
<-ioLimit // release a token
|
||||
}
|
||||
if err != nil {
|
||||
v.err = err
|
||||
} else {
|
||||
// Parsing is CPU intensive.
|
||||
cpuLimit <- unit{} // acquire a token
|
||||
v.f, v.err = ld.ParseFile(ld.Fset, filename, src)
|
||||
<-cpuLimit // release a token
|
||||
}
|
||||
|
||||
close(v.ready)
|
||||
|
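For reference, the token pattern used by ioLimit and cpuLimit above is an ordinary buffered-channel counting semaphore. A minimal standalone sketch follows; the limits, file names, and the work done inside parseOne are made up for illustration.

package main

import (
	"fmt"
	"runtime"
	"sync"
)

type unit = struct{}

var (
	ioLimit  = make(chan unit, 20)                    // at most 20 concurrent reads
	cpuLimit = make(chan unit, runtime.GOMAXPROCS(0)) // at most GOMAXPROCS parses
)

func parseOne(name string) {
	ioLimit <- unit{}        // acquire an I/O token
	data := "src of " + name // stand-in for os.ReadFile
	<-ioLimit                // release the I/O token

	cpuLimit <- unit{} // acquire a CPU token
	_ = len(data)      // stand-in for parsing
	<-cpuLimit         // release the CPU token
}

func main() {
	var wg sync.WaitGroup
	for _, f := range []string{"a.go", "b.go", "c.go"} {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			parseOne(name)
		}(f)
	}
	wg.Wait()
	fmt.Println("done")
}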
@ -1329,18 +1376,21 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) {
|
|||
// Because files are scanned in parallel, the token.Pos
|
||||
// positions of the resulting ast.Files are not ordered.
|
||||
func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) {
|
||||
var wg sync.WaitGroup
|
||||
n := len(filenames)
|
||||
parsed := make([]*ast.File, n)
|
||||
errors := make([]error, n)
|
||||
for i, file := range filenames {
|
||||
wg.Add(1)
|
||||
go func(i int, filename string) {
|
||||
var (
|
||||
n = len(filenames)
|
||||
parsed = make([]*ast.File, n)
|
||||
errors = make([]error, n)
|
||||
)
|
||||
var g errgroup.Group
|
||||
for i, filename := range filenames {
|
||||
// This creates goroutines unnecessarily in the
|
||||
// cache-hit case, but that case is uncommon.
|
||||
g.Go(func() error {
|
||||
parsed[i], errors[i] = ld.parseFile(filename)
|
||||
wg.Done()
|
||||
}(i, file)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
wg.Wait()
|
||||
g.Wait()
|
||||
|
||||
// Eliminate nils, preserving order.
|
||||
var o int
|
||||
|
@ -1499,6 +1549,10 @@ func impliedLoadMode(loadMode LoadMode) LoadMode {
|
|||
// All these things require knowing the import graph.
|
||||
loadMode |= NeedImports
|
||||
}
|
||||
if loadMode&NeedTypes != 0 {
|
||||
// Types require the GoVersion from Module.
|
||||
loadMode |= NeedModule
|
||||
}
|
||||
|
||||
return loadMode
|
||||
}
|
||||
|
@ -1507,4 +1561,4 @@ func usesExportData(cfg *Config) bool {
|
|||
return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0
|
||||
}
|
||||
|
||||
var _ interface{} = io.Discard // assert build toolchain is go1.16 or later
|
||||
type unit struct{}
|
||||
|
|
9
vendor/golang.org/x/tools/go/packages/visit.go
generated
vendored
|
@ -49,11 +49,20 @@ func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) {
|
|||
// PrintErrors returns the number of errors printed.
|
||||
func PrintErrors(pkgs []*Package) int {
|
||||
var n int
|
||||
errModules := make(map[*Module]bool)
|
||||
Visit(pkgs, nil, func(pkg *Package) {
|
||||
for _, err := range pkg.Errors {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
n++
|
||||
}
|
||||
|
||||
// Print pkg.Module.Error once if present.
|
||||
mod := pkg.Module
|
||||
if mod != nil && mod.Error != nil && !errModules[mod] {
|
||||
errModules[mod] = true
|
||||
fmt.Fprintln(os.Stderr, mod.Error.Err)
|
||||
n++
|
||||
}
|
||||
})
|
||||
return n
|
||||
}
|
||||
|
|
186
vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
generated
vendored
|
@ -51,7 +51,7 @@ type Path string
|
|||
//
|
||||
// PO package->object Package.Scope.Lookup
|
||||
// OT object->type Object.Type
|
||||
// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU]
|
||||
// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying,Rhs} [EKPRUTrCa]
|
||||
// TO type->object Type.{At,Field,Method,Obj} [AFMO]
|
||||
//
|
||||
// All valid paths start with a package and end at an object
|
||||
|
@ -63,8 +63,8 @@ type Path string
|
|||
// - The only PO operator is Package.Scope.Lookup, which requires an identifier.
|
||||
// - The only OT operator is Object.Type,
|
||||
// which we encode as '.' because dot cannot appear in an identifier.
|
||||
// - The TT operators are encoded as [EKPRUTC];
|
||||
// one of these (TypeParam) requires an integer operand,
|
||||
// - The TT operators are encoded as [EKPRUTrCa];
|
||||
// two of these ({,Recv}TypeParams) require an integer operand,
|
||||
// which is encoded as a string of decimal digits.
|
||||
// - The TO operators are encoded as [AFMO];
|
||||
// three of these (At,Field,Method) require an integer operand,
|
||||
|
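The operator grammar described above is easiest to see end-to-end. Below is a small, self-contained sketch of encoding an object as a path and resolving it again; the tiny source snippet and the expected path string are illustrative, not taken from this diff.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/objectpath"
)

func main() {
	const src = `package p
type T struct{ F int }
func (T) M() {}
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}

	// Encode the field F as a path relative to its package.
	obj := pkg.Scope().Lookup("T").(*types.TypeName).
		Type().(*types.Named).
		Underlying().(*types.Struct).Field(0)
	path, err := objectpath.For(obj)
	if err != nil {
		panic(err)
	}
	fmt.Println(path) // e.g. "T.UF0": PO lookup, OT '.', TT 'U', TO 'F0'

	// Decode it again; the round trip yields the same object.
	back, err := objectpath.Object(pkg, path)
	fmt.Println(back == obj, err) // true <nil>
}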
@ -98,19 +98,21 @@ const (
|
|||
opType = '.' // .Type() (Object)
|
||||
|
||||
// type->type operators
|
||||
opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map)
|
||||
opKey = 'K' // .Key() (Map)
|
||||
opParams = 'P' // .Params() (Signature)
|
||||
opResults = 'R' // .Results() (Signature)
|
||||
opUnderlying = 'U' // .Underlying() (Named)
|
||||
opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature)
|
||||
opConstraint = 'C' // .Constraint() (TypeParam)
|
||||
opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map)
|
||||
opKey = 'K' // .Key() (Map)
|
||||
opParams = 'P' // .Params() (Signature)
|
||||
opResults = 'R' // .Results() (Signature)
|
||||
opUnderlying = 'U' // .Underlying() (Named)
|
||||
opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature)
|
||||
opRecvTypeParam = 'r' // .RecvTypeParams.At(i) (Signature)
|
||||
opConstraint = 'C' // .Constraint() (TypeParam)
|
||||
opRhs = 'a' // .Rhs() (Alias)
|
||||
|
||||
// type->object operators
|
||||
opAt = 'A' // .At(i) (Tuple)
|
||||
opField = 'F' // .Field(i) (Struct)
|
||||
opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
|
||||
opObj = 'O' // .Obj() (Named, TypeParam)
|
||||
opAt = 'A' // .At(i) (Tuple)
|
||||
opField = 'F' // .Field(i) (Struct)
|
||||
opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
|
||||
opObj = 'O' // .Obj() (Named, TypeParam)
|
||||
)
|
||||
|
||||
// For is equivalent to new(Encoder).For(obj).
|
||||
|
@ -226,7 +228,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
|
|||
// Reject obviously non-viable cases.
|
||||
switch obj := obj.(type) {
|
||||
case *types.TypeName:
|
||||
if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok {
|
||||
if _, ok := types.Unalias(obj.Type()).(*types.TypeParam); !ok {
|
||||
// With the exception of type parameters, only package-level type names
|
||||
// have a path.
|
||||
return "", fmt.Errorf("no path for %v", obj)
|
||||
|
@ -278,21 +280,26 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
|
|||
path = append(path, opType)
|
||||
|
||||
T := o.Type()
|
||||
|
||||
if tname.IsAlias() {
|
||||
// type alias
|
||||
if r := find(obj, T, path, nil); r != nil {
|
||||
if alias, ok := T.(*types.Alias); ok {
|
||||
if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam); r != nil {
|
||||
return Path(r), nil
|
||||
}
|
||||
} else {
|
||||
if named, _ := T.(*types.Named); named != nil {
|
||||
if r := findTypeParam(obj, named.TypeParams(), path, nil); r != nil {
|
||||
// generic named type
|
||||
return Path(r), nil
|
||||
}
|
||||
if r := find(obj, aliases.Rhs(alias), append(path, opRhs)); r != nil {
|
||||
return Path(r), nil
|
||||
}
|
||||
|
||||
} else if tname.IsAlias() {
|
||||
// legacy alias
|
||||
if r := find(obj, T, path); r != nil {
|
||||
return Path(r), nil
|
||||
}
|
||||
|
||||
} else if named, ok := T.(*types.Named); ok {
|
||||
// defined (named) type
|
||||
if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil {
|
||||
if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam); r != nil {
|
||||
return Path(r), nil
|
||||
}
|
||||
if r := find(obj, named.Underlying(), append(path, opUnderlying)); r != nil {
|
||||
return Path(r), nil
|
||||
}
|
||||
}
|
||||
|
@ -305,7 +312,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
|
|||
if _, ok := o.(*types.TypeName); !ok {
|
||||
if o.Exported() {
|
||||
// exported non-type (const, var, func)
|
||||
if r := find(obj, o.Type(), append(path, opType), nil); r != nil {
|
||||
if r := find(obj, o.Type(), append(path, opType)); r != nil {
|
||||
return Path(r), nil
|
||||
}
|
||||
}
|
||||
|
@ -313,7 +320,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
|
|||
}
|
||||
|
||||
// Inspect declared methods of defined types.
|
||||
if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok {
|
||||
if T, ok := types.Unalias(o.Type()).(*types.Named); ok {
|
||||
path = append(path, opType)
|
||||
// The method index here is always with respect
|
||||
// to the underlying go/types data structures,
|
||||
|
@ -325,7 +332,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
|
|||
if m == obj {
|
||||
return Path(path2), nil // found declared method
|
||||
}
|
||||
if r := find(obj, m.Type(), append(path2, opType), nil); r != nil {
|
||||
if r := find(obj, m.Type(), append(path2, opType)); r != nil {
|
||||
return Path(r), nil
|
||||
}
|
||||
}
|
||||
|
@ -440,43 +447,64 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) {
|
|||
//
|
||||
// The seen map is used to short circuit cycles through type parameters. If
|
||||
// nil, it will be allocated as necessary.
|
||||
func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte {
|
||||
//
|
||||
// The seenMethods map is used internally to short circuit cycles through
|
||||
// interface methods, such as occur in the following example:
|
||||
//
|
||||
// type I interface { f() interface{I} }
|
||||
//
|
||||
// See golang/go#68046 for details.
|
||||
func find(obj types.Object, T types.Type, path []byte) []byte {
|
||||
return (&finder{obj: obj}).find(T, path)
|
||||
}
|
||||
|
||||
// finder closes over search state for a call to find.
|
||||
type finder struct {
|
||||
obj types.Object // the sought object
|
||||
seenTParamNames map[*types.TypeName]bool // for cycle breaking through type parameters
|
||||
seenMethods map[*types.Func]bool // for cycle breaking through recursive interfaces
|
||||
}
|
||||
|
||||
func (f *finder) find(T types.Type, path []byte) []byte {
|
||||
switch T := T.(type) {
|
||||
case *aliases.Alias:
|
||||
return find(obj, aliases.Unalias(T), path, seen)
|
||||
case *types.Alias:
|
||||
return f.find(types.Unalias(T), path)
|
||||
case *types.Basic, *types.Named:
|
||||
// Named types belonging to pkg were handled already,
|
||||
// so T must belong to another package. No path.
|
||||
return nil
|
||||
case *types.Pointer:
|
||||
return find(obj, T.Elem(), append(path, opElem), seen)
|
||||
return f.find(T.Elem(), append(path, opElem))
|
||||
case *types.Slice:
|
||||
return find(obj, T.Elem(), append(path, opElem), seen)
|
||||
return f.find(T.Elem(), append(path, opElem))
|
||||
case *types.Array:
|
||||
return find(obj, T.Elem(), append(path, opElem), seen)
|
||||
return f.find(T.Elem(), append(path, opElem))
|
||||
case *types.Chan:
|
||||
return find(obj, T.Elem(), append(path, opElem), seen)
|
||||
return f.find(T.Elem(), append(path, opElem))
|
||||
case *types.Map:
|
||||
if r := find(obj, T.Key(), append(path, opKey), seen); r != nil {
|
||||
if r := f.find(T.Key(), append(path, opKey)); r != nil {
|
||||
return r
|
||||
}
|
||||
return find(obj, T.Elem(), append(path, opElem), seen)
|
||||
return f.find(T.Elem(), append(path, opElem))
|
||||
case *types.Signature:
|
||||
if r := findTypeParam(obj, T.TypeParams(), path, seen); r != nil {
|
||||
if r := f.findTypeParam(T.RecvTypeParams(), path, opRecvTypeParam); r != nil {
|
||||
return r
|
||||
}
|
||||
if r := find(obj, T.Params(), append(path, opParams), seen); r != nil {
|
||||
if r := f.findTypeParam(T.TypeParams(), path, opTypeParam); r != nil {
|
||||
return r
|
||||
}
|
||||
return find(obj, T.Results(), append(path, opResults), seen)
|
||||
if r := f.find(T.Params(), append(path, opParams)); r != nil {
|
||||
return r
|
||||
}
|
||||
return f.find(T.Results(), append(path, opResults))
|
||||
case *types.Struct:
|
||||
for i := 0; i < T.NumFields(); i++ {
|
||||
fld := T.Field(i)
|
||||
path2 := appendOpArg(path, opField, i)
|
||||
if fld == obj {
|
||||
if fld == f.obj {
|
||||
return path2 // found field var
|
||||
}
|
||||
if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil {
|
||||
if r := f.find(fld.Type(), append(path2, opType)); r != nil {
|
||||
return r
|
||||
}
|
||||
}
|
||||
|
@ -485,10 +513,10 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]
|
|||
for i := 0; i < T.Len(); i++ {
|
||||
v := T.At(i)
|
||||
path2 := appendOpArg(path, opAt, i)
|
||||
if v == obj {
|
||||
if v == f.obj {
|
||||
return path2 // found param/result var
|
||||
}
|
||||
if r := find(obj, v.Type(), append(path2, opType), seen); r != nil {
|
||||
if r := f.find(v.Type(), append(path2, opType)); r != nil {
|
||||
return r
|
||||
}
|
||||
}
|
||||
|
@ -496,28 +524,35 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]
|
|||
case *types.Interface:
|
||||
for i := 0; i < T.NumMethods(); i++ {
|
||||
m := T.Method(i)
|
||||
if f.seenMethods[m] {
|
||||
return nil
|
||||
}
|
||||
path2 := appendOpArg(path, opMethod, i)
|
||||
if m == obj {
|
||||
if m == f.obj {
|
||||
return path2 // found interface method
|
||||
}
|
||||
if r := find(obj, m.Type(), append(path2, opType), seen); r != nil {
|
||||
if f.seenMethods == nil {
|
||||
f.seenMethods = make(map[*types.Func]bool)
|
||||
}
|
||||
f.seenMethods[m] = true
|
||||
if r := f.find(m.Type(), append(path2, opType)); r != nil {
|
||||
return r
|
||||
}
|
||||
}
|
||||
return nil
|
||||
case *types.TypeParam:
|
||||
name := T.Obj()
|
||||
if name == obj {
|
||||
return append(path, opObj)
|
||||
}
|
||||
if seen[name] {
|
||||
if f.seenTParamNames[name] {
|
||||
return nil
|
||||
}
|
||||
if seen == nil {
|
||||
seen = make(map[*types.TypeName]bool)
|
||||
if name == f.obj {
|
||||
return append(path, opObj)
|
||||
}
|
||||
seen[name] = true
|
||||
if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil {
|
||||
if f.seenTParamNames == nil {
|
||||
f.seenTParamNames = make(map[*types.TypeName]bool)
|
||||
}
|
||||
f.seenTParamNames[name] = true
|
||||
if r := f.find(T.Constraint(), append(path, opConstraint)); r != nil {
|
||||
return r
|
||||
}
|
||||
return nil
|
||||
|
@ -525,11 +560,15 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]
|
|||
panic(T)
|
||||
}
|
||||
|
||||
func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte {
|
||||
func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte) []byte {
|
||||
return (&finder{obj: obj}).findTypeParam(list, path, op)
|
||||
}
|
||||
|
||||
func (f *finder) findTypeParam(list *types.TypeParamList, path []byte, op byte) []byte {
|
||||
for i := 0; i < list.Len(); i++ {
|
||||
tparam := list.At(i)
|
||||
path2 := appendOpArg(path, opTypeParam, i)
|
||||
if r := find(obj, tparam, path2, seen); r != nil {
|
||||
path2 := appendOpArg(path, op, i)
|
||||
if r := f.find(tparam, path2); r != nil {
|
||||
return r
|
||||
}
|
||||
}
|
||||
|
@ -580,10 +619,10 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
|
|||
code := suffix[0]
|
||||
suffix = suffix[1:]
|
||||
|
||||
// Codes [AFM] have an integer operand.
|
||||
// Codes [AFMTr] have an integer operand.
|
||||
var index int
|
||||
switch code {
|
||||
case opAt, opField, opMethod, opTypeParam:
|
||||
case opAt, opField, opMethod, opTypeParam, opRecvTypeParam:
|
||||
rest := strings.TrimLeft(suffix, "0123456789")
|
||||
numerals := suffix[:len(suffix)-len(rest)]
|
||||
suffix = rest
|
||||
|
@ -616,7 +655,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
|
|||
|
||||
// Inv: t != nil, obj == nil
|
||||
|
||||
t = aliases.Unalias(t)
|
||||
t = types.Unalias(t)
|
||||
switch code {
|
||||
case opElem:
|
||||
hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map
|
||||
|
@ -653,6 +692,16 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
|
|||
}
|
||||
t = named.Underlying()
|
||||
|
||||
case opRhs:
|
||||
if alias, ok := t.(*types.Alias); ok {
|
||||
t = aliases.Rhs(alias)
|
||||
} else if false && aliases.Enabled() {
|
||||
// The Enabled check is too expensive, so for now we
|
||||
// simply assume that aliases are not enabled.
|
||||
// TODO(adonovan): replace with "if true {" when go1.24 is assured.
|
||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t)
|
||||
}
|
||||
|
||||
case opTypeParam:
|
||||
hasTypeParams, ok := t.(hasTypeParams) // Named, Signature
|
||||
if !ok {
|
||||
|
@ -664,6 +713,17 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
|
|||
}
|
||||
t = tparams.At(index)
|
||||
|
||||
case opRecvTypeParam:
|
||||
sig, ok := t.(*types.Signature) // Signature
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
|
||||
}
|
||||
rtparams := sig.RecvTypeParams()
|
||||
if n := rtparams.Len(); index >= n {
|
||||
return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
|
||||
}
|
||||
t = rtparams.At(index)
|
||||
|
||||
case opConstraint:
|
||||
tparam, ok := t.(*types.TypeParam)
|
||||
if !ok {
|
||||
|
@ -725,6 +785,10 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
|
|||
}
|
||||
}
|
||||
|
||||
if obj == nil {
|
||||
panic(p) // path does not end in an object-valued operator
|
||||
}
|
||||
|
||||
if obj.Pkg() != pkg {
|
||||
return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj)
|
||||
}
|
||||
|
|
68
vendor/golang.org/x/tools/go/types/typeutil/callee.go
generated
vendored
Normal file
|
@ -0,0 +1,68 @@
|
|||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package typeutil
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/types"
|
||||
|
||||
"golang.org/x/tools/internal/typeparams"
|
||||
)
|
||||
|
||||
// Callee returns the named target of a function call, if any:
|
||||
// a function, method, builtin, or variable.
|
||||
//
|
||||
// Functions and methods may potentially have type parameters.
|
||||
func Callee(info *types.Info, call *ast.CallExpr) types.Object {
|
||||
fun := ast.Unparen(call.Fun)
|
||||
|
||||
// Look through type instantiation if necessary.
|
||||
isInstance := false
|
||||
switch fun.(type) {
|
||||
case *ast.IndexExpr, *ast.IndexListExpr:
|
||||
// When extracting the callee from an *IndexExpr, we need to check that
|
||||
// it is a *types.Func and not a *types.Var.
|
||||
// Example: Don't match a slice m within the expression `m[0]()`.
|
||||
isInstance = true
|
||||
fun, _, _, _ = typeparams.UnpackIndexExpr(fun)
|
||||
}
|
||||
|
||||
var obj types.Object
|
||||
switch fun := fun.(type) {
|
||||
case *ast.Ident:
|
||||
obj = info.Uses[fun] // type, var, builtin, or declared func
|
||||
case *ast.SelectorExpr:
|
||||
if sel, ok := info.Selections[fun]; ok {
|
||||
obj = sel.Obj() // method or field
|
||||
} else {
|
||||
obj = info.Uses[fun.Sel] // qualified identifier?
|
||||
}
|
||||
}
|
||||
if _, ok := obj.(*types.TypeName); ok {
|
||||
return nil // T(x) is a conversion, not a call
|
||||
}
|
||||
// A Func is required to match instantiations.
|
||||
if _, ok := obj.(*types.Func); isInstance && !ok {
|
||||
return nil // Was not a Func.
|
||||
}
|
||||
return obj
|
||||
}
|
||||
|
||||
// StaticCallee returns the target (function or method) of a static function
|
||||
// call, if any. It returns nil for calls to builtins.
|
||||
//
|
||||
// Note: for calls of instantiated functions and methods, StaticCallee returns
|
||||
// the corresponding generic function or method on the generic type.
|
||||
func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func {
|
||||
if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) {
|
||||
return f
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func interfaceMethod(f *types.Func) bool {
|
||||
recv := f.Type().(*types.Signature).Recv()
|
||||
return recv != nil && types.IsInterface(recv.Type())
|
||||
}
|
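A short sketch of what the newly vendored Callee and StaticCallee helpers report for a toy call site; the snippet being type-checked and its names are illustrative.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	const src = `package p
func f() {}
var g = func() {}
func main() { f(); g() }
`
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	info := &types.Info{
		Uses:       make(map[*ast.Ident]types.Object),
		Selections: make(map[*ast.SelectorExpr]*types.Selection),
	}
	if _, err := new(types.Config).Check("p", fset, []*ast.File{file}, info); err != nil {
		panic(err)
	}
	ast.Inspect(file, func(n ast.Node) bool {
		if call, ok := n.(*ast.CallExpr); ok {
			callee := typeutil.Callee(info, call)       // func, var, builtin, ...
			static := typeutil.StaticCallee(info, call) // *types.Func or nil
			fmt.Printf("%s -> callee=%v static=%v\n",
				types.ExprString(call.Fun), callee, static)
		}
		return true
	})
	// f() resolves to the declared function; g() resolves to a variable,
	// so StaticCallee reports nil for it.
}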
30
vendor/golang.org/x/tools/go/types/typeutil/imports.go
generated
vendored
Normal file
|
@ -0,0 +1,30 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package typeutil
|
||||
|
||||
import "go/types"
|
||||
|
||||
// Dependencies returns all dependencies of the specified packages.
|
||||
//
|
||||
// Dependent packages appear in topological order: if package P imports
|
||||
// package Q, Q appears earlier than P in the result.
|
||||
// The algorithm follows import statements in the order they
|
||||
// appear in the source code, so the result is a total order.
|
||||
func Dependencies(pkgs ...*types.Package) []*types.Package {
|
||||
var result []*types.Package
|
||||
seen := make(map[*types.Package]bool)
|
||||
var visit func(pkgs []*types.Package)
|
||||
visit = func(pkgs []*types.Package) {
|
||||
for _, p := range pkgs {
|
||||
if !seen[p] {
|
||||
seen[p] = true
|
||||
visit(p.Imports())
|
||||
result = append(result, p)
|
||||
}
|
||||
}
|
||||
}
|
||||
visit(pkgs)
|
||||
return result
|
||||
}
|
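Dependencies is small enough to try directly. A minimal sketch that prints the import closure of one standard-library package in topological order; the choice of the source importer and of "fmt" as the example package are assumptions, and the output depends on the local toolchain.

package main

import (
	"fmt"
	"go/importer"
	"go/token"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	// Type-check a standard-library package from source so that its
	// full import graph is populated.
	imp := importer.ForCompiler(token.NewFileSet(), "source", nil)
	pkg, err := imp.Import("fmt")
	if err != nil {
		panic(err)
	}
	// Each package is printed after everything it imports; "fmt" comes last.
	for _, p := range typeutil.Dependencies(pkg) {
		fmt.Println(p.Path())
	}
}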
517
vendor/golang.org/x/tools/go/types/typeutil/map.go
generated
vendored
Normal file
|
@ -0,0 +1,517 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package typeutil defines various utilities for types, such as Map,
|
||||
// a mapping from types.Type to any values.
|
||||
package typeutil // import "golang.org/x/tools/go/types/typeutil"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/types"
|
||||
"reflect"
|
||||
|
||||
"golang.org/x/tools/internal/typeparams"
|
||||
)
|
||||
|
||||
// Map is a hash-table-based mapping from types (types.Type) to
|
||||
// arbitrary any values. The concrete types that implement
|
||||
// the Type interface are pointers. Since they are not canonicalized,
|
||||
// == cannot be used to check for equivalence, and thus we cannot
|
||||
// simply use a Go map.
|
||||
//
|
||||
// Just as with map[K]V, a nil *Map is a valid empty map.
|
||||
//
|
||||
// Not thread-safe.
|
||||
type Map struct {
|
||||
hasher Hasher // shared by many Maps
|
||||
table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
|
||||
length int // number of map entries
|
||||
}
|
||||
|
||||
// entry is an entry (key/value association) in a hash bucket.
|
||||
type entry struct {
|
||||
key types.Type
|
||||
value any
|
||||
}
|
||||
|
||||
// SetHasher sets the hasher used by Map.
|
||||
//
|
||||
// All Hashers are functionally equivalent but contain internal state
|
||||
// used to cache the results of hashing previously seen types.
|
||||
//
|
||||
// A single Hasher created by MakeHasher() may be shared among many
|
||||
// Maps. This is recommended if the instances have many keys in
|
||||
// common, as it will amortize the cost of hash computation.
|
||||
//
|
||||
// A Hasher may grow without bound as new types are seen. Even when a
|
||||
// type is deleted from the map, the Hasher never shrinks, since other
|
||||
// types in the map may reference the deleted type indirectly.
|
||||
//
|
||||
// Hashers are not thread-safe, and read-only operations such as
|
||||
// Map.Lookup require updates to the hasher, so a full Mutex lock (not a
|
||||
// read-lock) is required around all Map operations if a shared
|
||||
// hasher is accessed from multiple threads.
|
||||
//
|
||||
// If SetHasher is not called, the Map will create a private hasher at
|
||||
// the first call to Insert.
|
||||
func (m *Map) SetHasher(hasher Hasher) {
|
||||
m.hasher = hasher
|
||||
}
|
||||
|
||||
// Delete removes the entry with the given key, if any.
|
||||
// It returns true if the entry was found.
|
||||
func (m *Map) Delete(key types.Type) bool {
|
||||
if m != nil && m.table != nil {
|
||||
hash := m.hasher.Hash(key)
|
||||
bucket := m.table[hash]
|
||||
for i, e := range bucket {
|
||||
if e.key != nil && types.Identical(key, e.key) {
|
||||
// We can't compact the bucket as it
|
||||
// would disturb iterators.
|
||||
bucket[i] = entry{}
|
||||
m.length--
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// At returns the map entry for the given key.
|
||||
// The result is nil if the entry is not present.
|
||||
func (m *Map) At(key types.Type) any {
|
||||
if m != nil && m.table != nil {
|
||||
for _, e := range m.table[m.hasher.Hash(key)] {
|
||||
if e.key != nil && types.Identical(key, e.key) {
|
||||
return e.value
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Set sets the map entry for key to val,
|
||||
// and returns the previous entry, if any.
|
||||
func (m *Map) Set(key types.Type, value any) (prev any) {
|
||||
if m.table != nil {
|
||||
hash := m.hasher.Hash(key)
|
||||
bucket := m.table[hash]
|
||||
var hole *entry
|
||||
for i, e := range bucket {
|
||||
if e.key == nil {
|
||||
hole = &bucket[i]
|
||||
} else if types.Identical(key, e.key) {
|
||||
prev = e.value
|
||||
bucket[i].value = value
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if hole != nil {
|
||||
*hole = entry{key, value} // overwrite deleted entry
|
||||
} else {
|
||||
m.table[hash] = append(bucket, entry{key, value})
|
||||
}
|
||||
} else {
|
||||
if m.hasher.memo == nil {
|
||||
m.hasher = MakeHasher()
|
||||
}
|
||||
hash := m.hasher.Hash(key)
|
||||
m.table = map[uint32][]entry{hash: {entry{key, value}}}
|
||||
}
|
||||
|
||||
m.length++
|
||||
return
|
||||
}
|
||||
|
||||
// Len returns the number of map entries.
|
||||
func (m *Map) Len() int {
|
||||
if m != nil {
|
||||
return m.length
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Iterate calls function f on each entry in the map in unspecified order.
|
||||
//
|
||||
// If f should mutate the map, Iterate provides the same guarantees as
|
||||
// Go maps: if f deletes a map entry that Iterate has not yet reached,
|
||||
// f will not be invoked for it, but if f inserts a map entry that
|
||||
// Iterate has not yet reached, whether or not f will be invoked for
|
||||
// it is unspecified.
|
||||
func (m *Map) Iterate(f func(key types.Type, value any)) {
|
||||
if m != nil {
|
||||
for _, bucket := range m.table {
|
||||
for _, e := range bucket {
|
||||
if e.key != nil {
|
||||
f(e.key, e.value)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Keys returns a new slice containing the set of map keys.
|
||||
// The order is unspecified.
|
||||
func (m *Map) Keys() []types.Type {
|
||||
keys := make([]types.Type, 0, m.Len())
|
||||
m.Iterate(func(key types.Type, _ any) {
|
||||
keys = append(keys, key)
|
||||
})
|
||||
return keys
|
||||
}
|
||||
|
||||
func (m *Map) toString(values bool) string {
|
||||
if m == nil {
|
||||
return "{}"
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprint(&buf, "{")
|
||||
sep := ""
|
||||
m.Iterate(func(key types.Type, value any) {
|
||||
fmt.Fprint(&buf, sep)
|
||||
sep = ", "
|
||||
fmt.Fprint(&buf, key)
|
||||
if values {
|
||||
fmt.Fprintf(&buf, ": %q", value)
|
||||
}
|
||||
})
|
||||
fmt.Fprint(&buf, "}")
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// String returns a string representation of the map's entries.
|
||||
// Values are printed using fmt.Sprintf("%v", v).
|
||||
// Order is unspecified.
|
||||
func (m *Map) String() string {
|
||||
return m.toString(true)
|
||||
}
|
||||
|
||||
// KeysString returns a string representation of the map's key set.
|
||||
// Order is unspecified.
|
||||
func (m *Map) KeysString() string {
|
||||
return m.toString(false)
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
// Hasher
|
||||
|
||||
// A Hasher maps each type to its hash value.
|
||||
// For efficiency, a hasher uses memoization; thus its memory
|
||||
// footprint grows monotonically over time.
|
||||
// Hashers are not thread-safe.
|
||||
// Hashers have reference semantics.
|
||||
// Call MakeHasher to create a Hasher.
|
||||
type Hasher struct {
|
||||
memo map[types.Type]uint32
|
||||
|
||||
// ptrMap records pointer identity.
|
||||
ptrMap map[any]uint32
|
||||
|
||||
// sigTParams holds type parameters from the signature being hashed.
|
||||
// Signatures are considered identical modulo renaming of type parameters, so
|
||||
// within the scope of a signature type the identity of the signature's type
|
||||
// parameters is just their index.
|
||||
//
|
||||
// Since the language does not currently support referring to uninstantiated
|
||||
// generic types or functions, and instantiated signatures do not have type
|
||||
// parameter lists, we should never encounter a second non-empty type
|
||||
// parameter list when hashing a generic signature.
|
||||
sigTParams *types.TypeParamList
|
||||
}
|
||||
|
||||
// MakeHasher returns a new Hasher instance.
|
||||
func MakeHasher() Hasher {
|
||||
return Hasher{
|
||||
memo: make(map[types.Type]uint32),
|
||||
ptrMap: make(map[any]uint32),
|
||||
sigTParams: nil,
|
||||
}
|
||||
}
|
||||
|
||||
// Hash computes a hash value for the given type t such that
|
||||
// Identical(t, t') => Hash(t) == Hash(t').
|
||||
func (h Hasher) Hash(t types.Type) uint32 {
|
||||
hash, ok := h.memo[t]
|
||||
if !ok {
|
||||
hash = h.hashFor(t)
|
||||
h.memo[t] = hash
|
||||
}
|
||||
return hash
|
||||
}
|
||||
|
||||
// hashString computes the Fowler–Noll–Vo hash of s.
|
||||
func hashString(s string) uint32 {
|
||||
var h uint32
|
||||
for i := 0; i < len(s); i++ {
|
||||
h ^= uint32(s[i])
|
||||
h *= 16777619
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
// hashFor computes the hash of t.
|
||||
func (h Hasher) hashFor(t types.Type) uint32 {
|
||||
// See Identical for rationale.
|
||||
switch t := t.(type) {
|
||||
case *types.Basic:
|
||||
return uint32(t.Kind())
|
||||
|
||||
case *types.Alias:
|
||||
return h.Hash(types.Unalias(t))
|
||||
|
||||
case *types.Array:
|
||||
return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())
|
||||
|
||||
case *types.Slice:
|
||||
return 9049 + 2*h.Hash(t.Elem())
|
||||
|
||||
case *types.Struct:
|
||||
var hash uint32 = 9059
|
||||
for i, n := 0, t.NumFields(); i < n; i++ {
|
||||
f := t.Field(i)
|
||||
if f.Anonymous() {
|
||||
hash += 8861
|
||||
}
|
||||
hash += hashString(t.Tag(i))
|
||||
hash += hashString(f.Name()) // (ignore f.Pkg)
|
||||
hash += h.Hash(f.Type())
|
||||
}
|
||||
return hash
|
||||
|
||||
case *types.Pointer:
|
||||
return 9067 + 2*h.Hash(t.Elem())
|
||||
|
||||
case *types.Signature:
|
||||
var hash uint32 = 9091
|
||||
if t.Variadic() {
|
||||
hash *= 8863
|
||||
}
|
||||
|
||||
// Use a separate hasher for types inside of the signature, where type
|
||||
// parameter identity is modified to be (index, constraint). We must use a
|
||||
// new memo for this hasher as type identity may be affected by this
|
||||
// masking. For example, in func[T any](*T), the identity of *T depends on
|
||||
// whether we are mapping the argument in isolation, or recursively as part
|
||||
// of hashing the signature.
|
||||
//
|
||||
// We should never encounter a generic signature while hashing another
|
||||
// generic signature, but defensively set sigTParams only if it is
|
||||
// unset.
|
||||
tparams := t.TypeParams()
|
||||
if h.sigTParams == nil && tparams.Len() != 0 {
|
||||
h = Hasher{
|
||||
// There may be something more efficient than discarding the existing
|
||||
// memo, but it would require detecting whether types are 'tainted' by
|
||||
// references to type parameters.
|
||||
memo: make(map[types.Type]uint32),
|
||||
// Re-using ptrMap ensures that pointer identity is preserved in this
|
||||
// hasher.
|
||||
ptrMap: h.ptrMap,
|
||||
sigTParams: tparams,
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < tparams.Len(); i++ {
|
||||
tparam := tparams.At(i)
|
||||
hash += 7 * h.Hash(tparam.Constraint())
|
||||
}
|
||||
|
||||
return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
|
||||
|
||||
case *types.Union:
|
||||
return h.hashUnion(t)
|
||||
|
||||
case *types.Interface:
|
||||
// Interfaces are identical if they have the same set of methods, with
|
||||
// identical names and types, and they have the same set of type
|
||||
// restrictions. See go/types.identical for more details.
|
||||
var hash uint32 = 9103
|
||||
|
||||
// Hash methods.
|
||||
for i, n := 0, t.NumMethods(); i < n; i++ {
|
||||
// Method order is not significant.
|
||||
// Ignore m.Pkg().
|
||||
m := t.Method(i)
|
||||
// Use shallow hash on method signature to
|
||||
// avoid anonymous interface cycles.
|
||||
hash += 3*hashString(m.Name()) + 5*h.shallowHash(m.Type())
|
||||
}
|
||||
|
||||
// Hash type restrictions.
|
||||
terms, err := typeparams.InterfaceTermSet(t)
|
||||
// if err != nil t has invalid type restrictions.
|
||||
if err == nil {
|
||||
hash += h.hashTermSet(terms)
|
||||
}
|
||||
|
||||
return hash
|
||||
|
||||
case *types.Map:
|
||||
return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())
|
||||
|
||||
case *types.Chan:
|
||||
return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())
|
||||
|
||||
case *types.Named:
|
||||
hash := h.hashPtr(t.Obj())
|
||||
targs := t.TypeArgs()
|
||||
for i := 0; i < targs.Len(); i++ {
|
||||
targ := targs.At(i)
|
||||
hash += 2 * h.Hash(targ)
|
||||
}
|
||||
return hash
|
||||
|
||||
case *types.TypeParam:
|
||||
return h.hashTypeParam(t)
|
||||
|
||||
case *types.Tuple:
|
||||
return h.hashTuple(t)
|
||||
}
|
||||
|
||||
panic(fmt.Sprintf("%T: %v", t, t))
|
||||
}
|
||||
|
||||
func (h Hasher) hashTuple(tuple *types.Tuple) uint32 {
|
||||
// See go/types.identicalTypes for rationale.
|
||||
n := tuple.Len()
|
||||
hash := 9137 + 2*uint32(n)
|
||||
for i := 0; i < n; i++ {
|
||||
hash += 3 * h.Hash(tuple.At(i).Type())
|
||||
}
|
||||
return hash
|
||||
}
|
||||
|
||||
func (h Hasher) hashUnion(t *types.Union) uint32 {
|
||||
// Hash type restrictions.
|
||||
terms, err := typeparams.UnionTermSet(t)
|
||||
// if err != nil t has invalid type restrictions. Fall back on a non-zero
|
||||
// hash.
|
||||
if err != nil {
|
||||
return 9151
|
||||
}
|
||||
return h.hashTermSet(terms)
|
||||
}
|
||||
|
||||
func (h Hasher) hashTermSet(terms []*types.Term) uint32 {
|
||||
hash := 9157 + 2*uint32(len(terms))
|
||||
for _, term := range terms {
|
||||
// term order is not significant.
|
||||
termHash := h.Hash(term.Type())
|
||||
if term.Tilde() {
|
||||
termHash *= 9161
|
||||
}
|
||||
hash += 3 * termHash
|
||||
}
|
||||
return hash
|
||||
}
|
||||
|
||||
// hashTypeParam returns a hash of the type parameter t, with a hash value
|
||||
// depending on whether t is contained in h.sigTParams.
|
||||
//
|
||||
// If h.sigTParams is set and contains t, then we are in the process of hashing
|
||||
// a signature, and the hash value of t must depend only on t's index and
|
||||
// constraint: signatures are considered identical modulo type parameter
|
||||
// renaming. To avoid infinite recursion, we only hash the type parameter
|
||||
// index, and rely on types.Identical to handle signatures where constraints
|
||||
// are not identical.
|
||||
//
|
||||
// Otherwise the hash of t depends only on t's pointer identity.
|
||||
func (h Hasher) hashTypeParam(t *types.TypeParam) uint32 {
|
||||
if h.sigTParams != nil {
|
||||
i := t.Index()
|
||||
if i >= 0 && i < h.sigTParams.Len() && t == h.sigTParams.At(i) {
|
||||
return 9173 + 3*uint32(i)
|
||||
}
|
||||
}
|
||||
return h.hashPtr(t.Obj())
|
||||
}
|
||||
|
||||
// hashPtr hashes the pointer identity of ptr. It uses h.ptrMap to ensure that
|
||||
// pointer values are not dependent on the GC.
|
||||
func (h Hasher) hashPtr(ptr any) uint32 {
|
||||
if hash, ok := h.ptrMap[ptr]; ok {
|
||||
return hash
|
||||
}
|
||||
hash := uint32(reflect.ValueOf(ptr).Pointer())
|
||||
h.ptrMap[ptr] = hash
|
||||
return hash
|
||||
}
|
||||
|
||||
// shallowHash computes a hash of t without looking at any of its
|
||||
// element Types, to avoid potential anonymous cycles in the types of
|
||||
// interface methods.
|
||||
//
|
||||
// When an unnamed non-empty interface type appears anywhere among the
|
||||
// arguments or results of an interface method, there is a potential
|
||||
// for endless recursion. Consider:
|
||||
//
|
||||
// type X interface { m() []*interface { X } }
|
||||
//
|
||||
// The problem is that the Methods of the interface in m's result type
|
||||
// include m itself; there is no mention of the named type X that
|
||||
// might help us break the cycle.
|
||||
// (See comment in go/types.identical, case *Interface, for more.)
|
||||
func (h Hasher) shallowHash(t types.Type) uint32 {
|
||||
// t is the type of an interface method (Signature),
|
||||
// its params or results (Tuples), or their immediate
|
||||
// elements (mostly Slice, Pointer, Basic, Named),
|
||||
// so there's no need to optimize anything else.
|
||||
switch t := t.(type) {
|
||||
case *types.Alias:
|
||||
return h.shallowHash(types.Unalias(t))
|
||||
|
||||
case *types.Signature:
|
||||
var hash uint32 = 604171
|
||||
if t.Variadic() {
|
||||
hash *= 971767
|
||||
}
|
||||
// The Signature/Tuple recursion is always finite
|
||||
// and invariably shallow.
|
||||
return hash + 1062599*h.shallowHash(t.Params()) + 1282529*h.shallowHash(t.Results())
|
||||
|
||||
case *types.Tuple:
|
||||
n := t.Len()
|
||||
hash := 9137 + 2*uint32(n)
|
||||
for i := 0; i < n; i++ {
|
||||
hash += 53471161 * h.shallowHash(t.At(i).Type())
|
||||
}
|
||||
return hash
|
||||
|
||||
case *types.Basic:
|
||||
return 45212177 * uint32(t.Kind())
|
||||
|
||||
case *types.Array:
|
||||
return 1524181 + 2*uint32(t.Len())
|
||||
|
||||
case *types.Slice:
|
||||
return 2690201
|
||||
|
||||
case *types.Struct:
|
||||
return 3326489
|
||||
|
||||
case *types.Pointer:
|
||||
return 4393139
|
||||
|
||||
case *types.Union:
|
||||
return 562448657
|
||||
|
||||
case *types.Interface:
|
||||
return 2124679 // no recursion here
|
||||
|
||||
case *types.Map:
|
||||
return 9109
|
||||
|
||||
case *types.Chan:
|
||||
return 9127
|
||||
|
||||
case *types.Named:
|
||||
return h.hashPtr(t.Obj())
|
||||
|
||||
case *types.TypeParam:
|
||||
return h.hashPtr(t.Obj())
|
||||
}
|
||||
panic(fmt.Sprintf("shallowHash: %T: %v", t, t))
|
||||
}
|
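Since types.Type values are not canonicalized, the Map added in the file above is the usual substitute for map[types.Type]V. A tiny usage sketch, with the key types constructed by hand for brevity:

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	var m typeutil.Map // the zero value is ready to use

	// Two structurally identical but distinct Type values.
	a := types.NewSlice(types.Typ[types.Int])
	b := types.NewSlice(types.Typ[types.Int])
	fmt.Println(a == b)                // false: pointer identity differs
	fmt.Println(types.Identical(a, b)) // true: the types are identical

	m.Set(a, "a value keyed by []int")
	fmt.Println(m.At(b)) // found via types.Identical, not ==
	fmt.Println(m.Len()) // 1
}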
71
vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
generated
vendored
Normal file
|
@ -0,0 +1,71 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file implements a cache of method sets.
|
||||
|
||||
package typeutil
|
||||
|
||||
import (
|
||||
"go/types"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// A MethodSetCache records the method set of each type T for which
|
||||
// MethodSet(T) is called so that repeat queries are fast.
|
||||
// The zero value is a ready-to-use cache instance.
|
||||
type MethodSetCache struct {
|
||||
mu sync.Mutex
|
||||
named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N
|
||||
others map[types.Type]*types.MethodSet // all other types
|
||||
}
|
||||
|
||||
// MethodSet returns the method set of type T. It is thread-safe.
|
||||
//
|
||||
// If cache is nil, this function is equivalent to types.NewMethodSet(T).
|
||||
// Utility functions can thus expose an optional *MethodSetCache
|
||||
// parameter to clients that care about performance.
|
||||
func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet {
|
||||
if cache == nil {
|
||||
return types.NewMethodSet(T)
|
||||
}
|
||||
cache.mu.Lock()
|
||||
defer cache.mu.Unlock()
|
||||
|
||||
switch T := types.Unalias(T).(type) {
|
||||
case *types.Named:
|
||||
return cache.lookupNamed(T).value
|
||||
|
||||
case *types.Pointer:
|
||||
if N, ok := types.Unalias(T.Elem()).(*types.Named); ok {
|
||||
return cache.lookupNamed(N).pointer
|
||||
}
|
||||
}
|
||||
|
||||
// all other types
|
||||
// (The map uses pointer equivalence, not type identity.)
|
||||
mset := cache.others[T]
|
||||
if mset == nil {
|
||||
mset = types.NewMethodSet(T)
|
||||
if cache.others == nil {
|
||||
cache.others = make(map[types.Type]*types.MethodSet)
|
||||
}
|
||||
cache.others[T] = mset
|
||||
}
|
||||
return mset
|
||||
}
|
||||
|
||||
func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } {
|
||||
if cache.named == nil {
|
||||
cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet })
|
||||
}
|
||||
// Avoid recomputing mset(*T) for each distinct Pointer
|
||||
// instance whose underlying type is a named type.
|
||||
msets, ok := cache.named[named]
|
||||
if !ok {
|
||||
msets.value = types.NewMethodSet(named)
|
||||
msets.pointer = types.NewMethodSet(types.NewPointer(named))
|
||||
cache.named[named] = msets
|
||||
}
|
||||
return msets
|
||||
}
|
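A minimal sketch of the cache in use; the example package and named type are invented, and the zero-value cache needs no initialization.

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	// Build a named type N with one pointer-receiver method, by hand.
	pkg := types.NewPackage("example.org/p", "p")
	obj := types.NewTypeName(0, pkg, "N", nil)
	named := types.NewNamed(obj, types.Typ[types.Int], nil)
	recv := types.NewVar(0, pkg, "n", types.NewPointer(named))
	named.AddMethod(types.NewFunc(0, pkg, "M",
		types.NewSignatureType(recv, nil, nil, nil, nil, false)))

	var cache typeutil.MethodSetCache
	fmt.Println(cache.MethodSet(named).Len())                   // 0: M needs *N
	fmt.Println(cache.MethodSet(types.NewPointer(named)).Len()) // 1: repeat queries hit the cache
}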
53
vendor/golang.org/x/tools/go/types/typeutil/ui.go
generated
vendored
Normal file
|
@ -0,0 +1,53 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package typeutil
|
||||
|
||||
// This file defines utilities for user interfaces that display types.
|
||||
|
||||
import (
|
||||
"go/types"
|
||||
)
|
||||
|
||||
// IntuitiveMethodSet returns the intuitive method set of a type T,
|
||||
// which is the set of methods you can call on an addressable value of
|
||||
// that type.
|
||||
//
|
||||
// The result always contains MethodSet(T), and is exactly MethodSet(T)
|
||||
// for interface types and for pointer-to-concrete types.
|
||||
// For all other concrete types T, the result additionally
|
||||
// contains each method belonging to *T if there is no identically
|
||||
// named method on T itself.
|
||||
//
|
||||
// This corresponds to user intuition about method sets;
|
||||
// this function is intended only for user interfaces.
|
||||
//
|
||||
// The order of the result is as for types.MethodSet(T).
|
||||
func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
|
||||
isPointerToConcrete := func(T types.Type) bool {
|
||||
ptr, ok := types.Unalias(T).(*types.Pointer)
|
||||
return ok && !types.IsInterface(ptr.Elem())
|
||||
}
|
||||
|
||||
var result []*types.Selection
|
||||
mset := msets.MethodSet(T)
|
||||
if types.IsInterface(T) || isPointerToConcrete(T) {
|
||||
for i, n := 0, mset.Len(); i < n; i++ {
|
||||
result = append(result, mset.At(i))
|
||||
}
|
||||
} else {
|
||||
// T is some other concrete type.
|
||||
// Report methods of T and *T, preferring those of T.
|
||||
pmset := msets.MethodSet(types.NewPointer(T))
|
||||
for i, n := 0, pmset.Len(); i < n; i++ {
|
||||
meth := pmset.At(i)
|
||||
if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil {
|
||||
meth = m
|
||||
}
|
||||
result = append(result, meth)
|
||||
}
|
||||
|
||||
}
|
||||
return result
|
||||
}
|
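IntuitiveMethodSet above merges the value and pointer method sets. A hand-built sketch, where the package q and type T with one value and one pointer method are invented:

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	pkg := types.NewPackage("example.org/q", "q")
	obj := types.NewTypeName(0, pkg, "T", nil)
	named := types.NewNamed(obj, types.Typ[types.Int], nil)
	valRecv := types.NewVar(0, pkg, "t", named)
	ptrRecv := types.NewVar(0, pkg, "t", types.NewPointer(named))
	named.AddMethod(types.NewFunc(0, pkg, "V",
		types.NewSignatureType(valRecv, nil, nil, nil, nil, false)))
	named.AddMethod(types.NewFunc(0, pkg, "P",
		types.NewSignatureType(ptrRecv, nil, nil, nil, nil, false)))

	var cache typeutil.MethodSetCache
	// MethodSet(T) contains only V; the intuitive set also includes P,
	// since P is callable on any addressable value of type T.
	fmt.Println(cache.MethodSet(named).Len()) // 1
	for _, sel := range typeutil.IntuitiveMethodSet(named, &cache) {
		fmt.Println(sel.Obj().Name()) // P and V
	}
}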
10
vendor/golang.org/x/tools/internal/aliases/aliases.go
generated
vendored
|
@ -22,11 +22,17 @@ import (
|
|||
// GODEBUG=gotypesalias=... by invoking the type checker. The Enabled
|
||||
// function is expensive and should be called once per task (e.g.
|
||||
// package import), not once per call to NewAlias.
|
||||
func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName {
|
||||
//
|
||||
// Precondition: enabled || len(tparams)==0.
|
||||
// If materialized aliases are disabled, there must not be any type parameters.
|
||||
func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName {
|
||||
if enabled {
|
||||
tname := types.NewTypeName(pos, pkg, name, nil)
|
||||
newAlias(tname, rhs)
|
||||
SetTypeParams(types.NewAlias(tname, rhs), tparams)
|
||||
return tname
|
||||
}
|
||||
if len(tparams) > 0 {
|
||||
panic("cannot create an alias with type parameters when gotypesalias is not enabled")
|
||||
}
|
||||
return types.NewTypeName(pos, pkg, name, rhs)
|
||||
}
|
||||
|
|
31
vendor/golang.org/x/tools/internal/aliases/aliases_go121.go
generated
vendored
|
@ -1,31 +0,0 @@
|
|||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.22
|
||||
// +build !go1.22
|
||||
|
||||
package aliases
|
||||
|
||||
import (
|
||||
"go/types"
|
||||
)
|
||||
|
||||
// Alias is a placeholder for a go/types.Alias for <=1.21.
|
||||
// It will never be created by go/types.
|
||||
type Alias struct{}
|
||||
|
||||
func (*Alias) String() string { panic("unreachable") }
|
||||
func (*Alias) Underlying() types.Type { panic("unreachable") }
|
||||
func (*Alias) Obj() *types.TypeName { panic("unreachable") }
|
||||
func Rhs(alias *Alias) types.Type { panic("unreachable") }
|
||||
|
||||
// Unalias returns the type t for go <=1.21.
|
||||
func Unalias(t types.Type) types.Type { return t }
|
||||
|
||||
func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") }
|
||||
|
||||
// Enabled reports whether [NewAlias] should create [types.Alias] types.
|
||||
//
|
||||
// Before go1.22, this function always returns false.
|
||||
func Enabled() bool { return false }
|
55
vendor/golang.org/x/tools/internal/aliases/aliases_go122.go
generated
vendored
|
@ -2,9 +2,6 @@
|
|||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.22
|
||||
// +build go1.22
|
||||
|
||||
package aliases
|
||||
|
||||
import (
|
||||
|
@ -14,31 +11,51 @@ import (
|
|||
"go/types"
|
||||
)
|
||||
|
||||
// Alias is an alias of types.Alias.
|
||||
type Alias = types.Alias
|
||||
|
||||
// Rhs returns the type on the right-hand side of the alias declaration.
|
||||
func Rhs(alias *Alias) types.Type {
|
||||
func Rhs(alias *types.Alias) types.Type {
|
||||
if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok {
|
||||
return alias.Rhs() // go1.23+
|
||||
}
|
||||
|
||||
// go1.22's Alias didn't have the Rhs method,
|
||||
// so Unalias is the best we can do.
|
||||
return Unalias(alias)
|
||||
return types.Unalias(alias)
|
||||
}
|
||||
|
||||
// Unalias is a wrapper of types.Unalias.
|
||||
func Unalias(t types.Type) types.Type { return types.Unalias(t) }
|
||||
// TypeParams returns the type parameter list of the alias.
|
||||
func TypeParams(alias *types.Alias) *types.TypeParamList {
|
||||
if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok {
|
||||
return alias.TypeParams() // go1.23+
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// newAlias is an internal alias around types.NewAlias.
|
||||
// Direct usage is discouraged as the moment.
|
||||
// Try to use NewAlias instead.
|
||||
func newAlias(tname *types.TypeName, rhs types.Type) *Alias {
|
||||
a := types.NewAlias(tname, rhs)
|
||||
// TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect.
|
||||
Unalias(a)
|
||||
return a
|
||||
// SetTypeParams sets the type parameters of the alias type.
|
||||
func SetTypeParams(alias *types.Alias, tparams []*types.TypeParam) {
|
||||
if alias, ok := any(alias).(interface {
|
||||
SetTypeParams(tparams []*types.TypeParam)
|
||||
}); ok {
|
||||
alias.SetTypeParams(tparams) // go1.23+
|
||||
} else if len(tparams) > 0 {
|
||||
panic("cannot set type parameters of an Alias type in go1.22")
|
||||
}
|
||||
}
|
||||
|
||||
// TypeArgs returns the type arguments used to instantiate the Alias type.
|
||||
func TypeArgs(alias *types.Alias) *types.TypeList {
|
||||
if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok {
|
||||
return alias.TypeArgs() // go1.23+
|
||||
}
|
||||
return nil // empty (go1.22)
|
||||
}
|
||||
|
||||
// Origin returns the generic Alias type of which alias is an instance.
|
||||
// If alias is not an instance of a generic alias, Origin returns alias.
|
||||
func Origin(alias *types.Alias) *types.Alias {
|
||||
if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok {
|
||||
return alias.Origin() // go1.23+
|
||||
}
|
||||
return alias // not an instance of a generic alias (go1.22)
|
||||
}
|
||||
|
||||
// Enabled reports whether [NewAlias] should create [types.Alias] types.
|
||||
|
@ -56,7 +73,7 @@ func Enabled() bool {
|
|||
// many tests. Therefore any attempt to cache the result
|
||||
// is just incorrect.
|
||||
fset := token.NewFileSet()
|
||||
f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0)
|
||||
f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", parser.SkipObjectResolution)
|
||||
pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
|
||||
_, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias)
|
||||
return enabled
|
||||
|
|
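The Enabled probe just above can be run on its own. A standalone sketch of the same detection trick (go1.22 or newer; the printed result depends on the gotypesalias GODEBUG setting in effect):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	// Type-check a one-line alias declaration and see whether the
	// checker materialized a *types.Alias for it.
	fset := token.NewFileSet()
	f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", parser.SkipObjectResolution)
	pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
	_, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias)
	fmt.Println("materialized aliases enabled:", enabled)
}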
61
vendor/golang.org/x/tools/internal/gcimporter/bimport.go
generated
vendored
|
@ -87,64 +87,3 @@ func chanDir(d int) types.ChanDir {
|
|||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
var predeclOnce sync.Once
|
||||
var predecl []types.Type // initialized lazily
|
||||
|
||||
func predeclared() []types.Type {
|
||||
predeclOnce.Do(func() {
|
||||
// initialize lazily to be sure that all
|
||||
// elements have been initialized before
|
||||
predecl = []types.Type{ // basic types
|
||||
types.Typ[types.Bool],
|
||||
types.Typ[types.Int],
|
||||
types.Typ[types.Int8],
|
||||
types.Typ[types.Int16],
|
||||
types.Typ[types.Int32],
|
||||
types.Typ[types.Int64],
|
||||
types.Typ[types.Uint],
|
||||
types.Typ[types.Uint8],
|
||||
types.Typ[types.Uint16],
|
||||
types.Typ[types.Uint32],
|
||||
types.Typ[types.Uint64],
|
||||
types.Typ[types.Uintptr],
|
||||
types.Typ[types.Float32],
|
||||
types.Typ[types.Float64],
|
||||
types.Typ[types.Complex64],
|
||||
types.Typ[types.Complex128],
|
||||
types.Typ[types.String],
|
||||
|
||||
// basic type aliases
|
||||
types.Universe.Lookup("byte").Type(),
|
||||
types.Universe.Lookup("rune").Type(),
|
||||
|
||||
// error
|
||||
types.Universe.Lookup("error").Type(),
|
||||
|
||||
// untyped types
|
||||
types.Typ[types.UntypedBool],
|
||||
types.Typ[types.UntypedInt],
|
||||
types.Typ[types.UntypedRune],
|
||||
types.Typ[types.UntypedFloat],
|
||||
types.Typ[types.UntypedComplex],
|
||||
types.Typ[types.UntypedString],
|
||||
types.Typ[types.UntypedNil],
|
||||
|
||||
// package unsafe
|
||||
types.Typ[types.UnsafePointer],
|
||||
|
||||
// invalid type
|
||||
types.Typ[types.Invalid], // only appears in packages with errors
|
||||
|
||||
// used internally by gc; never used by this package or in .a files
|
||||
anyType{},
|
||||
}
|
||||
predecl = append(predecl, additionalPredeclared()...)
|
||||
})
|
||||
return predecl
|
||||
}
|
||||
|
||||
type anyType struct{}
|
||||
|
||||
func (t anyType) Underlying() types.Type { return t }
|
||||
func (t anyType) String() string { return "any" }
|
||||
|
|
75
vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
generated
vendored
|
@ -39,12 +39,15 @@ func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) {
}

// FindExportData positions the reader r at the beginning of the
// export data section of an underlying GC-created object/archive
// export data section of an underlying cmd/compile created archive
// file by reading from it. The reader must be positioned at the
// start of the file before calling this function. The hdr result
// is the string before the export data, either "$$" or "$$B".
// The size result is the length of the export data in bytes, or -1 if not known.
func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
// start of the file before calling this function.
// The size result is the length of the export data in bytes.
//
// This function is needed by [gcexportdata.Read], which must
// accept inputs produced by the last two releases of cmd/compile,
// plus tip.
func FindExportData(r *bufio.Reader) (size int64, err error) {
	// Read first line to make sure this is an object file.
	line, err := r.ReadSlice('\n')
	if err != nil {
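The new doc comment points callers at the public wrapper [gcexportdata.Read] rather than at this internal helper. A hedged usage sketch of that public entry point (the archive path below is hypothetical; adjust it to a real .a file produced by your toolchain):

package main

import (
	"bufio"
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	const archive = "/tmp/fmt.a" // hypothetical compiled package archive

	f, err := os.Open(archive)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// NewReader skips to the export data section of the archive,
	// which is what FindExportData implements internally.
	r, err := gcexportdata.NewReader(bufio.NewReader(f))
	if err != nil {
		log.Fatal(err)
	}

	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, "fmt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("loaded package:", pkg.Path())
}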
@ -52,28 +55,33 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
		return
	}

	if string(line) == "!<arch>\n" {
		// Archive file. Scan to __.PKGDEF.
		var name string
		if name, size, err = readGopackHeader(r); err != nil {
			return
		}

		// First entry should be __.PKGDEF.
		if name != "__.PKGDEF" {
			err = fmt.Errorf("go archive is missing __.PKGDEF")
			return
		}

		// Read first line of __.PKGDEF data, so that line
		// is once again the first line of the input.
		if line, err = r.ReadSlice('\n'); err != nil {
			err = fmt.Errorf("can't find export data (%v)", err)
			return
		}
		size -= int64(len(line))
	// Is the first line an archive file signature?
	if string(line) != "!<arch>\n" {
		err = fmt.Errorf("not the start of an archive file (%q)", line)
		return
	}

	// Archive file. Scan to __.PKGDEF.
	var name string
	if name, size, err = readGopackHeader(r); err != nil {
		return
	}
	arsize := size

	// First entry should be __.PKGDEF.
	if name != "__.PKGDEF" {
		err = fmt.Errorf("go archive is missing __.PKGDEF")
		return
	}

	// Read first line of __.PKGDEF data, so that line
	// is once again the first line of the input.
	if line, err = r.ReadSlice('\n'); err != nil {
		err = fmt.Errorf("can't find export data (%v)", err)
		return
	}
	size -= int64(len(line))

	// Now at __.PKGDEF in archive or still at beginning of file.
	// Either way, line should begin with "go object ".
	if !strings.HasPrefix(string(line), "go object ") {
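The rewritten hunk rejects any input whose first line is not the ar signature "!<arch>\n" before scanning for __.PKGDEF; the old code instead treated non-archive inputs as plain object files. A small standalone sketch (illustrative only, not the vendored helper) of that precondition check:

package main

import (
	"bufio"
	"fmt"
	"log"
	"os"
)

// isGoArchive reports whether the file begins with the ar archive
// signature "!<arch>\n", the precondition the new code enforces.
func isGoArchive(path string) (bool, error) {
	f, err := os.Open(path)
	if err != nil {
		return false, err
	}
	defer f.Close()

	line, err := bufio.NewReader(f).ReadSlice('\n')
	if err != nil {
		return false, err
	}
	return string(line) == "!<arch>\n", nil
}

func main() {
	// Hypothetical path to a compiled package archive.
	ok, err := isGoArchive("/tmp/p.a")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("archive:", ok)
}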
@ -81,8 +89,8 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
		return
	}

	// Skip over object header to export data.
	// Begins after first line starting with $$.
	// Skip over object headers to get to the export data section header "$$B\n".
	// Object headers are lines that do not start with '$'.
	for line[0] != '$' {
		if line, err = r.ReadSlice('\n'); err != nil {
			err = fmt.Errorf("can't find export data (%v)", err)
@ -90,9 +98,18 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
		}
		size -= int64(len(line))
	}
	hdr = string(line)

	// Check for the binary export data section header "$$B\n".
	hdr := string(line)
	if hdr != "$$B\n" {
		err = fmt.Errorf("unknown export data header: %q", hdr)
		return
	}
	// TODO(taking): Remove end-of-section marker "\n$$\n" from size.

	if size < 0 {
		size = -1
		err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size)
		return
	}

	return
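The last two hunks together skip object-header lines (those not starting with '$') and then insist on the binary export marker "$$B\n". A compact, self-contained replay of that scan over an in-memory input (the sample header text below is made up for illustration):

package main

import (
	"bufio"
	"fmt"
	"strings"
)

// scanToExportData mirrors the header-scan loop from the hunks above:
// skip object-header lines, then require the "$$B\n" marker.
func scanToExportData(r *bufio.Reader) error {
	line, err := r.ReadSlice('\n')
	if err != nil {
		return err
	}
	for line[0] != '$' {
		if line, err = r.ReadSlice('\n'); err != nil {
			return fmt.Errorf("can't find export data (%v)", err)
		}
	}
	if string(line) != "$$B\n" {
		return fmt.Errorf("unknown export data header: %q", line)
	}
	return nil
}

func main() {
	input := "go object linux amd64 go1.23\nbuild id \"x\"\n$$B\n<binary export data>"
	fmt.Println(scanToExportData(bufio.NewReader(strings.NewReader(input)))) // <nil>
}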
Some files were not shown because too many files have changed in this diff.