Update deps

Frank Denis 2024-11-08 08:07:43 +01:00
parent 55b2ed9851
commit ee400254ac
139 changed files with 5480 additions and 3125 deletions

go.mod (20 changed lines)

@ -1,6 +1,6 @@
module github.com/dnscrypt/dnscrypt-proxy
go 1.23
go 1.23.2
require (
github.com/BurntSushi/toml v1.4.0
@ -19,11 +19,11 @@ require (
github.com/kardianos/service v1.2.2
github.com/miekg/dns v1.1.62
github.com/opencoff/go-sieve v0.2.1
github.com/powerman/check v1.7.0
github.com/powerman/check v1.8.0
github.com/quic-go/quic-go v0.48.1
golang.org/x/crypto v0.28.0
golang.org/x/crypto v0.29.0
golang.org/x/net v0.30.0
golang.org/x/sys v0.26.0
golang.org/x/sys v0.27.0
gopkg.in/natefinch/lumberjack.v2 v2.2.1
)
@ -39,14 +39,14 @@ require (
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/powerman/deepequal v0.1.0 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/smartystreets/goconvey v1.7.2 // indirect
github.com/smartystreets/goconvey v1.8.1 // indirect
go.uber.org/mock v0.4.0 // indirect
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
golang.org/x/mod v0.18.0 // indirect
golang.org/x/sync v0.8.0 // indirect
golang.org/x/text v0.19.0 // indirect
golang.org/x/sync v0.9.0 // indirect
golang.org/x/text v0.20.0 // indirect
golang.org/x/tools v0.22.0 // indirect
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect
google.golang.org/grpc v1.53.0 // indirect
google.golang.org/protobuf v1.33.0 // indirect
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
google.golang.org/grpc v1.56.3 // indirect
google.golang.org/protobuf v1.34.2 // indirect
)

go.sum (49 changed lines)

@ -24,8 +24,8 @@ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=
@ -67,59 +67,54 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/powerman/check v1.7.0 h1:PtRow0L73QgYSmXUBI5qe5MnDu3kowTAKQSHTbDH8Zs=
github.com/powerman/check v1.7.0/go.mod h1:pCQPDCCVj1ksGj9OaMqFBjvet5Jg8TbMB3UJj8Nx98g=
github.com/powerman/check v1.8.0 h1:ba57bphImhYRNlozdHYDTexfb/DyCb7j1rwuoFdEhdA=
github.com/powerman/check v1.8.0/go.mod h1:XSQq6GKXh06fZZKLwOgXDtcjo7OBbUwVPC5HP8oDXuQ=
github.com/powerman/deepequal v0.1.0 h1:sVwtyTsBuYIvdbLR1O2wzRY63YgPqdGZmk/o80l+C/U=
github.com/powerman/deepequal v0.1.0/go.mod h1:3k7aG/slufBhUANdN67o/UPg8i5YaiJ6FmibWX0cn04=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.48.1 h1:y/8xmfWI9qmGTc+lBr4jKRUWLGSlSigv847ULJ4hYXA=
github.com/quic-go/quic-go v0.48.1/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs=
github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY=
github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec=
github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY=
github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ=
golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg=
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w=
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc=
google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=


@ -6,4 +6,3 @@
# na/**/me - exclude path (* doesn't match /) to file/dir "na/me", "na/*/me", "na/*/*/me", …
# !name - include previously excluded path …
/.buildcache/
/cover.out

File diff suppressed because it is too large.

vendor/github.com/powerman/check/0-tools.go (new generated vendored file, 14 changed lines)

@ -0,0 +1,14 @@
//go:build generate
// NOTE: Prefix 0- in this file's name ensures that `go generate ./...` processes it first.
//go:generate mkdir -p .buildcache/bin
//go:generate -command GOINSTALL env "GOBIN=$PWD/.buildcache/bin" go install
//go:generate -command INSTALL-SHELLCHECK sh -c ".buildcache/bin/shellcheck --version 2>/dev/null | grep -wq \"$DOLLAR{DOLLAR}{1}\" || curl -sSfL https://github.com/koalaman/shellcheck/releases/download/v\"$DOLLAR{DOLLAR}{1}\"/shellcheck-v\"$DOLLAR{DOLLAR}{1}\".\"$(uname)\".x86_64.tar.xz | tar xJf - -C .buildcache/bin --strip-components=1 shellcheck-v\"$DOLLAR{DOLLAR}{1}\"/shellcheck" -sh
package tools
//go:generate GOINSTALL github.com/golangci/golangci-lint/cmd/golangci-lint@v1.61.0
//go:generate GOINSTALL github.com/mattn/goveralls@v0.0.12
//go:generate GOINSTALL gotest.tools/gotestsum@v1.12.0
//go:generate INSTALL-SHELLCHECK 0.10.0


@ -1,4 +1,10 @@
# check [![GoDoc](https://godoc.org/github.com/powerman/check?status.svg)](http://godoc.org/github.com/powerman/check) [![Go Report Card](https://goreportcard.com/badge/github.com/powerman/check)](https://goreportcard.com/report/github.com/powerman/check) [![CircleCI](https://circleci.com/gh/powerman/check.svg?style=svg)](https://circleci.com/gh/powerman/check) [![Coverage Status](https://coveralls.io/repos/github/powerman/check/badge.svg?branch=master)](https://coveralls.io/github/powerman/check?branch=master)
# check
[![Go Reference](https://pkg.go.dev/badge/github.com/powerman/check.svg)](https://pkg.go.dev/github.com/powerman/check)
[![CI/CD](https://github.com/powerman/check/actions/workflows/CI&CD.yml/badge.svg)](https://github.com/powerman/check/actions/workflows/CI&CD.yml)
[![Coverage Status](https://coveralls.io/repos/github/powerman/check/badge.svg?branch=master)](https://coveralls.io/github/powerman/check?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/powerman/check)](https://goreportcard.com/report/github.com/powerman/check)
[![Release](https://img.shields.io/github/v/release/powerman/check)](https://github.com/powerman/check/releases/latest)
Helpers to complement Go [testing](https://golang.org/pkg/testing/)
package.
@ -12,10 +18,10 @@ on steroids. :)
## Features
- Compelling output from failed tests:
- Very easy-to-read dumps for expected and actual values.
- Same text diff you loved in testify/assert.
- Also visual diff in [GoConvey](http://goconvey.co/) web UI, if you
use it (recommended).
- Statistics with amount of passed/failed checks.
- Colored output in terminal.
- 100% compatible with testing package - check package just provide
@ -35,18 +41,18 @@ package to have more clean/concise test code and cool dump/diff.
import "github.com/powerman/check"
func TestSomething(tt *testing.T) {
t := check.T(tt)
t.Equal(2, 2)
t.Log("You can use new t just like usual *testing.T")
t.Run("Subtests/Parallel example", func(tt *testing.T) {
t := check.T(tt)
t.Parallel()
t.NotEqual(2, 3, "should not be 3!")
obj, err := NewObj()
if t.Nil(err) {
t.Match(obj.field, `^\d+$`)
}
})
}
```
@ -67,7 +73,7 @@ import _ "github.com/smartystreets/goconvey/convey"
Requires [Go 1.9](https://golang.org/doc/go1.9#test-helper).
```
```sh
go get github.com/powerman/check
```
@ -84,5 +90,5 @@ go get github.com/powerman/check
- Complicated:
- [ ] Show line of source_test.go with failed test (like gocheck).
- [ ] Auto-detect missed `t:=check.T(tt)` - try to intercept `Run()` and
`Parallel()` for detecting using wrong `t` (looks like golangci-lint's
tparallel catch at least `Parallel()` case).


@ -12,7 +12,7 @@ import (
"testing"
"time"
pkgerrors "github.com/pkg/errors"
pkgerrors "github.com/pkg/errors" //nolint:depguard // By design.
"github.com/powerman/deepequal"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
@ -171,8 +171,8 @@ func (t *C) report(ok bool, msg []any, checker string, name []string, args []any
}
failureLong := failure.String()
wantDiff := len(dump) == 2 && name[0] == nameActual && name[1] == nameExpected
if wantDiff { //nolint:nestif // No idea how to simplify.
wantDiff := len(dump) == 2 && name[0] == nameActual && name[1] == nameExpected //nolint:gosec // False positive.
if wantDiff { //nolint:nestif // No idea how to simplify.
if reportToGoConvey(dump[0].String(), dump[1].String(), failureShort) == nil {
t.Fail()
} else {
@ -731,8 +731,8 @@ func (t *C) Err(actual, expected error, msg ...any) bool {
t.Helper()
actual2 := unwrapErr(actual)
equal := fmt.Sprintf("%#v", actual2) == fmt.Sprintf("%#v", expected)
_, proto1 := actual2.(interface{ GRPCStatus() *status.Status }) //nolint:errorlint // False positive.
_, proto2 := expected.(interface{ GRPCStatus() *status.Status }) //nolint:errorlint // False positive.
_, proto1 := actual2.(interface{ GRPCStatus() *status.Status })
_, proto2 := expected.(interface{ GRPCStatus() *status.Status })
if proto1 || proto2 {
equal = proto.Equal(status.Convert(actual2).Proto(), status.Convert(expected).Proto())
}
@ -782,8 +782,8 @@ func (t *C) NotErr(actual, expected error, msg ...any) bool {
t.Helper()
actual2 := unwrapErr(actual)
notEqual := fmt.Sprintf("%#v", actual2) != fmt.Sprintf("%#v", expected)
_, proto1 := actual2.(interface{ GRPCStatus() *status.Status }) //nolint:errorlint // False positive.
_, proto2 := expected.(interface{ GRPCStatus() *status.Status }) //nolint:errorlint // False positive.
_, proto1 := actual2.(interface{ GRPCStatus() *status.Status })
_, proto2 := expected.(interface{ GRPCStatus() *status.Status })
if proto1 || proto2 {
notEqual = !proto.Equal(status.Convert(actual2).Proto(), status.Convert(expected).Proto())
}
@ -1007,14 +1007,14 @@ func (t *C) GE(actual, expected any, msg ...any) bool {
// - floats
// - strings
// - time.Time
func (t *C) Between(actual, min, max any, msg ...any) bool {
func (t *C) Between(actual, minimum, maximum any, msg ...any) bool {
t.Helper()
return t.report3(actual, min, max, msg,
isBetween(actual, min, max))
return t.report3(actual, minimum, maximum, msg,
isBetween(actual, minimum, maximum))
}
func isBetween(actual, min, max any) bool {
switch v, vmin, vmax := reflect.ValueOf(actual), reflect.ValueOf(min), reflect.ValueOf(max); v.Kind() { //nolint:exhaustive // Covered by default case.
func isBetween(actual, minimum, maximum any) bool {
switch v, vmin, vmax := reflect.ValueOf(actual), reflect.ValueOf(minimum), reflect.ValueOf(maximum); v.Kind() { //nolint:exhaustive // Covered by default case.
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return vmin.Int() < v.Int() && v.Int() < vmax.Int()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
@ -1025,8 +1025,8 @@ func isBetween(actual, min, max any) bool {
return vmin.String() < v.String() && v.String() < vmax.String()
default:
if actualTime, ok := actual.(time.Time); ok {
minTime := min.(time.Time) //nolint:forcetypeassert // Want panic.
maxTime := max.(time.Time) //nolint:forcetypeassert // Want panic.
minTime := minimum.(time.Time) //nolint:forcetypeassert // Want panic.
maxTime := maximum.(time.Time) //nolint:forcetypeassert // Want panic.
return minTime.Before(actualTime) && actualTime.Before(maxTime)
}
}
@ -1041,10 +1041,10 @@ func isBetween(actual, min, max any) bool {
// - floats
// - strings
// - time.Time
func (t *C) NotBetween(actual, min, max any, msg ...any) bool {
func (t *C) NotBetween(actual, minimum, maximum any, msg ...any) bool {
t.Helper()
return t.report3(actual, min, max, msg,
!isBetween(actual, min, max))
return t.report3(actual, minimum, maximum, msg,
!isBetween(actual, minimum, maximum))
}
// BetweenOrEqual checks for min <= actual <= max.
@ -1055,10 +1055,10 @@ func (t *C) NotBetween(actual, min, max any, msg ...any) bool {
// - floats
// - strings
// - time.Time
func (t *C) BetweenOrEqual(actual, min, max any, msg ...any) bool {
func (t *C) BetweenOrEqual(actual, minimum, maximum any, msg ...any) bool {
t.Helper()
return t.report3(actual, min, max, msg,
isBetween(actual, min, max) || isEqual(actual, min) || isEqual(actual, max))
return t.report3(actual, minimum, maximum, msg,
isBetween(actual, minimum, maximum) || isEqual(actual, minimum) || isEqual(actual, maximum))
}
// NotBetweenOrEqual checks for actual < min or max < actual.
@ -1069,10 +1069,10 @@ func (t *C) BetweenOrEqual(actual, min, max any, msg ...any) bool {
// - floats
// - strings
// - time.Time
func (t *C) NotBetweenOrEqual(actual, min, max any, msg ...any) bool {
func (t *C) NotBetweenOrEqual(actual, minimum, maximum any, msg ...any) bool {
t.Helper()
return t.report3(actual, min, max, msg,
!(isBetween(actual, min, max) || isEqual(actual, min) || isEqual(actual, max)))
return t.report3(actual, minimum, maximum, msg,
!(isBetween(actual, minimum, maximum) || isEqual(actual, minimum) || isEqual(actual, maximum)))
}
// InDelta checks for expected-delta <= actual <= expected+delta.
@ -1091,14 +1091,14 @@ func (t *C) InDelta(actual, expected, delta any, msg ...any) bool {
func isInDelta(actual, expected, delta any) bool {
switch v, e, d := reflect.ValueOf(actual), reflect.ValueOf(expected), reflect.ValueOf(delta); v.Kind() { //nolint:exhaustive // Covered by default case.
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
min, max := e.Int()-d.Int(), e.Int()+d.Int()
return min <= v.Int() && v.Int() <= max
minimum, maximum := e.Int()-d.Int(), e.Int()+d.Int()
return minimum <= v.Int() && v.Int() <= maximum
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
min, max := e.Uint()-d.Uint(), e.Uint()+d.Uint()
return min <= v.Uint() && v.Uint() <= max
minimum, maximum := e.Uint()-d.Uint(), e.Uint()+d.Uint()
return minimum <= v.Uint() && v.Uint() <= maximum
case reflect.Float32, reflect.Float64:
min, max := e.Float()-d.Float(), e.Float()+d.Float()
return min <= v.Float() && v.Float() <= max
minimum, maximum := e.Float()-d.Float(), e.Float()+d.Float()
return minimum <= v.Float() && v.Float() <= maximum
default:
if actualTime, ok := actual.(time.Time); ok {
expectedTime := expected.(time.Time) //nolint:forcetypeassert // Want panic.
@ -1344,7 +1344,7 @@ func (t *C) Implements(actual, expected any, msg ...any) bool {
func isImplements(actual, expected any) bool {
typActual := reflect.TypeOf(actual)
if typActual.Kind() != reflect.Ptr {
typActual = reflect.PtrTo(typActual)
typActual = reflect.PointerTo(typActual)
}
return typActual.Implements(reflect.TypeOf(expected).Elem())
}
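
For illustration, a minimal test sketch using the renamed range checkers from the vendored github.com/powerman/check v1.8.0 (the package name, test name, and values below are hypothetical, not part of this commit):

```go
package ranges_test

import (
	"testing"

	"github.com/powerman/check"
)

func TestRanges(tt *testing.T) {
	t := check.T(tt)
	// Between(actual, minimum, maximum): strict minimum < actual < maximum.
	t.Between(5, 1, 10)
	// InDelta(actual, expected, delta): expected-delta <= actual <= expected+delta.
	t.InDelta(9.8, 10.0, 0.5)
}
```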


@ -53,7 +53,7 @@ func (c testStat) String() string {
//nolint:gochecknoglobals // By design.
var (
statsMu sync.Mutex
stats = map[*testing.T]*testStat{}
stats = make(map[*testing.T]*testStat)
)
// Report output statistics about passed/failed checks.


@ -1,9 +0,0 @@
//go:build generate
//go:generate mkdir -p .buildcache/bin
//go:generate -command GOINSTALL env "GOBIN=$PWD/.buildcache/bin" go install
package tools
//go:generate GOINSTALL github.com/golangci/golangci-lint/cmd/golangci-lint@v1.52.0
//go:generate GOINSTALL github.com/mattn/goveralls@v0.0.11


@ -1,4 +1,6 @@
Copyright (c) 2016 SmartyStreets, LLC
MIT License
Copyright (c) 2022 Smarty
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal


@ -11,12 +11,12 @@ type Printer struct {
prefix string
}
func (self *Printer) Println(message string, values ...interface{}) {
func (self *Printer) Println(message string, values ...any) {
formatted := self.format(message, values...) + newline
self.out.Write([]byte(formatted))
}
func (self *Printer) Print(message string, values ...interface{}) {
func (self *Printer) Print(message string, values ...any) {
formatted := self.format(message, values...)
self.out.Write([]byte(formatted))
}
@ -25,7 +25,7 @@ func (self *Printer) Insert(text string) {
self.out.Write([]byte(text))
}
func (self *Printer) format(message string, values ...interface{}) string {
func (self *Printer) format(message string, values ...any) string {
var formatted string
if len(values) == 0 {
formatted = self.prefix + message


@ -76,7 +76,7 @@ func removePackagePath(name string) string {
/////////////////// FailureView ////////////////////////
// This struct is also declared in github.com/smartystreets/assertions.
// FailureView is also declared in github.com/smarty/assertions.
// The json struct tags should be equal in both declarations.
type FailureView struct {
Message string `json:"Message"`
@ -92,7 +92,7 @@ type AssertionResult struct {
Expected string
Actual string
Failure string
Error interface{}
Error any
StackTrace string
Skipped bool
}
@ -117,7 +117,7 @@ func parseFailure(failure string, report *AssertionResult) {
report.Failure = failure
}
}
func NewErrorReport(err interface{}) *AssertionResult {
func NewErrorReport(err any) *AssertionResult {
report := new(AssertionResult)
report.File, report.Line = caller()
report.StackTrace = fullStackTrace()


@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (!arm64 && !s390x && !ppc64le) || !gc || purego
//go:build (!arm64 && !s390x && !ppc64 && !ppc64le) || !gc || purego
package chacha20


@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc && !purego
//go:build gc && !purego && (ppc64 || ppc64le)
package chacha20


@ -19,7 +19,7 @@
// The differences in this and the original implementation are
// due to the calling conventions and initialization of constants.
//go:build gc && !purego
//go:build gc && !purego && (ppc64 || ppc64le)
#include "textflag.h"
@ -36,32 +36,68 @@
// for VPERMXOR
#define MASK R18
DATA consts<>+0x00(SB)/8, $0x3320646e61707865
DATA consts<>+0x08(SB)/8, $0x6b20657479622d32
DATA consts<>+0x10(SB)/8, $0x0000000000000001
DATA consts<>+0x18(SB)/8, $0x0000000000000000
DATA consts<>+0x20(SB)/8, $0x0000000000000004
DATA consts<>+0x28(SB)/8, $0x0000000000000000
DATA consts<>+0x30(SB)/8, $0x0a0b08090e0f0c0d
DATA consts<>+0x38(SB)/8, $0x0203000106070405
DATA consts<>+0x40(SB)/8, $0x090a0b080d0e0f0c
DATA consts<>+0x48(SB)/8, $0x0102030005060704
DATA consts<>+0x50(SB)/8, $0x6170786561707865
DATA consts<>+0x58(SB)/8, $0x6170786561707865
DATA consts<>+0x60(SB)/8, $0x3320646e3320646e
DATA consts<>+0x68(SB)/8, $0x3320646e3320646e
DATA consts<>+0x70(SB)/8, $0x79622d3279622d32
DATA consts<>+0x78(SB)/8, $0x79622d3279622d32
DATA consts<>+0x80(SB)/8, $0x6b2065746b206574
DATA consts<>+0x88(SB)/8, $0x6b2065746b206574
DATA consts<>+0x90(SB)/8, $0x0000000100000000
DATA consts<>+0x98(SB)/8, $0x0000000300000002
DATA consts<>+0xa0(SB)/8, $0x5566774411223300
DATA consts<>+0xa8(SB)/8, $0xddeeffcc99aabb88
DATA consts<>+0xb0(SB)/8, $0x6677445522330011
DATA consts<>+0xb8(SB)/8, $0xeeffccddaabb8899
DATA consts<>+0x00(SB)/4, $0x61707865
DATA consts<>+0x04(SB)/4, $0x3320646e
DATA consts<>+0x08(SB)/4, $0x79622d32
DATA consts<>+0x0c(SB)/4, $0x6b206574
DATA consts<>+0x10(SB)/4, $0x00000001
DATA consts<>+0x14(SB)/4, $0x00000000
DATA consts<>+0x18(SB)/4, $0x00000000
DATA consts<>+0x1c(SB)/4, $0x00000000
DATA consts<>+0x20(SB)/4, $0x00000004
DATA consts<>+0x24(SB)/4, $0x00000000
DATA consts<>+0x28(SB)/4, $0x00000000
DATA consts<>+0x2c(SB)/4, $0x00000000
DATA consts<>+0x30(SB)/4, $0x0e0f0c0d
DATA consts<>+0x34(SB)/4, $0x0a0b0809
DATA consts<>+0x38(SB)/4, $0x06070405
DATA consts<>+0x3c(SB)/4, $0x02030001
DATA consts<>+0x40(SB)/4, $0x0d0e0f0c
DATA consts<>+0x44(SB)/4, $0x090a0b08
DATA consts<>+0x48(SB)/4, $0x05060704
DATA consts<>+0x4c(SB)/4, $0x01020300
DATA consts<>+0x50(SB)/4, $0x61707865
DATA consts<>+0x54(SB)/4, $0x61707865
DATA consts<>+0x58(SB)/4, $0x61707865
DATA consts<>+0x5c(SB)/4, $0x61707865
DATA consts<>+0x60(SB)/4, $0x3320646e
DATA consts<>+0x64(SB)/4, $0x3320646e
DATA consts<>+0x68(SB)/4, $0x3320646e
DATA consts<>+0x6c(SB)/4, $0x3320646e
DATA consts<>+0x70(SB)/4, $0x79622d32
DATA consts<>+0x74(SB)/4, $0x79622d32
DATA consts<>+0x78(SB)/4, $0x79622d32
DATA consts<>+0x7c(SB)/4, $0x79622d32
DATA consts<>+0x80(SB)/4, $0x6b206574
DATA consts<>+0x84(SB)/4, $0x6b206574
DATA consts<>+0x88(SB)/4, $0x6b206574
DATA consts<>+0x8c(SB)/4, $0x6b206574
DATA consts<>+0x90(SB)/4, $0x00000000
DATA consts<>+0x94(SB)/4, $0x00000001
DATA consts<>+0x98(SB)/4, $0x00000002
DATA consts<>+0x9c(SB)/4, $0x00000003
DATA consts<>+0xa0(SB)/4, $0x11223300
DATA consts<>+0xa4(SB)/4, $0x55667744
DATA consts<>+0xa8(SB)/4, $0x99aabb88
DATA consts<>+0xac(SB)/4, $0xddeeffcc
DATA consts<>+0xb0(SB)/4, $0x22330011
DATA consts<>+0xb4(SB)/4, $0x66774455
DATA consts<>+0xb8(SB)/4, $0xaabb8899
DATA consts<>+0xbc(SB)/4, $0xeeffccdd
GLOBL consts<>(SB), RODATA, $0xc0
#ifdef GOARCH_ppc64
#define BE_XXBRW_INIT() \
LVSL (R0)(R0), V24 \
VSPLTISB $3, V25 \
VXOR V24, V25, V24 \
#define BE_XXBRW(vr) VPERM vr, vr, V24, vr
#else
#define BE_XXBRW_INIT()
#define BE_XXBRW(vr)
#endif
//func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32)
TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40
MOVD out+0(FP), OUT
@ -94,6 +130,8 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40
// Clear V27
VXOR V27, V27, V27
BE_XXBRW_INIT()
// V28
LXVW4X (CONSTBASE)(R11), VS60
@ -299,6 +337,11 @@ loop_vsx:
VADDUWM V8, V18, V8
VADDUWM V12, V19, V12
BE_XXBRW(V0)
BE_XXBRW(V4)
BE_XXBRW(V8)
BE_XXBRW(V12)
CMPU LEN, $64
BLT tail_vsx
@ -327,6 +370,11 @@ loop_vsx:
VADDUWM V9, V18, V8
VADDUWM V13, V19, V12
BE_XXBRW(V0)
BE_XXBRW(V4)
BE_XXBRW(V8)
BE_XXBRW(V12)
CMPU LEN, $64
BLT tail_vsx
@ -334,8 +382,8 @@ loop_vsx:
LXVW4X (INP)(R8), VS60
LXVW4X (INP)(R9), VS61
LXVW4X (INP)(R10), VS62
VXOR V27, V0, V27
VXOR V27, V0, V27
VXOR V28, V4, V28
VXOR V29, V8, V29
VXOR V30, V12, V30
@ -354,6 +402,11 @@ loop_vsx:
VADDUWM V10, V18, V8
VADDUWM V14, V19, V12
BE_XXBRW(V0)
BE_XXBRW(V4)
BE_XXBRW(V8)
BE_XXBRW(V12)
CMPU LEN, $64
BLT tail_vsx
@ -381,6 +434,11 @@ loop_vsx:
VADDUWM V11, V18, V8
VADDUWM V15, V19, V12
BE_XXBRW(V0)
BE_XXBRW(V4)
BE_XXBRW(V8)
BE_XXBRW(V12)
CMPU LEN, $64
BLT tail_vsx
@ -408,9 +466,9 @@ loop_vsx:
done_vsx:
// Increment counter by number of 64 byte blocks
MOVD (CNT), R14
MOVWZ (CNT), R14
ADD BLOCKS, R14
MOVD R14, (CNT)
MOVWZ R14, (CNT)
RET
tail_vsx:


@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego
//go:build (!amd64 && !ppc64le && !ppc64 && !s390x) || !gc || purego
package poly1305


@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc && !purego
//go:build gc && !purego && (ppc64 || ppc64le)
package poly1305


@ -2,15 +2,25 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc && !purego
//go:build gc && !purego && (ppc64 || ppc64le)
#include "textflag.h"
// This was ported from the amd64 implementation.
#ifdef GOARCH_ppc64le
#define LE_MOVD MOVD
#define LE_MOVWZ MOVWZ
#define LE_MOVHZ MOVHZ
#else
#define LE_MOVD MOVDBR
#define LE_MOVWZ MOVWBR
#define LE_MOVHZ MOVHBR
#endif
#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \
MOVD (msg), t0; \
MOVD 8(msg), t1; \
LE_MOVD (msg)( R0), t0; \
LE_MOVD (msg)(R24), t1; \
MOVD $1, t2; \
ADDC t0, h0, h0; \
ADDE t1, h1, h1; \
@ -50,10 +60,6 @@
ADDE t3, h1, h1; \
ADDZE h2
DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF
DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC
GLOBL ·poly1305Mask<>(SB), RODATA, $16
// func update(state *[7]uint64, msg []byte)
TEXT ·update(SB), $0-32
MOVD state+0(FP), R3
@ -66,6 +72,8 @@ TEXT ·update(SB), $0-32
MOVD 24(R3), R11 // r0
MOVD 32(R3), R12 // r1
MOVD $8, R24
CMP R5, $16
BLT bytes_between_0_and_15
@ -94,7 +102,7 @@ flush_buffer:
// Greater than 8 -- load the rightmost remaining bytes in msg
// and put into R17 (h1)
MOVD (R4)(R21), R17
LE_MOVD (R4)(R21), R17
MOVD $16, R22
// Find the offset to those bytes
@ -118,7 +126,7 @@ just1:
BLT less8
// Exactly 8
MOVD (R4), R16
LE_MOVD (R4), R16
CMP R17, $0
@ -133,7 +141,7 @@ less8:
MOVD $0, R22 // shift count
CMP R5, $4
BLT less4
MOVWZ (R4), R16
LE_MOVWZ (R4), R16
ADD $4, R4
ADD $-4, R5
MOVD $32, R22
@ -141,7 +149,7 @@ less8:
less4:
CMP R5, $2
BLT less2
MOVHZ (R4), R21
LE_MOVHZ (R4), R21
SLD R22, R21, R21
OR R16, R21, R16
ADD $16, R22

vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s (new generated vendored file, 17 changed lines)

@ -0,0 +1,17 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build darwin && amd64 && gc
#include "textflag.h"
TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sysctl(SB)
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
TEXT libc_sysctlbyname_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sysctlbyname(SB)
GLOBL ·libc_sysctlbyname_trampoline_addr(SB), RODATA, $8
DATA ·libc_sysctlbyname_trampoline_addr(SB)/8, $libc_sysctlbyname_trampoline<>(SB)

vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go (new generated vendored file, 61 changed lines)

@ -0,0 +1,61 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build darwin && amd64 && gc
package cpu
// darwinSupportsAVX512 checks Darwin kernel for AVX512 support via sysctl
// call (see issue 43089). It also restricts AVX512 support for Darwin to
// kernel version 21.3.0 (MacOS 12.2.0) or later (see issue 49233).
//
// Background:
// Darwin implements a special mechanism to economize on thread state when
// AVX512 specific registers are not in use. This scheme minimizes state when
// preempting threads that haven't yet used any AVX512 instructions, but adds
// special requirements to check for AVX512 hardware support at runtime (e.g.
// via sysctl call or commpage inspection). See issue 43089 and link below for
// full background:
// https://github.com/apple-oss-distributions/xnu/blob/xnu-11215.1.10/osfmk/i386/fpu.c#L214-L240
//
// Additionally, all versions of the Darwin kernel from 19.6.0 through 21.2.0
// (corresponding to MacOS 10.15.6 - 12.1) have a bug that can cause corruption
// of the AVX512 mask registers (K0-K7) upon signal return. For this reason
// AVX512 is considered unsafe to use on Darwin for kernel versions prior to
// 21.3.0, where a fix has been confirmed. See issue 49233 for full background.
func darwinSupportsAVX512() bool {
return darwinSysctlEnabled([]byte("hw.optional.avx512f\x00")) && darwinKernelVersionCheck(21, 3, 0)
}
// Ensure Darwin kernel version is at least major.minor.patch, avoiding dependencies
func darwinKernelVersionCheck(major, minor, patch int) bool {
var release [256]byte
err := darwinOSRelease(&release)
if err != nil {
return false
}
var mmp [3]int
c := 0
Loop:
for _, b := range release[:] {
switch {
case b >= '0' && b <= '9':
mmp[c] = 10*mmp[c] + int(b-'0')
case b == '.':
c++
if c > 2 {
return false
}
case b == 0:
break Loop
default:
return false
}
}
if c != 2 {
return false
}
return mmp[0] > major || mmp[0] == major && (mmp[1] > minor || mmp[1] == minor && mmp[2] >= patch)
}
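
As a rough consumer-side sketch (not part of this commit), programs keep reading the exported golang.org/x/sys/cpu flags; on darwin/amd64 the AVX-512 flags are now gated by the sysctl-based check above:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// On darwin/amd64, HasAVX512F is reported true only when the kernel exposes
	// hw.optional.avx512f and the kernel version is at least 21.3.0.
	fmt.Println("AVX-512F:", cpu.X86.HasAVX512F)
	fmt.Println("AVX2:    ", cpu.X86.HasAVX2)
}
```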


@ -6,10 +6,10 @@
package cpu
// cpuid is implemented in cpu_x86.s for gc compiler
// cpuid is implemented in cpu_gc_x86.s for gc compiler
// and in cpu_gccgo.c for gccgo.
func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler
// xgetbv with ecx = 0 is implemented in cpu_gc_x86.s for gc compiler
// and in cpu_gccgo.c for gccgo.
func xgetbv() (eax, edx uint32)


@ -18,7 +18,7 @@ TEXT ·cpuid(SB), NOSPLIT, $0-24
RET
// func xgetbv() (eax, edx uint32)
TEXT ·xgetbv(SB),NOSPLIT,$0-8
TEXT ·xgetbv(SB), NOSPLIT, $0-8
MOVL $0, CX
XGETBV
MOVL AX, eax+0(FP)


@ -23,9 +23,3 @@ func xgetbv() (eax, edx uint32) {
gccgoXgetbv(&a, &d)
return a, d
}
// gccgo doesn't build on Darwin, per:
// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76
func darwinSupportsAVX512() bool {
return false
}


@ -110,7 +110,6 @@ func doinit() {
ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM)
ARM64.HasDIT = isSet(hwCap, hwcap_DIT)
// HWCAP2 feature bits
ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2)
ARM64.HasI8MM = isSet(hwCap2, hwcap2_I8MM)

vendor/golang.org/x/sys/cpu/cpu_other_x86.go (new generated vendored file, 11 changed lines)

@ -0,0 +1,11 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build 386 || amd64p32 || (amd64 && (!darwin || !gc))
package cpu
func darwinSupportsAVX512() bool {
panic("only implemented for gc && amd64 && darwin")
}


@ -92,10 +92,8 @@ func archInit() {
osSupportsAVX = isSet(1, eax) && isSet(2, eax)
if runtime.GOOS == "darwin" {
// Darwin doesn't save/restore AVX-512 mask registers correctly across signal handlers.
// Since users can't rely on mask register contents, let's not advertise AVX-512 support.
// See issue 49233.
osSupportsAVX512 = false
// Darwin requires special AVX512 checks, see cpu_darwin_x86.go
osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512()
} else {
// Check if OPMASK and ZMM registers have OS support.
osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax)

vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go (new generated vendored file, 98 changed lines)

@ -0,0 +1,98 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Minimal copy of x/sys/unix so the cpu package can make a
// system call on Darwin without depending on x/sys/unix.
//go:build darwin && amd64 && gc
package cpu
import (
"syscall"
"unsafe"
)
type _C_int int32
// adapted from unix.Uname() at x/sys/unix/syscall_darwin.go L419
func darwinOSRelease(release *[256]byte) error {
// from x/sys/unix/zerrors_openbsd_amd64.go
const (
CTL_KERN = 0x1
KERN_OSRELEASE = 0x2
)
mib := []_C_int{CTL_KERN, KERN_OSRELEASE}
n := unsafe.Sizeof(*release)
return sysctl(mib, &release[0], &n, nil, 0)
}
type Errno = syscall.Errno
var _zero uintptr // Single-word zero for use when we need a valid pointer to 0 bytes.
// from x/sys/unix/zsyscall_darwin_amd64.go L791-807
func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error {
var _p0 unsafe.Pointer
if len(mib) > 0 {
_p0 = unsafe.Pointer(&mib[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
if _, _, err := syscall_syscall6(
libc_sysctl_trampoline_addr,
uintptr(_p0),
uintptr(len(mib)),
uintptr(unsafe.Pointer(old)),
uintptr(unsafe.Pointer(oldlen)),
uintptr(unsafe.Pointer(new)),
uintptr(newlen),
); err != 0 {
return err
}
return nil
}
var libc_sysctl_trampoline_addr uintptr
// adapted from internal/cpu/cpu_arm64_darwin.go
func darwinSysctlEnabled(name []byte) bool {
out := int32(0)
nout := unsafe.Sizeof(out)
if ret := sysctlbyname(&name[0], (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); ret != nil {
return false
}
return out > 0
}
//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib"
var libc_sysctlbyname_trampoline_addr uintptr
// adapted from runtime/sys_darwin.go in the pattern of sysctl() above, as defined in x/sys/unix
func sysctlbyname(name *byte, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error {
if _, _, err := syscall_syscall6(
libc_sysctlbyname_trampoline_addr,
uintptr(unsafe.Pointer(name)),
uintptr(unsafe.Pointer(old)),
uintptr(unsafe.Pointer(oldlen)),
uintptr(unsafe.Pointer(new)),
uintptr(newlen),
0,
); err != 0 {
return err
}
return nil
}
//go:cgo_import_dynamic libc_sysctlbyname sysctlbyname "/usr/lib/libSystem.B.dylib"
// Implemented in the runtime package (runtime/sys_darwin.go)
func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
//go:linkname syscall_syscall6 syscall.syscall6


@ -58,6 +58,102 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) {
return &value, err
}
// IoctlGetEthtoolTsInfo fetches ethtool timestamping and PHC
// association for the network device specified by ifname.
func IoctlGetEthtoolTsInfo(fd int, ifname string) (*EthtoolTsInfo, error) {
ifr, err := NewIfreq(ifname)
if err != nil {
return nil, err
}
value := EthtoolTsInfo{Cmd: ETHTOOL_GET_TS_INFO}
ifrd := ifr.withData(unsafe.Pointer(&value))
err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd)
return &value, err
}
// IoctlGetHwTstamp retrieves the hardware timestamping configuration
// for the network device specified by ifname.
func IoctlGetHwTstamp(fd int, ifname string) (*HwTstampConfig, error) {
ifr, err := NewIfreq(ifname)
if err != nil {
return nil, err
}
value := HwTstampConfig{}
ifrd := ifr.withData(unsafe.Pointer(&value))
err = ioctlIfreqData(fd, SIOCGHWTSTAMP, &ifrd)
return &value, err
}
// IoctlSetHwTstamp updates the hardware timestamping configuration for
// the network device specified by ifname.
func IoctlSetHwTstamp(fd int, ifname string, cfg *HwTstampConfig) error {
ifr, err := NewIfreq(ifname)
if err != nil {
return err
}
ifrd := ifr.withData(unsafe.Pointer(cfg))
return ioctlIfreqData(fd, SIOCSHWTSTAMP, &ifrd)
}
// FdToClockID derives the clock ID from the file descriptor number
// - see clock_gettime(3), FD_TO_CLOCKID macros. The resulting ID is
// suitable for system calls like ClockGettime.
func FdToClockID(fd int) int32 { return int32((int(^fd) << 3) | 3) }
// IoctlPtpClockGetcaps returns the description of a given PTP device.
func IoctlPtpClockGetcaps(fd int) (*PtpClockCaps, error) {
var value PtpClockCaps
err := ioctlPtr(fd, PTP_CLOCK_GETCAPS2, unsafe.Pointer(&value))
return &value, err
}
// IoctlPtpSysOffsetPrecise returns a description of the clock
// offset compared to the system clock.
func IoctlPtpSysOffsetPrecise(fd int) (*PtpSysOffsetPrecise, error) {
var value PtpSysOffsetPrecise
err := ioctlPtr(fd, PTP_SYS_OFFSET_PRECISE2, unsafe.Pointer(&value))
return &value, err
}
// IoctlPtpSysOffsetExtended returns an extended description of the
// clock offset compared to the system clock. The samples parameter
// specifies the desired number of measurements.
func IoctlPtpSysOffsetExtended(fd int, samples uint) (*PtpSysOffsetExtended, error) {
value := PtpSysOffsetExtended{Samples: uint32(samples)}
err := ioctlPtr(fd, PTP_SYS_OFFSET_EXTENDED2, unsafe.Pointer(&value))
return &value, err
}
// IoctlPtpPinGetfunc returns the configuration of the specified
// I/O pin on given PTP device.
func IoctlPtpPinGetfunc(fd int, index uint) (*PtpPinDesc, error) {
value := PtpPinDesc{Index: uint32(index)}
err := ioctlPtr(fd, PTP_PIN_GETFUNC2, unsafe.Pointer(&value))
return &value, err
}
// IoctlPtpPinSetfunc updates configuration of the specified PTP
// I/O pin.
func IoctlPtpPinSetfunc(fd int, pd *PtpPinDesc) error {
return ioctlPtr(fd, PTP_PIN_SETFUNC2, unsafe.Pointer(pd))
}
// IoctlPtpPeroutRequest configures the periodic output mode of the
// PTP I/O pins.
func IoctlPtpPeroutRequest(fd int, r *PtpPeroutRequest) error {
return ioctlPtr(fd, PTP_PEROUT_REQUEST2, unsafe.Pointer(r))
}
// IoctlPtpExttsRequest configures the external timestamping mode
// of the PTP I/O pins.
func IoctlPtpExttsRequest(fd int, r *PtpExttsRequest) error {
return ioctlPtr(fd, PTP_EXTTS_REQUEST2, unsafe.Pointer(r))
}
// IoctlGetWatchdogInfo fetches information about a watchdog device from the
// Linux watchdog API. For more information, see:
// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html.
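
A hedged usage sketch of the new PTP helpers and FdToClockID (the /dev/ptp0 path and the error handling are assumptions, not taken from this commit):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// /dev/ptp0 is a placeholder for a PTP hardware clock device.
	fd, err := unix.Open("/dev/ptp0", unix.O_RDWR, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	caps, err := unix.IoctlPtpClockGetcaps(fd)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("clock capabilities: %+v\n", *caps)

	// FdToClockID turns the descriptor into a clock ID usable with ClockGettime.
	var ts unix.Timespec
	if err := unix.ClockGettime(unix.FdToClockID(fd), &ts); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("PHC time: %d.%09d\n", ts.Sec, ts.Nsec)
}
```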


@ -158,6 +158,16 @@ includes_Linux='
#endif
#define _GNU_SOURCE
// See the description in unix/linux/types.go
#if defined(__ARM_EABI__) || \
(defined(__mips__) && (_MIPS_SIM == _ABIO32)) || \
(defined(__powerpc__) && (!defined(__powerpc64__)))
# ifdef _TIME_BITS
# undef _TIME_BITS
# endif
# define _TIME_BITS 32
#endif
// <sys/ioctl.h> is broken on powerpc64, as it fails to include definitions of
// these structures. We just include them copied from <bits/termios.h>.
#if defined(__powerpc__)
@ -256,6 +266,7 @@ struct ltchars {
#include <linux/nsfs.h>
#include <linux/perf_event.h>
#include <linux/pps.h>
#include <linux/ptp_clock.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/reboot.h>
@ -527,6 +538,7 @@ ccflags="$@"
$2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ ||
$2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ ||
$2 ~ /^NFC_.*_(MAX)?SIZE$/ ||
$2 ~ /^PTP_/ ||
$2 ~ /^RAW_PAYLOAD_/ ||
$2 ~ /^[US]F_/ ||
$2 ~ /^TP_STATUS_/ ||


@ -1860,6 +1860,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sys ClockAdjtime(clockid int32, buf *Timex) (state int, err error)
//sys ClockGetres(clockid int32, res *Timespec) (err error)
//sys ClockGettime(clockid int32, time *Timespec) (err error)
//sys ClockSettime(clockid int32, time *Timespec) (err error)
//sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error)
//sys Close(fd int) (err error)
//sys CloseRange(first uint, last uint, flags uint) (err error)
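
A minimal sketch of calling the newly exported ClockSettime wrapper (illustrative only; stepping CLOCK_REALTIME requires CAP_SYS_TIME):

```go
package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var now unix.Timespec
	if err := unix.ClockGettime(unix.CLOCK_REALTIME, &now); err != nil {
		log.Fatal(err)
	}
	// Step the realtime clock forward by one second (needs CAP_SYS_TIME).
	now.Sec++
	if err := unix.ClockSettime(unix.CLOCK_REALTIME, &now); err != nil {
		log.Fatal(err)
	}
}
```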


@ -768,6 +768,15 @@ func Munmap(b []byte) (err error) {
return mapper.Munmap(b)
}
func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) {
xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset)
return unsafe.Pointer(xaddr), err
}
func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) {
return mapper.munmap(uintptr(addr), length)
}
//sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A
//sysnb Getgid() (gid int)
//sysnb Getpid() (pid int)
@ -816,10 +825,10 @@ func Lstat(path string, stat *Stat_t) (err error) {
// for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/
func isSpecialPath(path []byte) (v bool) {
var special = [4][8]byte{
[8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'},
[8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'},
[8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'},
[8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}}
{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'},
{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'},
{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'},
{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}}
var i, j int
for i = 0; i < len(special); i++ {
@ -3115,3 +3124,90 @@ func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) {
//sys Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT
//sys Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT
//sys Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT
func fcntlAsIs(fd uintptr, cmd int, arg uintptr) (val int, err error) {
runtime.EnterSyscall()
r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), arg)
runtime.ExitSyscall()
val = int(r0)
if int64(r0) == -1 {
err = errnoErr2(e1, e2)
}
return
}
func Fcntl(fd uintptr, cmd int, op interface{}) (ret int, err error) {
switch op.(type) {
case *Flock_t:
err = FcntlFlock(fd, cmd, op.(*Flock_t))
if err != nil {
ret = -1
}
return
case int:
return FcntlInt(fd, cmd, op.(int))
case *F_cnvrt:
return fcntlAsIs(fd, cmd, uintptr(unsafe.Pointer(op.(*F_cnvrt))))
case unsafe.Pointer:
return fcntlAsIs(fd, cmd, uintptr(op.(unsafe.Pointer)))
default:
return -1, EINVAL
}
return
}
func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
if raceenabled {
raceReleaseMerge(unsafe.Pointer(&ioSync))
}
return sendfile(outfd, infd, offset, count)
}
func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
// TODO: use LE call instead if the call is implemented
originalOffset, err := Seek(infd, 0, SEEK_CUR)
if err != nil {
return -1, err
}
//start reading data from in_fd
if offset != nil {
_, err := Seek(infd, *offset, SEEK_SET)
if err != nil {
return -1, err
}
}
buf := make([]byte, count)
readBuf := make([]byte, 0)
var n int = 0
for i := 0; i < count; i += n {
n, err := Read(infd, buf)
if n == 0 {
if err != nil {
return -1, err
} else { // EOF
break
}
}
readBuf = append(readBuf, buf...)
buf = buf[0:0]
}
n2, err := Write(outfd, readBuf)
if err != nil {
return -1, err
}
//When sendfile() returns, this variable will be set to the
// offset of the byte following the last byte that was read.
if offset != nil {
*offset = *offset + int64(n)
// If offset is not NULL, then sendfile() does not modify the file
// offset of in_fd
_, err := Seek(infd, originalOffset, SEEK_SET)
if err != nil {
return -1, err
}
}
return n2, nil
}
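
For reference, a hypothetical caller of the Sendfile wrapper emulated above (file names are placeholders; the exported signature matches the other unix ports):

```go
package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	in, err := unix.Open("input.dat", unix.O_RDONLY, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(in)

	out, err := unix.Open("copy.dat", unix.O_WRONLY|unix.O_CREAT|unix.O_TRUNC, 0o644)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(out)

	// Copy the first 4 KiB of the input starting at offset 0.
	var off int64
	n, err := unix.Sendfile(out, in, &off, 4096)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("copied %d bytes", n)
}
```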


@ -2625,6 +2625,28 @@ const (
PR_UNALIGN_NOPRINT = 0x1
PR_UNALIGN_SIGBUS = 0x2
PSTOREFS_MAGIC = 0x6165676c
PTP_CLK_MAGIC = '='
PTP_ENABLE_FEATURE = 0x1
PTP_EXTTS_EDGES = 0x6
PTP_EXTTS_EVENT_VALID = 0x1
PTP_EXTTS_V1_VALID_FLAGS = 0x7
PTP_EXTTS_VALID_FLAGS = 0x1f
PTP_EXT_OFFSET = 0x10
PTP_FALLING_EDGE = 0x4
PTP_MAX_SAMPLES = 0x19
PTP_PEROUT_DUTY_CYCLE = 0x2
PTP_PEROUT_ONE_SHOT = 0x1
PTP_PEROUT_PHASE = 0x4
PTP_PEROUT_V1_VALID_FLAGS = 0x0
PTP_PEROUT_VALID_FLAGS = 0x7
PTP_PIN_GETFUNC = 0xc0603d06
PTP_PIN_GETFUNC2 = 0xc0603d0f
PTP_RISING_EDGE = 0x2
PTP_STRICT_FLAGS = 0x8
PTP_SYS_OFFSET_EXTENDED = 0xc4c03d09
PTP_SYS_OFFSET_EXTENDED2 = 0xc4c03d12
PTP_SYS_OFFSET_PRECISE = 0xc0403d08
PTP_SYS_OFFSET_PRECISE2 = 0xc0403d11
PTRACE_ATTACH = 0x10
PTRACE_CONT = 0x7
PTRACE_DETACH = 0x11


@ -237,6 +237,20 @@ const (
PPPIOCUNBRIDGECHAN = 0x7434
PPPIOCXFERUNIT = 0x744e
PR_SET_PTRACER_ANY = 0xffffffff
PTP_CLOCK_GETCAPS = 0x80503d01
PTP_CLOCK_GETCAPS2 = 0x80503d0a
PTP_ENABLE_PPS = 0x40043d04
PTP_ENABLE_PPS2 = 0x40043d0d
PTP_EXTTS_REQUEST = 0x40103d02
PTP_EXTTS_REQUEST2 = 0x40103d0b
PTP_MASK_CLEAR_ALL = 0x3d13
PTP_MASK_EN_SINGLE = 0x40043d14
PTP_PEROUT_REQUEST = 0x40383d03
PTP_PEROUT_REQUEST2 = 0x40383d0c
PTP_PIN_SETFUNC = 0x40603d07
PTP_PIN_SETFUNC2 = 0x40603d10
PTP_SYS_OFFSET = 0x43403d05
PTP_SYS_OFFSET2 = 0x43403d0e
PTRACE_GETFPREGS = 0xe
PTRACE_GETFPXREGS = 0x12
PTRACE_GET_THREAD_AREA = 0x19


@ -237,6 +237,20 @@ const (
PPPIOCUNBRIDGECHAN = 0x7434
PPPIOCXFERUNIT = 0x744e
PR_SET_PTRACER_ANY = 0xffffffffffffffff
PTP_CLOCK_GETCAPS = 0x80503d01
PTP_CLOCK_GETCAPS2 = 0x80503d0a
PTP_ENABLE_PPS = 0x40043d04
PTP_ENABLE_PPS2 = 0x40043d0d
PTP_EXTTS_REQUEST = 0x40103d02
PTP_EXTTS_REQUEST2 = 0x40103d0b
PTP_MASK_CLEAR_ALL = 0x3d13
PTP_MASK_EN_SINGLE = 0x40043d14
PTP_PEROUT_REQUEST = 0x40383d03
PTP_PEROUT_REQUEST2 = 0x40383d0c
PTP_PIN_SETFUNC = 0x40603d07
PTP_PIN_SETFUNC2 = 0x40603d10
PTP_SYS_OFFSET = 0x43403d05
PTP_SYS_OFFSET2 = 0x43403d0e
PTRACE_ARCH_PRCTL = 0x1e
PTRACE_GETFPREGS = 0xe
PTRACE_GETFPXREGS = 0x12


@ -234,6 +234,20 @@ const (
PPPIOCUNBRIDGECHAN = 0x7434
PPPIOCXFERUNIT = 0x744e
PR_SET_PTRACER_ANY = 0xffffffff
PTP_CLOCK_GETCAPS = 0x80503d01
PTP_CLOCK_GETCAPS2 = 0x80503d0a
PTP_ENABLE_PPS = 0x40043d04
PTP_ENABLE_PPS2 = 0x40043d0d
PTP_EXTTS_REQUEST = 0x40103d02
PTP_EXTTS_REQUEST2 = 0x40103d0b
PTP_MASK_CLEAR_ALL = 0x3d13
PTP_MASK_EN_SINGLE = 0x40043d14
PTP_PEROUT_REQUEST = 0x40383d03
PTP_PEROUT_REQUEST2 = 0x40383d0c
PTP_PIN_SETFUNC = 0x40603d07
PTP_PIN_SETFUNC2 = 0x40603d10
PTP_SYS_OFFSET = 0x43403d05
PTP_SYS_OFFSET2 = 0x43403d0e
PTRACE_GETCRUNCHREGS = 0x19
PTRACE_GETFDPIC = 0x1f
PTRACE_GETFDPIC_EXEC = 0x0


@ -240,6 +240,20 @@ const (
PROT_BTI = 0x10
PROT_MTE = 0x20
PR_SET_PTRACER_ANY = 0xffffffffffffffff
PTP_CLOCK_GETCAPS = 0x80503d01
PTP_CLOCK_GETCAPS2 = 0x80503d0a
PTP_ENABLE_PPS = 0x40043d04
PTP_ENABLE_PPS2 = 0x40043d0d
PTP_EXTTS_REQUEST = 0x40103d02
PTP_EXTTS_REQUEST2 = 0x40103d0b
PTP_MASK_CLEAR_ALL = 0x3d13
PTP_MASK_EN_SINGLE = 0x40043d14
PTP_PEROUT_REQUEST = 0x40383d03
PTP_PEROUT_REQUEST2 = 0x40383d0c
PTP_PIN_SETFUNC = 0x40603d07
PTP_PIN_SETFUNC2 = 0x40603d10
PTP_SYS_OFFSET = 0x43403d05
PTP_SYS_OFFSET2 = 0x43403d0e
PTRACE_PEEKMTETAGS = 0x21
PTRACE_POKEMTETAGS = 0x22
PTRACE_SYSEMU = 0x1f


@ -238,6 +238,20 @@ const (
PPPIOCUNBRIDGECHAN = 0x7434
PPPIOCXFERUNIT = 0x744e
PR_SET_PTRACER_ANY = 0xffffffffffffffff
PTP_CLOCK_GETCAPS = 0x80503d01
PTP_CLOCK_GETCAPS2 = 0x80503d0a
PTP_ENABLE_PPS = 0x40043d04
PTP_ENABLE_PPS2 = 0x40043d0d
PTP_EXTTS_REQUEST = 0x40103d02
PTP_EXTTS_REQUEST2 = 0x40103d0b
PTP_MASK_CLEAR_ALL = 0x3d13
PTP_MASK_EN_SINGLE = 0x40043d14
PTP_PEROUT_REQUEST = 0x40383d03
PTP_PEROUT_REQUEST2 = 0x40383d0c
PTP_PIN_SETFUNC = 0x40603d07
PTP_PIN_SETFUNC2 = 0x40603d10
PTP_SYS_OFFSET = 0x43403d05
PTP_SYS_OFFSET2 = 0x43403d0e
PTRACE_SYSEMU = 0x1f
PTRACE_SYSEMU_SINGLESTEP = 0x20
RLIMIT_AS = 0x9


@ -234,6 +234,20 @@ const (
PPPIOCUNBRIDGECHAN = 0x20007434
PPPIOCXFERUNIT = 0x2000744e
PR_SET_PTRACER_ANY = 0xffffffff
PTP_CLOCK_GETCAPS = 0x40503d01
PTP_CLOCK_GETCAPS2 = 0x40503d0a
PTP_ENABLE_PPS = 0x80043d04
PTP_ENABLE_PPS2 = 0x80043d0d
PTP_EXTTS_REQUEST = 0x80103d02
PTP_EXTTS_REQUEST2 = 0x80103d0b
PTP_MASK_CLEAR_ALL = 0x20003d13
PTP_MASK_EN_SINGLE = 0x80043d14
PTP_PEROUT_REQUEST = 0x80383d03
PTP_PEROUT_REQUEST2 = 0x80383d0c
PTP_PIN_SETFUNC = 0x80603d07
PTP_PIN_SETFUNC2 = 0x80603d10
PTP_SYS_OFFSET = 0x83403d05
PTP_SYS_OFFSET2 = 0x83403d0e
PTRACE_GETFPREGS = 0xe
PTRACE_GET_THREAD_AREA = 0x19
PTRACE_GET_THREAD_AREA_3264 = 0xc4


@ -234,6 +234,20 @@ const (
PPPIOCUNBRIDGECHAN = 0x20007434
PPPIOCXFERUNIT = 0x2000744e
PR_SET_PTRACER_ANY = 0xffffffffffffffff
PTP_CLOCK_GETCAPS = 0x40503d01
PTP_CLOCK_GETCAPS2 = 0x40503d0a
PTP_ENABLE_PPS = 0x80043d04
PTP_ENABLE_PPS2 = 0x80043d0d
PTP_EXTTS_REQUEST = 0x80103d02
PTP_EXTTS_REQUEST2 = 0x80103d0b
PTP_MASK_CLEAR_ALL = 0x20003d13
PTP_MASK_EN_SINGLE = 0x80043d14
PTP_PEROUT_REQUEST = 0x80383d03
PTP_PEROUT_REQUEST2 = 0x80383d0c
PTP_PIN_SETFUNC = 0x80603d07
PTP_PIN_SETFUNC2 = 0x80603d10
PTP_SYS_OFFSET = 0x83403d05
PTP_SYS_OFFSET2 = 0x83403d0e
PTRACE_GETFPREGS = 0xe
PTRACE_GET_THREAD_AREA = 0x19
PTRACE_GET_THREAD_AREA_3264 = 0xc4


@ -234,6 +234,20 @@ const (
PPPIOCUNBRIDGECHAN = 0x20007434
PPPIOCXFERUNIT = 0x2000744e
PR_SET_PTRACER_ANY = 0xffffffffffffffff
PTP_CLOCK_GETCAPS = 0x40503d01
PTP_CLOCK_GETCAPS2 = 0x40503d0a
PTP_ENABLE_PPS = 0x80043d04
PTP_ENABLE_PPS2 = 0x80043d0d
PTP_EXTTS_REQUEST = 0x80103d02
PTP_EXTTS_REQUEST2 = 0x80103d0b
PTP_MASK_CLEAR_ALL = 0x20003d13
PTP_MASK_EN_SINGLE = 0x80043d14
PTP_PEROUT_REQUEST = 0x80383d03
PTP_PEROUT_REQUEST2 = 0x80383d0c
PTP_PIN_SETFUNC = 0x80603d07
PTP_PIN_SETFUNC2 = 0x80603d10
PTP_SYS_OFFSET = 0x83403d05
PTP_SYS_OFFSET2 = 0x83403d0e
PTRACE_GETFPREGS = 0xe
PTRACE_GET_THREAD_AREA = 0x19
PTRACE_GET_THREAD_AREA_3264 = 0xc4


@ -234,6 +234,20 @@ const (
PPPIOCUNBRIDGECHAN = 0x20007434
PPPIOCXFERUNIT = 0x2000744e
PR_SET_PTRACER_ANY = 0xffffffff
PTP_CLOCK_GETCAPS = 0x40503d01
PTP_CLOCK_GETCAPS2 = 0x40503d0a
PTP_ENABLE_PPS = 0x80043d04
PTP_ENABLE_PPS2 = 0x80043d0d
PTP_EXTTS_REQUEST = 0x80103d02
PTP_EXTTS_REQUEST2 = 0x80103d0b
PTP_MASK_CLEAR_ALL = 0x20003d13
PTP_MASK_EN_SINGLE = 0x80043d14
PTP_PEROUT_REQUEST = 0x80383d03
PTP_PEROUT_REQUEST2 = 0x80383d0c
PTP_PIN_SETFUNC = 0x80603d07
PTP_PIN_SETFUNC2 = 0x80603d10
PTP_SYS_OFFSET = 0x83403d05
PTP_SYS_OFFSET2 = 0x83403d0e
PTRACE_GETFPREGS = 0xe
PTRACE_GET_THREAD_AREA = 0x19
PTRACE_GET_THREAD_AREA_3264 = 0xc4


@ -237,6 +237,20 @@ const (
PPPIOCXFERUNIT = 0x2000744e
PROT_SAO = 0x10
PR_SET_PTRACER_ANY = 0xffffffff
PTP_CLOCK_GETCAPS = 0x40503d01
PTP_CLOCK_GETCAPS2 = 0x40503d0a
PTP_ENABLE_PPS = 0x80043d04
PTP_ENABLE_PPS2 = 0x80043d0d
PTP_EXTTS_REQUEST = 0x80103d02
PTP_EXTTS_REQUEST2 = 0x80103d0b
PTP_MASK_CLEAR_ALL = 0x20003d13
PTP_MASK_EN_SINGLE = 0x80043d14
PTP_PEROUT_REQUEST = 0x80383d03
PTP_PEROUT_REQUEST2 = 0x80383d0c
PTP_PIN_SETFUNC = 0x80603d07
PTP_PIN_SETFUNC2 = 0x80603d10
PTP_SYS_OFFSET = 0x83403d05
PTP_SYS_OFFSET2 = 0x83403d0e
PTRACE_GETEVRREGS = 0x14
PTRACE_GETFPREGS = 0xe
PTRACE_GETREGS64 = 0x16


@ -237,6 +237,20 @@ const (
PPPIOCXFERUNIT = 0x2000744e
PROT_SAO = 0x10
PR_SET_PTRACER_ANY = 0xffffffffffffffff
PTP_CLOCK_GETCAPS = 0x40503d01
PTP_CLOCK_GETCAPS2 = 0x40503d0a
PTP_ENABLE_PPS = 0x80043d04
PTP_ENABLE_PPS2 = 0x80043d0d
PTP_EXTTS_REQUEST = 0x80103d02
PTP_EXTTS_REQUEST2 = 0x80103d0b
PTP_MASK_CLEAR_ALL = 0x20003d13
PTP_MASK_EN_SINGLE = 0x80043d14
PTP_PEROUT_REQUEST = 0x80383d03
PTP_PEROUT_REQUEST2 = 0x80383d0c
PTP_PIN_SETFUNC = 0x80603d07
PTP_PIN_SETFUNC2 = 0x80603d10
PTP_SYS_OFFSET = 0x83403d05
PTP_SYS_OFFSET2 = 0x83403d0e
PTRACE_GETEVRREGS = 0x14
PTRACE_GETFPREGS = 0xe
PTRACE_GETREGS64 = 0x16


@ -237,6 +237,20 @@ const (
PPPIOCXFERUNIT = 0x2000744e
PROT_SAO = 0x10
PR_SET_PTRACER_ANY = 0xffffffffffffffff
PTP_CLOCK_GETCAPS = 0x40503d01
PTP_CLOCK_GETCAPS2 = 0x40503d0a
PTP_ENABLE_PPS = 0x80043d04
PTP_ENABLE_PPS2 = 0x80043d0d
PTP_EXTTS_REQUEST = 0x80103d02
PTP_EXTTS_REQUEST2 = 0x80103d0b
PTP_MASK_CLEAR_ALL = 0x20003d13
PTP_MASK_EN_SINGLE = 0x80043d14
PTP_PEROUT_REQUEST = 0x80383d03
PTP_PEROUT_REQUEST2 = 0x80383d0c
PTP_PIN_SETFUNC = 0x80603d07
PTP_PIN_SETFUNC2 = 0x80603d10
PTP_SYS_OFFSET = 0x83403d05
PTP_SYS_OFFSET2 = 0x83403d0e
PTRACE_GETEVRREGS = 0x14
PTRACE_GETFPREGS = 0xe
PTRACE_GETREGS64 = 0x16


@ -234,6 +234,20 @@ const (
PPPIOCUNBRIDGECHAN = 0x7434
PPPIOCXFERUNIT = 0x744e
PR_SET_PTRACER_ANY = 0xffffffffffffffff
PTP_CLOCK_GETCAPS = 0x80503d01
PTP_CLOCK_GETCAPS2 = 0x80503d0a
PTP_ENABLE_PPS = 0x40043d04
PTP_ENABLE_PPS2 = 0x40043d0d
PTP_EXTTS_REQUEST = 0x40103d02
PTP_EXTTS_REQUEST2 = 0x40103d0b
PTP_MASK_CLEAR_ALL = 0x3d13
PTP_MASK_EN_SINGLE = 0x40043d14
PTP_PEROUT_REQUEST = 0x40383d03
PTP_PEROUT_REQUEST2 = 0x40383d0c
PTP_PIN_SETFUNC = 0x40603d07
PTP_PIN_SETFUNC2 = 0x40603d10
PTP_SYS_OFFSET = 0x43403d05
PTP_SYS_OFFSET2 = 0x43403d0e
PTRACE_GETFDPIC = 0x21
PTRACE_GETFDPIC_EXEC = 0x0
PTRACE_GETFDPIC_INTERP = 0x1


@ -234,6 +234,20 @@ const (
PPPIOCUNBRIDGECHAN = 0x7434
PPPIOCXFERUNIT = 0x744e
PR_SET_PTRACER_ANY = 0xffffffffffffffff
PTP_CLOCK_GETCAPS = 0x80503d01
PTP_CLOCK_GETCAPS2 = 0x80503d0a
PTP_ENABLE_PPS = 0x40043d04
PTP_ENABLE_PPS2 = 0x40043d0d
PTP_EXTTS_REQUEST = 0x40103d02
PTP_EXTTS_REQUEST2 = 0x40103d0b
PTP_MASK_CLEAR_ALL = 0x3d13
PTP_MASK_EN_SINGLE = 0x40043d14
PTP_PEROUT_REQUEST = 0x40383d03
PTP_PEROUT_REQUEST2 = 0x40383d0c
PTP_PIN_SETFUNC = 0x40603d07
PTP_PIN_SETFUNC2 = 0x40603d10
PTP_SYS_OFFSET = 0x43403d05
PTP_SYS_OFFSET2 = 0x43403d0e
PTRACE_DISABLE_TE = 0x5010
PTRACE_ENABLE_TE = 0x5009
PTRACE_GET_LAST_BREAK = 0x5006

View file

@ -239,6 +239,20 @@ const (
PPPIOCUNBRIDGECHAN = 0x20007434
PPPIOCXFERUNIT = 0x2000744e
PR_SET_PTRACER_ANY = 0xffffffffffffffff
PTP_CLOCK_GETCAPS = 0x40503d01
PTP_CLOCK_GETCAPS2 = 0x40503d0a
PTP_ENABLE_PPS = 0x80043d04
PTP_ENABLE_PPS2 = 0x80043d0d
PTP_EXTTS_REQUEST = 0x80103d02
PTP_EXTTS_REQUEST2 = 0x80103d0b
PTP_MASK_CLEAR_ALL = 0x20003d13
PTP_MASK_EN_SINGLE = 0x80043d14
PTP_PEROUT_REQUEST = 0x80383d03
PTP_PEROUT_REQUEST2 = 0x80383d0c
PTP_PIN_SETFUNC = 0x80603d07
PTP_PIN_SETFUNC2 = 0x80603d10
PTP_SYS_OFFSET = 0x83403d05
PTP_SYS_OFFSET2 = 0x83403d0e
PTRACE_GETFPAREGS = 0x14
PTRACE_GETFPREGS = 0xe
PTRACE_GETFPREGS64 = 0x19

View file

@ -592,6 +592,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockSettime(clockid int32, time *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_SETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
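The two wrappers in this hunk, ClockSettime above and ClockNanosleep just below, are thin generated wrappers over the corresponding syscalls. A minimal, hedged usage sketch (assuming a Linux target and the exported x/sys/unix API) of a 100 ms relative sleep on the monotonic clock:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Relative 100 ms sleep on CLOCK_MONOTONIC; flags value 0 means "relative".
	req := unix.Timespec{Nsec: 100_000_000}
	if err := unix.ClockNanosleep(unix.CLOCK_MONOTONIC, 0, &req, nil); err != nil {
		fmt.Println("ClockNanosleep:", err)
		return
	}
	fmt.Println("slept 100ms")
}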
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) {
_, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0)
if e1 != 0 {

View file

@ -1752,12 +1752,6 @@ const (
IFLA_IPVLAN_UNSPEC = 0x0
IFLA_IPVLAN_MODE = 0x1
IFLA_IPVLAN_FLAGS = 0x2
NETKIT_NEXT = -0x1
NETKIT_PASS = 0x0
NETKIT_DROP = 0x2
NETKIT_REDIRECT = 0x7
NETKIT_L2 = 0x0
NETKIT_L3 = 0x1
IFLA_NETKIT_UNSPEC = 0x0
IFLA_NETKIT_PEER_INFO = 0x1
IFLA_NETKIT_PRIMARY = 0x2
@ -1796,6 +1790,7 @@ const (
IFLA_VXLAN_DF = 0x1d
IFLA_VXLAN_VNIFILTER = 0x1e
IFLA_VXLAN_LOCALBYPASS = 0x1f
IFLA_VXLAN_LABEL_POLICY = 0x20
IFLA_GENEVE_UNSPEC = 0x0
IFLA_GENEVE_ID = 0x1
IFLA_GENEVE_REMOTE = 0x2
@ -1825,6 +1820,8 @@ const (
IFLA_GTP_ROLE = 0x4
IFLA_GTP_CREATE_SOCKETS = 0x5
IFLA_GTP_RESTART_COUNT = 0x6
IFLA_GTP_LOCAL = 0x7
IFLA_GTP_LOCAL6 = 0x8
IFLA_BOND_UNSPEC = 0x0
IFLA_BOND_MODE = 0x1
IFLA_BOND_ACTIVE_SLAVE = 0x2
@ -1857,6 +1854,7 @@ const (
IFLA_BOND_AD_LACP_ACTIVE = 0x1d
IFLA_BOND_MISSED_MAX = 0x1e
IFLA_BOND_NS_IP6_TARGET = 0x1f
IFLA_BOND_COUPLED_CONTROL = 0x20
IFLA_BOND_AD_INFO_UNSPEC = 0x0
IFLA_BOND_AD_INFO_AGGREGATOR = 0x1
IFLA_BOND_AD_INFO_NUM_PORTS = 0x2
@ -1925,6 +1923,7 @@ const (
IFLA_HSR_SEQ_NR = 0x5
IFLA_HSR_VERSION = 0x6
IFLA_HSR_PROTOCOL = 0x7
IFLA_HSR_INTERLINK = 0x8
IFLA_STATS_UNSPEC = 0x0
IFLA_STATS_LINK_64 = 0x1
IFLA_STATS_LINK_XSTATS = 0x2
@ -1977,6 +1976,15 @@ const (
IFLA_DSA_MASTER = 0x1
)
const (
NETKIT_NEXT = -0x1
NETKIT_PASS = 0x0
NETKIT_DROP = 0x2
NETKIT_REDIRECT = 0x7
NETKIT_L2 = 0x0
NETKIT_L3 = 0x1
)
const (
NF_INET_PRE_ROUTING = 0x0
NF_INET_LOCAL_IN = 0x1
@ -4110,6 +4118,106 @@ type EthtoolDrvinfo struct {
Regdump_len uint32
}
type EthtoolTsInfo struct {
Cmd uint32
So_timestamping uint32
Phc_index int32
Tx_types uint32
Tx_reserved [3]uint32
Rx_filters uint32
Rx_reserved [3]uint32
}
type HwTstampConfig struct {
Flags int32
Tx_type int32
Rx_filter int32
}
const (
HWTSTAMP_FILTER_NONE = 0x0
HWTSTAMP_FILTER_ALL = 0x1
HWTSTAMP_FILTER_SOME = 0x2
HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 0x3
HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 0x6
HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 0x9
HWTSTAMP_FILTER_PTP_V2_EVENT = 0xc
)
const (
HWTSTAMP_TX_OFF = 0x0
HWTSTAMP_TX_ON = 0x1
HWTSTAMP_TX_ONESTEP_SYNC = 0x2
)
type (
PtpClockCaps struct {
Max_adj int32
N_alarm int32
N_ext_ts int32
N_per_out int32
Pps int32
N_pins int32
Cross_timestamping int32
Adjust_phase int32
Max_phase_adj int32
Rsv [11]int32
}
PtpClockTime struct {
Sec int64
Nsec uint32
Reserved uint32
}
PtpExttsEvent struct {
T PtpClockTime
Index uint32
Flags uint32
Rsv [2]uint32
}
PtpExttsRequest struct {
Index uint32
Flags uint32
Rsv [2]uint32
}
PtpPeroutRequest struct {
StartOrPhase PtpClockTime
Period PtpClockTime
Index uint32
Flags uint32
On PtpClockTime
}
PtpPinDesc struct {
Name [64]byte
Index uint32
Func uint32
Chan uint32
Rsv [5]uint32
}
PtpSysOffset struct {
Samples uint32
Rsv [3]uint32
Ts [51]PtpClockTime
}
PtpSysOffsetExtended struct {
Samples uint32
Rsv [3]uint32
Ts [25][3]PtpClockTime
}
PtpSysOffsetPrecise struct {
Device PtpClockTime
Realtime PtpClockTime
Monoraw PtpClockTime
Rsv [4]uint32
}
)
const (
PTP_PF_NONE = 0x0
PTP_PF_EXTTS = 0x1
PTP_PF_PEROUT = 0x2
PTP_PF_PHYSYNC = 0x3
)
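The PTP_* ioctl numbers and Ptp* structs above make it possible to talk to a PTP hardware clock character device directly. A hedged sketch, assuming a Linux host that exposes /dev/ptp0, that issues PTP_CLOCK_GETCAPS via a raw ioctl and reads back the PtpClockCaps:

package main

import (
	"fmt"
	"os"
	"unsafe"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/dev/ptp0") // assumed device node
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer f.Close()

	// Query the clock capabilities with the PTP_CLOCK_GETCAPS ioctl.
	var caps unix.PtpClockCaps
	_, _, errno := unix.Syscall(unix.SYS_IOCTL, f.Fd(),
		uintptr(unix.PTP_CLOCK_GETCAPS), uintptr(unsafe.Pointer(&caps)))
	if errno != 0 {
		fmt.Println("ioctl:", errno)
		return
	}
	fmt.Printf("max_adj=%d ppb, pins=%d, pps=%v\n", caps.Max_adj, caps.N_pins, caps.Pps != 0)
}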
type (
HIDRawReportDescriptor struct {
Size uint32

View file

@ -377,6 +377,12 @@ type Flock_t struct {
Pid int32
}
type F_cnvrt struct {
Cvtcmd int32
Pccsid int16
Fccsid int16
}
type Termios struct {
Cflag uint32
Iflag uint32

View file

@ -725,20 +725,12 @@ func DurationSinceBoot() time.Duration {
}
func Ftruncate(fd Handle, length int64) (err error) {
curoffset, e := Seek(fd, 0, 1)
if e != nil {
return e
type _FILE_END_OF_FILE_INFO struct {
EndOfFile int64
}
defer Seek(fd, curoffset, 0)
_, e = Seek(fd, length, 0)
if e != nil {
return e
}
e = SetEndOfFile(fd)
if e != nil {
return e
}
return nil
var info _FILE_END_OF_FILE_INFO
info.EndOfFile = length
return SetFileInformationByHandle(fd, FileEndOfFileInfo, (*byte)(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info)))
}
func Gettimeofday(tv *Timeval) (err error) {
@ -894,6 +886,11 @@ const socket_error = uintptr(^uint32(0))
//sys GetACP() (acp uint32) = kernel32.GetACP
//sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar
//sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx
//sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex
//sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry
//sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange
//sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange
//sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2
// For testing: clients can set this flag to force
// creation of IPv6 sockets to return EAFNOSUPPORT.
@ -1685,13 +1682,16 @@ func (s NTStatus) Error() string {
// do not use NTUnicodeString, and instead UTF16PtrFromString should be used for
// the more common *uint16 string type.
func NewNTUnicodeString(s string) (*NTUnicodeString, error) {
var u NTUnicodeString
s16, err := UTF16PtrFromString(s)
s16, err := UTF16FromString(s)
if err != nil {
return nil, err
}
RtlInitUnicodeString(&u, s16)
return &u, nil
n := uint16(len(s16) * 2)
return &NTUnicodeString{
Length: n - 2, // subtract 2 bytes for the NULL terminator
MaximumLength: n,
Buffer: &s16[0],
}, nil
}
// Slice returns a uint16 slice that aliases the data in the NTUnicodeString.

View file

@ -2203,6 +2203,132 @@ const (
IfOperStatusLowerLayerDown = 7
)
const (
IF_MAX_PHYS_ADDRESS_LENGTH = 32
IF_MAX_STRING_SIZE = 256
)
// MIB_IF_ENTRY_LEVEL enumeration from netioapi.h or
// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/nf-netioapi-getifentry2ex.
const (
MibIfEntryNormal = 0
MibIfEntryNormalWithoutStatistics = 2
)
// MIB_NOTIFICATION_TYPE enumeration from netioapi.h or
// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ne-netioapi-mib_notification_type.
const (
MibParameterNotification = 0
MibAddInstance = 1
MibDeleteInstance = 2
MibInitialNotification = 3
)
// MibIfRow2 stores information about a particular interface. See
// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_if_row2.
type MibIfRow2 struct {
InterfaceLuid uint64
InterfaceIndex uint32
InterfaceGuid GUID
Alias [IF_MAX_STRING_SIZE + 1]uint16
Description [IF_MAX_STRING_SIZE + 1]uint16
PhysicalAddressLength uint32
PhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8
PermanentPhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8
Mtu uint32
Type uint32
TunnelType uint32
MediaType uint32
PhysicalMediumType uint32
AccessType uint32
DirectionType uint32
InterfaceAndOperStatusFlags uint8
OperStatus uint32
AdminStatus uint32
MediaConnectState uint32
NetworkGuid GUID
ConnectionType uint32
TransmitLinkSpeed uint64
ReceiveLinkSpeed uint64
InOctets uint64
InUcastPkts uint64
InNUcastPkts uint64
InDiscards uint64
InErrors uint64
InUnknownProtos uint64
InUcastOctets uint64
InMulticastOctets uint64
InBroadcastOctets uint64
OutOctets uint64
OutUcastPkts uint64
OutNUcastPkts uint64
OutDiscards uint64
OutErrors uint64
OutUcastOctets uint64
OutMulticastOctets uint64
OutBroadcastOctets uint64
OutQLen uint64
}
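The MibIfRow2 layout above is what the newly vendored GetIfEntry2Ex fills in. A hedged sketch (the interface index 1 is an arbitrary assumption) of looking up one interface without its statistics:

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	var row windows.MibIfRow2
	row.InterfaceIndex = 1 // assumed interface index
	if err := windows.GetIfEntry2Ex(windows.MibIfEntryNormalWithoutStatistics, &row); err != nil {
		fmt.Println("GetIfEntry2Ex:", err)
		return
	}
	fmt.Println("alias:", windows.UTF16ToString(row.Alias[:]))
	fmt.Println("MTU:", row.Mtu)
}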
// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See
// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row.
type MibUnicastIpAddressRow struct {
Address RawSockaddrInet6 // SOCKADDR_INET union
InterfaceLuid uint64
InterfaceIndex uint32
PrefixOrigin uint32
SuffixOrigin uint32
ValidLifetime uint32
PreferredLifetime uint32
OnLinkPrefixLength uint8
SkipAsSource uint8
DadState uint32
ScopeId uint32
CreationTimeStamp Filetime
}
const ScopeLevelCount = 16
// MIB_IPINTERFACE_ROW stores interface management information for a particular IP address family on a network interface.
// See https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipinterface_row.
type MibIpInterfaceRow struct {
Family uint16
InterfaceLuid uint64
InterfaceIndex uint32
MaxReassemblySize uint32
InterfaceIdentifier uint64
MinRouterAdvertisementInterval uint32
MaxRouterAdvertisementInterval uint32
AdvertisingEnabled uint8
ForwardingEnabled uint8
WeakHostSend uint8
WeakHostReceive uint8
UseAutomaticMetric uint8
UseNeighborUnreachabilityDetection uint8
ManagedAddressConfigurationSupported uint8
OtherStatefulConfigurationSupported uint8
AdvertiseDefaultRoute uint8
RouterDiscoveryBehavior uint32
DadTransmits uint32
BaseReachableTime uint32
RetransmitTime uint32
PathMtuDiscoveryTimeout uint32
LinkLocalAddressBehavior uint32
LinkLocalAddressTimeout uint32
ZoneIndices [ScopeLevelCount]uint32
SitePrefixLength uint32
Metric uint32
NlMtu uint32
Connected uint8
SupportsWakeUpPatterns uint8
SupportsNeighborDiscovery uint8
SupportsRouterDiscovery uint8
ReachableTime uint32
TransmitOffload uint32
ReceiveOffload uint32
DisableDefaultRoutes uint8
}
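MibIpInterfaceRow is the row type delivered to NotifyIpInterfaceChange callbacks. A hedged sketch, with the address family, timeout, and callback wiring as assumptions, that subscribes to interface changes and cancels the subscription on exit:

package main

import (
	"log"
	"syscall"
	"time"
	"unsafe"

	"golang.org/x/sys/windows"
)

func main() {
	// The callback runs on a system thread; keep it short.
	cb := syscall.NewCallback(func(ctx unsafe.Pointer, row *windows.MibIpInterfaceRow, notificationType uint32) uintptr {
		if row != nil {
			log.Printf("interface %d changed (notification %d)", row.InterfaceIndex, notificationType)
		}
		return 0
	})

	var handle windows.Handle
	// AF_UNSPEC requests notifications for both IPv4 and IPv6 interfaces.
	if err := windows.NotifyIpInterfaceChange(windows.AF_UNSPEC, cb, nil, false, &handle); err != nil {
		log.Fatalf("NotifyIpInterfaceChange: %v", err)
	}
	defer windows.CancelMibChangeNotify2(handle)

	time.Sleep(30 * time.Second) // demo only: wait for changes, then unsubscribe
}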
// Console related constants used for the mode parameter to SetConsoleMode. See
// https://docs.microsoft.com/en-us/windows/console/setconsolemode for details.

View file

@ -181,10 +181,15 @@ var (
procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree")
procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute")
procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute")
procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2")
procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses")
procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo")
procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx")
procGetIfEntry = modiphlpapi.NewProc("GetIfEntry")
procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex")
procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry")
procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange")
procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange")
procAddDllDirectory = modkernel32.NewProc("AddDllDirectory")
procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject")
procCancelIo = modkernel32.NewProc("CancelIo")
@ -1606,6 +1611,14 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si
return
}
func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) {
r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
return
}
func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) {
r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0)
if r0 != 0 {
@ -1638,6 +1651,46 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) {
return
}
func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) {
r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
return
}
func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) {
r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
return
}
func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) {
var _p0 uint32
if initialNotification {
_p0 = 1
}
r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
return
}
func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) {
var _p0 uint32
if initialNotification {
_p0 = 1
}
r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
return
}
func AddDllDirectory(path *uint16) (cookie uintptr, err error) {
r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
cookie = uintptr(r0)

130
vendor/google.golang.org/grpc/attributes/attributes.go generated vendored Normal file
View file

@ -0,0 +1,130 @@
/*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package attributes defines a generic key/value store used in various gRPC
// components.
//
// # Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.
package attributes
import (
"fmt"
"strings"
)
// Attributes is an immutable struct for storing and retrieving generic
// key/value pairs. Keys must be hashable, and users should define their own
// types for keys. Values should not be modified after they are added to an
// Attributes or if they were received from one. If values implement 'Equal(o
// interface{}) bool', it will be called by (*Attributes).Equal to determine
// whether two values with the same key should be considered equal.
type Attributes struct {
m map[interface{}]interface{}
}
// New returns a new Attributes containing the key/value pair.
func New(key, value interface{}) *Attributes {
return &Attributes{m: map[interface{}]interface{}{key: value}}
}
// WithValue returns a new Attributes containing the previous keys and values
// and the new key/value pair. If the same key appears multiple times, the
// last value overwrites all previous values for that key. To remove an
// existing key, use a nil value. value should not be modified later.
func (a *Attributes) WithValue(key, value interface{}) *Attributes {
if a == nil {
return New(key, value)
}
n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)}
for k, v := range a.m {
n.m[k] = v
}
n.m[key] = value
return n
}
// Value returns the value associated with these attributes for key, or nil if
// no value is associated with key. The returned value should not be modified.
func (a *Attributes) Value(key interface{}) interface{} {
if a == nil {
return nil
}
return a.m[key]
}
// Equal returns whether a and o are equivalent. If 'Equal(o interface{})
// bool' is implemented for a value in the attributes, it is called to
// determine if the value matches the one stored in the other attributes. If
// Equal is not implemented, standard equality is used to determine if the two
// values are equal. Note that some types (e.g. maps) aren't comparable by
// default, so they must be wrapped in a struct, or in an alias type, with Equal
// defined.
func (a *Attributes) Equal(o *Attributes) bool {
if a == nil && o == nil {
return true
}
if a == nil || o == nil {
return false
}
if len(a.m) != len(o.m) {
return false
}
for k, v := range a.m {
ov, ok := o.m[k]
if !ok {
// o missing element of a
return false
}
if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok {
if !eq.Equal(ov) {
return false
}
} else if v != ov {
// Fall back to a standard equality check if Equal is unimplemented.
return false
}
}
return true
}
// String prints the attribute map. If any key or value in the map
// implements fmt.Stringer, its String method is called and the result is appended.
func (a *Attributes) String() string {
var sb strings.Builder
sb.WriteString("{")
first := true
for k, v := range a.m {
var key, val string
if str, ok := k.(interface{ String() string }); ok {
key = str.String()
}
if str, ok := v.(interface{ String() string }); ok {
val = str.String()
}
if !first {
sb.WriteString(", ")
}
sb.WriteString(fmt.Sprintf("%q: %q, ", key, val))
first = false
}
sb.WriteString("}")
return sb.String()
}
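A short, hedged usage sketch of the Attributes API described above; the key type and string values are arbitrary examples. WithValue never mutates the receiver, it returns a new Attributes:

package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// key is a user-defined, hashable key type, as the package comment recommends.
type key struct{}

func main() {
	a := attributes.New(key{}, "v1")
	b := a.WithValue(key{}, "v2") // a is left unchanged

	fmt.Println(a.Value(key{})) // v1
	fmt.Println(b.Value(key{})) // v2
	fmt.Println(a.Equal(b))     // false: values differ for the same key
}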

View file

@ -18,7 +18,15 @@
package codes
import "strconv"
import (
"strconv"
"google.golang.org/grpc/internal"
)
func init() {
internal.CanonicalString = canonicalString
}
func (c Code) String() string {
switch c {
@ -60,3 +68,44 @@ func (c Code) String() string {
return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
}
}
func canonicalString(c Code) string {
switch c {
case OK:
return "OK"
case Canceled:
return "CANCELLED"
case Unknown:
return "UNKNOWN"
case InvalidArgument:
return "INVALID_ARGUMENT"
case DeadlineExceeded:
return "DEADLINE_EXCEEDED"
case NotFound:
return "NOT_FOUND"
case AlreadyExists:
return "ALREADY_EXISTS"
case PermissionDenied:
return "PERMISSION_DENIED"
case ResourceExhausted:
return "RESOURCE_EXHAUSTED"
case FailedPrecondition:
return "FAILED_PRECONDITION"
case Aborted:
return "ABORTED"
case OutOfRange:
return "OUT_OF_RANGE"
case Unimplemented:
return "UNIMPLEMENTED"
case Internal:
return "INTERNAL"
case Unavailable:
return "UNAVAILABLE"
case DataLoss:
return "DATA_LOSS"
case Unauthenticated:
return "UNAUTHENTICATED"
default:
return "CODE(" + strconv.FormatInt(int64(c), 10) + ")"
}
}
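For orientation, a hedged sketch of how Code values print: String (partially elided in this hunk) yields the Go-style names, while canonicalString above supplies the SCREAMING_SNAKE_CASE spellings through the internal hook. The expected output follows the upstream implementation and is an assumption where the switch is not shown here:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
)

func main() {
	fmt.Println(codes.OK)        // OK
	fmt.Println(codes.Canceled)  // Canceled (assumed; the canonical form is CANCELLED)
	fmt.Println(codes.Code(999)) // Code(999), the default case shown above
}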

View file

@ -0,0 +1,94 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package connectivity defines connectivity semantics.
// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md.
package connectivity
import (
"google.golang.org/grpc/grpclog"
)
var logger = grpclog.Component("core")
// State indicates the state of connectivity.
// It can be the state of a ClientConn or SubConn.
type State int
func (s State) String() string {
switch s {
case Idle:
return "IDLE"
case Connecting:
return "CONNECTING"
case Ready:
return "READY"
case TransientFailure:
return "TRANSIENT_FAILURE"
case Shutdown:
return "SHUTDOWN"
default:
logger.Errorf("unknown connectivity state: %d", s)
return "INVALID_STATE"
}
}
const (
// Idle indicates the ClientConn is idle.
Idle State = iota
// Connecting indicates the ClientConn is connecting.
Connecting
// Ready indicates the ClientConn is ready for work.
Ready
// TransientFailure indicates the ClientConn has seen a failure but expects to recover.
TransientFailure
// Shutdown indicates the ClientConn has started shutting down.
Shutdown
)
// ServingMode indicates the current mode of operation of the server.
//
// Only xDS enabled gRPC servers currently report their serving mode.
type ServingMode int
const (
// ServingModeStarting indicates that the server is starting up.
ServingModeStarting ServingMode = iota
// ServingModeServing indicates that the server contains all required
// configuration and is serving RPCs.
ServingModeServing
// ServingModeNotServing indicates that the server is not accepting new
// connections. Existing connections will be closed gracefully, allowing
// in-progress RPCs to complete. A server enters this mode when it does not
// contain the required configuration to serve RPCs.
ServingModeNotServing
)
func (s ServingMode) String() string {
switch s {
case ServingModeStarting:
return "STARTING"
case ServingModeServing:
return "SERVING"
case ServingModeNotServing:
return "NOT_SERVING"
default:
logger.Errorf("unknown serving mode: %d", s)
return "INVALID_MODE"
}
}
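A minimal sketch of the stringers defined above; it just prints every State plus one ServingMode and involves no real connections:

package main

import (
	"fmt"

	"google.golang.org/grpc/connectivity"
)

func main() {
	states := []connectivity.State{
		connectivity.Idle, connectivity.Connecting, connectivity.Ready,
		connectivity.TransientFailure, connectivity.Shutdown,
	}
	for _, s := range states {
		fmt.Println(s) // IDLE, CONNECTING, READY, TRANSIENT_FAILURE, SHUTDOWN
	}
	fmt.Println(connectivity.ServingModeServing) // SERVING
}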

View file

@ -0,0 +1,291 @@
/*
*
* Copyright 2014 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package credentials implements various credentials supported by gRPC library,
// which encapsulate all the state needed by a client to authenticate with a
// server and make various assertions, e.g., about the client's identity, role,
// or whether it is authorized to make a particular call.
package credentials // import "google.golang.org/grpc/credentials"
import (
"context"
"errors"
"fmt"
"net"
"github.com/golang/protobuf/proto"
"google.golang.org/grpc/attributes"
icredentials "google.golang.org/grpc/internal/credentials"
)
// PerRPCCredentials defines the common interface for the credentials which need to
// attach security information to every RPC (e.g., oauth2).
type PerRPCCredentials interface {
// GetRequestMetadata gets the current request metadata, refreshing tokens
// if required. This should be called by the transport layer on each
// request, and the data should be populated in headers or other
// context. If a status code is returned, it will be used as the status for
// the RPC (restricted to an allowable set of codes as defined by gRFC
// A54). uri is the URI of the entry point for the request. When supported
// by the underlying implementation, ctx can be used for timeout and
// cancellation. Additionally, RequestInfo data will be available via ctx
// to this call. TODO(zhaoq): Define the set of the qualified keys instead
// of leaving it as an arbitrary string.
GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
// RequireTransportSecurity indicates whether the credentials requires
// transport security.
RequireTransportSecurity() bool
}
// SecurityLevel defines the protection level on an established connection.
//
// This API is experimental.
type SecurityLevel int
const (
// InvalidSecurityLevel indicates an invalid security level.
// The zero SecurityLevel value is invalid for backward compatibility.
InvalidSecurityLevel SecurityLevel = iota
// NoSecurity indicates a connection is insecure.
NoSecurity
// IntegrityOnly indicates a connection only provides integrity protection.
IntegrityOnly
// PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection.
PrivacyAndIntegrity
)
// String returns SecurityLevel in a string format.
func (s SecurityLevel) String() string {
switch s {
case NoSecurity:
return "NoSecurity"
case IntegrityOnly:
return "IntegrityOnly"
case PrivacyAndIntegrity:
return "PrivacyAndIntegrity"
}
return fmt.Sprintf("invalid SecurityLevel: %v", int(s))
}
// CommonAuthInfo contains authenticated information common to AuthInfo implementations.
// It should be embedded in a struct implementing AuthInfo to provide additional information
// about the credentials.
//
// This API is experimental.
type CommonAuthInfo struct {
SecurityLevel SecurityLevel
}
// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct.
func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo {
return c
}
// ProtocolInfo provides information regarding the gRPC wire protocol version,
// security protocol, security protocol version in use, server name, etc.
type ProtocolInfo struct {
// ProtocolVersion is the gRPC wire protocol version.
ProtocolVersion string
// SecurityProtocol is the security protocol in use.
SecurityProtocol string
// SecurityVersion is the security protocol version. It is a static version string from the
// credentials, not a value that reflects per-connection protocol negotiation. To retrieve
// details about the credentials used for a connection, use the Peer's AuthInfo field instead.
//
// Deprecated: please use Peer.AuthInfo.
SecurityVersion string
// ServerName is the user-configured server name.
ServerName string
}
// AuthInfo defines the common interface for the auth information the users are interested in.
// A struct that implements AuthInfo should embed CommonAuthInfo by including additional
// information about the credentials in it.
type AuthInfo interface {
AuthType() string
}
// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
// and the caller should not close rawConn.
var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
// TransportCredentials defines the common interface for all the live gRPC wire
// protocols and supported transport security protocols (e.g., TLS, SSL).
type TransportCredentials interface {
// ClientHandshake does the authentication handshake specified by the
// corresponding authentication protocol on rawConn for clients. It returns
// the authenticated connection and the corresponding auth information
// about the connection. The auth information should embed CommonAuthInfo
// to return additional information about the credentials. Implementations
// must use the provided context to implement timely cancellation. gRPC
// will try to reconnect if the error returned is a temporary error
// (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the
// returned error is a wrapper error, implementations should make sure that
// the error implements Temporary() to have the correct retry behaviors.
// Additionally, ClientHandshakeInfo data will be available via the context
// passed to this call.
//
// The second argument to this method is the `:authority` header value used
// while creating new streams on this connection after authentication
// succeeds. Implementations must use this as the server name during the
// authentication handshake.
//
// If the returned net.Conn is closed, it MUST close the net.Conn provided.
ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
// ServerHandshake does the authentication handshake for servers. It returns
// the authenticated connection and the corresponding auth information about
// the connection. The auth information should embed CommonAuthInfo to return additional information
// about the credentials.
//
// If the returned net.Conn is closed, it MUST close the net.Conn provided.
ServerHandshake(net.Conn) (net.Conn, AuthInfo, error)
// Info provides the ProtocolInfo of this TransportCredentials.
Info() ProtocolInfo
// Clone makes a copy of this TransportCredentials.
Clone() TransportCredentials
// OverrideServerName specifies the value used for the following:
// - verifying the hostname on the returned certificates
// - as SNI in the client's handshake to support virtual hosting
// - as the value for `:authority` header at stream creation time
//
// Deprecated: use grpc.WithAuthority instead. Will be supported
// throughout 1.x.
OverrideServerName(string) error
}
// Bundle is a combination of TransportCredentials and PerRPCCredentials.
//
// It also contains a mode switching method, so it can be used as a combination
// of different credential policies.
//
// Bundle cannot be used together with individual TransportCredentials.
// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials.
//
// This API is experimental.
type Bundle interface {
// TransportCredentials returns the transport credentials from the Bundle.
//
// Implementations must return non-nil transport credentials. If transport
// security is not needed by the Bundle, implementations may choose to
// return insecure.NewCredentials().
TransportCredentials() TransportCredentials
// PerRPCCredentials returns the per-RPC credentials from the Bundle.
//
// May be nil if per-RPC credentials are not needed.
PerRPCCredentials() PerRPCCredentials
// NewWithMode should make a copy of Bundle, and switch mode. Modifying the
// existing Bundle may cause races.
//
// NewWithMode returns nil if the requested mode is not supported.
NewWithMode(mode string) (Bundle, error)
}
// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls.
//
// This API is experimental.
type RequestInfo struct {
// The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method")
Method string
// AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake)
AuthInfo AuthInfo
}
// RequestInfoFromContext extracts the RequestInfo from the context if it exists.
//
// This API is experimental.
func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo)
return ri, ok
}
// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes
// it possible to pass arbitrary data to the handshaker from gRPC, resolver,
// balancer etc. Individual credential implementations control the actual
// format of the data that they are willing to receive.
//
// This API is experimental.
type ClientHandshakeInfo struct {
// Attributes contains the attributes for the address. It could be provided
// by the gRPC, resolver, balancer etc.
Attributes *attributes.Attributes
}
// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored
// in ctx.
//
// This API is experimental.
func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo {
chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo)
return chi
}
// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
// It returns success if 1) the condition is satisfied or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility.
//
// This API is experimental.
func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error {
type internalInfo interface {
GetCommonAuthInfo() CommonAuthInfo
}
if ai == nil {
return errors.New("AuthInfo is nil")
}
if ci, ok := ai.(internalInfo); ok {
// CommonAuthInfo.SecurityLevel has an invalid value.
if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel {
return nil
}
if ci.GetCommonAuthInfo().SecurityLevel < level {
return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel)
}
}
// The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method.
return nil
}
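A hedged illustration of CheckSecurityLevel, using TLSInfo (added later in this commit) as the AuthInfo implementation; the security levels chosen are arbitrary:

package main

import (
	"fmt"

	"google.golang.org/grpc/credentials"
)

func main() {
	ai := credentials.TLSInfo{
		CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.IntegrityOnly},
	}
	// Fails: the connection only offers integrity protection.
	fmt.Println(credentials.CheckSecurityLevel(ai, credentials.PrivacyAndIntegrity))
	// Succeeds (prints <nil>): the requirement is met.
	fmt.Println(credentials.CheckSecurityLevel(ai, credentials.IntegrityOnly))
}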
// ChannelzSecurityInfo defines the interface that security protocols should implement
// in order to provide security info to channelz.
//
// This API is experimental.
type ChannelzSecurityInfo interface {
GetSecurityValue() ChannelzSecurityValue
}
// ChannelzSecurityValue defines the interface that GetSecurityValue() return value
// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue
// and *OtherChannelzSecurityValue.
//
// This API is experimental.
type ChannelzSecurityValue interface {
isChannelzSecurityValue()
}
// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
// from GetSecurityValue(), which contains protocol-specific security info. Note
// that the Value field will be sent to users of channelz requesting channel info,
// so sensitive info is best avoided.
//
// This API is experimental.
type OtherChannelzSecurityValue struct {
ChannelzSecurityValue
Name string
Value proto.Message
}

236
vendor/google.golang.org/grpc/credentials/tls.go generated vendored Normal file
View file

@ -0,0 +1,236 @@
/*
*
* Copyright 2014 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package credentials
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"net"
"net/url"
"os"
credinternal "google.golang.org/grpc/internal/credentials"
)
// TLSInfo contains the auth information for a TLS authenticated connection.
// It implements the AuthInfo interface.
type TLSInfo struct {
State tls.ConnectionState
CommonAuthInfo
// This API is experimental.
SPIFFEID *url.URL
}
// AuthType returns the type of TLSInfo as a string.
func (t TLSInfo) AuthType() string {
return "tls"
}
// GetSecurityValue returns security info requested by channelz.
func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
v := &TLSChannelzSecurityValue{
StandardName: cipherSuiteLookup[t.State.CipherSuite],
}
// Currently there's no way to get LocalCertificate info from tls package.
if len(t.State.PeerCertificates) > 0 {
v.RemoteCertificate = t.State.PeerCertificates[0].Raw
}
return v
}
// tlsCreds is the credentials required for authenticating a connection using TLS.
type tlsCreds struct {
// TLS configuration
config *tls.Config
}
func (c tlsCreds) Info() ProtocolInfo {
return ProtocolInfo{
SecurityProtocol: "tls",
SecurityVersion: "1.2",
ServerName: c.config.ServerName,
}
}
func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
// use local cfg to avoid clobbering ServerName if using multiple endpoints
cfg := credinternal.CloneTLSConfig(c.config)
if cfg.ServerName == "" {
serverName, _, err := net.SplitHostPort(authority)
if err != nil {
// If the authority had no host port or if the authority cannot be parsed, use it as-is.
serverName = authority
}
cfg.ServerName = serverName
}
conn := tls.Client(rawConn, cfg)
errChannel := make(chan error, 1)
go func() {
errChannel <- conn.Handshake()
close(errChannel)
}()
select {
case err := <-errChannel:
if err != nil {
conn.Close()
return nil, nil, err
}
case <-ctx.Done():
conn.Close()
return nil, nil, ctx.Err()
}
tlsInfo := TLSInfo{
State: conn.ConnectionState(),
CommonAuthInfo: CommonAuthInfo{
SecurityLevel: PrivacyAndIntegrity,
},
}
id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
if id != nil {
tlsInfo.SPIFFEID = id
}
return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
}
func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
conn := tls.Server(rawConn, c.config)
if err := conn.Handshake(); err != nil {
conn.Close()
return nil, nil, err
}
tlsInfo := TLSInfo{
State: conn.ConnectionState(),
CommonAuthInfo: CommonAuthInfo{
SecurityLevel: PrivacyAndIntegrity,
},
}
id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
if id != nil {
tlsInfo.SPIFFEID = id
}
return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
}
func (c *tlsCreds) Clone() TransportCredentials {
return NewTLS(c.config)
}
func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
c.config.ServerName = serverNameOverride
return nil
}
// NewTLS uses c to construct a TransportCredentials based on TLS.
func NewTLS(c *tls.Config) TransportCredentials {
tc := &tlsCreds{credinternal.CloneTLSConfig(c)}
tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos)
return tc
}
// NewClientTLSFromCert constructs TLS credentials from the provided root
// certificate authority certificate(s) to validate server connections. If
// certificates to establish the identity of the client need to be included in
// the credentials (eg: for mTLS), use NewTLS instead, where a complete
// tls.Config can be specified.
// serverNameOverride is for testing only. If set to a non-empty string,
// it will override the virtual host name of authority (e.g. :authority header
// field) in requests.
func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
}
// NewClientTLSFromFile constructs TLS credentials from the provided root
// certificate authority certificate file(s) to validate server connections. If
// certificates to establish the identity of the client need to be included in
// the credentials (eg: for mTLS), use NewTLS instead, where a complete
// tls.Config can be specified.
// serverNameOverride is for testing only. If set to a non-empty string,
// it will override the virtual host name of authority (e.g. :authority header
// field) in requests.
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
b, err := os.ReadFile(certFile)
if err != nil {
return nil, err
}
cp := x509.NewCertPool()
if !cp.AppendCertsFromPEM(b) {
return nil, fmt.Errorf("credentials: failed to append certificates")
}
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
}
// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
}
// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
// file for server.
func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return nil, err
}
return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
}
// TLSChannelzSecurityValue defines the struct that TLS protocol should return
// from GetSecurityValue(), containing security info like cipher and certificate used.
//
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type TLSChannelzSecurityValue struct {
ChannelzSecurityValue
StandardName string
LocalCertificate []byte
RemoteCertificate []byte
}
var cipherSuiteLookup = map[uint16]string{
tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA",
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA",
tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA",
tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256",
tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384",
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV",
tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256",
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256",
tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384",
tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256",
}
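A hedged client-side sketch of the credentials above: build TransportCredentials with NewTLS and hand them to grpc.Dial. The target address and server name are placeholders, and the dial is lazy, so this compiles and runs without a real gRPC server:

package main

import (
	"crypto/tls"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	creds := credentials.NewTLS(&tls.Config{ServerName: "example.com"}) // assumed server name
	conn, err := grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
	log.Println("initial state:", conn.GetState())
}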

117
vendor/google.golang.org/grpc/grpclog/component.go generated vendored Normal file
View file

@ -0,0 +1,117 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpclog
import (
"fmt"
"google.golang.org/grpc/internal/grpclog"
)
// componentData records the settings for a component.
type componentData struct {
name string
}
var cache = map[string]*componentData{}
func (c *componentData) InfoDepth(depth int, args ...interface{}) {
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
grpclog.InfoDepth(depth+1, args...)
}
func (c *componentData) WarningDepth(depth int, args ...interface{}) {
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
grpclog.WarningDepth(depth+1, args...)
}
func (c *componentData) ErrorDepth(depth int, args ...interface{}) {
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
grpclog.ErrorDepth(depth+1, args...)
}
func (c *componentData) FatalDepth(depth int, args ...interface{}) {
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
grpclog.FatalDepth(depth+1, args...)
}
func (c *componentData) Info(args ...interface{}) {
c.InfoDepth(1, args...)
}
func (c *componentData) Warning(args ...interface{}) {
c.WarningDepth(1, args...)
}
func (c *componentData) Error(args ...interface{}) {
c.ErrorDepth(1, args...)
}
func (c *componentData) Fatal(args ...interface{}) {
c.FatalDepth(1, args...)
}
func (c *componentData) Infof(format string, args ...interface{}) {
c.InfoDepth(1, fmt.Sprintf(format, args...))
}
func (c *componentData) Warningf(format string, args ...interface{}) {
c.WarningDepth(1, fmt.Sprintf(format, args...))
}
func (c *componentData) Errorf(format string, args ...interface{}) {
c.ErrorDepth(1, fmt.Sprintf(format, args...))
}
func (c *componentData) Fatalf(format string, args ...interface{}) {
c.FatalDepth(1, fmt.Sprintf(format, args...))
}
func (c *componentData) Infoln(args ...interface{}) {
c.InfoDepth(1, args...)
}
func (c *componentData) Warningln(args ...interface{}) {
c.WarningDepth(1, args...)
}
func (c *componentData) Errorln(args ...interface{}) {
c.ErrorDepth(1, args...)
}
func (c *componentData) Fatalln(args ...interface{}) {
c.FatalDepth(1, args...)
}
func (c *componentData) V(l int) bool {
return V(l)
}
// Component creates a new component and returns it for logging. If a component
// with the name already exists, nothing will be created and it will be
// returned. SetLoggerV2 will panic if it is called with a logger created by
// Component.
func Component(componentName string) DepthLoggerV2 {
if cData, ok := cache[componentName]; ok {
return cData
}
c := &componentData{componentName}
cache[componentName] = c
return c
}
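A hedged sketch of Component: the name is an arbitrary example, and with the default logger the Infof call is only visible when GRPC_GO_LOG_SEVERITY_LEVEL is set to info:

package main

import "google.golang.org/grpc/grpclog"

func main() {
	logger := grpclog.Component("example") // reused if the name is already registered
	logger.Infof("starting %s", "demo")
	logger.Warning("this goes to the WARNING log, prefixed with [example]")
}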

132
vendor/google.golang.org/grpc/grpclog/grpclog.go generated vendored Normal file
View file

@ -0,0 +1,132 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package grpclog defines logging for grpc.
//
// All logs in transport and grpclb packages only go to verbose level 2.
// All logs in other packages in grpc are logged regardless of the verbosity level.
//
// In the default logger,
// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL,
// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL.
package grpclog // import "google.golang.org/grpc/grpclog"
import (
"os"
"google.golang.org/grpc/internal/grpclog"
)
func init() {
SetLoggerV2(newLoggerV2())
}
// V reports whether verbosity level l is at least the requested verbose level.
func V(l int) bool {
return grpclog.Logger.V(l)
}
// Info logs to the INFO log.
func Info(args ...interface{}) {
grpclog.Logger.Info(args...)
}
// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
func Infof(format string, args ...interface{}) {
grpclog.Logger.Infof(format, args...)
}
// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
func Infoln(args ...interface{}) {
grpclog.Logger.Infoln(args...)
}
// Warning logs to the WARNING log.
func Warning(args ...interface{}) {
grpclog.Logger.Warning(args...)
}
// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
func Warningf(format string, args ...interface{}) {
grpclog.Logger.Warningf(format, args...)
}
// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
func Warningln(args ...interface{}) {
grpclog.Logger.Warningln(args...)
}
// Error logs to the ERROR log.
func Error(args ...interface{}) {
grpclog.Logger.Error(args...)
}
// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
func Errorf(format string, args ...interface{}) {
grpclog.Logger.Errorf(format, args...)
}
// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
func Errorln(args ...interface{}) {
grpclog.Logger.Errorln(args...)
}
// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
// It calls os.Exit() with exit code 1.
func Fatal(args ...interface{}) {
grpclog.Logger.Fatal(args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
// It calls os.Exit() with exit code 1.
func Fatalf(format string, args ...interface{}) {
grpclog.Logger.Fatalf(format, args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
// It calls os.Exit() with exit code 1.
func Fatalln(args ...interface{}) {
grpclog.Logger.Fatalln(args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
//
// Deprecated: use Info.
func Print(args ...interface{}) {
grpclog.Logger.Info(args...)
}
// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
//
// Deprecated: use Infof.
func Printf(format string, args ...interface{}) {
grpclog.Logger.Infof(format, args...)
}
// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
//
// Deprecated: use Infoln.
func Println(args ...interface{}) {
grpclog.Logger.Infoln(args...)
}

87
vendor/google.golang.org/grpc/grpclog/logger.go generated vendored Normal file
View file

@ -0,0 +1,87 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpclog
import "google.golang.org/grpc/internal/grpclog"
// Logger mimics golang's standard Logger as an interface.
//
// Deprecated: use LoggerV2.
type Logger interface {
Fatal(args ...interface{})
Fatalf(format string, args ...interface{})
Fatalln(args ...interface{})
Print(args ...interface{})
Printf(format string, args ...interface{})
Println(args ...interface{})
}
// SetLogger sets the logger that is used in grpc. Call only from
// init() functions.
//
// Deprecated: use SetLoggerV2.
func SetLogger(l Logger) {
grpclog.Logger = &loggerWrapper{Logger: l}
}
// loggerWrapper wraps Logger into a LoggerV2.
type loggerWrapper struct {
Logger
}
func (g *loggerWrapper) Info(args ...interface{}) {
g.Logger.Print(args...)
}
func (g *loggerWrapper) Infoln(args ...interface{}) {
g.Logger.Println(args...)
}
func (g *loggerWrapper) Infof(format string, args ...interface{}) {
g.Logger.Printf(format, args...)
}
func (g *loggerWrapper) Warning(args ...interface{}) {
g.Logger.Print(args...)
}
func (g *loggerWrapper) Warningln(args ...interface{}) {
g.Logger.Println(args...)
}
func (g *loggerWrapper) Warningf(format string, args ...interface{}) {
g.Logger.Printf(format, args...)
}
func (g *loggerWrapper) Error(args ...interface{}) {
g.Logger.Print(args...)
}
func (g *loggerWrapper) Errorln(args ...interface{}) {
g.Logger.Println(args...)
}
func (g *loggerWrapper) Errorf(format string, args ...interface{}) {
g.Logger.Printf(format, args...)
}
func (g *loggerWrapper) V(l int) bool {
// Returns true for all verbosity levels.
return true
}

258
vendor/google.golang.org/grpc/grpclog/loggerv2.go generated vendored Normal file
View file

@ -0,0 +1,258 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpclog
import (
"encoding/json"
"fmt"
"io"
"log"
"os"
"strconv"
"strings"
"google.golang.org/grpc/internal/grpclog"
)
// LoggerV2 does underlying logging work for grpclog.
type LoggerV2 interface {
// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
Info(args ...interface{})
// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
Infoln(args ...interface{})
// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
Infof(format string, args ...interface{})
// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
Warning(args ...interface{})
// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
Warningln(args ...interface{})
// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
Warningf(format string, args ...interface{})
// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
Error(args ...interface{})
// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
Errorln(args ...interface{})
// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
Errorf(format string, args ...interface{})
// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code.
Fatal(args ...interface{})
// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code.
Fatalln(args ...interface{})
// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code.
Fatalf(format string, args ...interface{})
// V reports whether verbosity level l is at least the requested verbose level.
V(l int) bool
}
// SetLoggerV2 sets logger that is used in grpc to a V2 logger.
// Not mutex-protected, should be called before any gRPC functions.
func SetLoggerV2(l LoggerV2) {
if _, ok := l.(*componentData); ok {
panic("cannot use component logger as grpclog logger")
}
grpclog.Logger = l
grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2)
}
const (
// infoLog indicates Info severity.
infoLog int = iota
// warningLog indicates Warning severity.
warningLog
// errorLog indicates Error severity.
errorLog
// fatalLog indicates Fatal severity.
fatalLog
)
// severityName contains the string representation of each severity.
var severityName = []string{
infoLog: "INFO",
warningLog: "WARNING",
errorLog: "ERROR",
fatalLog: "FATAL",
}
// loggerT is the default logger used by grpclog.
type loggerT struct {
m []*log.Logger
v int
jsonFormat bool
}
// NewLoggerV2 creates a loggerV2 with the provided writers.
// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1).
// Error logs will be written to errorW, warningW and infoW.
// Warning logs will be written to warningW and infoW.
// Info logs will be written to infoW.
func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{})
}
// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
// verbosity level.
func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v})
}
type loggerV2Config struct {
verbose int
jsonFormat bool
}
func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 {
var m []*log.Logger
flag := log.LstdFlags
if c.jsonFormat {
flag = 0
}
m = append(m, log.New(infoW, "", flag))
m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag))
ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
m = append(m, log.New(ew, "", flag))
m = append(m, log.New(ew, "", flag))
return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat}
}
// newLoggerV2 creates a loggerV2 to be used as default logger.
// All logs are written to stderr.
func newLoggerV2() LoggerV2 {
errorW := io.Discard
warningW := io.Discard
infoW := io.Discard
logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL")
switch logLevel {
case "", "ERROR", "error": // If env is unset, set level to ERROR.
errorW = os.Stderr
case "WARNING", "warning":
warningW = os.Stderr
case "INFO", "info":
infoW = os.Stderr
}
var v int
vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL")
if vl, err := strconv.Atoi(vLevel); err == nil {
v = vl
}
jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json")
return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{
verbose: v,
jsonFormat: jsonFormat,
})
}
func (g *loggerT) output(severity int, s string) {
sevStr := severityName[severity]
if !g.jsonFormat {
g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s))
return
}
// TODO: we can also include the logging component, but that needs more
// (API) changes.
b, _ := json.Marshal(map[string]string{
"severity": sevStr,
"message": s,
})
g.m[severity].Output(2, string(b))
}
func (g *loggerT) Info(args ...interface{}) {
g.output(infoLog, fmt.Sprint(args...))
}
func (g *loggerT) Infoln(args ...interface{}) {
g.output(infoLog, fmt.Sprintln(args...))
}
func (g *loggerT) Infof(format string, args ...interface{}) {
g.output(infoLog, fmt.Sprintf(format, args...))
}
func (g *loggerT) Warning(args ...interface{}) {
g.output(warningLog, fmt.Sprint(args...))
}
func (g *loggerT) Warningln(args ...interface{}) {
g.output(warningLog, fmt.Sprintln(args...))
}
func (g *loggerT) Warningf(format string, args ...interface{}) {
g.output(warningLog, fmt.Sprintf(format, args...))
}
func (g *loggerT) Error(args ...interface{}) {
g.output(errorLog, fmt.Sprint(args...))
}
func (g *loggerT) Errorln(args ...interface{}) {
g.output(errorLog, fmt.Sprintln(args...))
}
func (g *loggerT) Errorf(format string, args ...interface{}) {
g.output(errorLog, fmt.Sprintf(format, args...))
}
func (g *loggerT) Fatal(args ...interface{}) {
g.output(fatalLog, fmt.Sprint(args...))
os.Exit(1)
}
func (g *loggerT) Fatalln(args ...interface{}) {
g.output(fatalLog, fmt.Sprintln(args...))
os.Exit(1)
}
func (g *loggerT) Fatalf(format string, args ...interface{}) {
g.output(fatalLog, fmt.Sprintf(format, args...))
os.Exit(1)
}
func (g *loggerT) V(l int) bool {
return l <= g.v
}
// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
// DepthLoggerV2, the below functions will be called with the appropriate stack
// depth set for trivial functions the logger may ignore.
//
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type DepthLoggerV2 interface {
LoggerV2
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
InfoDepth(depth int, args ...interface{})
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
WarningDepth(depth int, args ...interface{})
// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
ErrorDepth(depth int, args ...interface{})
// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
FatalDepth(depth int, args ...interface{})
}
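The exported constructors shown above are enough to swap out gRPC's default logger. A minimal sketch, assuming an ordinary application main package; the writer choices and the verbosity value 2 are illustrative only:

package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func main() {
	// INFO goes to stdout; WARNING, ERROR and FATAL are additionally copied
	// to stderr via the io.MultiWriter chains built by newLoggerV2WithConfig.
	logger := grpclog.NewLoggerV2WithVerbosity(os.Stdout, os.Stderr, os.Stderr, 2)

	// As documented on SetLoggerV2, install the logger before any other gRPC
	// call; the global it writes is not mutex-protected.
	grpclog.SetLoggerV2(logger)

	grpclog.Infof("resolver ready, verbosity=%d", 2)
	if logger.V(2) { // true: the configured verbosity (2) is at least 2
		grpclog.Info("verbose detail enabled")
	}
}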

View file

@ -0,0 +1,49 @@
/*
* Copyright 2021 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
import (
"context"
)
// requestInfoKey is a struct to be used as the key to store RequestInfo in a
// context.
type requestInfoKey struct{}
// NewRequestInfoContext creates a context with ri.
func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context {
return context.WithValue(ctx, requestInfoKey{}, ri)
}
// RequestInfoFromContext extracts the RequestInfo from ctx.
func RequestInfoFromContext(ctx context.Context) interface{} {
return ctx.Value(requestInfoKey{})
}
// clientHandshakeInfoKey is a struct used as the key to store
// ClientHandshakeInfo in a context.
type clientHandshakeInfoKey struct{}
// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx.
func ClientHandshakeInfoFromContext(ctx context.Context) interface{} {
return ctx.Value(clientHandshakeInfoKey{})
}
// NewClientHandshakeInfoContext creates a context with chi.
func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context {
return context.WithValue(ctx, clientHandshakeInfoKey{}, chi)
}
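The file above is a textbook use of an unexported struct type as a context key: no other package can forge the key, so the stored value cannot collide. A self-contained sketch of the same pattern; requestInfo and its Method field are invented stand-ins (the real RequestInfo type lives in the public credentials package):

package main

import (
	"context"
	"fmt"
)

// requestInfoKey mirrors the unexported key type above; only code in this
// package can construct it, so only this package can read or write the value.
type requestInfoKey struct{}

// requestInfo is an illustrative stand-in for the real RequestInfo.
type requestInfo struct{ Method string }

func main() {
	ctx := context.WithValue(context.Background(), requestInfoKey{}, requestInfo{Method: "/pkg.Service/Call"})
	ri, ok := ctx.Value(requestInfoKey{}).(requestInfo)
	fmt.Println(ri.Method, ok) // /pkg.Service/Call true
}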

View file

@ -0,0 +1,75 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package credentials defines APIs for parsing SPIFFE ID.
//
// All APIs in this package are experimental.
package credentials
import (
"crypto/tls"
"crypto/x509"
"net/url"
"google.golang.org/grpc/grpclog"
)
var logger = grpclog.Component("credentials")
// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format
// is invalid, it returns nil and logs a warning.
func SPIFFEIDFromState(state tls.ConnectionState) *url.URL {
if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 {
return nil
}
return SPIFFEIDFromCert(state.PeerCertificates[0])
}
// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE
// ID format is invalid, it returns nil and logs a warning.
func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL {
if cert == nil || cert.URIs == nil {
return nil
}
var spiffeID *url.URL
for _, uri := range cert.URIs {
if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") {
continue
}
// From this point, we assume the uri is intended for a SPIFFE ID.
if len(uri.String()) > 2048 {
logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes")
return nil
}
if len(uri.Host) == 0 || len(uri.Path) == 0 {
logger.Warning("invalid SPIFFE ID: domain or workload ID is empty")
return nil
}
if len(uri.Host) > 255 {
logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters")
return nil
}
// A valid SPIFFE certificate can only have exactly one URI SAN field.
if len(cert.URIs) > 1 {
logger.Warning("invalid SPIFFE ID: multiple URI SANs")
return nil
}
spiffeID = uri
}
return spiffeID
}
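This file lives under grpc/internal and cannot be imported from outside the module, so the sketch below simply restates the acceptance rules of SPIFFEIDFromCert against a hand-built certificate; the trust domain and workload path are made up:

package main

import (
	"crypto/x509"
	"fmt"
	"net/url"
)

func main() {
	// A URI SAN that the checks above accept: scheme "spiffe", non-empty
	// host (trust domain) and path (workload ID), length well under 2048
	// bytes, and it is the certificate's only URI SAN.
	id, err := url.Parse("spiffe://example.org/workload/frontend")
	if err != nil {
		panic(err)
	}
	cert := &x509.Certificate{URIs: []*url.URL{id}}

	// Inside the gRPC module this would be SPIFFEIDFromCert(cert); the same
	// conditions are spelled out here for illustration.
	ok := len(cert.URIs) == 1 &&
		id.Scheme == "spiffe" && id.Opaque == "" &&
		id.Host != "" && id.Path != "" &&
		len(id.Host) <= 255 && len(id.String()) <= 2048
	fmt.Println(ok) // true
}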

View file

@ -0,0 +1,58 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package credentials
import (
"net"
"syscall"
)
type sysConn = syscall.Conn
// syscallConn keeps a reference to rawConn to support syscall.Conn for channelz.
// SyscallConn() (the method in interface syscall.Conn) is explicitly
// implemented on this type.
//
// Interface syscall.Conn is implemented by most net.Conn implementations (e.g.
// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns
// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn
// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't
// help here).
type syscallConn struct {
net.Conn
// sysConn is a type alias of syscall.Conn. It's necessary because the name
// `Conn` collides with `net.Conn`.
sysConn
}
// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that
// implements syscall.Conn. rawConn will be used to support syscall, and newConn
// will be used for read/write.
//
// This function returns newConn if rawConn doesn't implement syscall.Conn.
func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
sysConn, ok := rawConn.(syscall.Conn)
if !ok {
return newConn
}
return &syscallConn{
Conn: newConn,
sysConn: sysConn,
}
}
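A sketch of the situation WrapSyscallConn exists for: the raw *net.TCPConn satisfies syscall.Conn, but the *tls.Conn layered on top of it does not, so channelz would lose socket-level access without the wrapper. The loopback listener below is only there to make the snippet runnable:

package main

import (
	"crypto/tls"
	"fmt"
	"net"
	"syscall"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	go func() {
		if c, err := ln.Accept(); err == nil {
			c.Close()
		}
	}()

	raw, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		panic(err)
	}
	defer raw.Close()

	tlsConn := tls.Client(raw, &tls.Config{InsecureSkipVerify: true})

	_, rawOK := raw.(syscall.Conn) // true: *net.TCPConn has SyscallConn
	var layered net.Conn = tlsConn
	_, tlsOK := layered.(syscall.Conn) // false: *tls.Conn does not
	fmt.Println(rawOK, tlsOK)

	// Inside gRPC, WrapSyscallConn(raw, tlsConn) returns a conn that reads
	// and writes through tlsConn while still exposing raw's syscall.Conn.
}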

View file

@ -0,0 +1,52 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package credentials
import (
"crypto/tls"
)
const alpnProtoStrH2 = "h2"
// AppendH2ToNextProtos appends h2 to next protos.
func AppendH2ToNextProtos(ps []string) []string {
for _, p := range ps {
if p == alpnProtoStrH2 {
return ps
}
}
ret := make([]string, 0, len(ps)+1)
ret = append(ret, ps...)
return append(ret, alpnProtoStrH2)
}
// CloneTLSConfig returns a shallow clone of the exported
// fields of cfg, ignoring the unexported sync.Once, which
// contains a mutex and must not be copied.
//
// If cfg is nil, a new zero tls.Config is returned.
//
// TODO: inline this function if possible.
func CloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil {
return &tls.Config{}
}
return cfg.Clone()
}
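These helpers are also internal, so the sketch below reproduces their effect with a local appendH2 copy plus tls.Config.Clone; the base NextProtos value is illustrative:

package main

import (
	"crypto/tls"
	"fmt"
)

// appendH2 mirrors AppendH2ToNextProtos above: it returns ps unchanged when
// "h2" is already present, otherwise it appends "h2" to a copy so the
// caller's slice is never mutated.
func appendH2(ps []string) []string {
	for _, p := range ps {
		if p == "h2" {
			return ps
		}
	}
	ret := make([]string, 0, len(ps)+1)
	ret = append(ret, ps...)
	return append(ret, "h2")
}

func main() {
	base := &tls.Config{NextProtos: []string{"http/1.1"}}
	cfg := base.Clone() // same idea as CloneTLSConfig for a non-nil config
	cfg.NextProtos = appendH2(cfg.NextProtos)
	fmt.Println(base.NextProtos, cfg.NextProtos) // [http/1.1] [http/1.1 h2]
}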

View file

@ -0,0 +1,126 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package grpclog (internal) defines depth logging for grpc.
package grpclog
import (
"os"
)
// Logger is the logger used for the non-depth log functions.
var Logger LoggerV2
// DepthLogger is the logger used for the depth log functions.
var DepthLogger DepthLoggerV2
// InfoDepth logs to the INFO log at the specified depth.
func InfoDepth(depth int, args ...interface{}) {
if DepthLogger != nil {
DepthLogger.InfoDepth(depth, args...)
} else {
Logger.Infoln(args...)
}
}
// WarningDepth logs to the WARNING log at the specified depth.
func WarningDepth(depth int, args ...interface{}) {
if DepthLogger != nil {
DepthLogger.WarningDepth(depth, args...)
} else {
Logger.Warningln(args...)
}
}
// ErrorDepth logs to the ERROR log at the specified depth.
func ErrorDepth(depth int, args ...interface{}) {
if DepthLogger != nil {
DepthLogger.ErrorDepth(depth, args...)
} else {
Logger.Errorln(args...)
}
}
// FatalDepth logs to the FATAL log at the specified depth.
func FatalDepth(depth int, args ...interface{}) {
if DepthLogger != nil {
DepthLogger.FatalDepth(depth, args...)
} else {
Logger.Fatalln(args...)
}
os.Exit(1)
}
// LoggerV2 does underlying logging work for grpclog.
// This is a copy of the LoggerV2 defined in the external grpclog package. It
// is defined here to avoid a circular dependency.
type LoggerV2 interface {
// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
Info(args ...interface{})
// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
Infoln(args ...interface{})
// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
Infof(format string, args ...interface{})
// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
Warning(args ...interface{})
// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
Warningln(args ...interface{})
// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
Warningf(format string, args ...interface{})
// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
Error(args ...interface{})
// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
Errorln(args ...interface{})
// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
Errorf(format string, args ...interface{})
// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code.
Fatal(args ...interface{})
// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code.
Fatalln(args ...interface{})
// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code.
Fatalf(format string, args ...interface{})
// V reports whether the configured verbosity level is at least l.
V(l int) bool
}
// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
// DepthLoggerV2, the below functions will be called with the appropriate stack
// depth set for trivial functions the logger may ignore.
// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
// It is defined here to avoid a circular dependency.
//
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type DepthLoggerV2 interface {
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
InfoDepth(depth int, args ...interface{})
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
WarningDepth(depth int, args ...interface{})
// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
ErrorDepth(depth int, args ...interface{})
// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
FatalDepth(depth int, args ...interface{})
}
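The *Depth functions above exist so that a logging helper can report its caller's file and line rather than its own. A standard-library sketch of the same frame-skipping idea, built on log.Logger.Output's calldepth argument:

package main

import (
	"log"
	"os"
)

// logHelper logs on behalf of its caller. calldepth 2 skips logHelper's own
// frame, so the reported file:line is that of logHelper's caller; this is the
// same idea behind the depth parameter of the *Depth wrappers above.
func logHelper(l *log.Logger, msg string) {
	_ = l.Output(2, "INFO: "+msg)
}

func main() {
	l := log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)
	logHelper(l, "resolved 3 addresses") // prints main's line number, not logHelper's
}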

View file

@ -0,0 +1,93 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpclog
import (
"fmt"
)
// PrefixLogger does logging with a prefix.
//
// Calling a logging method on a nil *PrefixLogger logs without any prefix.
type PrefixLogger struct {
logger DepthLoggerV2
prefix string
}
// Infof does info logging.
func (pl *PrefixLogger) Infof(format string, args ...interface{}) {
if pl != nil {
// Handle nil, so the tests can pass in a nil logger.
format = pl.prefix + format
pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
return
}
InfoDepth(1, fmt.Sprintf(format, args...))
}
// Warningf does warning logging.
func (pl *PrefixLogger) Warningf(format string, args ...interface{}) {
if pl != nil {
format = pl.prefix + format
pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
return
}
WarningDepth(1, fmt.Sprintf(format, args...))
}
// Errorf does error logging.
func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
if pl != nil {
format = pl.prefix + format
pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
return
}
ErrorDepth(1, fmt.Sprintf(format, args...))
}
// Debugf does info logging at verbose level 2.
func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
// TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
// rewrite PrefixLogger a little to ensure that we don't use the global
// `Logger` here, and instead use the `logger` field.
if !Logger.V(2) {
return
}
if pl != nil {
// Handle nil, so the tests can pass in a nil logger.
format = pl.prefix + format
pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
return
}
InfoDepth(1, fmt.Sprintf(format, args...))
}
// V reports whether the configured verbosity level is at least l.
func (pl *PrefixLogger) V(l int) bool {
// TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
// rewrite PrefixLogger a little to ensure that we don't use the global
// `Logger` here, and instead use the `logger` field.
return Logger.V(l)
}
// NewPrefixLogger creates a prefix logger with the given prefix.
func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger {
return &PrefixLogger{logger: logger, prefix: prefix}
}
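A self-contained imitation of PrefixLogger built on the standard log package, mainly to show the nil-receiver behaviour documented above; the prefix string and messages are invented:

package main

import (
	"fmt"
	"log"
	"os"
)

var fallback = log.New(os.Stderr, "", log.LstdFlags)

// prefixLogger mirrors PrefixLogger above: a fixed prefix identifying a
// component is prepended before delegating, and a nil receiver still logs,
// just without the prefix, so callers and tests may pass a nil logger.
type prefixLogger struct {
	l      *log.Logger
	prefix string
}

func (p *prefixLogger) Infof(format string, args ...interface{}) {
	if p != nil {
		p.l.Print(p.prefix + fmt.Sprintf(format, args...))
		return
	}
	fallback.Print(fmt.Sprintf(format, args...))
}

func main() {
	pl := &prefixLogger{l: fallback, prefix: "[transport 1] "}
	pl.Infof("stream %d closed", 7) // ... [transport 1] stream 7 closed

	var nilPL *prefixLogger
	nilPL.Infof("no prefix here") // still logs, without a prefix
}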

194
vendor/google.golang.org/grpc/internal/internal.go generated vendored Normal file
View file

@ -0,0 +1,194 @@
/*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package internal contains gRPC-internal code, to avoid polluting
// the godoc of the top-level grpc package. It must not import any grpc
// symbols to avoid circular dependencies.
package internal
import (
"context"
"time"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/serviceconfig"
)
var (
// WithHealthCheckFunc is set by dialoptions.go
WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
// HealthCheckFunc is used to provide client-side LB channel health checking
HealthCheckFunc HealthChecker
// BalancerUnregister is exported by package balancer to unregister a balancer.
BalancerUnregister func(name string)
// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
// default, but tests may wish to set it lower for convenience.
KeepaliveMinPingTime = 10 * time.Second
// ParseServiceConfig parses a JSON representation of the service config.
ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult
// EqualServiceConfigForTesting is for testing service config generation and
// parsing. Both a and b should be returned by ParseServiceConfig.
// This function compares the config without rawJSON stripped, in case there
// are differences in white space.
EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
// GetCertificateProviderBuilder returns the registered builder for the
// given name. This is set by package certprovider for use from xDS
// bootstrap code while parsing certificate provider configs in the
// bootstrap file.
GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder
// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
// stored in the passed in attributes. This is set by
// credentials/xds/xds.go.
GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo
// GetServerCredentials returns the transport credentials configured on a
// gRPC server. An xDS-enabled server needs to know what type of credentials
// is configured on the underlying gRPC server. This is set by server.go.
GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials
// CanonicalString returns the canonical string of the code defined here:
// https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
CanonicalString interface{} // func (codes.Code) string
// DrainServerTransports initiates a graceful close of existing connections
// on a gRPC server accepted on the provided listener address. An
// xDS-enabled server invokes this method on a grpc.Server when a particular
// listener moves to "not-serving" mode.
DrainServerTransports interface{} // func(*grpc.Server, string)
// AddGlobalServerOptions adds an array of ServerOption that will be
// effective globally for newly created servers. The priority will be: 1.
// user-provided; 2. this method; 3. default values.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
AddGlobalServerOptions interface{} // func(opt ...ServerOption)
// ClearGlobalServerOptions clears the array of extra ServerOption. This
// method is useful in testing and benchmarking.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
ClearGlobalServerOptions func()
// AddGlobalDialOptions adds an array of DialOption that will be effective
// globally for newly created client channels. The priority will be: 1.
// user-provided; 2. this method; 3. default values.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
AddGlobalDialOptions interface{} // func(opt ...DialOption)
// DisableGlobalDialOptions returns a DialOption that prevents the
// ClientConn from applying the global DialOptions (set via
// AddGlobalDialOptions).
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
DisableGlobalDialOptions interface{} // func() grpc.DialOption
// ClearGlobalDialOptions clears the array of extra DialOption. This
// method is useful in testing and benchmarking.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
ClearGlobalDialOptions func()
// JoinDialOptions combines the dial options passed as arguments into a
// single dial option.
JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption
// JoinServerOptions combines the server options passed as arguments into a
// single server option.
JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption
// WithBinaryLogger returns a DialOption that specifies the binary logger
// for a ClientConn.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption
// BinaryLogger returns a ServerOption that can set the binary logger for a
// server.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption
// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
// the provided xds bootstrap config instead of the global configuration from
// the supported environment variables. The resolver.Builder is meant to be
// used in conjunction with the grpc.WithResolvers DialOption.
//
// Testing Only
//
// This function should ONLY be used for testing and may not work with some
// other features, including the CSDS service.
NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error)
// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
// Specifier Plugin for testing purposes, regardless of the XDSRLS environment
// variable.
//
// TODO: Remove this function once the RLS env var is removed.
RegisterRLSClusterSpecifierPluginForTesting func()
// UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster
// Specifier Plugin for testing purposes. This is needed because there is no way
// to unregister the RLS Cluster Specifier Plugin after registering it solely
// for testing purposes using RegisterRLSClusterSpecifierPluginForTesting().
//
// TODO: Remove this function once the RLS env var is removed.
UnregisterRLSClusterSpecifierPluginForTesting func()
// RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing
// purposes, regardless of the RBAC environment variable.
//
// TODO: Remove this function once the RBAC env var is removed.
RegisterRBACHTTPFilterForTesting func()
// UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for
// testing purposes. This is needed because there is no way to unregister the
// HTTP Filter after registering it solely for testing purposes using
// RegisterRBACHTTPFilterForTesting().
//
// TODO: Remove this function once the RBAC env var is removed.
UnregisterRBACHTTPFilterForTesting func()
// ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY.
ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions)
)
// HealthChecker defines the signature of the client-side LB channel health checking function.
//
// The implementation is expected to create a health checking RPC stream by
// calling newStream(), watch for the health status of serviceName, and report
// its health back by calling setConnectivityState().
//
// The health checking protocol is defined at:
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error
const (
// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
CredsBundleModeFallback = "fallback"
// CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer
// mode.
CredsBundleModeBalancer = "balancer"
// CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode
// that supports backend returned by grpclb balancer.
CredsBundleModeBackendFromBalancer = "backend-from-balancer"
)
// RLSLoadBalancingPolicyName is the name of the RLS LB policy.
//
// It currently has an experimental suffix which would be removed once
// end-to-end testing of the policy is completed.
const RLSLoadBalancingPolicyName = "rls_experimental"
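internal.go is essentially a bag of untyped hook variables: other gRPC packages assign their implementations during initialization, and callers type-assert to the signature recorded in each comment, which avoids import cycles. A generic, runnable sketch of that pattern; CanonicalStringHook and its signature are made up and not real gRPC symbols:

package main

import "fmt"

// CanonicalStringHook plays the role of one of the interface{} variables
// above; the trailing comment records the expected signature, exactly as
// internal.go does.
var CanonicalStringHook interface{} // func(code int) string

func init() {
	// In gRPC the assignment happens in the package that owns the real
	// implementation, which is what breaks the import cycle.
	CanonicalStringHook = func(code int) string {
		if code == 0 {
			return "OK"
		}
		return fmt.Sprintf("CODE(%d)", code)
	}
}

func main() {
	canonical := CanonicalStringHook.(func(code int) string)
	fmt.Println(canonical(0), canonical(5)) // OK CODE(5)
}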

View file

@ -0,0 +1,40 @@
/*
* Copyright 2021 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package internal
import (
"google.golang.org/grpc/attributes"
"google.golang.org/grpc/resolver"
)
// handshakeClusterNameKey is the type used as the key to store cluster name in
// the Attributes field of resolver.Address.
type handshakeClusterNameKey struct{}
// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field
// is updated with the cluster name.
func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address {
addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName)
return addr
}
// GetXDSHandshakeClusterName returns cluster name stored in attr.
func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) {
v := attr.Value(handshakeClusterNameKey{})
name, ok := v.(string)
return name, ok
}
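The two helpers above are a thin wrapper over the public attributes API on resolver.Address. A sketch of the same round trip; clusterKey and the values are illustrative, and inside gRPC the real key type is unexported so only that package can touch it:

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

// clusterKey plays the role of handshakeClusterNameKey above.
type clusterKey struct{}

func main() {
	addr := resolver.Address{Addr: "10.0.0.1:443"}

	// WithValue returns a new Attributes value rather than mutating in
	// place, which is why SetXDSHandshakeClusterName returns the updated
	// Address copy.
	addr.Attributes = addr.Attributes.WithValue(clusterKey{}, "cluster-a")

	name, ok := addr.Attributes.Value(clusterKey{}).(string)
	fmt.Println(name, ok) // cluster-a true
}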

138
vendor/google.golang.org/grpc/resolver/map.go generated vendored Normal file
View file

@ -0,0 +1,138 @@
/*
*
* Copyright 2021 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package resolver
type addressMapEntry struct {
addr Address
value interface{}
}
// AddressMap is a map of addresses to arbitrary values taking into account
// Attributes. BalancerAttributes are ignored, as are Metadata and Type.
// Multiple accesses may not be performed concurrently. Must be created via
// NewAddressMap; do not construct directly.
type AddressMap struct {
// The underlying map is keyed by an Address with fields that we don't care
// about being set to their zero values. The only fields that we care about
// are `Addr`, `ServerName` and `Attributes`. Since we need to be able to
// distinguish between addresses with same `Addr` and `ServerName`, but
// different `Attributes`, we cannot store the `Attributes` in the map key.
//
// The comparison operation for structs works as follows:
// Struct values are comparable if all their fields are comparable. Two
// struct values are equal if their corresponding non-blank fields are equal.
//
// The value type of the map contains a slice of addresses which match the key
// in their `Addr` and `ServerName` fields and contain the corresponding value
// associated with them.
m map[Address]addressMapEntryList
}
func toMapKey(addr *Address) Address {
return Address{Addr: addr.Addr, ServerName: addr.ServerName}
}
type addressMapEntryList []*addressMapEntry
// NewAddressMap creates a new AddressMap.
func NewAddressMap() *AddressMap {
return &AddressMap{m: make(map[Address]addressMapEntryList)}
}
// find returns the index of addr in the addressMapEntry slice, or -1 if not
// present.
func (l addressMapEntryList) find(addr Address) int {
for i, entry := range l {
// Attributes are the only thing to match on here, since `Addr` and
// `ServerName` are already equal.
if entry.addr.Attributes.Equal(addr.Attributes) {
return i
}
}
return -1
}
// Get returns the value for the address in the map, if present.
func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) {
addrKey := toMapKey(&addr)
entryList := a.m[addrKey]
if entry := entryList.find(addr); entry != -1 {
return entryList[entry].value, true
}
return nil, false
}
// Set updates or adds the value to the address in the map.
func (a *AddressMap) Set(addr Address, value interface{}) {
addrKey := toMapKey(&addr)
entryList := a.m[addrKey]
if entry := entryList.find(addr); entry != -1 {
entryList[entry].value = value
return
}
a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value})
}
// Delete removes addr from the map.
func (a *AddressMap) Delete(addr Address) {
addrKey := toMapKey(&addr)
entryList := a.m[addrKey]
entry := entryList.find(addr)
if entry == -1 {
return
}
if len(entryList) == 1 {
entryList = nil
} else {
copy(entryList[entry:], entryList[entry+1:])
entryList = entryList[:len(entryList)-1]
}
a.m[addrKey] = entryList
}
// Len returns the number of entries in the map.
func (a *AddressMap) Len() int {
ret := 0
for _, entryList := range a.m {
ret += len(entryList)
}
return ret
}
// Keys returns a slice of all current map keys.
func (a *AddressMap) Keys() []Address {
ret := make([]Address, 0, a.Len())
for _, entryList := range a.m {
for _, entry := range entryList {
ret = append(ret, entry.addr)
}
}
return ret
}
// Values returns a slice of all current map values.
func (a *AddressMap) Values() []interface{} {
ret := make([]interface{}, 0, a.Len())
for _, entryList := range a.m {
for _, entry := range entryList {
ret = append(ret, entry.value)
}
}
return ret
}
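A usage sketch for AddressMap, showing that two addresses sharing Addr and ServerName but carrying different Attributes are kept as separate entries; regionKey and the stored values are illustrative:

package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
	"google.golang.org/grpc/resolver"
)

type regionKey struct{}

func main() {
	m := resolver.NewAddressMap()

	a := resolver.Address{Addr: "10.0.0.1:443"}
	b := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New(regionKey{}, "us-east")}

	// Same Addr and ServerName, different Attributes: per the documentation
	// above these are two distinct keys.
	m.Set(a, "plain")
	m.Set(b, "tagged")

	v, ok := m.Get(b)
	fmt.Println(m.Len(), v, ok) // 2 tagged true
}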

330
vendor/google.golang.org/grpc/resolver/resolver.go generated vendored Normal file
View file

@ -0,0 +1,330 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package resolver defines APIs for name resolution in gRPC.
// All APIs in this package are experimental.
package resolver
import (
"context"
"fmt"
"net"
"net/url"
"strings"
"google.golang.org/grpc/attributes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/serviceconfig"
)
var (
// m is a map from scheme to resolver builder.
m = make(map[string]Builder)
// defaultScheme is the default scheme to use.
defaultScheme = "passthrough"
)
// TODO(bar) install dns resolver in init(){}.
// Register registers the resolver builder to the resolver map. b.Scheme will
// be used as the scheme registered with this builder. The registry is case
// sensitive, and schemes should not contain any uppercase characters.
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. If multiple Resolvers are
// registered with the same name, the one registered last will take effect.
func Register(b Builder) {
m[b.Scheme()] = b
}
// Get returns the resolver builder registered with the given scheme.
//
// If no builder is registered with the scheme, nil will be returned.
func Get(scheme string) Builder {
if b, ok := m[scheme]; ok {
return b
}
return nil
}
// SetDefaultScheme sets the default scheme that will be used. The initial
// default scheme is "passthrough".
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. The scheme set last overrides
// previously set values.
func SetDefaultScheme(scheme string) {
defaultScheme = scheme
}
// GetDefaultScheme gets the default scheme that will be used.
func GetDefaultScheme() string {
return defaultScheme
}
// AddressType indicates the address type returned by name resolution.
//
// Deprecated: use Attributes in Address instead.
type AddressType uint8
const (
// Backend indicates the address is for a backend server.
//
// Deprecated: use Attributes in Address instead.
Backend AddressType = iota
// GRPCLB indicates the address is for a grpclb load balancer.
//
// Deprecated: to select the GRPCLB load balancing policy, use a service
// config with a corresponding loadBalancingConfig. To supply balancer
// addresses to the GRPCLB load balancing policy, set State.Attributes
// using balancer/grpclb/state.Set.
GRPCLB
)
// Address represents a server the client connects to.
//
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type Address struct {
// Addr is the server address on which a connection will be established.
Addr string
// ServerName is the name of this address.
// If non-empty, the ServerName is used as the transport certification authority for
// the address, instead of the hostname from the Dial target string. In most cases,
// this should not be set.
//
// If Type is GRPCLB, ServerName should be the name of the remote load
// balancer, not the name of the backend.
//
// WARNING: ServerName must only be populated with trusted values. It
// is insecure to populate it with data from untrusted inputs since untrusted
// values could be used to bypass the authority checks performed by TLS.
ServerName string
// Attributes contains arbitrary data about this address intended for
// consumption by the SubConn.
Attributes *attributes.Attributes
// BalancerAttributes contains arbitrary data about this address intended
// for consumption by the LB policy. These attributes do not affect SubConn
// creation, connection establishment, handshaking, etc.
BalancerAttributes *attributes.Attributes
// Type is the type of this address.
//
// Deprecated: use Attributes instead.
Type AddressType
// Metadata is the information associated with Addr, which may be used
// to make load balancing decision.
//
// Deprecated: use Attributes instead.
Metadata interface{}
}
// Equal returns whether a and o are identical. Metadata is compared directly,
// not with any recursive introspection.
func (a Address) Equal(o Address) bool {
return a.Addr == o.Addr && a.ServerName == o.ServerName &&
a.Attributes.Equal(o.Attributes) &&
a.BalancerAttributes.Equal(o.BalancerAttributes) &&
a.Type == o.Type && a.Metadata == o.Metadata
}
// String returns JSON formatted string representation of the address.
func (a Address) String() string {
var sb strings.Builder
sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr))
sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName))
if a.Attributes != nil {
sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String()))
}
if a.BalancerAttributes != nil {
sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String()))
}
sb.WriteString("}")
return sb.String()
}
// BuildOptions includes additional information for the builder to create
// the resolver.
type BuildOptions struct {
// DisableServiceConfig indicates whether a resolver implementation should
// fetch service config data.
DisableServiceConfig bool
// DialCreds is the transport credentials used by the ClientConn for
// communicating with the target gRPC service (set via
// WithTransportCredentials). In cases where a name resolution service
// requires the same credentials, the resolver may use this field. In most
// cases though, it is not appropriate, and this field may be ignored.
DialCreds credentials.TransportCredentials
// CredsBundle is the credentials bundle used by the ClientConn for
// communicating with the target gRPC service (set via
// WithCredentialsBundle). In cases where a name resolution service
// requires the same credentials, the resolver may use this field. In most
// cases though, it is not appropriate, and this field may be ignored.
CredsBundle credentials.Bundle
// Dialer is the custom dialer used by the ClientConn for dialling the
// target gRPC service (set via WithDialer). In cases where a name
// resolution service requires the same dialer, the resolver may use this
// field. In most cases though, it is not appropriate, and this field may
// be ignored.
Dialer func(context.Context, string) (net.Conn, error)
}
// State contains the current Resolver state relevant to the ClientConn.
type State struct {
// Addresses is the latest set of resolved addresses for the target.
Addresses []Address
// ServiceConfig contains the result from parsing the latest service
// config. If it is nil, it indicates no service config is present or the
// resolver does not provide service configs.
ServiceConfig *serviceconfig.ParseResult
// Attributes contains arbitrary data about the resolver intended for
// consumption by the load balancing policy.
Attributes *attributes.Attributes
}
// ClientConn contains the callbacks for resolver to notify any updates
// to the gRPC ClientConn.
//
// This interface is to be implemented by gRPC. Users should not need a
// brand new implementation of this interface. For the situations like
// testing, the new implementation should embed this interface. This allows
// gRPC to add new methods to this interface.
type ClientConn interface {
// UpdateState updates the state of the ClientConn appropriately.
//
// If an error is returned, the resolver should try to resolve the
// target again. The resolver should use a backoff timer to prevent
// overloading the server with requests. If a resolver is certain that
// reresolving will not change the result, e.g. because it is
// a watch-based resolver, returned errors can be ignored.
//
// If the resolved State is the same as the last reported one, calling
// UpdateState can be omitted.
UpdateState(State) error
// ReportError notifies the ClientConn that the Resolver encountered an
// error. The ClientConn will notify the load balancer and begin calling
// ResolveNow on the Resolver with exponential backoff.
ReportError(error)
// NewAddress is called by resolver to notify ClientConn a new list
// of resolved addresses.
// The address list should be the complete list of resolved addresses.
//
// Deprecated: Use UpdateState instead.
NewAddress(addresses []Address)
// NewServiceConfig is called by resolver to notify ClientConn a new
// service config. The service config should be provided as a json string.
//
// Deprecated: Use UpdateState instead.
NewServiceConfig(serviceConfig string)
// ParseServiceConfig parses the provided service config and returns an
// object that provides the parsed config.
ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult
}
// Target represents a target for gRPC, as specified in:
// https://github.com/grpc/grpc/blob/master/doc/naming.md.
// It is parsed from the target string that gets passed into Dial or DialContext
// by the user, and gRPC passes it to the resolver and the balancer.
//
// If the target follows the naming spec, and the parsed scheme is registered
// with gRPC, we will parse the target string according to the spec. If the
// target does not contain a scheme or if the parsed scheme is not registered
// (i.e. no corresponding resolver available to resolve the endpoint), we will
// apply the default scheme, and will attempt to reparse it.
//
// Examples:
//
// - "dns://some_authority/foo.bar"
// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
// - "foo.bar"
// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}
// - "unknown_scheme://authority/endpoint"
// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}
type Target struct {
// Deprecated: use URL.Scheme instead.
Scheme string
// Deprecated: use URL.Host instead.
Authority string
// URL contains the parsed dial target with an optional default scheme added
// to it if the original dial target contained no scheme or contained an
// unregistered scheme. Any query params specified in the original dial
// target can be accessed from here.
URL url.URL
}
// Endpoint retrieves endpoint without leading "/" from either `URL.Path`
// or `URL.Opaque`. The latter is used when the former is empty.
func (t Target) Endpoint() string {
endpoint := t.URL.Path
if endpoint == "" {
endpoint = t.URL.Opaque
}
// For targets of the form "[scheme]://[authority]/endpoint", the endpoint
// value returned from url.Parse() contains a leading "/". Although this is
// in accordance with RFC 3986, we do not want to break existing resolver
// implementations which expect the endpoint without the leading "/". So, we
// end up stripping the leading "/" here. But this will result in an
// incorrect parsing for something like "unix:///path/to/socket". Since we
// own the "unix" resolver, we can workaround in the unix resolver by using
// the `URL` field.
return strings.TrimPrefix(endpoint, "/")
}
// Builder creates a resolver that will be used to watch name resolution updates.
type Builder interface {
// Build creates a new resolver for the given target.
//
// gRPC dial calls Build synchronously, and fails if the returned error is
// not nil.
Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error)
// Scheme returns the scheme supported by this resolver. Scheme is defined
// at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned
// string should not contain uppercase characters, as they will not match
// the parsed target's scheme as defined in RFC 3986.
Scheme() string
}
// ResolveNowOptions includes additional information for ResolveNow.
type ResolveNowOptions struct{}
// Resolver watches for the updates on the specified target.
// Updates include address updates and service config updates.
type Resolver interface {
// ResolveNow will be called by gRPC to try to resolve the target name
// again. It's just a hint; the resolver can ignore it if it's not necessary.
//
// It could be called multiple times concurrently.
ResolveNow(ResolveNowOptions)
// Close closes the resolver.
Close()
}
// UnregisterForTesting removes the resolver builder with the given scheme from the
// resolver map.
// This function is for testing only.
func UnregisterForTesting(scheme string) {
delete(m, scheme)
}
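A minimal Builder/Resolver pair for the interfaces above: a made-up "static" scheme whose Build pushes a fixed address list once and never re-resolves. Registration happens in init(), as the Register docs require; main only checks that the builder is visible through Get rather than dialling anything:

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

type staticResolver struct{}

func (staticResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (staticResolver) Close()                                {}

type staticBuilder struct{}

func (staticBuilder) Scheme() string { return "static" }

// Build pushes the endpoint from the dial target (for example
// "static:///10.0.0.1:443") to the ClientConn as the only address.
func (staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
	err := cc.UpdateState(resolver.State{
		Addresses: []resolver.Address{{Addr: target.Endpoint()}},
	})
	return staticResolver{}, err
}

func init() {
	resolver.Register(staticBuilder{})
}

func main() {
	fmt.Println(resolver.Get("static") != nil) // true
}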

View file

@ -0,0 +1,44 @@
/*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package serviceconfig defines types and methods for operating on gRPC
// service configs.
//
// # Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.
package serviceconfig
// Config represents an opaque data structure holding a service config.
type Config interface {
isServiceConfig()
}
// LoadBalancingConfig represents an opaque data structure holding a load
// balancing config.
type LoadBalancingConfig interface {
isLoadBalancingConfig()
}
// ParseResult contains a service config or an error. Exactly one must be
// non-nil.
type ParseResult struct {
Config Config
Err error
}

View file

@ -77,9 +77,18 @@ func FromProto(s *spb.Status) *Status {
// FromError returns a Status representation of err.
//
// - If err was produced by this package or implements the method `GRPCStatus()
// *Status`, the appropriate Status is returned.
// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type
// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped
// errors, the message returned contains the entire err.Error() text and not
// just the wrapped status. In that case, ok is true.
//
// - If err is nil, a Status is returned with codes.OK and no message.
// - If err is nil, a Status is returned with codes.OK and no message, and ok
// is true.
//
// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()`
// returns nil (which maps to codes.OK), or if err wraps a type
// satisfying this, a Status is returned with codes.Unknown and err's
// Error() message, and ok is false.
//
// - Otherwise, err is an error not compatible with this package. In this
// case, a Status is returned with codes.Unknown and err's Error() message,
@ -88,10 +97,29 @@ func FromError(err error) (s *Status, ok bool) {
if err == nil {
return nil, true
}
if se, ok := err.(interface {
GRPCStatus() *Status
}); ok {
return se.GRPCStatus(), true
type grpcstatus interface{ GRPCStatus() *Status }
if gs, ok := err.(grpcstatus); ok {
if gs.GRPCStatus() == nil {
// Error has status nil, which maps to codes.OK. There
// is no sensible behavior for this, so we turn it into
// an error with codes.Unknown and discard the existing
// status.
return New(codes.Unknown, err.Error()), false
}
return gs.GRPCStatus(), true
}
var gs grpcstatus
if errors.As(err, &gs) {
if gs.GRPCStatus() == nil {
// Error wraps an error that has status nil, which maps
// to codes.OK. There is no sensible behavior for this,
// so we turn it into an error with codes.Unknown and
// discard the existing status.
return New(codes.Unknown, err.Error()), false
}
p := gs.GRPCStatus().Proto()
p.Message = err.Error()
return status.FromProto(p), true
}
return New(codes.Unknown, err.Error()), false
}
@ -103,19 +131,16 @@ func Convert(err error) *Status {
return s
}
// Code returns the Code of the error if it is a Status error, codes.OK if err
// is nil, or codes.Unknown otherwise.
// Code returns the Code of the error if it is a Status error or if it wraps a
// Status error. If that is not the case, it returns codes.OK if err is nil, or
// codes.Unknown otherwise.
func Code(err error) codes.Code {
// Don't use FromError to avoid allocation of OK status.
if err == nil {
return codes.OK
}
if se, ok := err.(interface {
GRPCStatus() *Status
}); ok {
return se.GRPCStatus().Code()
}
return codes.Unknown
return Convert(err).Code()
}
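A quick sketch of the wrapped-error behaviour added above, using the public status and codes packages: the code is recovered through errors.As while the message keeps the full wrapped text:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	base := status.Error(codes.NotFound, "user 42 not found")
	wrapped := fmt.Errorf("lookup failed: %w", base)

	s, ok := status.FromError(wrapped)
	fmt.Println(ok, s.Code()) // true NotFound
	fmt.Println(s.Message())  // contains the entire wrapped err.Error() text

	fmt.Println(status.Code(wrapped)) // NotFound, via Convert as shown above
}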
// FromContextError converts a context error or wrapped context error into a

View file

@ -84,7 +84,7 @@ type decoder struct {
}
// newError returns an error object with position info.
func (d decoder) newError(pos int, f string, x ...interface{}) error {
func (d decoder) newError(pos int, f string, x ...any) error {
line, column := d.Position(pos)
head := fmt.Sprintf("(line %d:%d): ", line, column)
return errors.New(head+f, x...)
@ -96,7 +96,7 @@ func (d decoder) unexpectedTokenError(tok text.Token) error {
}
// syntaxError returns a syntax error for given position.
func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
func (d decoder) syntaxError(pos int, f string, x ...any) error {
line, column := d.Position(pos)
head := fmt.Sprintf("syntax error (line %d:%d): ", line, column)
return errors.New(head+f, x...)

View file

@ -27,15 +27,17 @@ const defaultIndent = " "
// Format formats the message as a multiline string.
// This function is only intended for human consumption and ignores errors.
// Do not depend on the output being stable. It may change over time across
// different versions of the program.
// Do not depend on the output being stable. Its output will change across
// different builds of your program, even when using the same version of the
// protobuf module.
func Format(m proto.Message) string {
return MarshalOptions{Multiline: true}.Format(m)
}
// Marshal writes the given [proto.Message] in textproto format using default
// options. Do not depend on the output being stable. It may change over time
// across different versions of the program.
// options. Do not depend on the output being stable. Its output will change
// across different builds of your program, even when using the same version of
// the protobuf module.
func Marshal(m proto.Message) ([]byte, error) {
return MarshalOptions{}.Marshal(m)
}
@ -84,8 +86,9 @@ type MarshalOptions struct {
// Format formats the message as a string.
// This method is only intended for human consumption and ignores errors.
// Do not depend on the output being stable. It may change over time across
// different versions of the program.
// Do not depend on the output being stable. Its output will change across
// different builds of your program, even when using the same version of the
// protobuf module.
func (o MarshalOptions) Format(m proto.Message) string {
if m == nil || !m.ProtoReflect().IsValid() {
return "<nil>" // invalid syntax, but okay since this is for debugging
@ -98,8 +101,9 @@ func (o MarshalOptions) Format(m proto.Message) string {
}
// Marshal writes the given [proto.Message] in textproto format using options in
// MarshalOptions object. Do not depend on the output being stable. It may
// change over time across different versions of the program.
// MarshalOptions object. Do not depend on the output being stable. Its output
// will change across different builds of your program, even when using the
// same version of the protobuf module.
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
return o.marshal(nil, m)
}
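A usage sketch for the documented instability: Format and Marshal output is fine for logs but should never be compared byte-for-byte; for round-tripping, pair Marshal with prototext.Unmarshal. durationpb is used here only as a convenient concrete message type:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	msg := durationpb.New(90 * time.Second)

	// Human-readable, but the exact bytes may differ between builds even on
	// the same protobuf module version, as the comments above warn.
	fmt.Println(prototext.Format(msg))

	b, err := prototext.Marshal(msg)
	if err != nil {
		panic(err)
	}
	var out durationpb.Duration
	if err := prototext.Unmarshal(b, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.AsDuration()) // 1m30s
}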

View file

@ -252,6 +252,7 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record fu
{rv.MethodByName("Values"), "Values"},
{rv.MethodByName("ReservedNames"), "ReservedNames"},
{rv.MethodByName("ReservedRanges"), "ReservedRanges"},
{rv.MethodByName("IsClosed"), "IsClosed"},
}...)
case protoreflect.EnumValueDescriptor:

View file

@ -0,0 +1,13 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package editionssupport defines constants for editions that are supported.
package editionssupport
import descriptorpb "google.golang.org/protobuf/types/descriptorpb"
const (
Minimum = descriptorpb.Edition_EDITION_PROTO2
Maximum = descriptorpb.Edition_EDITION_2023
)

View file

@ -32,6 +32,7 @@ var byteType = reflect.TypeOf(byte(0))
func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor {
f := new(filedesc.Field)
f.L0.ParentFile = filedesc.SurrogateProto2
f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures
for len(tag) > 0 {
i := strings.IndexByte(tag, ',')
if i < 0 {
@ -107,8 +108,7 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri
f.L1.StringName.InitJSON(jsonName)
}
case s == "packed":
f.L1.HasPacked = true
f.L1.IsPacked = true
f.L1.EditionFeatures.IsPacked = true
case strings.HasPrefix(s, "weak="):
f.L1.IsWeak = true
f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):]))

View file

@ -601,7 +601,7 @@ func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token {
// newSyntaxError returns a syntax error with line and column information for
// current position.
func (d *Decoder) newSyntaxError(f string, x ...interface{}) error {
func (d *Decoder) newSyntaxError(f string, x ...any) error {
e := errors.New(f, x...)
line, column := d.Position(len(d.orig) - len(d.in))
return errors.New("syntax error (line %d:%d): %v", line, column, e)

View file

@ -17,7 +17,7 @@ var Error = errors.New("protobuf error")
// New formats a string according to the format specifier and arguments and
// returns an error that has a "proto" prefix.
func New(f string, x ...interface{}) error {
func New(f string, x ...any) error {
return &prefixError{s: format(f, x...)}
}
@ -43,7 +43,7 @@ func (e *prefixError) Unwrap() error {
// Wrap returns an error that has a "proto" prefix, the formatted string described
// by the format specifier and arguments, and a suffix of err. The error wraps err.
func Wrap(err error, f string, x ...interface{}) error {
func Wrap(err error, f string, x ...any) error {
return &wrapError{
s: format(f, x...),
err: err,
@ -67,7 +67,7 @@ func (e *wrapError) Is(target error) bool {
return target == Error
}
func format(f string, x ...interface{}) string {
func format(f string, x ...any) string {
// avoid "proto: " prefix when chaining
for i := 0; i < len(x); i++ {
switch e := x[i].(type) {
@ -87,3 +87,18 @@ func InvalidUTF8(name string) error {
func RequiredNotSet(name string) error {
return New("required field %v not set", name)
}
type SizeMismatchError struct {
Calculated, Measured int
}
func (e *SizeMismatchError) Error() string {
return fmt.Sprintf("size mismatch (see https://github.com/golang/protobuf/issues/1609): calculated=%d, measured=%d", e.Calculated, e.Measured)
}
func MismatchedSizeCalculation(calculated, measured int) error {
return &SizeMismatchError{
Calculated: calculated,
Measured: measured,
}
}

View file

@ -7,6 +7,7 @@ package filedesc
import (
"bytes"
"fmt"
"strings"
"sync"
"sync/atomic"
@ -108,9 +109,12 @@ func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd }
func (fd *File) Parent() protoreflect.Descriptor { return nil }
func (fd *File) Index() int { return 0 }
func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax }
func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() }
func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package }
func (fd *File) IsPlaceholder() bool { return false }
// Not exported and just used to reconstruct the original FileDescriptor proto
func (fd *File) Edition() int32 { return int32(fd.L1.Edition) }
func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() }
func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package }
func (fd *File) IsPlaceholder() bool { return false }
func (fd *File) Options() protoreflect.ProtoMessage {
if f := fd.lazyInit().Options; f != nil {
return f()
@ -202,6 +206,9 @@ func (ed *Enum) lazyInit() *EnumL2 {
ed.L0.ParentFile.lazyInit() // implicitly initializes L2
return ed.L2
}
func (ed *Enum) IsClosed() bool {
return !ed.L1.EditionFeatures.IsOpenEnum
}
func (ed *EnumValue) Options() protoreflect.ProtoMessage {
if f := ed.L1.Options; f != nil {
@ -251,10 +258,6 @@ type (
StringName stringName
IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
IsWeak bool // promoted from google.protobuf.FieldOptions
HasPacked bool // promoted from google.protobuf.FieldOptions
IsPacked bool // promoted from google.protobuf.FieldOptions
HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions
EnforceUTF8 bool // promoted from google.protobuf.FieldOptions
Default defaultValue
ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
Enum protoreflect.EnumDescriptor
@ -331,8 +334,7 @@ func (fd *Field) HasPresence() bool {
if fd.L1.Cardinality == protoreflect.Repeated {
return false
}
explicitFieldPresence := fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsFieldPresence
return fd.Syntax() == protoreflect.Proto2 || explicitFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil
return fd.IsExtension() || fd.L1.EditionFeatures.IsFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil
}
func (fd *Field) HasOptionalKeyword() bool {
return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional
@ -345,14 +347,7 @@ func (fd *Field) IsPacked() bool {
case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind:
return false
}
if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions {
return fd.L1.EditionFeatures.IsPacked
}
if fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 {
// proto3 repeated fields are packed by default.
return !fd.L1.HasPacked || fd.L1.IsPacked
}
return fd.L1.IsPacked
return fd.L1.EditionFeatures.IsPacked
}
func (fd *Field) IsExtension() bool { return false }
func (fd *Field) IsWeak() bool { return fd.L1.IsWeak }
@ -388,6 +383,10 @@ func (fd *Field) Message() protoreflect.MessageDescriptor {
}
return fd.L1.Message
}
func (fd *Field) IsMapEntry() bool {
parent, ok := fd.L0.Parent.(protoreflect.MessageDescriptor)
return ok && parent.IsMapEntry()
}
func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) }
func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {}
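The new Field.IsMapEntry accessor climbs to the parent descriptor and asks whether it is a map-entry message; the comma-ok type assertion keeps fields whose parent is a file, not a message, from panicking. A toy sketch of the same pattern, with invented types standing in for descriptors:

package main

import "fmt"

// messageLike stands in for protoreflect.MessageDescriptor in this sketch.
type messageLike interface{ IsMapEntry() bool }

type message struct{ mapEntry bool }

func (m message) IsMapEntry() bool { return m.mapEntry }

// fieldIsMapEntry mirrors the new accessor: true only when the parent is a
// message descriptor and that message is a synthetic map entry.
func fieldIsMapEntry(parent any) bool {
	p, ok := parent.(messageLike) // comma-ok: the parent may be a file scope, not a message
	return ok && p.IsMapEntry()
}

func main() {
	fmt.Println(fieldIsMapEntry(message{mapEntry: true})) // true
	fmt.Println(fieldIsMapEntry("top-level file scope"))  // false
}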
@ -399,13 +398,7 @@ func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {}
// WARNING: This method is exempt from the compatibility promise and may be
// removed in the future without warning.
func (fd *Field) EnforceUTF8() bool {
if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions {
return fd.L1.EditionFeatures.IsUTF8Validated
}
if fd.L1.HasEnforceUTF8 {
return fd.L1.EnforceUTF8
}
return fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3
return fd.L1.EditionFeatures.IsUTF8Validated
}
func (od *Oneof) IsSynthetic() bool {
@ -438,7 +431,6 @@ type (
Options func() protoreflect.ProtoMessage
StringName stringName
IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
IsPacked bool // promoted from google.protobuf.FieldOptions
Default defaultValue
Enum protoreflect.EnumDescriptor
Message protoreflect.MessageDescriptor
@ -461,7 +453,16 @@ func (xd *Extension) HasPresence() bool { return xd.L1.Cardi
func (xd *Extension) HasOptionalKeyword() bool {
return (xd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && xd.L1.Cardinality == protoreflect.Optional) || xd.lazyInit().IsProto3Optional
}
func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked }
func (xd *Extension) IsPacked() bool {
if xd.L1.Cardinality != protoreflect.Repeated {
return false
}
switch xd.L1.Kind {
case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind:
return false
}
return xd.L1.EditionFeatures.IsPacked
}
func (xd *Extension) IsExtension() bool { return true }
func (xd *Extension) IsWeak() bool { return false }
func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated }
@ -542,8 +543,9 @@ func (md *Method) ProtoInternal(pragma.DoNotImplement) {}
// Surrogate files can be used to create standalone descriptors
// where the syntax is only information derived from the parent file.
var (
SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}}
SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}}
SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}}
SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}}
SurrogateEdition2023 = &File{L1: FileL1{Syntax: protoreflect.Editions, Edition: Edition2023}, L2: &FileL2{}}
)
type (
@ -585,6 +587,34 @@ func (s *stringName) InitJSON(name string) {
s.nameJSON = name
}
// Returns true if this field is structured like the synthetic field of a proto2
// group. This allows us to expand our treatment of delimited fields without
// breaking proto2 files that have been upgraded to editions.
func isGroupLike(fd protoreflect.FieldDescriptor) bool {
// Groups are always group types.
if fd.Kind() != protoreflect.GroupKind {
return false
}
// Group fields are always the lowercase type name.
if strings.ToLower(string(fd.Message().Name())) != string(fd.Name()) {
return false
}
// Groups can only be defined in the same file they're used in.
if fd.Message().ParentFile() != fd.ParentFile() {
return false
}
// Group messages are always defined in the same scope as the field. File
// level extensions will compare NULL == NULL here, which is why the file
// comparison above is necessary to ensure both come from the same file.
if fd.IsExtension() {
return fd.Parent() == fd.Message().Parent()
}
return fd.ContainingMessage() == fd.Message().Parent()
}
func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName {
s.once.Do(func() {
if fd.IsExtension() {
@ -605,7 +635,7 @@ func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName {
// Format the text name.
s.nameText = string(fd.Name())
if fd.Kind() == protoreflect.GroupKind {
if isGroupLike(fd) {
s.nameText = string(fd.Message().Name())
}
}
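isGroupLike, and the switch from a plain GroupKind check to isGroupLike(fd) when choosing the text name, hinge on the proto2 naming convention: a group field carries the lowercased name of its message type and both live in the same scope. The naming check in isolation, with plain strings standing in for descriptors (an illustration, not the vendored code):

package main

import (
	"fmt"
	"strings"
)

// looksLikeGroupName captures just the naming heuristic from isGroupLike: a
// legacy group field is named after its message type, lowercased.
func looksLikeGroupName(fieldName, messageName string) bool {
	return strings.ToLower(messageName) == fieldName
}

func main() {
	fmt.Println(looksLikeGroupName("mygroup", "MyGroup"))  // true: classic proto2 group
	fmt.Println(looksLikeGroupName("my_group", "MyGroup")) // false: underscores break the convention
}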

View file

@ -113,8 +113,10 @@ func (fd *File) unmarshalSeed(b []byte) {
switch string(v) {
case "proto2":
fd.L1.Syntax = protoreflect.Proto2
fd.L1.Edition = EditionProto2
case "proto3":
fd.L1.Syntax = protoreflect.Proto3
fd.L1.Edition = EditionProto3
case "editions":
fd.L1.Syntax = protoreflect.Editions
default:
@ -177,11 +179,10 @@ func (fd *File) unmarshalSeed(b []byte) {
// If syntax is missing, it is assumed to be proto2.
if fd.L1.Syntax == 0 {
fd.L1.Syntax = protoreflect.Proto2
fd.L1.Edition = EditionProto2
}
if fd.L1.Syntax == protoreflect.Editions {
fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition)
}
fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition)
// Parse editions features from options if any
if options != nil {
@ -267,6 +268,7 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protorefl
ed.L0.ParentFile = pf
ed.L0.Parent = pd
ed.L0.Index = i
ed.L1.EditionFeatures = featuresFromParentDesc(ed.Parent())
var numValues int
for b := b; len(b) > 0; {
@ -443,6 +445,7 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot
xd.L0.ParentFile = pf
xd.L0.Parent = pd
xd.L0.Index = i
xd.L1.EditionFeatures = featuresFromParentDesc(pd)
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
@ -467,6 +470,38 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot
xd.L0.FullName = appendFullName(sb, pd.FullName(), v)
case genid.FieldDescriptorProto_Extendee_field_number:
xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v))
case genid.FieldDescriptorProto_Options_field_number:
xd.unmarshalOptions(v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
if xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded {
xd.L1.Kind = protoreflect.GroupKind
}
}
func (xd *Extension) unmarshalOptions(b []byte) {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case genid.FieldOptions_Packed_field_number:
xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.FieldOptions_Features_field_number:
xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
@ -499,7 +534,7 @@ func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor
}
var nameBuilderPool = sync.Pool{
New: func() interface{} { return new(strs.Builder) },
New: func() any { return new(strs.Builder) },
}
func getBuilder() *strs.Builder {

View file

@ -45,6 +45,11 @@ func (file *File) resolveMessages() {
case protoreflect.MessageKind, protoreflect.GroupKind:
fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx)
depIdx++
if fd.L1.Kind == protoreflect.GroupKind && (fd.IsMap() || fd.IsMapEntry()) {
// A map field might inherit delimited encoding from a file-wide default feature.
// But maps never actually use delimited encoding. (At least for now...)
fd.L1.Kind = protoreflect.MessageKind
}
}
// Default is resolved here since it depends on Enum being resolved.
@ -466,10 +471,10 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref
b = b[m:]
}
}
if fd.Syntax() == protoreflect.Editions && fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded {
if fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded {
fd.L1.Kind = protoreflect.GroupKind
}
if fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsLegacyRequired {
if fd.L1.EditionFeatures.IsLegacyRequired {
fd.L1.Cardinality = protoreflect.Required
}
if rawTypeName != nil {
@ -496,13 +501,11 @@ func (fd *Field) unmarshalOptions(b []byte) {
b = b[m:]
switch num {
case genid.FieldOptions_Packed_field_number:
fd.L1.HasPacked = true
fd.L1.IsPacked = protowire.DecodeBool(v)
fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
case genid.FieldOptions_Weak_field_number:
fd.L1.IsWeak = protowire.DecodeBool(v)
case FieldOptions_EnforceUTF8:
fd.L1.HasEnforceUTF8 = true
fd.L1.EnforceUTF8 = protowire.DecodeBool(v)
fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
@ -548,7 +551,6 @@ func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref
func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
var rawTypeName []byte
var rawOptions []byte
xd.L1.EditionFeatures = featuresFromParentDesc(xd.L1.Extendee)
xd.L2 = new(ExtensionL2)
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
@ -572,7 +574,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
case genid.FieldDescriptorProto_TypeName_field_number:
rawTypeName = v
case genid.FieldDescriptorProto_Options_field_number:
xd.unmarshalOptions(v)
rawOptions = appendOptions(rawOptions, v)
}
default:
@ -580,12 +581,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
b = b[m:]
}
}
if xd.Syntax() == protoreflect.Editions && xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded {
xd.L1.Kind = protoreflect.GroupKind
}
if xd.Syntax() == protoreflect.Editions && xd.L1.EditionFeatures.IsLegacyRequired {
xd.L1.Cardinality = protoreflect.Required
}
if rawTypeName != nil {
name := makeFullName(sb, rawTypeName)
switch xd.L1.Kind {
@ -598,32 +593,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions)
}
func (xd *Extension) unmarshalOptions(b []byte) {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case genid.FieldOptions_Packed_field_number:
xd.L2.IsPacked = protowire.DecodeBool(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.FieldOptions_Features_field_number:
xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
}
func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) {
var rawMethods [][]byte
var rawOptions []byte

View file

@ -8,6 +8,7 @@ package filedesc
import (
"fmt"
"strings"
"sync"
"google.golang.org/protobuf/internal/descfmt"
@ -198,6 +199,16 @@ func (p *Fields) lazyInit() *Fields {
if _, ok := p.byText[d.TextName()]; !ok {
p.byText[d.TextName()] = d
}
if isGroupLike(d) {
lowerJSONName := strings.ToLower(d.JSONName())
if _, ok := p.byJSON[lowerJSONName]; !ok {
p.byJSON[lowerJSONName] = d
}
lowerTextName := strings.ToLower(d.TextName())
if _, ok := p.byText[lowerTextName]; !ok {
p.byText[lowerTextName] = d
}
}
if _, ok := p.byNum[d.Number()]; !ok {
p.byNum[d.Number()] = d
}
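For group-like fields the lookup tables now also register lowercased JSON and text names, so lookups resolve whether callers use the field name or the message-type spelling. A reduced sketch of that registration, using a plain map of invented shape:

package main

import (
	"fmt"
	"strings"
)

// registerAliases mirrors the new table population: the canonical name is
// always stored, and group-like fields additionally get a lowercased alias,
// never overwriting an entry that is already present.
func registerAliases(byName map[string]int, name string, num int, groupLike bool) {
	if _, ok := byName[name]; !ok {
		byName[name] = num
	}
	if !groupLike {
		return
	}
	lower := strings.ToLower(name)
	if _, ok := byName[lower]; !ok {
		byName[lower] = num
	}
}

func main() {
	byName := map[string]int{}
	registerAliases(byName, "MyGroup", 7, true)
	fmt.Println(byName["MyGroup"], byName["mygroup"]) // both resolve to field number 7
}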

View file

@ -14,9 +14,13 @@ import (
)
var defaultsCache = make(map[Edition]EditionFeatures)
var defaultsKeys = []Edition{}
func init() {
unmarshalEditionDefaults(editiondefaults.Defaults)
SurrogateProto2.L1.EditionFeatures = getFeaturesFor(EditionProto2)
SurrogateProto3.L1.EditionFeatures = getFeaturesFor(EditionProto3)
SurrogateEdition2023.L1.EditionFeatures = getFeaturesFor(Edition2023)
}
func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures {
@ -104,12 +108,15 @@ func unmarshalEditionDefault(b []byte) {
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number:
case genid.FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number:
fs = unmarshalFeatureSet(v, fs)
case genid.FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number:
fs = unmarshalFeatureSet(v, fs)
}
}
}
defaultsCache[ed] = fs
defaultsKeys = append(defaultsKeys, ed)
}
func unmarshalEditionDefaults(b []byte) {
@ -135,8 +142,15 @@ func unmarshalEditionDefaults(b []byte) {
}
func getFeaturesFor(ed Edition) EditionFeatures {
if def, ok := defaultsCache[ed]; ok {
return def
match := EditionUnknown
for _, key := range defaultsKeys {
if key > ed {
break
}
match = key
}
panic(fmt.Sprintf("unsupported edition: %v", ed))
if match == EditionUnknown {
panic(fmt.Sprintf("unsupported edition: %v", ed))
}
return defaultsCache[match]
}
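getFeaturesFor no longer requires an exact cache hit: it falls back to the defaults of the closest edition that is not newer than the requested one, and only panics when the edition predates everything it knows about. A standalone sketch of that lookup, assuming the known editions are kept in ascending order, as the defaults are appended:

package main

import "fmt"

// closestDefault walks the known editions in ascending order and keeps the
// last one that does not exceed the requested edition. ok is false when the
// requested edition is older than every known default.
func closestDefault(known []int, ed int) (match int, ok bool) {
	for _, key := range known {
		if key > ed {
			break
		}
		match, ok = key, true
	}
	return match, ok
}

func main() {
	known := []int{998, 999, 1000} // e.g. proto2, proto3, edition 2023 sentinels
	fmt.Println(closestDefault(known, 1001)) // 1000 true: the nearest earlier default wins
	fmt.Println(closestDefault(known, 900))  // 0 false: no default that old
}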

View file

@ -63,6 +63,7 @@ func (e PlaceholderEnum) Options() protoreflect.ProtoMessage { return des
func (e PlaceholderEnum) Values() protoreflect.EnumValueDescriptors { return emptyEnumValues }
func (e PlaceholderEnum) ReservedNames() protoreflect.Names { return emptyNames }
func (e PlaceholderEnum) ReservedRanges() protoreflect.EnumRanges { return emptyEnumRanges }
func (e PlaceholderEnum) IsClosed() bool { return false }
func (e PlaceholderEnum) ProtoType(protoreflect.EnumDescriptor) { return }
func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return }

View file

@ -68,7 +68,7 @@ type Builder struct {
// and for input and output messages referenced by service methods.
// Dependencies must come after declarations, but the ordering of
// dependencies themselves is unspecified.
GoTypes []interface{}
GoTypes []any
// DependencyIndexes is an ordered list of indexes into GoTypes for the
// dependencies of messages, extensions, or services.
@ -268,7 +268,7 @@ func (x depIdxs) Get(i, j int32) int32 {
type (
resolverByIndex struct {
goTypes []interface{}
goTypes []any
depIdxs depIdxs
fileRegistry
}

View file

@ -21,6 +21,7 @@ const (
// Enum values for google.protobuf.Edition.
const (
Edition_EDITION_UNKNOWN_enum_value = 0
Edition_EDITION_LEGACY_enum_value = 900
Edition_EDITION_PROTO2_enum_value = 998
Edition_EDITION_PROTO3_enum_value = 999
Edition_EDITION_2023_enum_value = 1000
@ -653,6 +654,7 @@ const (
FieldOptions_Targets_field_name protoreflect.Name = "targets"
FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults"
FieldOptions_Features_field_name protoreflect.Name = "features"
FieldOptions_FeatureSupport_field_name protoreflect.Name = "feature_support"
FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype"
@ -667,6 +669,7 @@ const (
FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets"
FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults"
FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features"
FieldOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.feature_support"
FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option"
)
@ -684,6 +687,7 @@ const (
FieldOptions_Targets_field_number protoreflect.FieldNumber = 19
FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20
FieldOptions_Features_field_number protoreflect.FieldNumber = 21
FieldOptions_FeatureSupport_field_number protoreflect.FieldNumber = 22
FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@ -767,6 +771,33 @@ const (
FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2
)
// Names for google.protobuf.FieldOptions.FeatureSupport.
const (
FieldOptions_FeatureSupport_message_name protoreflect.Name = "FeatureSupport"
FieldOptions_FeatureSupport_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport"
)
// Field names for google.protobuf.FieldOptions.FeatureSupport.
const (
FieldOptions_FeatureSupport_EditionIntroduced_field_name protoreflect.Name = "edition_introduced"
FieldOptions_FeatureSupport_EditionDeprecated_field_name protoreflect.Name = "edition_deprecated"
FieldOptions_FeatureSupport_DeprecationWarning_field_name protoreflect.Name = "deprecation_warning"
FieldOptions_FeatureSupport_EditionRemoved_field_name protoreflect.Name = "edition_removed"
FieldOptions_FeatureSupport_EditionIntroduced_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_introduced"
FieldOptions_FeatureSupport_EditionDeprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_deprecated"
FieldOptions_FeatureSupport_DeprecationWarning_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.deprecation_warning"
FieldOptions_FeatureSupport_EditionRemoved_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_removed"
)
// Field numbers for google.protobuf.FieldOptions.FeatureSupport.
const (
FieldOptions_FeatureSupport_EditionIntroduced_field_number protoreflect.FieldNumber = 1
FieldOptions_FeatureSupport_EditionDeprecated_field_number protoreflect.FieldNumber = 2
FieldOptions_FeatureSupport_DeprecationWarning_field_number protoreflect.FieldNumber = 3
FieldOptions_FeatureSupport_EditionRemoved_field_number protoreflect.FieldNumber = 4
)
// Names for google.protobuf.OneofOptions.
const (
OneofOptions_message_name protoreflect.Name = "OneofOptions"
@ -829,11 +860,13 @@ const (
EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated"
EnumValueOptions_Features_field_name protoreflect.Name = "features"
EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact"
EnumValueOptions_FeatureSupport_field_name protoreflect.Name = "feature_support"
EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated"
EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features"
EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact"
EnumValueOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.feature_support"
EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option"
)
@ -842,6 +875,7 @@ const (
EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1
EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2
EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3
EnumValueOptions_FeatureSupport_field_number protoreflect.FieldNumber = 4
EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@ -1110,17 +1144,20 @@ const (
// Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
const (
FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition"
FeatureSetDefaults_FeatureSetEditionDefault_Features_field_name protoreflect.Name = "features"
FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition"
FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_name protoreflect.Name = "overridable_features"
FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_name protoreflect.Name = "fixed_features"
FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition"
FeatureSetDefaults_FeatureSetEditionDefault_Features_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features"
FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition"
FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features"
FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features"
)
// Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
const (
FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3
FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number protoreflect.FieldNumber = 2
FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3
FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number protoreflect.FieldNumber = 4
FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number protoreflect.FieldNumber = 5
)
// Names for google.protobuf.SourceCodeInfo.

View file

@ -10,7 +10,7 @@ import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
)
const File_reflect_protodesc_proto_go_features_proto = "reflect/protodesc/proto/go_features.proto"
const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto"
// Names for google.protobuf.GoFeatures.
const (

View file

@ -22,13 +22,13 @@ type Export struct{}
// NewError formats a string according to the format specifier and arguments and
// returns an error that has a "proto" prefix.
func (Export) NewError(f string, x ...interface{}) error {
func (Export) NewError(f string, x ...any) error {
return errors.New(f, x...)
}
// enum is any enum type generated by protoc-gen-go
// and must be a named int32 type.
type enum = interface{}
type enum = any
// EnumOf returns the protoreflect.Enum interface over e.
// It returns nil if e is nil.
@ -81,7 +81,7 @@ func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNu
// message is any message type generated by protoc-gen-go
// and must be a pointer to a named struct type.
type message = interface{}
type message = any
// legacyMessageWrapper wraps a v2 message as a v1 message.
type legacyMessageWrapper struct{ m protoreflect.ProtoMessage }

View file

@ -68,7 +68,7 @@ func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error {
}
for _, x := range *ext {
ei := getExtensionFieldInfo(x.Type())
if ei.funcs.isInit == nil {
if ei.funcs.isInit == nil || x.isUnexpandedLazy() {
continue
}
v := x.Value()

View file

@ -99,6 +99,28 @@ func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool {
return false
}
// isUnexpandedLazy returns true if the ExtensionField is lazy and not
// yet expanded, which means it's present and already checked for
// initialized required fields.
func (f *ExtensionField) isUnexpandedLazy() bool {
return f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0
}
// lazyBuffer retrieves the buffer for a lazy extension if it's not yet expanded.
//
// The returned buffer has to be kept over whatever operation we're planning,
// as re-retrieving it will fail after the message is lazily decoded.
func (f *ExtensionField) lazyBuffer() []byte {
// This function might be in the critical path, so check the atomic without
// taking a lock first, then only take the lock if needed.
if !f.isUnexpandedLazy() {
return nil
}
f.lazy.mu.Lock()
defer f.lazy.mu.Unlock()
return f.lazy.b
}
func (f *ExtensionField) lazyInit() {
f.lazy.mu.Lock()
defer f.lazy.mu.Unlock()
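isUnexpandedLazy and lazyBuffer follow a double-checked pattern: a lock-free atomic says whether the extension has already been expanded, and the mutex is only taken while the raw wire bytes are still the source of truth. A simplified, self-contained sketch of that shape (field names here are invented):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// lazyField sketches the pattern: the atomic flag is checked first, cheaply,
// and the mutex is only acquired while the undecoded buffer is still valid.
type lazyField struct {
	expanded atomic.Uint32
	mu       sync.Mutex
	raw      []byte
}

func (f *lazyField) buffer() []byte {
	if f.expanded.Load() != 0 {
		return nil // already decoded; the raw bytes are no longer authoritative
	}
	f.mu.Lock()
	defer f.mu.Unlock()
	return f.raw
}

func (f *lazyField) expand() {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.raw = nil
	f.expanded.Store(1)
}

func main() {
	f := &lazyField{raw: []byte{0x0a, 0x03, 'a', 'b', 'c'}}
	fmt.Println(f.buffer()) // raw wire bytes are still available
	f.expand()
	fmt.Println(f.buffer()) // nil once expanded
}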

View file

@ -233,9 +233,15 @@ func sizeMessageInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int {
}
func appendMessageInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
calculatedSize := f.mi.sizePointer(p.Elem(), opts)
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendVarint(b, uint64(f.mi.sizePointer(p.Elem(), opts)))
return f.mi.marshalAppendPointer(b, p.Elem(), opts)
b = protowire.AppendVarint(b, uint64(calculatedSize))
before := len(b)
b, err := f.mi.marshalAppendPointer(b, p.Elem(), opts)
if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil {
return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize)
}
return b, err
}
func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
@ -262,14 +268,21 @@ func isInitMessageInfo(p pointer, f *coderFieldInfo) error {
return f.mi.checkInitializedPointer(p.Elem())
}
func sizeMessage(m proto.Message, tagsize int, _ marshalOptions) int {
return protowire.SizeBytes(proto.Size(m)) + tagsize
func sizeMessage(m proto.Message, tagsize int, opts marshalOptions) int {
return protowire.SizeBytes(opts.Options().Size(m)) + tagsize
}
func appendMessage(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) {
mopts := opts.Options()
calculatedSize := mopts.Size(m)
b = protowire.AppendVarint(b, wiretag)
b = protowire.AppendVarint(b, uint64(proto.Size(m)))
return opts.Options().MarshalAppend(b, m)
b = protowire.AppendVarint(b, uint64(calculatedSize))
before := len(b)
b, err := mopts.MarshalAppend(b, m)
if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil {
return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize)
}
return b, err
}
func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) {
@ -405,8 +418,8 @@ func consumeGroupType(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf
return f.mi.unmarshalPointer(b, p.Elem(), f.num, opts)
}
func sizeGroup(m proto.Message, tagsize int, _ marshalOptions) int {
return 2*tagsize + proto.Size(m)
func sizeGroup(m proto.Message, tagsize int, opts marshalOptions) int {
return 2*tagsize + opts.Options().Size(m)
}
func appendGroup(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) {
@ -482,10 +495,14 @@ func appendMessageSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshal
b = protowire.AppendVarint(b, f.wiretag)
siz := f.mi.sizePointer(v, opts)
b = protowire.AppendVarint(b, uint64(siz))
before := len(b)
b, err = f.mi.marshalAppendPointer(b, v, opts)
if err != nil {
return b, err
}
if measuredSize := len(b) - before; siz != measuredSize {
return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
}
}
return b, nil
}
@ -520,28 +537,34 @@ func isInitMessageSliceInfo(p pointer, f *coderFieldInfo) error {
return nil
}
func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, _ marshalOptions) int {
func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, opts marshalOptions) int {
mopts := opts.Options()
s := p.PointerSlice()
n := 0
for _, v := range s {
m := asMessage(v.AsValueOf(goType.Elem()))
n += protowire.SizeBytes(proto.Size(m)) + tagsize
n += protowire.SizeBytes(mopts.Size(m)) + tagsize
}
return n
}
func appendMessageSlice(b []byte, p pointer, wiretag uint64, goType reflect.Type, opts marshalOptions) ([]byte, error) {
mopts := opts.Options()
s := p.PointerSlice()
var err error
for _, v := range s {
m := asMessage(v.AsValueOf(goType.Elem()))
b = protowire.AppendVarint(b, wiretag)
siz := proto.Size(m)
siz := mopts.Size(m)
b = protowire.AppendVarint(b, uint64(siz))
b, err = opts.Options().MarshalAppend(b, m)
before := len(b)
b, err = mopts.MarshalAppend(b, m)
if err != nil {
return b, err
}
if measuredSize := len(b) - before; siz != measuredSize {
return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
}
}
return b, nil
}
@ -582,11 +605,12 @@ func isInitMessageSlice(p pointer, goType reflect.Type) error {
// Slices of messages
func sizeMessageSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int {
mopts := opts.Options()
list := listv.List()
n := 0
for i, llen := 0, list.Len(); i < llen; i++ {
m := list.Get(i).Message().Interface()
n += protowire.SizeBytes(proto.Size(m)) + tagsize
n += protowire.SizeBytes(mopts.Size(m)) + tagsize
}
return n
}
@ -597,13 +621,17 @@ func appendMessageSliceValue(b []byte, listv protoreflect.Value, wiretag uint64,
for i, llen := 0, list.Len(); i < llen; i++ {
m := list.Get(i).Message().Interface()
b = protowire.AppendVarint(b, wiretag)
siz := proto.Size(m)
siz := mopts.Size(m)
b = protowire.AppendVarint(b, uint64(siz))
before := len(b)
var err error
b, err = mopts.MarshalAppend(b, m)
if err != nil {
return b, err
}
if measuredSize := len(b) - before; siz != measuredSize {
return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
}
}
return b, nil
}
@ -651,11 +679,12 @@ var coderMessageSliceValue = valueCoderFuncs{
}
func sizeGroupSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int {
mopts := opts.Options()
list := listv.List()
n := 0
for i, llen := 0, list.Len(); i < llen; i++ {
m := list.Get(i).Message().Interface()
n += 2*tagsize + proto.Size(m)
n += 2*tagsize + mopts.Size(m)
}
return n
}
@ -738,12 +767,13 @@ func makeGroupSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type)
}
}
func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, _ marshalOptions) int {
func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, opts marshalOptions) int {
mopts := opts.Options()
s := p.PointerSlice()
n := 0
for _, v := range s {
m := asMessage(v.AsValueOf(messageType.Elem()))
n += 2*tagsize + proto.Size(m)
n += 2*tagsize + mopts.Size(m)
}
return n
}

View file

@ -9,6 +9,7 @@ import (
"sort"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/reflect/protoreflect"
)
@ -240,11 +241,16 @@ func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder
size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts)
size += mapi.valFuncs.size(val, mapValTagSize, opts)
b = protowire.AppendVarint(b, uint64(size))
before := len(b)
b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts)
if err != nil {
return nil, err
}
return mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts)
b, err = mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts)
if measuredSize := len(b) - before; size != measuredSize && err == nil {
return nil, errors.MismatchedSizeCalculation(size, measuredSize)
}
return b, err
} else {
key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey()
val := pointerOfValue(valrv)
@ -259,7 +265,12 @@ func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder
}
b = protowire.AppendVarint(b, mapi.valWiretag)
b = protowire.AppendVarint(b, uint64(valSize))
return f.mi.marshalAppendPointer(b, val, opts)
before := len(b)
b, err = f.mi.marshalAppendPointer(b, val, opts)
if measuredSize := len(b) - before; valSize != measuredSize && err == nil {
return nil, errors.MismatchedSizeCalculation(valSize, measuredSize)
}
return b, err
}
}

View file

@ -26,6 +26,15 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int)
}
num, _ := protowire.DecodeTag(xi.wiretag)
size += messageset.SizeField(num)
if fullyLazyExtensions(opts) {
// Don't expand the extension, instead use the buffer to calculate size
if lb := x.lazyBuffer(); lb != nil {
// We got hold of the buffer, so it's still lazy.
// Don't count the tag size in the extension buffer, it's already added.
size += protowire.SizeTag(messageset.FieldMessage) + len(lb) - xi.tagsize
continue
}
}
size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts)
}
@ -85,6 +94,19 @@ func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts ma
xi := getExtensionFieldInfo(x.Type())
num, _ := protowire.DecodeTag(xi.wiretag)
b = messageset.AppendFieldStart(b, num)
if fullyLazyExtensions(opts) {
// Don't expand the extension if it's still in wire format, instead use the buffer content.
if lb := x.lazyBuffer(); lb != nil {
// The tag inside the lazy buffer is a different tag (the extension
// number), but what we need here is the tag for FieldMessage:
b = protowire.AppendVarint(b, protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType))
b = append(b, lb[xi.tagsize:]...)
b = messageset.AppendFieldEnd(b)
return b, nil
}
}
b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts)
if err != nil {
return b, err

View file

@ -14,7 +14,7 @@ import (
// unwrapper unwraps the value to the underlying value.
// This is implemented by List and Map.
type unwrapper interface {
protoUnwrap() interface{}
protoUnwrap() any
}
// A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types.

View file

@ -136,6 +136,6 @@ func (ls *listReflect) NewElement() protoreflect.Value {
func (ls *listReflect) IsValid() bool {
return !ls.v.IsNil()
}
func (ls *listReflect) protoUnwrap() interface{} {
func (ls *listReflect) protoUnwrap() any {
return ls.v.Interface()
}

View file

@ -116,6 +116,6 @@ func (ms *mapReflect) NewValue() protoreflect.Value {
func (ms *mapReflect) IsValid() bool {
return !ms.v.IsNil()
}
func (ms *mapReflect) protoUnwrap() interface{} {
func (ms *mapReflect) protoUnwrap() any {
return ms.v.Interface()
}

Some files were not shown because too many files have changed in this diff.