update vendor

Signed-off-by: Jess Frazelle <acidburn@microsoft.com>

parent 19a32db84d
commit 94d1cfbfbf
10501 changed files with 2307943 additions and 29279 deletions
547 vendor/google.golang.org/grpc/benchmark/benchmain/main.go (generated, vendored, new file)
@@ -0,0 +1,547 @@
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
/*
|
||||
Package main provides a benchmark that is configured with command-line flags.
|
||||
|
||||
An example to run some benchmarks with profiling enabled:
|
||||
|
||||
go run benchmark/benchmain/main.go -benchtime=10s -workloads=all \
|
||||
-compression=on -maxConcurrentCalls=1 -trace=off \
|
||||
-reqSizeBytes=1,1048576 -respSizeBytes=1,1048576 -networkMode=Local \
|
||||
-cpuProfile=cpuProf -memProfile=memProf -memProfileRate=10000 -resultFile=result
|
||||
|
||||
As a suggestion, when creating a branch, you can run this benchmark and save the result
file with "-resultFile=basePerf"; later, while the work is in progress or once it is
finished, you can run the benchmark again and compare the result with the base at any time.
|
||||
|
||||
Assume there are two result files names as "basePerf" and "curPerf" created by adding
|
||||
-resultFile=basePerf and -resultFile=curPerf.
|
||||
To format the curPerf, run:
|
||||
go run benchmark/benchresult/main.go curPerf
|
||||
To observe how the performance changes based on a base result, run:
|
||||
go run benchmark/benchresult/main.go basePerf curPerf
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/gob"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
bm "google.golang.org/grpc/benchmark"
|
||||
testpb "google.golang.org/grpc/benchmark/grpc_testing"
|
||||
"google.golang.org/grpc/benchmark/latency"
|
||||
"google.golang.org/grpc/benchmark/stats"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/test/bufconn"
|
||||
)
|
||||
|
||||
const (
|
||||
modeOn = "on"
|
||||
modeOff = "off"
|
||||
modeBoth = "both"
|
||||
)
|
||||
|
||||
var allCompressionModes = []string{modeOn, modeOff, modeBoth}
|
||||
var allTraceModes = []string{modeOn, modeOff, modeBoth}
|
||||
|
||||
const (
|
||||
workloadsUnary = "unary"
|
||||
workloadsStreaming = "streaming"
|
||||
workloadsAll = "all"
|
||||
)
|
||||
|
||||
var allWorkloads = []string{workloadsUnary, workloadsStreaming, workloadsAll}
|
||||
|
||||
var (
|
||||
runMode = []bool{true, true} // {runUnary, runStream}
|
||||
// When the latency is set to 0 (no delay), the result is slower than a true no-delay run
// because the latency simulation path still performs extra operations.
|
||||
ltc = []time.Duration{0, 40 * time.Millisecond} // if non-positive, no delay.
|
||||
kbps = []int{0, 10240} // if non-positive, infinite
|
||||
mtu = []int{0} // if non-positive, infinite
|
||||
maxConcurrentCalls = []int{1, 8, 64, 512}
|
||||
reqSizeBytes = []int{1, 1024, 1024 * 1024}
|
||||
respSizeBytes = []int{1, 1024, 1024 * 1024}
|
||||
enableTrace []bool
|
||||
benchtime time.Duration
|
||||
memProfile, cpuProfile string
|
||||
memProfileRate int
|
||||
enableCompressor []bool
|
||||
enableChannelz []bool
|
||||
networkMode string
|
||||
benchmarkResultFile string
|
||||
networks = map[string]latency.Network{
|
||||
"Local": latency.Local,
|
||||
"LAN": latency.LAN,
|
||||
"WAN": latency.WAN,
|
||||
"Longhaul": latency.Longhaul,
|
||||
}
|
||||
)
|
||||
|
||||
func unaryBenchmark(startTimer func(), stopTimer func(int32), benchFeatures stats.Features, benchtime time.Duration, s *stats.Stats) {
|
||||
caller, cleanup := makeFuncUnary(benchFeatures)
|
||||
defer cleanup()
|
||||
runBenchmark(caller, startTimer, stopTimer, benchFeatures, benchtime, s)
|
||||
}
|
||||
|
||||
func streamBenchmark(startTimer func(), stopTimer func(int32), benchFeatures stats.Features, benchtime time.Duration, s *stats.Stats) {
|
||||
caller, cleanup := makeFuncStream(benchFeatures)
|
||||
defer cleanup()
|
||||
runBenchmark(caller, startTimer, stopTimer, benchFeatures, benchtime, s)
|
||||
}
|
||||
|
||||
func makeFuncUnary(benchFeatures stats.Features) (func(int), func()) {
|
||||
nw := &latency.Network{Kbps: benchFeatures.Kbps, Latency: benchFeatures.Latency, MTU: benchFeatures.Mtu}
|
||||
opts := []grpc.DialOption{}
|
||||
sopts := []grpc.ServerOption{}
|
||||
if benchFeatures.EnableCompressor {
|
||||
sopts = append(sopts,
|
||||
grpc.RPCCompressor(nopCompressor{}),
|
||||
grpc.RPCDecompressor(nopDecompressor{}),
|
||||
)
|
||||
opts = append(opts,
|
||||
grpc.WithCompressor(nopCompressor{}),
|
||||
grpc.WithDecompressor(nopDecompressor{}),
|
||||
)
|
||||
}
|
||||
sopts = append(sopts, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1)))
|
||||
opts = append(opts, grpc.WithInsecure())
|
||||
|
||||
var lis net.Listener
|
||||
if *useBufconn {
|
||||
bcLis := bufconn.Listen(256 * 1024)
|
||||
lis = bcLis
|
||||
opts = append(opts, grpc.WithDialer(func(string, time.Duration) (net.Conn, error) {
|
||||
return nw.TimeoutDialer(
|
||||
func(string, string, time.Duration) (net.Conn, error) {
|
||||
return bcLis.Dial()
|
||||
})("", "", 0)
|
||||
}))
|
||||
} else {
|
||||
var err error
|
||||
lis, err = net.Listen("tcp", "localhost:0")
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Failed to listen: %v", err)
|
||||
}
|
||||
opts = append(opts, grpc.WithDialer(func(_ string, timeout time.Duration) (net.Conn, error) {
|
||||
return nw.TimeoutDialer(net.DialTimeout)("tcp", lis.Addr().String(), timeout)
|
||||
}))
|
||||
}
|
||||
lis = nw.Listener(lis)
|
||||
stopper := bm.StartServer(bm.ServerInfo{Type: "protobuf", Listener: lis}, sopts...)
|
||||
conn := bm.NewClientConn("" /* target not used */, opts...)
|
||||
tc := testpb.NewBenchmarkServiceClient(conn)
|
||||
return func(int) {
|
||||
unaryCaller(tc, benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes)
|
||||
}, func() {
|
||||
conn.Close()
|
||||
stopper()
|
||||
}
|
||||
}
|
||||
|
||||
func makeFuncStream(benchFeatures stats.Features) (func(int), func()) {
|
||||
// TODO: Refactor to remove duplication with makeFuncUnary.
|
||||
nw := &latency.Network{Kbps: benchFeatures.Kbps, Latency: benchFeatures.Latency, MTU: benchFeatures.Mtu}
|
||||
opts := []grpc.DialOption{}
|
||||
sopts := []grpc.ServerOption{}
|
||||
if benchFeatures.EnableCompressor {
|
||||
sopts = append(sopts,
|
||||
grpc.RPCCompressor(grpc.NewGZIPCompressor()),
|
||||
grpc.RPCDecompressor(grpc.NewGZIPDecompressor()),
|
||||
)
|
||||
opts = append(opts,
|
||||
grpc.WithCompressor(grpc.NewGZIPCompressor()),
|
||||
grpc.WithDecompressor(grpc.NewGZIPDecompressor()),
|
||||
)
|
||||
}
|
||||
sopts = append(sopts, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1)))
|
||||
opts = append(opts, grpc.WithInsecure())
|
||||
|
||||
var lis net.Listener
|
||||
if *useBufconn {
|
||||
bcLis := bufconn.Listen(256 * 1024)
|
||||
lis = bcLis
|
||||
opts = append(opts, grpc.WithDialer(func(string, time.Duration) (net.Conn, error) {
|
||||
return nw.TimeoutDialer(
|
||||
func(string, string, time.Duration) (net.Conn, error) {
|
||||
return bcLis.Dial()
|
||||
})("", "", 0)
|
||||
}))
|
||||
} else {
|
||||
var err error
|
||||
lis, err = net.Listen("tcp", "localhost:0")
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Failed to listen: %v", err)
|
||||
}
|
||||
opts = append(opts, grpc.WithDialer(func(_ string, timeout time.Duration) (net.Conn, error) {
|
||||
return nw.TimeoutDialer(net.DialTimeout)("tcp", lis.Addr().String(), timeout)
|
||||
}))
|
||||
}
|
||||
lis = nw.Listener(lis)
|
||||
stopper := bm.StartServer(bm.ServerInfo{Type: "protobuf", Listener: lis}, sopts...)
|
||||
conn := bm.NewClientConn("" /* target not used */, opts...)
|
||||
tc := testpb.NewBenchmarkServiceClient(conn)
|
||||
streams := make([]testpb.BenchmarkService_StreamingCallClient, benchFeatures.MaxConcurrentCalls)
|
||||
for i := 0; i < benchFeatures.MaxConcurrentCalls; i++ {
|
||||
stream, err := tc.StreamingCall(context.Background())
|
||||
if err != nil {
|
||||
grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err)
|
||||
}
|
||||
streams[i] = stream
|
||||
}
|
||||
return func(pos int) {
|
||||
streamCaller(streams[pos], benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes)
|
||||
}, func() {
|
||||
conn.Close()
|
||||
stopper()
|
||||
}
|
||||
}
|
||||
|
||||
func unaryCaller(client testpb.BenchmarkServiceClient, reqSize, respSize int) {
|
||||
if err := bm.DoUnaryCall(client, reqSize, respSize); err != nil {
|
||||
grpclog.Fatalf("DoUnaryCall failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func streamCaller(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) {
|
||||
if err := bm.DoStreamingRoundTrip(stream, reqSize, respSize); err != nil {
|
||||
grpclog.Fatalf("DoStreamingRoundTrip failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func runBenchmark(caller func(int), startTimer func(), stopTimer func(int32), benchFeatures stats.Features, benchtime time.Duration, s *stats.Stats) {
|
||||
// Warm up connection.
|
||||
for i := 0; i < 10; i++ {
|
||||
caller(0)
|
||||
}
|
||||
// Run benchmark.
|
||||
startTimer()
|
||||
var (
|
||||
mu sync.Mutex
|
||||
wg sync.WaitGroup
|
||||
)
|
||||
wg.Add(benchFeatures.MaxConcurrentCalls)
|
||||
bmEnd := time.Now().Add(benchtime)
|
||||
var count int32
|
||||
for i := 0; i < benchFeatures.MaxConcurrentCalls; i++ {
|
||||
go func(pos int) {
|
||||
for {
|
||||
t := time.Now()
|
||||
if t.After(bmEnd) {
|
||||
break
|
||||
}
|
||||
start := time.Now()
|
||||
caller(pos)
|
||||
elapse := time.Since(start)
|
||||
atomic.AddInt32(&count, 1)
|
||||
mu.Lock()
|
||||
s.Add(elapse)
|
||||
mu.Unlock()
|
||||
}
|
||||
wg.Done()
|
||||
}(i)
|
||||
}
|
||||
wg.Wait()
|
||||
stopTimer(count)
|
||||
}
|
||||
|
||||
var useBufconn = flag.Bool("bufconn", false, "Use in-memory connection instead of system network I/O")
|
||||
|
||||
// init parses the command-line flags and derives the feature settings used by main.
|
||||
func init() {
|
||||
var (
|
||||
workloads, traceMode, compressorMode, readLatency, channelzOn string
|
||||
readKbps, readMtu, readMaxConcurrentCalls intSliceType
|
||||
readReqSizeBytes, readRespSizeBytes intSliceType
|
||||
)
|
||||
flag.StringVar(&workloads, "workloads", workloadsAll,
|
||||
fmt.Sprintf("Workloads to execute - One of: %v", strings.Join(allWorkloads, ", ")))
|
||||
flag.StringVar(&traceMode, "trace", modeOff,
|
||||
fmt.Sprintf("Trace mode - One of: %v", strings.Join(allTraceModes, ", ")))
|
||||
flag.StringVar(&readLatency, "latency", "", "Simulated one-way network latency - may be a comma-separated list")
|
||||
flag.StringVar(&channelzOn, "channelz", modeOff, "whether channelz should be turned on")
|
||||
flag.DurationVar(&benchtime, "benchtime", time.Second, "Configures the amount of time to run each benchmark")
|
||||
flag.Var(&readKbps, "kbps", "Simulated network throughput (in kbps) - may be a comma-separated list")
|
||||
flag.Var(&readMtu, "mtu", "Simulated network MTU (Maximum Transmission Unit) - may be a comma-separated list")
|
||||
flag.Var(&readMaxConcurrentCalls, "maxConcurrentCalls", "Number of concurrent RPCs during benchmarks")
|
||||
flag.Var(&readReqSizeBytes, "reqSizeBytes", "Request size in bytes - may be a comma-separated list")
|
||||
flag.Var(&readRespSizeBytes, "respSizeBytes", "Response size in bytes - may be a comma-separated list")
|
||||
flag.StringVar(&memProfile, "memProfile", "", "Enables memory profiling output to the filename provided.")
|
||||
flag.IntVar(&memProfileRate, "memProfileRate", 512*1024, "Configures the memory profiling rate. \n"+
|
||||
"memProfile should be set before setting profile rate. To include every allocated block in the profile, "+
|
||||
"set MemProfileRate to 1. To turn off profiling entirely, set MemProfileRate to 0. 512 * 1024 by default.")
|
||||
flag.StringVar(&cpuProfile, "cpuProfile", "", "Enables CPU profiling output to the filename provided")
|
||||
flag.StringVar(&compressorMode, "compression", modeOff,
|
||||
fmt.Sprintf("Compression mode - One of: %v", strings.Join(allCompressionModes, ", ")))
|
||||
flag.StringVar(&benchmarkResultFile, "resultFile", "", "Save the benchmark result into a binary file")
|
||||
flag.StringVar(&networkMode, "networkMode", "", "Network mode includes LAN, WAN, Local and Longhaul")
|
||||
flag.Parse()
|
||||
if flag.NArg() != 0 {
|
||||
log.Fatal("Error: unparsed arguments: ", flag.Args())
|
||||
}
|
||||
switch workloads {
|
||||
case workloadsUnary:
|
||||
runMode[0] = true
|
||||
runMode[1] = false
|
||||
case workloadsStreaming:
|
||||
runMode[0] = false
|
||||
runMode[1] = true
|
||||
case workloadsAll:
|
||||
runMode[0] = true
|
||||
runMode[1] = true
|
||||
default:
|
||||
log.Fatalf("Unknown workloads setting: %v (want one of: %v)",
|
||||
workloads, strings.Join(allWorkloads, ", "))
|
||||
}
|
||||
enableCompressor = setMode(compressorMode)
|
||||
enableTrace = setMode(traceMode)
|
||||
enableChannelz = setMode(channelzOn)
|
||||
// Time input formats as (time + unit).
|
||||
readTimeFromInput(&ltc, readLatency)
|
||||
readIntFromIntSlice(&kbps, readKbps)
|
||||
readIntFromIntSlice(&mtu, readMtu)
|
||||
readIntFromIntSlice(&maxConcurrentCalls, readMaxConcurrentCalls)
|
||||
readIntFromIntSlice(&reqSizeBytes, readReqSizeBytes)
|
||||
readIntFromIntSlice(&respSizeBytes, readRespSizeBytes)
|
||||
// Re-write latency, kbps and mtu if network mode is set.
|
||||
if network, ok := networks[networkMode]; ok {
|
||||
ltc = []time.Duration{network.Latency}
|
||||
kbps = []int{network.Kbps}
|
||||
mtu = []int{network.MTU}
|
||||
}
|
||||
}
|
||||
|
||||
func setMode(name string) []bool {
|
||||
switch name {
|
||||
case modeOn:
|
||||
return []bool{true}
|
||||
case modeOff:
|
||||
return []bool{false}
|
||||
case modeBoth:
|
||||
return []bool{false, true}
|
||||
default:
|
||||
log.Fatalf("Unknown %s setting: %v (want one of: %v)",
|
||||
name, name, strings.Join(allCompressionModes, ", "))
|
||||
return []bool{}
|
||||
}
|
||||
}
|
||||
|
||||
type intSliceType []int
|
||||
|
||||
func (intSlice *intSliceType) String() string {
|
||||
return fmt.Sprintf("%v", *intSlice)
|
||||
}
|
||||
|
||||
func (intSlice *intSliceType) Set(value string) error {
|
||||
if len(*intSlice) > 0 {
|
||||
return errors.New("interval flag already set")
|
||||
}
|
||||
for _, num := range strings.Split(value, ",") {
|
||||
next, err := strconv.Atoi(num)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*intSlice = append(*intSlice, next)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func readIntFromIntSlice(values *[]int, replace intSliceType) {
|
||||
// If the flag did not set replace, just return and run with the default settings.
|
||||
if len(replace) == 0 {
|
||||
return
|
||||
}
|
||||
*values = replace
|
||||
}
|
||||
|
||||
func readTimeFromInput(values *[]time.Duration, replace string) {
|
||||
if strings.Compare(replace, "") != 0 {
|
||||
*values = []time.Duration{}
|
||||
for _, ltc := range strings.Split(replace, ",") {
|
||||
duration, err := time.ParseDuration(ltc)
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
*values = append(*values, duration)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
before()
|
||||
featuresPos := make([]int, 9)
|
||||
// 0:enableTracing 1:ltc 2:kbps 3:mtu 4:maxC 5:reqSize 6:respSize 7:compressor 8:channelz
|
||||
featuresNum := []int{len(enableTrace), len(ltc), len(kbps), len(mtu),
|
||||
len(maxConcurrentCalls), len(reqSizeBytes), len(respSizeBytes), len(enableCompressor), len(enableChannelz)}
|
||||
initalPos := make([]int, len(featuresPos))
|
||||
s := stats.NewStats(10)
|
||||
s.SortLatency()
|
||||
var memStats runtime.MemStats
|
||||
var results testing.BenchmarkResult
|
||||
var startAllocs, startBytes uint64
|
||||
var startTime time.Time
|
||||
start := true
|
||||
var startTimer = func() {
|
||||
runtime.ReadMemStats(&memStats)
|
||||
startAllocs = memStats.Mallocs
|
||||
startBytes = memStats.TotalAlloc
|
||||
startTime = time.Now()
|
||||
}
|
||||
var stopTimer = func(count int32) {
|
||||
runtime.ReadMemStats(&memStats)
|
||||
results = testing.BenchmarkResult{N: int(count), T: time.Since(startTime),
|
||||
Bytes: 0, MemAllocs: memStats.Mallocs - startAllocs, MemBytes: memStats.TotalAlloc - startBytes}
|
||||
}
|
||||
sharedPos := make([]bool, len(featuresPos))
|
||||
for i := 0; i < len(featuresPos); i++ {
|
||||
if featuresNum[i] <= 1 {
|
||||
sharedPos[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Run benchmarks
|
||||
resultSlice := []stats.BenchResults{}
|
||||
for !reflect.DeepEqual(featuresPos, initalPos) || start {
|
||||
start = false
|
||||
benchFeature := stats.Features{
|
||||
NetworkMode: networkMode,
|
||||
EnableTrace: enableTrace[featuresPos[0]],
|
||||
Latency: ltc[featuresPos[1]],
|
||||
Kbps: kbps[featuresPos[2]],
|
||||
Mtu: mtu[featuresPos[3]],
|
||||
MaxConcurrentCalls: maxConcurrentCalls[featuresPos[4]],
|
||||
ReqSizeBytes: reqSizeBytes[featuresPos[5]],
|
||||
RespSizeBytes: respSizeBytes[featuresPos[6]],
|
||||
EnableCompressor: enableCompressor[featuresPos[7]],
|
||||
EnableChannelz: enableChannelz[featuresPos[8]],
|
||||
}
|
||||
|
||||
grpc.EnableTracing = enableTrace[featuresPos[0]]
|
||||
if enableChannelz[featuresPos[8]] {
|
||||
channelz.TurnOn()
|
||||
}
|
||||
if runMode[0] {
|
||||
unaryBenchmark(startTimer, stopTimer, benchFeature, benchtime, s)
|
||||
s.SetBenchmarkResult("Unary", benchFeature, results.N,
|
||||
results.AllocedBytesPerOp(), results.AllocsPerOp(), sharedPos)
|
||||
fmt.Println(s.BenchString())
|
||||
fmt.Println(s.String())
|
||||
resultSlice = append(resultSlice, s.GetBenchmarkResults())
|
||||
s.Clear()
|
||||
}
|
||||
if runMode[1] {
|
||||
streamBenchmark(startTimer, stopTimer, benchFeature, benchtime, s)
|
||||
s.SetBenchmarkResult("Stream", benchFeature, results.N,
|
||||
results.AllocedBytesPerOp(), results.AllocsPerOp(), sharedPos)
|
||||
fmt.Println(s.BenchString())
|
||||
fmt.Println(s.String())
|
||||
resultSlice = append(resultSlice, s.GetBenchmarkResults())
|
||||
s.Clear()
|
||||
}
|
||||
bm.AddOne(featuresPos, featuresNum)
|
||||
}
|
||||
after(resultSlice)
|
||||
}
|
||||
|
||||
func before() {
|
||||
if memProfile != "" {
|
||||
runtime.MemProfileRate = memProfileRate
|
||||
}
|
||||
if cpuProfile != "" {
|
||||
f, err := os.Create(cpuProfile)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "testing: %s\n", err)
|
||||
return
|
||||
}
|
||||
if err := pprof.StartCPUProfile(f); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "testing: can't start cpu profile: %s\n", err)
|
||||
f.Close()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func after(data []stats.BenchResults) {
|
||||
if cpuProfile != "" {
|
||||
pprof.StopCPUProfile() // flushes profile to disk
|
||||
}
|
||||
if memProfile != "" {
|
||||
f, err := os.Create(memProfile)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "testing: %s\n", err)
|
||||
os.Exit(2)
|
||||
}
|
||||
runtime.GC() // materialize all statistics
|
||||
if err = pprof.WriteHeapProfile(f); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "testing: can't write heap profile %s: %s\n", memProfile, err)
|
||||
os.Exit(2)
|
||||
}
|
||||
f.Close()
|
||||
}
|
||||
if benchmarkResultFile != "" {
|
||||
f, err := os.Create(benchmarkResultFile)
|
||||
if err != nil {
|
||||
log.Fatalf("testing: can't write benchmark result %s: %s\n", benchmarkResultFile, err)
|
||||
}
|
||||
dataEncoder := gob.NewEncoder(f)
|
||||
dataEncoder.Encode(data)
|
||||
f.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// nopCompressor is a compressor that just copies data.
|
||||
type nopCompressor struct{}
|
||||
|
||||
func (nopCompressor) Do(w io.Writer, p []byte) error {
|
||||
n, err := w.Write(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n != len(p) {
|
||||
return fmt.Errorf("nopCompressor.Write: wrote %v bytes; want %v", n, len(p))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (nopCompressor) Type() string { return "nop" }
|
||||
|
||||
// nopDecompressor is a decompressor that just copies data.
|
||||
type nopDecompressor struct{}
|
||||
|
||||
func (nopDecompressor) Do(r io.Reader) ([]byte, error) { return ioutil.ReadAll(r) }
|
||||
func (nopDecompressor) Type() string { return "nop" }
|
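Note on the result file: the "-resultFile" output written by after() above is a single gob-encoded []stats.BenchResults value. A minimal sketch of reading it back (it simply mirrors the reader in benchresult/main.go further down, and assumes the same stats package is on the import path):

package main

import (
	"encoding/gob"
	"fmt"
	"log"
	"os"

	"google.golang.org/grpc/benchmark/stats"
)

func main() {
	// Open a file produced by benchmain with -resultFile=<name>.
	f, err := os.Open(os.Args[1])
	if err != nil {
		log.Fatalf("open result file: %v", err)
	}
	defer f.Close()

	// The file holds a single gob-encoded []stats.BenchResults value,
	// exactly as written by after() above.
	var results []stats.BenchResults
	if err := gob.NewDecoder(f).Decode(&results); err != nil {
		log.Fatalf("decode result file: %v", err)
	}
	for _, r := range results {
		fmt.Println(r.RunMode, r.Features.String(), r.AllocedBytesPerOp, r.AllocsPerOp)
	}
}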
369 vendor/google.golang.org/grpc/benchmark/benchmark.go (generated, vendored, new file)
@@ -0,0 +1,369 @@
/*
|
||||
*
|
||||
* Copyright 2014 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
//go:generate protoc -I grpc_testing --go_out=plugins=grpc:grpc_testing grpc_testing/control.proto grpc_testing/messages.proto grpc_testing/payloads.proto grpc_testing/services.proto grpc_testing/stats.proto
|
||||
|
||||
/*
|
||||
Package benchmark implements the building blocks to setup end-to-end gRPC benchmarks.
|
||||
*/
|
||||
package benchmark
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
testpb "google.golang.org/grpc/benchmark/grpc_testing"
|
||||
"google.golang.org/grpc/benchmark/latency"
|
||||
"google.golang.org/grpc/benchmark/stats"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
// AddOne adds 1 to the features slice.
|
||||
func AddOne(features []int, featuresMaxPosition []int) {
|
||||
for i := len(features) - 1; i >= 0; i-- {
|
||||
features[i] = (features[i] + 1)
|
||||
if features[i]/featuresMaxPosition[i] == 0 {
|
||||
break
|
||||
}
|
||||
features[i] = features[i] % featuresMaxPosition[i]
|
||||
}
|
||||
}
|
||||
|
||||
// Allows reuse of the same testpb.Payload object.
|
||||
func setPayload(p *testpb.Payload, t testpb.PayloadType, size int) {
|
||||
if size < 0 {
|
||||
grpclog.Fatalf("Requested a response with invalid length %d", size)
|
||||
}
|
||||
body := make([]byte, size)
|
||||
switch t {
|
||||
case testpb.PayloadType_COMPRESSABLE:
|
||||
case testpb.PayloadType_UNCOMPRESSABLE:
|
||||
grpclog.Fatalf("PayloadType UNCOMPRESSABLE is not supported")
|
||||
default:
|
||||
grpclog.Fatalf("Unsupported payload type: %d", t)
|
||||
}
|
||||
p.Type = t
|
||||
p.Body = body
|
||||
}
|
||||
|
||||
func newPayload(t testpb.PayloadType, size int) *testpb.Payload {
|
||||
p := new(testpb.Payload)
|
||||
setPayload(p, t, size)
|
||||
return p
|
||||
}
|
||||
|
||||
type testServer struct {
|
||||
}
|
||||
|
||||
func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
|
||||
return &testpb.SimpleResponse{
|
||||
Payload: newPayload(in.ResponseType, int(in.ResponseSize)),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *testServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallServer) error {
|
||||
response := &testpb.SimpleResponse{
|
||||
Payload: new(testpb.Payload),
|
||||
}
|
||||
in := new(testpb.SimpleRequest)
|
||||
for {
|
||||
// use ServerStream directly to reuse the same testpb.SimpleRequest object
|
||||
err := stream.(grpc.ServerStream).RecvMsg(in)
|
||||
if err == io.EOF {
|
||||
// read done.
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
setPayload(response.Payload, in.ResponseType, int(in.ResponseSize))
|
||||
if err := stream.Send(response); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// byteBufServer is a gRPC server that sends and receives byte buffer.
|
||||
// The purpose is to benchmark the gRPC performance without protobuf serialization/deserialization overhead.
|
||||
type byteBufServer struct {
|
||||
respSize int32
|
||||
}
|
||||
|
||||
// UnaryCall is an empty function and is not used for benchmark.
|
||||
// If bytebuf UnaryCall benchmark is needed later, the function body needs to be updated.
|
||||
func (s *byteBufServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
|
||||
return &testpb.SimpleResponse{}, nil
|
||||
}
|
||||
|
||||
func (s *byteBufServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallServer) error {
|
||||
for {
|
||||
var in []byte
|
||||
err := stream.(grpc.ServerStream).RecvMsg(&in)
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
out := make([]byte, s.respSize)
|
||||
if err := stream.(grpc.ServerStream).SendMsg(&out); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ServerInfo contains the information to create a gRPC benchmark server.
|
||||
type ServerInfo struct {
|
||||
// Type is the type of the server.
|
||||
// It should be "protobuf" or "bytebuf".
|
||||
Type string
|
||||
|
||||
// Metadata is an optional configuration.
|
||||
// For "protobuf", it's ignored.
|
||||
// For "bytebuf", it should be an int representing response size.
|
||||
Metadata interface{}
|
||||
|
||||
// Listener is the network listener for the server to use
|
||||
Listener net.Listener
|
||||
}
|
||||
|
||||
// StartServer starts a gRPC server serving a benchmark service according to info.
|
||||
// It returns a function to stop the server.
|
||||
func StartServer(info ServerInfo, opts ...grpc.ServerOption) func() {
|
||||
opts = append(opts, grpc.WriteBufferSize(128*1024))
|
||||
opts = append(opts, grpc.ReadBufferSize(128*1024))
|
||||
s := grpc.NewServer(opts...)
|
||||
switch info.Type {
|
||||
case "protobuf":
|
||||
testpb.RegisterBenchmarkServiceServer(s, &testServer{})
|
||||
case "bytebuf":
|
||||
respSize, ok := info.Metadata.(int32)
|
||||
if !ok {
|
||||
grpclog.Fatalf("failed to StartServer, invalid metadata: %v, for Type: %v", info.Metadata, info.Type)
|
||||
}
|
||||
testpb.RegisterBenchmarkServiceServer(s, &byteBufServer{respSize: respSize})
|
||||
default:
|
||||
grpclog.Fatalf("failed to StartServer, unknown Type: %v", info.Type)
|
||||
}
|
||||
go s.Serve(info.Listener)
|
||||
return func() {
|
||||
s.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
// DoUnaryCall performs a unary RPC with the given stub and request and response sizes.
|
||||
func DoUnaryCall(tc testpb.BenchmarkServiceClient, reqSize, respSize int) error {
|
||||
pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize)
|
||||
req := &testpb.SimpleRequest{
|
||||
ResponseType: pl.Type,
|
||||
ResponseSize: int32(respSize),
|
||||
Payload: pl,
|
||||
}
|
||||
if _, err := tc.UnaryCall(context.Background(), req); err != nil {
|
||||
return fmt.Errorf("/BenchmarkService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DoStreamingRoundTrip performs a round trip for a single streaming rpc.
|
||||
func DoStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) error {
|
||||
pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize)
|
||||
req := &testpb.SimpleRequest{
|
||||
ResponseType: pl.Type,
|
||||
ResponseSize: int32(respSize),
|
||||
Payload: pl,
|
||||
}
|
||||
if err := stream.Send(req); err != nil {
|
||||
return fmt.Errorf("/BenchmarkService/StreamingCall.Send(_) = %v, want <nil>", err)
|
||||
}
|
||||
if _, err := stream.Recv(); err != nil {
|
||||
// EOF is a valid error here.
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("/BenchmarkService/StreamingCall.Recv(_) = %v, want <nil>", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DoByteBufStreamingRoundTrip performs a round trip for a single streaming rpc, using a custom codec for byte buffer.
|
||||
func DoByteBufStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) error {
|
||||
out := make([]byte, reqSize)
|
||||
if err := stream.(grpc.ClientStream).SendMsg(&out); err != nil {
|
||||
return fmt.Errorf("/BenchmarkService/StreamingCall.(ClientStream).SendMsg(_) = %v, want <nil>", err)
|
||||
}
|
||||
var in []byte
|
||||
if err := stream.(grpc.ClientStream).RecvMsg(&in); err != nil {
|
||||
// EOF is a valid error here.
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("/BenchmarkService/StreamingCall.(ClientStream).RecvMsg(_) = %v, want <nil>", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewClientConn creates a gRPC client connection to addr.
|
||||
func NewClientConn(addr string, opts ...grpc.DialOption) *grpc.ClientConn {
|
||||
return NewClientConnWithContext(context.Background(), addr, opts...)
|
||||
}
|
||||
|
||||
// NewClientConnWithContext creates a gRPC client connection to addr using ctx.
|
||||
func NewClientConnWithContext(ctx context.Context, addr string, opts ...grpc.DialOption) *grpc.ClientConn {
|
||||
opts = append(opts, grpc.WithWriteBufferSize(128*1024))
|
||||
opts = append(opts, grpc.WithReadBufferSize(128*1024))
|
||||
conn, err := grpc.DialContext(ctx, addr, opts...)
|
||||
if err != nil {
|
||||
grpclog.Fatalf("NewClientConn(%q) failed to create a ClientConn %v", addr, err)
|
||||
}
|
||||
return conn
|
||||
}
|
||||
|
||||
func runUnary(b *testing.B, benchFeatures stats.Features) {
|
||||
s := stats.AddStats(b, 38)
|
||||
nw := &latency.Network{Kbps: benchFeatures.Kbps, Latency: benchFeatures.Latency, MTU: benchFeatures.Mtu}
|
||||
lis, err := net.Listen("tcp", "localhost:0")
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Failed to listen: %v", err)
|
||||
}
|
||||
target := lis.Addr().String()
|
||||
lis = nw.Listener(lis)
|
||||
stopper := StartServer(ServerInfo{Type: "protobuf", Listener: lis}, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1)))
|
||||
defer stopper()
|
||||
conn := NewClientConn(
|
||||
target, grpc.WithInsecure(),
|
||||
grpc.WithDialer(func(address string, timeout time.Duration) (net.Conn, error) {
|
||||
return nw.TimeoutDialer(net.DialTimeout)("tcp", address, timeout)
|
||||
}),
|
||||
)
|
||||
tc := testpb.NewBenchmarkServiceClient(conn)
|
||||
|
||||
// Warm up connection.
|
||||
for i := 0; i < 10; i++ {
|
||||
unaryCaller(tc, benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes)
|
||||
}
|
||||
ch := make(chan int, benchFeatures.MaxConcurrentCalls*4)
|
||||
var (
|
||||
mu sync.Mutex
|
||||
wg sync.WaitGroup
|
||||
)
|
||||
wg.Add(benchFeatures.MaxConcurrentCalls)
|
||||
|
||||
// Distribute the b.N calls over maxConcurrentCalls workers.
|
||||
for i := 0; i < benchFeatures.MaxConcurrentCalls; i++ {
|
||||
go func() {
|
||||
for range ch {
|
||||
start := time.Now()
|
||||
unaryCaller(tc, benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes)
|
||||
elapse := time.Since(start)
|
||||
mu.Lock()
|
||||
s.Add(elapse)
|
||||
mu.Unlock()
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
ch <- i
|
||||
}
|
||||
close(ch)
|
||||
wg.Wait()
|
||||
b.StopTimer()
|
||||
conn.Close()
|
||||
}
|
||||
|
||||
func runStream(b *testing.B, benchFeatures stats.Features) {
|
||||
s := stats.AddStats(b, 38)
|
||||
nw := &latency.Network{Kbps: benchFeatures.Kbps, Latency: benchFeatures.Latency, MTU: benchFeatures.Mtu}
|
||||
lis, err := net.Listen("tcp", "localhost:0")
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Failed to listen: %v", err)
|
||||
}
|
||||
target := lis.Addr().String()
|
||||
lis = nw.Listener(lis)
|
||||
stopper := StartServer(ServerInfo{Type: "protobuf", Listener: lis}, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1)))
|
||||
defer stopper()
|
||||
conn := NewClientConn(
|
||||
target, grpc.WithInsecure(),
|
||||
grpc.WithDialer(func(address string, timeout time.Duration) (net.Conn, error) {
|
||||
return nw.TimeoutDialer(net.DialTimeout)("tcp", address, timeout)
|
||||
}),
|
||||
)
|
||||
tc := testpb.NewBenchmarkServiceClient(conn)
|
||||
|
||||
// Warm up connection.
|
||||
stream, err := tc.StreamingCall(context.Background())
|
||||
if err != nil {
|
||||
b.Fatalf("%v.StreamingCall(_) = _, %v", tc, err)
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
streamCaller(stream, benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes)
|
||||
}
|
||||
|
||||
ch := make(chan struct{}, benchFeatures.MaxConcurrentCalls*4)
|
||||
var (
|
||||
mu sync.Mutex
|
||||
wg sync.WaitGroup
|
||||
)
|
||||
wg.Add(benchFeatures.MaxConcurrentCalls)
|
||||
|
||||
// Distribute the b.N calls over maxConcurrentCalls workers.
|
||||
for i := 0; i < benchFeatures.MaxConcurrentCalls; i++ {
|
||||
stream, err := tc.StreamingCall(context.Background())
|
||||
if err != nil {
|
||||
b.Fatalf("%v.StreamingCall(_) = _, %v", tc, err)
|
||||
}
|
||||
go func() {
|
||||
for range ch {
|
||||
start := time.Now()
|
||||
streamCaller(stream, benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes)
|
||||
elapse := time.Since(start)
|
||||
mu.Lock()
|
||||
s.Add(elapse)
|
||||
mu.Unlock()
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
ch <- struct{}{}
|
||||
}
|
||||
close(ch)
|
||||
wg.Wait()
|
||||
b.StopTimer()
|
||||
conn.Close()
|
||||
}
|
||||
func unaryCaller(client testpb.BenchmarkServiceClient, reqSize, respSize int) {
|
||||
if err := DoUnaryCall(client, reqSize, respSize); err != nil {
|
||||
grpclog.Fatalf("DoUnaryCall failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func streamCaller(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) {
|
||||
if err := DoStreamingRoundTrip(stream, reqSize, respSize); err != nil {
|
||||
grpclog.Fatalf("DoStreamingRoundTrip failed: %v", err)
|
||||
}
|
||||
}
|
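Aside: AddOne above behaves like a mixed-radix odometer over the feature positions, which is how benchmain/main.go and BenchmarkClient enumerate every feature combination. A self-contained sketch of the same loop shape, using plain string/int slices purely for illustration:

package main

import "fmt"

// addOne advances pos like a mixed-radix odometer whose i-th digit wraps
// at max[i]; it mirrors the AddOne helper above.
func addOne(pos, max []int) {
	for i := len(pos) - 1; i >= 0; i-- {
		pos[i]++
		if pos[i]/max[i] == 0 { // digit did not overflow, stop carrying
			break
		}
		pos[i] = pos[i] % max[i] // wrap this digit and carry into the next
	}
}

func equal(a, b []int) bool {
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}

func main() {
	latencies := []string{"0ms", "40ms"}
	reqSizes := []int{1, 1024}

	pos := make([]int, 2)     // current position per feature
	initial := make([]int, 2) // all zeros; the loop ends when pos wraps back to it
	max := []int{len(latencies), len(reqSizes)}

	// Same loop shape as main() in benchmain/main.go: visit every combination.
	for start := true; start || !equal(pos, initial); start = false {
		fmt.Printf("latency=%s reqSize=%d\n", latencies[pos[0]], reqSizes[pos[1]])
		addOne(pos, max)
	}
}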
112 vendor/google.golang.org/grpc/benchmark/benchmark16_test.go (generated, vendored, new file)
@@ -0,0 +1,112 @@
// +build go1.6,!go1.7
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package benchmark
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/benchmark/stats"
|
||||
)
|
||||
|
||||
func BenchmarkClientStreamc1(b *testing.B) {
|
||||
grpc.EnableTracing = true
|
||||
runStream(b, stats.Features{"", true, 0, 0, 0, 1, 1, 1, false, false})
|
||||
}
|
||||
|
||||
func BenchmarkClientStreamc8(b *testing.B) {
|
||||
grpc.EnableTracing = true
|
||||
runStream(b, stats.Features{"", true, 0, 0, 0, 8, 1, 1, false, false})
|
||||
}
|
||||
|
||||
func BenchmarkClientStreamc64(b *testing.B) {
|
||||
grpc.EnableTracing = true
|
||||
runStream(b, stats.Features{"", true, 0, 0, 0, 64, 1, 1, false, false})
|
||||
}
|
||||
|
||||
func BenchmarkClientStreamc512(b *testing.B) {
|
||||
grpc.EnableTracing = true
|
||||
runStream(b, stats.Features{"", true, 0, 0, 0, 512, 1, 1, false, false})
|
||||
}
|
||||
func BenchmarkClientUnaryc1(b *testing.B) {
|
||||
grpc.EnableTracing = true
|
||||
runStream(b, stats.Features{"", true, 0, 0, 0, 1, 1, 1, false, false})
|
||||
}
|
||||
|
||||
func BenchmarkClientUnaryc8(b *testing.B) {
|
||||
grpc.EnableTracing = true
|
||||
runStream(b, stats.Features{"", true, 0, 0, 0, 8, 1, 1, false, false})
|
||||
}
|
||||
|
||||
func BenchmarkClientUnaryc64(b *testing.B) {
|
||||
grpc.EnableTracing = true
|
||||
runStream(b, stats.Features{"", true, 0, 0, 0, 64, 1, 1, false, false})
|
||||
}
|
||||
|
||||
func BenchmarkClientUnaryc512(b *testing.B) {
|
||||
grpc.EnableTracing = true
|
||||
runStream(b, stats.Features{"", true, 0, 0, 0, 512, 1, 1, false, false})
|
||||
}
|
||||
|
||||
func BenchmarkClientStreamNoTracec1(b *testing.B) {
|
||||
grpc.EnableTracing = false
|
||||
runStream(b, stats.Features{"", false, 0, 0, 0, 1, 1, 1, false, false})
|
||||
}
|
||||
|
||||
func BenchmarkClientStreamNoTracec8(b *testing.B) {
|
||||
grpc.EnableTracing = false
|
||||
runStream(b, stats.Features{"", false, 0, 0, 0, 8, 1, 1, false, false})
|
||||
}
|
||||
|
||||
func BenchmarkClientStreamNoTracec64(b *testing.B) {
|
||||
grpc.EnableTracing = false
|
||||
runStream(b, stats.Features{"", false, 0, 0, 0, 64, 1, 1, false, false})
|
||||
}
|
||||
|
||||
func BenchmarkClientStreamNoTracec512(b *testing.B) {
|
||||
grpc.EnableTracing = false
|
||||
runStream(b, stats.Features{"", false, 0, 0, 0, 512, 1, 1, false, false})
|
||||
}
|
||||
func BenchmarkClientUnaryNoTracec1(b *testing.B) {
|
||||
grpc.EnableTracing = false
|
||||
runStream(b, stats.Features{"", false, 0, 0, 0, 1, 1, 1, false, false})
|
||||
}
|
||||
|
||||
func BenchmarkClientUnaryNoTracec8(b *testing.B) {
|
||||
grpc.EnableTracing = false
|
||||
runStream(b, stats.Features{"", false, 0, 0, 0, 8, 1, 1, false, false})
|
||||
}
|
||||
|
||||
func BenchmarkClientUnaryNoTracec64(b *testing.B) {
|
||||
grpc.EnableTracing = false
|
||||
runStream(b, stats.Features{"", false, 0, 0, 0, 64, 1, 1, false, false})
|
||||
}
|
||||
|
||||
func BenchmarkClientUnaryNoTracec512(b *testing.B) {
|
||||
grpc.EnableTracing = false
|
||||
runStream(b, stats.Features{"", false, 0, 0, 0, 512, 1, 1, false, false})
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
os.Exit(stats.RunTestMain(m))
|
||||
}
|
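For readability, the positional stats.Features literals used in these go1.6 benchmarks map onto the field names shown in the keyed literal in benchmain/main.go. A purely illustrative keyed version of the BenchmarkClientStreamc1 configuration (field names taken from that keyed literal, values from the positional one above):

package main

import (
	"fmt"

	"google.golang.org/grpc/benchmark/stats"
)

func main() {
	// Same configuration as BenchmarkClientStreamc1 above, spelled with
	// field names instead of positional values.
	f := stats.Features{
		NetworkMode:        "",
		EnableTrace:        true,
		Latency:            0,
		Kbps:               0,
		Mtu:                0,
		MaxConcurrentCalls: 1,
		ReqSizeBytes:       1,
		RespSizeBytes:      1,
		EnableCompressor:   false,
		EnableChannelz:     false,
	}
	fmt.Println(f.String())
}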
85 vendor/google.golang.org/grpc/benchmark/benchmark17_test.go (generated, vendored, new file)
@@ -0,0 +1,85 @@
// +build go1.7
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package benchmark
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/benchmark/stats"
|
||||
)
|
||||
|
||||
func BenchmarkClient(b *testing.B) {
|
||||
enableTrace := []bool{true, false} // run both enable and disable by default
|
||||
// When the latency is set to 0 (no delay), the result is slower than a true no-delay run
// because the latency simulation path still performs extra operations.
|
||||
latency := []time.Duration{0, 40 * time.Millisecond} // if non-positive, no delay.
|
||||
kbps := []int{0, 10240} // if non-positive, infinite
|
||||
mtu := []int{0} // if non-positive, infinite
|
||||
maxConcurrentCalls := []int{1, 8, 64, 512}
|
||||
reqSizeBytes := []int{1, 1024 * 1024}
|
||||
respSizeBytes := []int{1, 1024 * 1024}
|
||||
featuresCurPos := make([]int, 7)
|
||||
|
||||
// 0:enableTracing 1:ltc 2:kbps 3:mtu 4:maxC 5:reqSize 6:respSize
|
||||
featuresMaxPosition := []int{len(enableTrace), len(latency), len(kbps), len(mtu), len(maxConcurrentCalls), len(reqSizeBytes), len(respSizeBytes)}
|
||||
initalPos := make([]int, len(featuresCurPos))
|
||||
|
||||
// run benchmarks
|
||||
start := true
|
||||
for !reflect.DeepEqual(featuresCurPos, initalPos) || start {
|
||||
start = false
|
||||
tracing := "Trace"
|
||||
if !enableTrace[featuresCurPos[0]] {
|
||||
tracing = "noTrace"
|
||||
}
|
||||
|
||||
benchFeature := stats.Features{
|
||||
EnableTrace: enableTrace[featuresCurPos[0]],
|
||||
Latency: latency[featuresCurPos[1]],
|
||||
Kbps: kbps[featuresCurPos[2]],
|
||||
Mtu: mtu[featuresCurPos[3]],
|
||||
MaxConcurrentCalls: maxConcurrentCalls[featuresCurPos[4]],
|
||||
ReqSizeBytes: reqSizeBytes[featuresCurPos[5]],
|
||||
RespSizeBytes: respSizeBytes[featuresCurPos[6]],
|
||||
}
|
||||
|
||||
grpc.EnableTracing = enableTrace[featuresCurPos[0]]
|
||||
b.Run(fmt.Sprintf("Unary-%s-%s",
|
||||
tracing, benchFeature.String()), func(b *testing.B) {
|
||||
runUnary(b, benchFeature)
|
||||
})
|
||||
|
||||
b.Run(fmt.Sprintf("Stream-%s-%s",
|
||||
tracing, benchFeature.String()), func(b *testing.B) {
|
||||
runStream(b, benchFeature)
|
||||
})
|
||||
AddOne(featuresCurPos, featuresMaxPosition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
os.Exit(stats.RunTestMain(m))
|
||||
}
|
133 vendor/google.golang.org/grpc/benchmark/benchresult/main.go (generated, vendored, new file)
@@ -0,0 +1,133 @@
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
/*
|
||||
To format the benchmark result:
|
||||
go run benchmark/benchresult/main.go resultfile
|
||||
|
||||
To see the performance change relative to an old result:
go run benchmark/benchresult/main.go resultfile_old resultfile
It will print the comparison for the benchmarks that appear in both files.
|
||||
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/gob"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/benchmark/stats"
|
||||
)
|
||||
|
||||
func createMap(fileName string, m map[string]stats.BenchResults) {
|
||||
f, err := os.Open(fileName)
|
||||
if err != nil {
|
||||
log.Fatalf("Read file %s error: %s\n", fileName, err)
|
||||
}
|
||||
defer f.Close()
|
||||
var data []stats.BenchResults
|
||||
decoder := gob.NewDecoder(f)
|
||||
if err = decoder.Decode(&data); err != nil {
|
||||
log.Fatalf("Decode file %s error: %s\n", fileName, err)
|
||||
}
|
||||
for _, d := range data {
|
||||
m[d.RunMode+"-"+d.Features.String()] = d
|
||||
}
|
||||
}
|
||||
|
||||
func intChange(title string, val1, val2 int64) string {
|
||||
return fmt.Sprintf("%10s %12s %12s %8.2f%%\n", title, strconv.FormatInt(val1, 10),
|
||||
strconv.FormatInt(val2, 10), float64(val2-val1)*100/float64(val1))
|
||||
}
|
||||
|
||||
func timeChange(title int, val1, val2 time.Duration) string {
|
||||
return fmt.Sprintf("%10s %12s %12s %8.2f%%\n", strconv.Itoa(title)+" latency", val1.String(),
|
||||
val2.String(), float64(val2-val1)*100/float64(val1))
|
||||
}
|
||||
|
||||
func compareTwoMap(m1, m2 map[string]stats.BenchResults) {
|
||||
for k2, v2 := range m2 {
|
||||
if v1, ok := m1[k2]; ok {
|
||||
changes := k2 + "\n"
|
||||
changes += fmt.Sprintf("%10s %12s %12s %8s\n", "Title", "Before", "After", "Percentage")
|
||||
changes += intChange("Bytes/op", v1.AllocedBytesPerOp, v2.AllocedBytesPerOp)
|
||||
changes += intChange("Allocs/op", v1.AllocsPerOp, v2.AllocsPerOp)
|
||||
changes += timeChange(v1.Latency[1].Percent, v1.Latency[1].Value, v2.Latency[1].Value)
|
||||
changes += timeChange(v1.Latency[2].Percent, v1.Latency[2].Value, v2.Latency[2].Value)
|
||||
fmt.Printf("%s\n", changes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func compareBenchmark(file1, file2 string) {
|
||||
var BenchValueFile1 map[string]stats.BenchResults
|
||||
var BenchValueFile2 map[string]stats.BenchResults
|
||||
BenchValueFile1 = make(map[string]stats.BenchResults)
|
||||
BenchValueFile2 = make(map[string]stats.BenchResults)
|
||||
|
||||
createMap(file1, BenchValueFile1)
|
||||
createMap(file2, BenchValueFile2)
|
||||
|
||||
compareTwoMap(BenchValueFile1, BenchValueFile2)
|
||||
}
|
||||
|
||||
func printline(benchName, ltc50, ltc90, allocByte, allocsOp interface{}) {
|
||||
fmt.Printf("%-80v%12v%12v%12v%12v\n", benchName, ltc50, ltc90, allocByte, allocsOp)
|
||||
}
|
||||
|
||||
func formatBenchmark(fileName string) {
|
||||
f, err := os.Open(fileName)
|
||||
if err != nil {
|
||||
log.Fatalf("Read file %s error: %s\n", fileName, err)
|
||||
}
|
||||
defer f.Close()
|
||||
var data []stats.BenchResults
|
||||
decoder := gob.NewDecoder(f)
|
||||
if err = decoder.Decode(&data); err != nil {
|
||||
log.Fatalf("Decode file %s error: %s\n", fileName, err)
|
||||
}
|
||||
if len(data) == 0 {
|
||||
log.Fatalf("No data in file %s\n", fileName)
|
||||
}
|
||||
printPos := data[0].SharedPosion
|
||||
fmt.Println("\nShared features:\n" + strings.Repeat("-", 20))
|
||||
fmt.Print(stats.PartialPrintString(printPos, data[0].Features, true))
|
||||
fmt.Println(strings.Repeat("-", 35))
|
||||
for i := 0; i < len(data[0].SharedPosion); i++ {
|
||||
printPos[i] = !printPos[i]
|
||||
}
|
||||
printline("Name", "latency-50", "latency-90", "Alloc (B)", "Alloc (#)")
|
||||
for _, d := range data {
|
||||
name := d.RunMode + stats.PartialPrintString(printPos, d.Features, false)
|
||||
printline(name, d.Latency[1].Value.String(), d.Latency[2].Value.String(),
|
||||
d.AllocedBytesPerOp, d.AllocsPerOp)
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
if len(os.Args) == 2 {
|
||||
formatBenchmark(os.Args[1])
|
||||
} else {
|
||||
compareBenchmark(os.Args[1], os.Args[2])
|
||||
}
|
||||
}
|
187 vendor/google.golang.org/grpc/benchmark/client/main.go (generated, vendored, new file)
@@ -0,0 +1,187 @@
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/benchmark"
|
||||
testpb "google.golang.org/grpc/benchmark/grpc_testing"
|
||||
"google.golang.org/grpc/benchmark/stats"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/syscall"
|
||||
)
|
||||
|
||||
var (
|
||||
port = flag.String("port", "50051", "Localhost port to connect to.")
|
||||
numRPC = flag.Int("r", 1, "The number of concurrent RPCs on each connection.")
|
||||
numConn = flag.Int("c", 1, "The number of parallel connections.")
|
||||
warmupDur = flag.Int("w", 10, "Warm-up duration in seconds")
|
||||
duration = flag.Int("d", 60, "Benchmark duration in seconds")
|
||||
rqSize = flag.Int("req", 1, "Request message size in bytes.")
|
||||
rspSize = flag.Int("resp", 1, "Response message size in bytes.")
|
||||
rpcType = flag.String("rpc_type", "unary",
|
||||
`Configure different client rpc type. Valid options are:
|
||||
unary;
|
||||
streaming.`)
|
||||
testName = flag.String("test_name", "", "Name of the test used for creating profiles.")
|
||||
wg sync.WaitGroup
|
||||
hopts = stats.HistogramOptions{
|
||||
NumBuckets: 2495,
|
||||
GrowthFactor: .01,
|
||||
}
|
||||
mu sync.Mutex
|
||||
hists []*stats.Histogram
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
if *testName == "" {
|
||||
grpclog.Fatalf("test_name not set")
|
||||
}
|
||||
req := &testpb.SimpleRequest{
|
||||
ResponseType: testpb.PayloadType_COMPRESSABLE,
|
||||
ResponseSize: int32(*rspSize),
|
||||
Payload: &testpb.Payload{
|
||||
Type: testpb.PayloadType_COMPRESSABLE,
|
||||
Body: make([]byte, *rqSize),
|
||||
},
|
||||
}
|
||||
connectCtx, connectCancel := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))
|
||||
defer connectCancel()
|
||||
ccs := buildConnections(connectCtx)
|
||||
warmDeadline := time.Now().Add(time.Duration(*warmupDur) * time.Second)
|
||||
endDeadline := warmDeadline.Add(time.Duration(*duration) * time.Second)
|
||||
cf, err := os.Create("/tmp/" + *testName + ".cpu")
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Error creating file: %v", err)
|
||||
}
|
||||
defer cf.Close()
|
||||
pprof.StartCPUProfile(cf)
|
||||
cpuBeg := syscall.GetCPUTime()
|
||||
for _, cc := range ccs {
|
||||
runWithConn(cc, req, warmDeadline, endDeadline)
|
||||
}
|
||||
wg.Wait()
|
||||
cpu := time.Duration(syscall.GetCPUTime() - cpuBeg)
|
||||
pprof.StopCPUProfile()
|
||||
mf, err := os.Create("/tmp/" + *testName + ".mem")
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Error creating file: %v", err)
|
||||
}
|
||||
defer mf.Close()
|
||||
runtime.GC() // materialize all statistics
|
||||
if err := pprof.WriteHeapProfile(mf); err != nil {
|
||||
grpclog.Fatalf("Error writing memory profile: %v", err)
|
||||
}
|
||||
hist := stats.NewHistogram(hopts)
|
||||
for _, h := range hists {
|
||||
hist.Merge(h)
|
||||
}
|
||||
parseHist(hist)
|
||||
fmt.Println("Client CPU utilization:", cpu)
|
||||
fmt.Println("Client CPU profile:", cf.Name())
|
||||
fmt.Println("Client Mem Profile:", mf.Name())
|
||||
}
|
||||
|
||||
func buildConnections(ctx context.Context) []*grpc.ClientConn {
|
||||
ccs := make([]*grpc.ClientConn, *numConn)
|
||||
for i := range ccs {
|
||||
ccs[i] = benchmark.NewClientConnWithContext(ctx, "localhost:"+*port, grpc.WithInsecure(), grpc.WithBlock())
|
||||
}
|
||||
return ccs
|
||||
}
|
||||
|
||||
func runWithConn(cc *grpc.ClientConn, req *testpb.SimpleRequest, warmDeadline, endDeadline time.Time) {
|
||||
for i := 0; i < *numRPC; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
caller := makeCaller(cc, req)
|
||||
hist := stats.NewHistogram(hopts)
|
||||
for {
|
||||
start := time.Now()
|
||||
if start.After(endDeadline) {
|
||||
mu.Lock()
|
||||
hists = append(hists, hist)
|
||||
mu.Unlock()
|
||||
return
|
||||
}
|
||||
caller()
|
||||
elapsed := time.Since(start)
|
||||
if start.After(warmDeadline) {
|
||||
hist.Add(elapsed.Nanoseconds())
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func makeCaller(cc *grpc.ClientConn, req *testpb.SimpleRequest) func() {
|
||||
client := testpb.NewBenchmarkServiceClient(cc)
|
||||
if *rpcType == "unary" {
|
||||
return func() {
|
||||
if _, err := client.UnaryCall(context.Background(), req); err != nil {
|
||||
grpclog.Fatalf("RPC failed: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
stream, err := client.StreamingCall(context.Background())
|
||||
if err != nil {
|
||||
grpclog.Fatalf("RPC failed: %v", err)
|
||||
}
|
||||
return func() {
|
||||
if err := stream.Send(req); err != nil {
|
||||
grpclog.Fatalf("Streaming RPC failed to send: %v", err)
|
||||
}
|
||||
if _, err := stream.Recv(); err != nil {
|
||||
grpclog.Fatalf("Streaming RPC failed to read: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func parseHist(hist *stats.Histogram) {
|
||||
fmt.Println("qps:", float64(hist.Count)/float64(*duration))
|
||||
fmt.Printf("Latency: (50/90/99 %%ile): %v/%v/%v\n",
|
||||
time.Duration(median(.5, hist)),
|
||||
time.Duration(median(.9, hist)),
|
||||
time.Duration(median(.99, hist)))
|
||||
}
|
||||
|
||||
func median(percentile float64, h *stats.Histogram) int64 {
|
||||
need := int64(float64(h.Count) * percentile)
|
||||
have := int64(0)
|
||||
for _, bucket := range h.Buckets {
|
||||
count := bucket.Count
|
||||
if have+count >= need {
|
||||
percent := float64(need-have) / float64(count)
|
||||
return int64((1.0-percent)*bucket.LowBound + percent*bucket.LowBound*(1.0+hopts.GrowthFactor))
|
||||
}
|
||||
have += bucket.Count
|
||||
}
|
||||
panic("should have found a bound")
|
||||
}
|
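The median helper above converts the exponentially-bucketed histogram into latency percentiles by interpolating inside the bucket that crosses the requested rank. A stripped-down sketch of that interpolation on a hand-rolled bucket slice (the bucket layout here is an assumption made only for illustration):

package main

import "fmt"

// bucket is an illustrative stand-in for a histogram bucket: samples whose
// value is >= lowBound and < lowBound*(1+growthFactor) land here.
type bucket struct {
	lowBound float64 // inclusive lower bound, in nanoseconds
	count    int64   // number of samples in the bucket
}

// percentile interpolates a value at percentile p, mirroring the way
// median() above walks the stats.Histogram buckets.
func percentile(p float64, total int64, buckets []bucket, growthFactor float64) float64 {
	need := int64(float64(total) * p)
	var have int64
	for _, b := range buckets {
		if have+b.count >= need {
			frac := float64(need-have) / float64(b.count) // fraction of this bucket needed
			upper := b.lowBound * (1.0 + growthFactor)
			return (1.0-frac)*b.lowBound + frac*upper
		}
		have += b.count
	}
	panic("percentile rank not found")
}

func main() {
	buckets := []bucket{{lowBound: 1000, count: 50}, {lowBound: 1010, count: 30}, {lowBound: 1020.1, count: 20}}
	fmt.Println(percentile(0.9, 100, buckets, 0.01))
}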
1580 vendor/google.golang.org/grpc/benchmark/grpc_testing/control.pb.go (generated, vendored, new file)
File diff suppressed because it is too large.
186 vendor/google.golang.org/grpc/benchmark/grpc_testing/control.proto (generated, vendored, new file)
@@ -0,0 +1,186 @@
// Copyright 2016 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

import "payloads.proto";
import "stats.proto";

package grpc.testing;

enum ClientType {
  SYNC_CLIENT = 0;
  ASYNC_CLIENT = 1;
}

enum ServerType {
  SYNC_SERVER = 0;
  ASYNC_SERVER = 1;
  ASYNC_GENERIC_SERVER = 2;
}

enum RpcType {
  UNARY = 0;
  STREAMING = 1;
}

// Parameters of poisson process distribution, which is a good representation
// of activity coming in from independent identical stationary sources.
message PoissonParams {
  // The rate of arrivals (a.k.a. lambda parameter of the exp distribution).
  double offered_load = 1;
}

message UniformParams {
  double interarrival_lo = 1;
  double interarrival_hi = 2;
}

message DeterministicParams {
  double offered_load = 1;
}

message ParetoParams {
  double interarrival_base = 1;
  double alpha = 2;
}

// Once an RPC finishes, immediately start a new one.
// No configuration parameters needed.
message ClosedLoopParams {
}

message LoadParams {
  oneof load {
    ClosedLoopParams closed_loop = 1;
    PoissonParams poisson = 2;
    UniformParams uniform = 3;
    DeterministicParams determ = 4;
    ParetoParams pareto = 5;
  };
}

// presence of SecurityParams implies use of TLS
message SecurityParams {
  bool use_test_ca = 1;
  string server_host_override = 2;
}

message ClientConfig {
  // List of targets to connect to. At least one target needs to be specified.
  repeated string server_targets = 1;
  ClientType client_type = 2;
  SecurityParams security_params = 3;
  // How many concurrent RPCs to start for each channel.
  // For synchronous client, use a separate thread for each outstanding RPC.
  int32 outstanding_rpcs_per_channel = 4;
  // Number of independent client channels to create.
  // i-th channel will connect to server_target[i % server_targets.size()]
  int32 client_channels = 5;
  // Only for async client. Number of threads to use to start/manage RPCs.
  int32 async_client_threads = 7;
  RpcType rpc_type = 8;
  // The requested load for the entire client (aggregated over all the threads).
  LoadParams load_params = 10;
  PayloadConfig payload_config = 11;
  HistogramParams histogram_params = 12;

  // Specify the cores we should run the client on, if desired
  repeated int32 core_list = 13;
  int32 core_limit = 14;
}

message ClientStatus {
  ClientStats stats = 1;
}

// Request current stats
message Mark {
  // if true, the stats will be reset after taking their snapshot.
  bool reset = 1;
}

message ClientArgs {
  oneof argtype {
    ClientConfig setup = 1;
    Mark mark = 2;
  }
}

message ServerConfig {
  ServerType server_type = 1;
  SecurityParams security_params = 2;
  // Port on which to listen. Zero means pick unused port.
  int32 port = 4;
  // Only for async server. Number of threads used to serve the requests.
  int32 async_server_threads = 7;
  // Specify the number of cores to limit server to, if desired
  int32 core_limit = 8;
  // payload config, used in generic server
  PayloadConfig payload_config = 9;

  // Specify the cores we should run the server on, if desired
  repeated int32 core_list = 10;
}

message ServerArgs {
  oneof argtype {
    ServerConfig setup = 1;
    Mark mark = 2;
  }
}

message ServerStatus {
  ServerStats stats = 1;
  // the port bound by the server
  int32 port = 2;
  // Number of cores available to the server
  int32 cores = 3;
}

message CoreRequest {
}

message CoreResponse {
  // Number of cores available on the server
  int32 cores = 1;
}

message Void {
}

// A single performance scenario: input to qps_json_driver
message Scenario {
  // Human readable name for this scenario
  string name = 1;
  // Client configuration
  ClientConfig client_config = 2;
  // Number of clients to start for the test
  int32 num_clients = 3;
  // Server configuration
  ServerConfig server_config = 4;
  // Number of servers to start for the test
  int32 num_servers = 5;
  // Warmup period, in seconds
  int32 warmup_seconds = 6;
  // Benchmark time, in seconds
  int32 benchmark_seconds = 7;
  // Number of workers to spawn locally (usually zero)
  int32 spawn_local_worker_count = 8;
}

// A set of scenarios to be run with qps_json_driver
message Scenarios {
  repeated Scenario scenarios = 1;
}
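control.pb.go is suppressed in this diff, so the Go identifiers generated from control.proto are not visible here. The sketch below assumes the standard protoc-gen-go naming for these messages (ClientConfig, LoadParams_Poisson, and so on) and only illustrates how the oneof load field is populated for an open-loop Poisson workload.

package main

import (
	"fmt"

	testpb "google.golang.org/grpc/benchmark/grpc_testing"
)

func main() {
	// A client configuration driving a Poisson (open-loop) load of 1000 RPCs/s
	// over four channels. Field and wrapper names follow the usual
	// protoc-gen-go conventions and are assumed, since control.pb.go is not
	// shown in this diff.
	cfg := &testpb.ClientConfig{
		ServerTargets:             []string{"localhost:10000"},
		ClientType:                testpb.ClientType_ASYNC_CLIENT,
		RpcType:                   testpb.RpcType_UNARY,
		OutstandingRpcsPerChannel: 10,
		ClientChannels:            4,
		LoadParams: &testpb.LoadParams{
			// The oneof from control.proto becomes an interface-typed field;
			// selecting a branch means assigning its wrapper struct.
			Load: &testpb.LoadParams_Poisson{
				Poisson: &testpb.PoissonParams{OfferedLoad: 1000},
			},
		},
	}
	fmt.Println(cfg)
}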
731
vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.pb.go
generated
vendored
Normal file
@@ -0,0 +1,731 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: messages.proto
|
||||
|
||||
package grpc_testing
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// The type of payload that should be returned.
|
||||
type PayloadType int32
|
||||
|
||||
const (
|
||||
// Compressable text format.
|
||||
PayloadType_COMPRESSABLE PayloadType = 0
|
||||
// Uncompressable binary format.
|
||||
PayloadType_UNCOMPRESSABLE PayloadType = 1
|
||||
// Randomly chosen from all other formats defined in this enum.
|
||||
PayloadType_RANDOM PayloadType = 2
|
||||
)
|
||||
|
||||
var PayloadType_name = map[int32]string{
|
||||
0: "COMPRESSABLE",
|
||||
1: "UNCOMPRESSABLE",
|
||||
2: "RANDOM",
|
||||
}
|
||||
var PayloadType_value = map[string]int32{
|
||||
"COMPRESSABLE": 0,
|
||||
"UNCOMPRESSABLE": 1,
|
||||
"RANDOM": 2,
|
||||
}
|
||||
|
||||
func (x PayloadType) String() string {
|
||||
return proto.EnumName(PayloadType_name, int32(x))
|
||||
}
|
||||
func (PayloadType) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_messages_5c70222ad96bf232, []int{0}
|
||||
}
|
||||
|
||||
// Compression algorithms
|
||||
type CompressionType int32
|
||||
|
||||
const (
|
||||
// No compression
|
||||
CompressionType_NONE CompressionType = 0
|
||||
CompressionType_GZIP CompressionType = 1
|
||||
CompressionType_DEFLATE CompressionType = 2
|
||||
)
|
||||
|
||||
var CompressionType_name = map[int32]string{
|
||||
0: "NONE",
|
||||
1: "GZIP",
|
||||
2: "DEFLATE",
|
||||
}
|
||||
var CompressionType_value = map[string]int32{
|
||||
"NONE": 0,
|
||||
"GZIP": 1,
|
||||
"DEFLATE": 2,
|
||||
}
|
||||
|
||||
func (x CompressionType) String() string {
|
||||
return proto.EnumName(CompressionType_name, int32(x))
|
||||
}
|
||||
func (CompressionType) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_messages_5c70222ad96bf232, []int{1}
|
||||
}
|
||||
|
||||
// A block of data, to simply increase gRPC message size.
|
||||
type Payload struct {
|
||||
// The type of data in body.
|
||||
Type PayloadType `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.testing.PayloadType" json:"type,omitempty"`
|
||||
// Primary contents of payload.
|
||||
Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Payload) Reset() { *m = Payload{} }
|
||||
func (m *Payload) String() string { return proto.CompactTextString(m) }
|
||||
func (*Payload) ProtoMessage() {}
|
||||
func (*Payload) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_messages_5c70222ad96bf232, []int{0}
|
||||
}
|
||||
func (m *Payload) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Payload.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Payload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Payload.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Payload) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Payload.Merge(dst, src)
|
||||
}
|
||||
func (m *Payload) XXX_Size() int {
|
||||
return xxx_messageInfo_Payload.Size(m)
|
||||
}
|
||||
func (m *Payload) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Payload.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Payload proto.InternalMessageInfo
|
||||
|
||||
func (m *Payload) GetType() PayloadType {
|
||||
if m != nil {
|
||||
return m.Type
|
||||
}
|
||||
return PayloadType_COMPRESSABLE
|
||||
}
|
||||
|
||||
func (m *Payload) GetBody() []byte {
|
||||
if m != nil {
|
||||
return m.Body
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// A protobuf representation for grpc status. This is used by test
|
||||
// clients to specify a status that the server should attempt to return.
|
||||
type EchoStatus struct {
|
||||
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
|
||||
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *EchoStatus) Reset() { *m = EchoStatus{} }
|
||||
func (m *EchoStatus) String() string { return proto.CompactTextString(m) }
|
||||
func (*EchoStatus) ProtoMessage() {}
|
||||
func (*EchoStatus) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_messages_5c70222ad96bf232, []int{1}
|
||||
}
|
||||
func (m *EchoStatus) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_EchoStatus.Unmarshal(m, b)
|
||||
}
|
||||
func (m *EchoStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_EchoStatus.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *EchoStatus) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_EchoStatus.Merge(dst, src)
|
||||
}
|
||||
func (m *EchoStatus) XXX_Size() int {
|
||||
return xxx_messageInfo_EchoStatus.Size(m)
|
||||
}
|
||||
func (m *EchoStatus) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_EchoStatus.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_EchoStatus proto.InternalMessageInfo
|
||||
|
||||
func (m *EchoStatus) GetCode() int32 {
|
||||
if m != nil {
|
||||
return m.Code
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *EchoStatus) GetMessage() string {
|
||||
if m != nil {
|
||||
return m.Message
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Unary request.
|
||||
type SimpleRequest struct {
|
||||
// Desired payload type in the response from the server.
|
||||
// If response_type is RANDOM, server randomly chooses one from other formats.
|
||||
ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,proto3,enum=grpc.testing.PayloadType" json:"response_type,omitempty"`
|
||||
// Desired payload size in the response from the server.
|
||||
// If response_type is COMPRESSABLE, this denotes the size before compression.
|
||||
ResponseSize int32 `protobuf:"varint,2,opt,name=response_size,json=responseSize,proto3" json:"response_size,omitempty"`
|
||||
// Optional input payload sent along with the request.
|
||||
Payload *Payload `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"`
|
||||
// Whether SimpleResponse should include username.
|
||||
FillUsername bool `protobuf:"varint,4,opt,name=fill_username,json=fillUsername,proto3" json:"fill_username,omitempty"`
|
||||
// Whether SimpleResponse should include OAuth scope.
|
||||
FillOauthScope bool `protobuf:"varint,5,opt,name=fill_oauth_scope,json=fillOauthScope,proto3" json:"fill_oauth_scope,omitempty"`
|
||||
// Compression algorithm to be used by the server for the response (stream)
|
||||
ResponseCompression CompressionType `protobuf:"varint,6,opt,name=response_compression,json=responseCompression,proto3,enum=grpc.testing.CompressionType" json:"response_compression,omitempty"`
|
||||
// Whether server should return a given status
|
||||
ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus,proto3" json:"response_status,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SimpleRequest) Reset() { *m = SimpleRequest{} }
|
||||
func (m *SimpleRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*SimpleRequest) ProtoMessage() {}
|
||||
func (*SimpleRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_messages_5c70222ad96bf232, []int{2}
|
||||
}
|
||||
func (m *SimpleRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SimpleRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SimpleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SimpleRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *SimpleRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SimpleRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *SimpleRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_SimpleRequest.Size(m)
|
||||
}
|
||||
func (m *SimpleRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SimpleRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SimpleRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *SimpleRequest) GetResponseType() PayloadType {
|
||||
if m != nil {
|
||||
return m.ResponseType
|
||||
}
|
||||
return PayloadType_COMPRESSABLE
|
||||
}
|
||||
|
||||
func (m *SimpleRequest) GetResponseSize() int32 {
|
||||
if m != nil {
|
||||
return m.ResponseSize
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *SimpleRequest) GetPayload() *Payload {
|
||||
if m != nil {
|
||||
return m.Payload
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *SimpleRequest) GetFillUsername() bool {
|
||||
if m != nil {
|
||||
return m.FillUsername
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *SimpleRequest) GetFillOauthScope() bool {
|
||||
if m != nil {
|
||||
return m.FillOauthScope
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *SimpleRequest) GetResponseCompression() CompressionType {
|
||||
if m != nil {
|
||||
return m.ResponseCompression
|
||||
}
|
||||
return CompressionType_NONE
|
||||
}
|
||||
|
||||
func (m *SimpleRequest) GetResponseStatus() *EchoStatus {
|
||||
if m != nil {
|
||||
return m.ResponseStatus
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unary response, as configured by the request.
|
||||
type SimpleResponse struct {
|
||||
// Payload to increase message size.
|
||||
Payload *Payload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
|
||||
// The user the request came from, for verifying authentication was
|
||||
// successful when the client expected it.
|
||||
Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"`
|
||||
// OAuth scope.
|
||||
OauthScope string `protobuf:"bytes,3,opt,name=oauth_scope,json=oauthScope,proto3" json:"oauth_scope,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SimpleResponse) Reset() { *m = SimpleResponse{} }
|
||||
func (m *SimpleResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*SimpleResponse) ProtoMessage() {}
|
||||
func (*SimpleResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_messages_5c70222ad96bf232, []int{3}
|
||||
}
|
||||
func (m *SimpleResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SimpleResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SimpleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SimpleResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *SimpleResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SimpleResponse.Merge(dst, src)
|
||||
}
|
||||
func (m *SimpleResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_SimpleResponse.Size(m)
|
||||
}
|
||||
func (m *SimpleResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SimpleResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SimpleResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *SimpleResponse) GetPayload() *Payload {
|
||||
if m != nil {
|
||||
return m.Payload
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *SimpleResponse) GetUsername() string {
|
||||
if m != nil {
|
||||
return m.Username
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *SimpleResponse) GetOauthScope() string {
|
||||
if m != nil {
|
||||
return m.OauthScope
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Client-streaming request.
|
||||
type StreamingInputCallRequest struct {
|
||||
// Optional input payload sent along with the request.
|
||||
Payload *Payload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *StreamingInputCallRequest) Reset() { *m = StreamingInputCallRequest{} }
|
||||
func (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*StreamingInputCallRequest) ProtoMessage() {}
|
||||
func (*StreamingInputCallRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_messages_5c70222ad96bf232, []int{4}
|
||||
}
|
||||
func (m *StreamingInputCallRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_StreamingInputCallRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *StreamingInputCallRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_StreamingInputCallRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *StreamingInputCallRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_StreamingInputCallRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *StreamingInputCallRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_StreamingInputCallRequest.Size(m)
|
||||
}
|
||||
func (m *StreamingInputCallRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_StreamingInputCallRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_StreamingInputCallRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *StreamingInputCallRequest) GetPayload() *Payload {
|
||||
if m != nil {
|
||||
return m.Payload
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Client-streaming response.
|
||||
type StreamingInputCallResponse struct {
|
||||
// Aggregated size of payloads received from the client.
|
||||
AggregatedPayloadSize int32 `protobuf:"varint,1,opt,name=aggregated_payload_size,json=aggregatedPayloadSize,proto3" json:"aggregated_payload_size,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *StreamingInputCallResponse) Reset() { *m = StreamingInputCallResponse{} }
|
||||
func (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*StreamingInputCallResponse) ProtoMessage() {}
|
||||
func (*StreamingInputCallResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_messages_5c70222ad96bf232, []int{5}
|
||||
}
|
||||
func (m *StreamingInputCallResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_StreamingInputCallResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *StreamingInputCallResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_StreamingInputCallResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *StreamingInputCallResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_StreamingInputCallResponse.Merge(dst, src)
|
||||
}
|
||||
func (m *StreamingInputCallResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_StreamingInputCallResponse.Size(m)
|
||||
}
|
||||
func (m *StreamingInputCallResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_StreamingInputCallResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_StreamingInputCallResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 {
|
||||
if m != nil {
|
||||
return m.AggregatedPayloadSize
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Configuration for a particular response.
|
||||
type ResponseParameters struct {
|
||||
// Desired payload sizes in responses from the server.
|
||||
// If response_type is COMPRESSABLE, this denotes the size before compression.
|
||||
Size int32 `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"`
|
||||
// Desired interval between consecutive responses in the response stream in
|
||||
// microseconds.
|
||||
IntervalUs int32 `protobuf:"varint,2,opt,name=interval_us,json=intervalUs,proto3" json:"interval_us,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ResponseParameters) Reset() { *m = ResponseParameters{} }
|
||||
func (m *ResponseParameters) String() string { return proto.CompactTextString(m) }
|
||||
func (*ResponseParameters) ProtoMessage() {}
|
||||
func (*ResponseParameters) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_messages_5c70222ad96bf232, []int{6}
|
||||
}
|
||||
func (m *ResponseParameters) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ResponseParameters.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ResponseParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ResponseParameters.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *ResponseParameters) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ResponseParameters.Merge(dst, src)
|
||||
}
|
||||
func (m *ResponseParameters) XXX_Size() int {
|
||||
return xxx_messageInfo_ResponseParameters.Size(m)
|
||||
}
|
||||
func (m *ResponseParameters) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ResponseParameters.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ResponseParameters proto.InternalMessageInfo
|
||||
|
||||
func (m *ResponseParameters) GetSize() int32 {
|
||||
if m != nil {
|
||||
return m.Size
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ResponseParameters) GetIntervalUs() int32 {
|
||||
if m != nil {
|
||||
return m.IntervalUs
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Server-streaming request.
|
||||
type StreamingOutputCallRequest struct {
|
||||
// Desired payload type in the response from the server.
|
||||
// If response_type is RANDOM, the payload from each response in the stream
|
||||
// might be of different types. This is to simulate a mixed type of payload
|
||||
// stream.
|
||||
ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,proto3,enum=grpc.testing.PayloadType" json:"response_type,omitempty"`
|
||||
// Configuration for each expected response message.
|
||||
ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters,json=responseParameters,proto3" json:"response_parameters,omitempty"`
|
||||
// Optional input payload sent along with the request.
|
||||
Payload *Payload `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"`
|
||||
// Compression algorithm to be used by the server for the response (stream)
|
||||
ResponseCompression CompressionType `protobuf:"varint,6,opt,name=response_compression,json=responseCompression,proto3,enum=grpc.testing.CompressionType" json:"response_compression,omitempty"`
|
||||
// Whether server should return a given status
|
||||
ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus,proto3" json:"response_status,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *StreamingOutputCallRequest) Reset() { *m = StreamingOutputCallRequest{} }
|
||||
func (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*StreamingOutputCallRequest) ProtoMessage() {}
|
||||
func (*StreamingOutputCallRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_messages_5c70222ad96bf232, []int{7}
|
||||
}
|
||||
func (m *StreamingOutputCallRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_StreamingOutputCallRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *StreamingOutputCallRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_StreamingOutputCallRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *StreamingOutputCallRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_StreamingOutputCallRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *StreamingOutputCallRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_StreamingOutputCallRequest.Size(m)
|
||||
}
|
||||
func (m *StreamingOutputCallRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_StreamingOutputCallRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_StreamingOutputCallRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *StreamingOutputCallRequest) GetResponseType() PayloadType {
|
||||
if m != nil {
|
||||
return m.ResponseType
|
||||
}
|
||||
return PayloadType_COMPRESSABLE
|
||||
}
|
||||
|
||||
func (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters {
|
||||
if m != nil {
|
||||
return m.ResponseParameters
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *StreamingOutputCallRequest) GetPayload() *Payload {
|
||||
if m != nil {
|
||||
return m.Payload
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *StreamingOutputCallRequest) GetResponseCompression() CompressionType {
|
||||
if m != nil {
|
||||
return m.ResponseCompression
|
||||
}
|
||||
return CompressionType_NONE
|
||||
}
|
||||
|
||||
func (m *StreamingOutputCallRequest) GetResponseStatus() *EchoStatus {
|
||||
if m != nil {
|
||||
return m.ResponseStatus
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Server-streaming response, as configured by the request and parameters.
|
||||
type StreamingOutputCallResponse struct {
|
||||
// Payload to increase response size.
|
||||
Payload *Payload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *StreamingOutputCallResponse) Reset() { *m = StreamingOutputCallResponse{} }
|
||||
func (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*StreamingOutputCallResponse) ProtoMessage() {}
|
||||
func (*StreamingOutputCallResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_messages_5c70222ad96bf232, []int{8}
|
||||
}
|
||||
func (m *StreamingOutputCallResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_StreamingOutputCallResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *StreamingOutputCallResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_StreamingOutputCallResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *StreamingOutputCallResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_StreamingOutputCallResponse.Merge(dst, src)
|
||||
}
|
||||
func (m *StreamingOutputCallResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_StreamingOutputCallResponse.Size(m)
|
||||
}
|
||||
func (m *StreamingOutputCallResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_StreamingOutputCallResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_StreamingOutputCallResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *StreamingOutputCallResponse) GetPayload() *Payload {
|
||||
if m != nil {
|
||||
return m.Payload
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// For reconnect interop test only.
|
||||
// Client tells server what reconnection parameters it used.
|
||||
type ReconnectParams struct {
|
||||
MaxReconnectBackoffMs int32 `protobuf:"varint,1,opt,name=max_reconnect_backoff_ms,json=maxReconnectBackoffMs,proto3" json:"max_reconnect_backoff_ms,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ReconnectParams) Reset() { *m = ReconnectParams{} }
|
||||
func (m *ReconnectParams) String() string { return proto.CompactTextString(m) }
|
||||
func (*ReconnectParams) ProtoMessage() {}
|
||||
func (*ReconnectParams) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_messages_5c70222ad96bf232, []int{9}
|
||||
}
|
||||
func (m *ReconnectParams) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ReconnectParams.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ReconnectParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ReconnectParams.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *ReconnectParams) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ReconnectParams.Merge(dst, src)
|
||||
}
|
||||
func (m *ReconnectParams) XXX_Size() int {
|
||||
return xxx_messageInfo_ReconnectParams.Size(m)
|
||||
}
|
||||
func (m *ReconnectParams) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ReconnectParams.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ReconnectParams proto.InternalMessageInfo
|
||||
|
||||
func (m *ReconnectParams) GetMaxReconnectBackoffMs() int32 {
|
||||
if m != nil {
|
||||
return m.MaxReconnectBackoffMs
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// For reconnect interop test only.
|
||||
// Server tells client whether its reconnects are following the spec and the
|
||||
// reconnect backoffs it saw.
|
||||
type ReconnectInfo struct {
|
||||
Passed bool `protobuf:"varint,1,opt,name=passed,proto3" json:"passed,omitempty"`
|
||||
BackoffMs []int32 `protobuf:"varint,2,rep,packed,name=backoff_ms,json=backoffMs,proto3" json:"backoff_ms,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ReconnectInfo) Reset() { *m = ReconnectInfo{} }
|
||||
func (m *ReconnectInfo) String() string { return proto.CompactTextString(m) }
|
||||
func (*ReconnectInfo) ProtoMessage() {}
|
||||
func (*ReconnectInfo) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_messages_5c70222ad96bf232, []int{10}
|
||||
}
|
||||
func (m *ReconnectInfo) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ReconnectInfo.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ReconnectInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ReconnectInfo.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *ReconnectInfo) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ReconnectInfo.Merge(dst, src)
|
||||
}
|
||||
func (m *ReconnectInfo) XXX_Size() int {
|
||||
return xxx_messageInfo_ReconnectInfo.Size(m)
|
||||
}
|
||||
func (m *ReconnectInfo) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ReconnectInfo.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ReconnectInfo proto.InternalMessageInfo
|
||||
|
||||
func (m *ReconnectInfo) GetPassed() bool {
|
||||
if m != nil {
|
||||
return m.Passed
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *ReconnectInfo) GetBackoffMs() []int32 {
|
||||
if m != nil {
|
||||
return m.BackoffMs
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Payload)(nil), "grpc.testing.Payload")
|
||||
proto.RegisterType((*EchoStatus)(nil), "grpc.testing.EchoStatus")
|
||||
proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest")
|
||||
proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse")
|
||||
proto.RegisterType((*StreamingInputCallRequest)(nil), "grpc.testing.StreamingInputCallRequest")
|
||||
proto.RegisterType((*StreamingInputCallResponse)(nil), "grpc.testing.StreamingInputCallResponse")
|
||||
proto.RegisterType((*ResponseParameters)(nil), "grpc.testing.ResponseParameters")
|
||||
proto.RegisterType((*StreamingOutputCallRequest)(nil), "grpc.testing.StreamingOutputCallRequest")
|
||||
proto.RegisterType((*StreamingOutputCallResponse)(nil), "grpc.testing.StreamingOutputCallResponse")
|
||||
proto.RegisterType((*ReconnectParams)(nil), "grpc.testing.ReconnectParams")
|
||||
proto.RegisterType((*ReconnectInfo)(nil), "grpc.testing.ReconnectInfo")
|
||||
proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value)
|
||||
proto.RegisterEnum("grpc.testing.CompressionType", CompressionType_name, CompressionType_value)
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("messages.proto", fileDescriptor_messages_5c70222ad96bf232) }
|
||||
|
||||
var fileDescriptor_messages_5c70222ad96bf232 = []byte{
|
||||
// 652 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x55, 0x4d, 0x6f, 0xd3, 0x40,
|
||||
0x10, 0xc5, 0xf9, 0xee, 0x24, 0x4d, 0xa3, 0x85, 0x82, 0x5b, 0x54, 0x11, 0x99, 0x4b, 0x54, 0x89,
|
||||
0x20, 0x05, 0x09, 0x24, 0x0e, 0xa0, 0xb4, 0x4d, 0x51, 0x50, 0x9a, 0x84, 0x75, 0x7b, 0xe1, 0x62,
|
||||
0x6d, 0x9c, 0x8d, 0x6b, 0x11, 0x7b, 0x8d, 0x77, 0x8d, 0x9a, 0x1e, 0xb8, 0xf3, 0x83, 0xb9, 0xa3,
|
||||
0x5d, 0x7f, 0xc4, 0x69, 0x7b, 0x68, 0xe1, 0xc2, 0x6d, 0xf7, 0xed, 0x9b, 0x97, 0x79, 0x33, 0xcf,
|
||||
0x0a, 0x34, 0x3d, 0xca, 0x39, 0x71, 0x28, 0xef, 0x06, 0x21, 0x13, 0x0c, 0x35, 0x9c, 0x30, 0xb0,
|
||||
0xbb, 0x82, 0x72, 0xe1, 0xfa, 0x8e, 0x31, 0x82, 0xea, 0x94, 0xac, 0x96, 0x8c, 0xcc, 0xd1, 0x2b,
|
||||
0x28, 0x89, 0x55, 0x40, 0x75, 0xad, 0xad, 0x75, 0x9a, 0xbd, 0xbd, 0x6e, 0x9e, 0xd7, 0x4d, 0x48,
|
||||
0xe7, 0xab, 0x80, 0x62, 0x45, 0x43, 0x08, 0x4a, 0x33, 0x36, 0x5f, 0xe9, 0x85, 0xb6, 0xd6, 0x69,
|
||||
0x60, 0x75, 0x36, 0xde, 0x03, 0x0c, 0xec, 0x4b, 0x66, 0x0a, 0x22, 0x22, 0x2e, 0x19, 0x36, 0x9b,
|
||||
0xc7, 0x82, 0x65, 0xac, 0xce, 0x48, 0x87, 0x6a, 0xd2, 0x8f, 0x2a, 0xdc, 0xc2, 0xe9, 0xd5, 0xf8,
|
||||
0x55, 0x84, 0x6d, 0xd3, 0xf5, 0x82, 0x25, 0xc5, 0xf4, 0x7b, 0x44, 0xb9, 0x40, 0x1f, 0x60, 0x3b,
|
||||
0xa4, 0x3c, 0x60, 0x3e, 0xa7, 0xd6, 0xfd, 0x3a, 0x6b, 0xa4, 0x7c, 0x79, 0x43, 0x2f, 0x73, 0xf5,
|
||||
0xdc, 0xbd, 0x8e, 0x7f, 0xb1, 0xbc, 0x26, 0x99, 0xee, 0x35, 0x45, 0xaf, 0xa1, 0x1a, 0xc4, 0x0a,
|
||||
0x7a, 0xb1, 0xad, 0x75, 0xea, 0xbd, 0xdd, 0x3b, 0xe5, 0x71, 0xca, 0x92, 0xaa, 0x0b, 0x77, 0xb9,
|
||||
0xb4, 0x22, 0x4e, 0x43, 0x9f, 0x78, 0x54, 0x2f, 0xb5, 0xb5, 0x4e, 0x0d, 0x37, 0x24, 0x78, 0x91,
|
||||
0x60, 0xa8, 0x03, 0x2d, 0x45, 0x62, 0x24, 0x12, 0x97, 0x16, 0xb7, 0x59, 0x40, 0xf5, 0xb2, 0xe2,
|
||||
0x35, 0x25, 0x3e, 0x91, 0xb0, 0x29, 0x51, 0x34, 0x85, 0x27, 0x59, 0x93, 0x36, 0xf3, 0x82, 0x90,
|
||||
0x72, 0xee, 0x32, 0x5f, 0xaf, 0x28, 0xaf, 0x07, 0x9b, 0xcd, 0x1c, 0xaf, 0x09, 0xca, 0xef, 0xe3,
|
||||
0xb4, 0x34, 0xf7, 0x80, 0xfa, 0xb0, 0xb3, 0xb6, 0xad, 0x36, 0xa1, 0x57, 0x95, 0x33, 0x7d, 0x53,
|
||||
0x6c, 0xbd, 0x29, 0xdc, 0xcc, 0x46, 0xa2, 0xee, 0xc6, 0x4f, 0x68, 0xa6, 0xab, 0x88, 0xf1, 0xfc,
|
||||
0x98, 0xb4, 0x7b, 0x8d, 0x69, 0x1f, 0x6a, 0xd9, 0x84, 0xe2, 0x4d, 0x67, 0x77, 0xf4, 0x02, 0xea,
|
||||
0xf9, 0xc1, 0x14, 0xd5, 0x33, 0xb0, 0x6c, 0x28, 0xc6, 0x08, 0xf6, 0x4c, 0x11, 0x52, 0xe2, 0xb9,
|
||||
0xbe, 0x33, 0xf4, 0x83, 0x48, 0x1c, 0x93, 0xe5, 0x32, 0x8d, 0xc5, 0x43, 0x5b, 0x31, 0xce, 0x61,
|
||||
0xff, 0x2e, 0xb5, 0xc4, 0xd9, 0x5b, 0x78, 0x46, 0x1c, 0x27, 0xa4, 0x0e, 0x11, 0x74, 0x6e, 0x25,
|
||||
0x35, 0x71, 0x5e, 0xe2, 0xe0, 0xee, 0xae, 0x9f, 0x13, 0x69, 0x19, 0x1c, 0x63, 0x08, 0x28, 0xd5,
|
||||
0x98, 0x92, 0x90, 0x78, 0x54, 0xd0, 0x50, 0x65, 0x3e, 0x57, 0xaa, 0xce, 0xd2, 0xae, 0xeb, 0x0b,
|
||||
0x1a, 0xfe, 0x20, 0x32, 0x35, 0x49, 0x0a, 0x21, 0x85, 0x2e, 0xb8, 0xf1, 0xbb, 0x90, 0xeb, 0x70,
|
||||
0x12, 0x89, 0x1b, 0x86, 0xff, 0xf5, 0x3b, 0xf8, 0x02, 0x59, 0x4e, 0xac, 0x20, 0x6b, 0x55, 0x2f,
|
||||
0xb4, 0x8b, 0x9d, 0x7a, 0xaf, 0xbd, 0xa9, 0x72, 0xdb, 0x12, 0x46, 0xe1, 0x6d, 0x9b, 0x0f, 0xfe,
|
||||
0x6a, 0xfe, 0xcb, 0x98, 0x8f, 0xe1, 0xf9, 0x9d, 0x63, 0xff, 0xcb, 0xcc, 0x1b, 0x9f, 0x61, 0x07,
|
||||
0x53, 0x9b, 0xf9, 0x3e, 0xb5, 0x85, 0x1a, 0x16, 0x47, 0xef, 0x40, 0xf7, 0xc8, 0x95, 0x15, 0xa6,
|
||||
0xb0, 0x35, 0x23, 0xf6, 0x37, 0xb6, 0x58, 0x58, 0x1e, 0x4f, 0xe3, 0xe5, 0x91, 0xab, 0xac, 0xea,
|
||||
0x28, 0x7e, 0x3d, 0xe3, 0xc6, 0x29, 0x6c, 0x67, 0xe8, 0xd0, 0x5f, 0x30, 0xf4, 0x14, 0x2a, 0x01,
|
||||
0xe1, 0x9c, 0xc6, 0xcd, 0xd4, 0x70, 0x72, 0x43, 0x07, 0x00, 0x39, 0x4d, 0xb9, 0xd4, 0x32, 0xde,
|
||||
0x9a, 0xa5, 0x3a, 0x87, 0x1f, 0xa1, 0x9e, 0x4b, 0x06, 0x6a, 0x41, 0xe3, 0x78, 0x72, 0x36, 0xc5,
|
||||
0x03, 0xd3, 0xec, 0x1f, 0x8d, 0x06, 0xad, 0x47, 0x08, 0x41, 0xf3, 0x62, 0xbc, 0x81, 0x69, 0x08,
|
||||
0xa0, 0x82, 0xfb, 0xe3, 0x93, 0xc9, 0x59, 0xab, 0x70, 0xd8, 0x83, 0x9d, 0x1b, 0xfb, 0x40, 0x35,
|
||||
0x28, 0x8d, 0x27, 0x63, 0x59, 0x5c, 0x83, 0xd2, 0xa7, 0xaf, 0xc3, 0x69, 0x4b, 0x43, 0x75, 0xa8,
|
||||
0x9e, 0x0c, 0x4e, 0x47, 0xfd, 0xf3, 0x41, 0xab, 0x30, 0xab, 0xa8, 0xbf, 0x9a, 0x37, 0x7f, 0x02,
|
||||
0x00, 0x00, 0xff, 0xff, 0xc2, 0x6a, 0xce, 0x1e, 0x7c, 0x06, 0x00, 0x00,
|
||||
}
|
157
vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.proto
generated
vendored
Normal file
@@ -0,0 +1,157 @@
// Copyright 2016 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Message definitions to be used by integration test service definitions.

syntax = "proto3";

package grpc.testing;

// The type of payload that should be returned.
enum PayloadType {
  // Compressable text format.
  COMPRESSABLE = 0;

  // Uncompressable binary format.
  UNCOMPRESSABLE = 1;

  // Randomly chosen from all other formats defined in this enum.
  RANDOM = 2;
}

// Compression algorithms
enum CompressionType {
  // No compression
  NONE = 0;
  GZIP = 1;
  DEFLATE = 2;
}

// A block of data, to simply increase gRPC message size.
message Payload {
  // The type of data in body.
  PayloadType type = 1;
  // Primary contents of payload.
  bytes body = 2;
}

// A protobuf representation for grpc status. This is used by test
// clients to specify a status that the server should attempt to return.
message EchoStatus {
  int32 code = 1;
  string message = 2;
}

// Unary request.
message SimpleRequest {
  // Desired payload type in the response from the server.
  // If response_type is RANDOM, server randomly chooses one from other formats.
  PayloadType response_type = 1;

  // Desired payload size in the response from the server.
  // If response_type is COMPRESSABLE, this denotes the size before compression.
  int32 response_size = 2;

  // Optional input payload sent along with the request.
  Payload payload = 3;

  // Whether SimpleResponse should include username.
  bool fill_username = 4;

  // Whether SimpleResponse should include OAuth scope.
  bool fill_oauth_scope = 5;

  // Compression algorithm to be used by the server for the response (stream)
  CompressionType response_compression = 6;

  // Whether server should return a given status
  EchoStatus response_status = 7;
}

// Unary response, as configured by the request.
message SimpleResponse {
  // Payload to increase message size.
  Payload payload = 1;
  // The user the request came from, for verifying authentication was
  // successful when the client expected it.
  string username = 2;
  // OAuth scope.
  string oauth_scope = 3;
}

// Client-streaming request.
message StreamingInputCallRequest {
  // Optional input payload sent along with the request.
  Payload payload = 1;

  // Not expecting any payload from the response.
}

// Client-streaming response.
message StreamingInputCallResponse {
  // Aggregated size of payloads received from the client.
  int32 aggregated_payload_size = 1;
}

// Configuration for a particular response.
message ResponseParameters {
  // Desired payload sizes in responses from the server.
  // If response_type is COMPRESSABLE, this denotes the size before compression.
  int32 size = 1;

  // Desired interval between consecutive responses in the response stream in
  // microseconds.
  int32 interval_us = 2;
}

// Server-streaming request.
message StreamingOutputCallRequest {
  // Desired payload type in the response from the server.
  // If response_type is RANDOM, the payload from each response in the stream
  // might be of different types. This is to simulate a mixed type of payload
  // stream.
  PayloadType response_type = 1;

  // Configuration for each expected response message.
  repeated ResponseParameters response_parameters = 2;

  // Optional input payload sent along with the request.
  Payload payload = 3;

  // Compression algorithm to be used by the server for the response (stream)
  CompressionType response_compression = 6;

  // Whether server should return a given status
  EchoStatus response_status = 7;
}

// Server-streaming response, as configured by the request and parameters.
message StreamingOutputCallResponse {
  // Payload to increase response size.
  Payload payload = 1;
}

// For reconnect interop test only.
// Client tells server what reconnection parameters it used.
message ReconnectParams {
  int32 max_reconnect_backoff_ms = 1;
}

// For reconnect interop test only.
// Server tells client whether its reconnects are following the spec and the
// reconnect backoffs it saw.
message ReconnectInfo {
  bool passed = 1;
  repeated int32 backoff_ms = 2;
}
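As a quick orientation to how these messages are used from Go, here is a minimal sketch (not part of the diff) that builds the unary benchmark request with the generated types from messages.pb.go above.

package main

import (
	"fmt"

	testpb "google.golang.org/grpc/benchmark/grpc_testing"
)

func main() {
	// Ask the server for a 1 KiB compressable response and attach a 1 KiB
	// request payload, mirroring the SimpleRequest fields defined above.
	body := make([]byte, 1024)
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: 1024,
		Payload: &testpb.Payload{
			Type: testpb.PayloadType_COMPRESSABLE,
			Body: body,
		},
	}
	// The generated getters are nil-safe, which is why callers can chain them
	// without checking intermediate pointers.
	fmt.Println("request payload bytes:", len(req.GetPayload().GetBody()))
}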
348
vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.pb.go
generated
vendored
Normal file
@@ -0,0 +1,348 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: payloads.proto
|
||||
|
||||
package grpc_testing
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type ByteBufferParams struct {
|
||||
ReqSize int32 `protobuf:"varint,1,opt,name=req_size,json=reqSize,proto3" json:"req_size,omitempty"`
|
||||
RespSize int32 `protobuf:"varint,2,opt,name=resp_size,json=respSize,proto3" json:"resp_size,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ByteBufferParams) Reset() { *m = ByteBufferParams{} }
|
||||
func (m *ByteBufferParams) String() string { return proto.CompactTextString(m) }
|
||||
func (*ByteBufferParams) ProtoMessage() {}
|
||||
func (*ByteBufferParams) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_payloads_3abc71de35f06c83, []int{0}
|
||||
}
|
||||
func (m *ByteBufferParams) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ByteBufferParams.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ByteBufferParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ByteBufferParams.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *ByteBufferParams) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ByteBufferParams.Merge(dst, src)
|
||||
}
|
||||
func (m *ByteBufferParams) XXX_Size() int {
|
||||
return xxx_messageInfo_ByteBufferParams.Size(m)
|
||||
}
|
||||
func (m *ByteBufferParams) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ByteBufferParams.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ByteBufferParams proto.InternalMessageInfo
|
||||
|
||||
func (m *ByteBufferParams) GetReqSize() int32 {
|
||||
if m != nil {
|
||||
return m.ReqSize
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ByteBufferParams) GetRespSize() int32 {
|
||||
if m != nil {
|
||||
return m.RespSize
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type SimpleProtoParams struct {
|
||||
ReqSize int32 `protobuf:"varint,1,opt,name=req_size,json=reqSize,proto3" json:"req_size,omitempty"`
|
||||
RespSize int32 `protobuf:"varint,2,opt,name=resp_size,json=respSize,proto3" json:"resp_size,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SimpleProtoParams) Reset() { *m = SimpleProtoParams{} }
|
||||
func (m *SimpleProtoParams) String() string { return proto.CompactTextString(m) }
|
||||
func (*SimpleProtoParams) ProtoMessage() {}
|
||||
func (*SimpleProtoParams) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_payloads_3abc71de35f06c83, []int{1}
|
||||
}
|
||||
func (m *SimpleProtoParams) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SimpleProtoParams.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SimpleProtoParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SimpleProtoParams.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *SimpleProtoParams) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SimpleProtoParams.Merge(dst, src)
|
||||
}
|
||||
func (m *SimpleProtoParams) XXX_Size() int {
|
||||
return xxx_messageInfo_SimpleProtoParams.Size(m)
|
||||
}
|
||||
func (m *SimpleProtoParams) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SimpleProtoParams.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SimpleProtoParams proto.InternalMessageInfo
|
||||
|
||||
func (m *SimpleProtoParams) GetReqSize() int32 {
|
||||
if m != nil {
|
||||
return m.ReqSize
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *SimpleProtoParams) GetRespSize() int32 {
|
||||
if m != nil {
|
||||
return m.RespSize
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type ComplexProtoParams struct {
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ComplexProtoParams) Reset() { *m = ComplexProtoParams{} }
|
||||
func (m *ComplexProtoParams) String() string { return proto.CompactTextString(m) }
|
||||
func (*ComplexProtoParams) ProtoMessage() {}
|
||||
func (*ComplexProtoParams) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_payloads_3abc71de35f06c83, []int{2}
|
||||
}
|
||||
func (m *ComplexProtoParams) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ComplexProtoParams.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ComplexProtoParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ComplexProtoParams.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *ComplexProtoParams) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ComplexProtoParams.Merge(dst, src)
|
||||
}
|
||||
func (m *ComplexProtoParams) XXX_Size() int {
|
||||
return xxx_messageInfo_ComplexProtoParams.Size(m)
|
||||
}
|
||||
func (m *ComplexProtoParams) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ComplexProtoParams.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ComplexProtoParams proto.InternalMessageInfo
|
||||
|
||||
type PayloadConfig struct {
|
||||
// Types that are valid to be assigned to Payload:
|
||||
// *PayloadConfig_BytebufParams
|
||||
// *PayloadConfig_SimpleParams
|
||||
// *PayloadConfig_ComplexParams
|
||||
Payload isPayloadConfig_Payload `protobuf_oneof:"payload"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *PayloadConfig) Reset() { *m = PayloadConfig{} }
|
||||
func (m *PayloadConfig) String() string { return proto.CompactTextString(m) }
|
||||
func (*PayloadConfig) ProtoMessage() {}
|
||||
func (*PayloadConfig) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_payloads_3abc71de35f06c83, []int{3}
|
||||
}
|
||||
func (m *PayloadConfig) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_PayloadConfig.Unmarshal(m, b)
|
||||
}
|
||||
func (m *PayloadConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_PayloadConfig.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *PayloadConfig) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_PayloadConfig.Merge(dst, src)
|
||||
}
|
||||
func (m *PayloadConfig) XXX_Size() int {
|
||||
return xxx_messageInfo_PayloadConfig.Size(m)
|
||||
}
|
||||
func (m *PayloadConfig) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_PayloadConfig.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_PayloadConfig proto.InternalMessageInfo
|
||||
|
||||
type isPayloadConfig_Payload interface {
|
||||
isPayloadConfig_Payload()
|
||||
}
|
||||
|
||||
type PayloadConfig_BytebufParams struct {
|
||||
BytebufParams *ByteBufferParams `protobuf:"bytes,1,opt,name=bytebuf_params,json=bytebufParams,proto3,oneof"`
|
||||
}
|
||||
|
||||
type PayloadConfig_SimpleParams struct {
|
||||
SimpleParams *SimpleProtoParams `protobuf:"bytes,2,opt,name=simple_params,json=simpleParams,proto3,oneof"`
|
||||
}
|
||||
|
||||
type PayloadConfig_ComplexParams struct {
|
||||
ComplexParams *ComplexProtoParams `protobuf:"bytes,3,opt,name=complex_params,json=complexParams,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*PayloadConfig_BytebufParams) isPayloadConfig_Payload() {}
|
||||
|
||||
func (*PayloadConfig_SimpleParams) isPayloadConfig_Payload() {}
|
||||
|
||||
func (*PayloadConfig_ComplexParams) isPayloadConfig_Payload() {}
|
||||
|
||||
func (m *PayloadConfig) GetPayload() isPayloadConfig_Payload {
|
||||
if m != nil {
|
||||
return m.Payload
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *PayloadConfig) GetBytebufParams() *ByteBufferParams {
|
||||
if x, ok := m.GetPayload().(*PayloadConfig_BytebufParams); ok {
|
||||
return x.BytebufParams
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *PayloadConfig) GetSimpleParams() *SimpleProtoParams {
|
||||
if x, ok := m.GetPayload().(*PayloadConfig_SimpleParams); ok {
|
||||
return x.SimpleParams
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *PayloadConfig) GetComplexParams() *ComplexProtoParams {
|
||||
if x, ok := m.GetPayload().(*PayloadConfig_ComplexParams); ok {
|
||||
return x.ComplexParams
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// XXX_OneofFuncs is for the internal use of the proto package.
|
||||
func (*PayloadConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
|
||||
return _PayloadConfig_OneofMarshaler, _PayloadConfig_OneofUnmarshaler, _PayloadConfig_OneofSizer, []interface{}{
|
||||
(*PayloadConfig_BytebufParams)(nil),
|
||||
(*PayloadConfig_SimpleParams)(nil),
|
||||
(*PayloadConfig_ComplexParams)(nil),
|
||||
}
|
||||
}
|
||||
|
||||
func _PayloadConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
|
||||
m := msg.(*PayloadConfig)
|
||||
// payload
|
||||
switch x := m.Payload.(type) {
|
||||
case *PayloadConfig_BytebufParams:
|
||||
b.EncodeVarint(1<<3 | proto.WireBytes)
|
||||
if err := b.EncodeMessage(x.BytebufParams); err != nil {
|
||||
return err
|
||||
}
|
||||
case *PayloadConfig_SimpleParams:
|
||||
b.EncodeVarint(2<<3 | proto.WireBytes)
|
||||
if err := b.EncodeMessage(x.SimpleParams); err != nil {
|
||||
return err
|
||||
}
|
||||
case *PayloadConfig_ComplexParams:
|
||||
b.EncodeVarint(3<<3 | proto.WireBytes)
|
||||
if err := b.EncodeMessage(x.ComplexParams); err != nil {
|
||||
return err
|
||||
}
|
||||
case nil:
|
||||
default:
|
||||
return fmt.Errorf("PayloadConfig.Payload has unexpected type %T", x)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _PayloadConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
|
||||
m := msg.(*PayloadConfig)
|
||||
switch tag {
|
||||
case 1: // payload.bytebuf_params
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
msg := new(ByteBufferParams)
|
||||
err := b.DecodeMessage(msg)
|
||||
m.Payload = &PayloadConfig_BytebufParams{msg}
|
||||
return true, err
|
||||
case 2: // payload.simple_params
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
msg := new(SimpleProtoParams)
|
||||
err := b.DecodeMessage(msg)
|
||||
m.Payload = &PayloadConfig_SimpleParams{msg}
|
||||
return true, err
|
||||
case 3: // payload.complex_params
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
msg := new(ComplexProtoParams)
|
||||
err := b.DecodeMessage(msg)
|
||||
m.Payload = &PayloadConfig_ComplexParams{msg}
|
||||
return true, err
|
||||
default:
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
func _PayloadConfig_OneofSizer(msg proto.Message) (n int) {
|
||||
m := msg.(*PayloadConfig)
|
||||
// payload
|
||||
switch x := m.Payload.(type) {
|
||||
case *PayloadConfig_BytebufParams:
|
||||
s := proto.Size(x.BytebufParams)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case *PayloadConfig_SimpleParams:
|
||||
s := proto.Size(x.SimpleParams)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case *PayloadConfig_ComplexParams:
|
||||
s := proto.Size(x.ComplexParams)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case nil:
|
||||
default:
|
||||
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*ByteBufferParams)(nil), "grpc.testing.ByteBufferParams")
|
||||
proto.RegisterType((*SimpleProtoParams)(nil), "grpc.testing.SimpleProtoParams")
|
||||
proto.RegisterType((*ComplexProtoParams)(nil), "grpc.testing.ComplexProtoParams")
|
||||
proto.RegisterType((*PayloadConfig)(nil), "grpc.testing.PayloadConfig")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("payloads.proto", fileDescriptor_payloads_3abc71de35f06c83) }
|
||||
|
||||
var fileDescriptor_payloads_3abc71de35f06c83 = []byte{
|
||||
// 254 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x48, 0xac, 0xcc,
|
||||
0xc9, 0x4f, 0x4c, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x49, 0x2f, 0x2a, 0x48,
|
||||
0xd6, 0x2b, 0x49, 0x2d, 0x2e, 0xc9, 0xcc, 0x4b, 0x57, 0xf2, 0xe2, 0x12, 0x70, 0xaa, 0x2c, 0x49,
|
||||
0x75, 0x2a, 0x4d, 0x4b, 0x4b, 0x2d, 0x0a, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0x16, 0x92, 0xe4, 0xe2,
|
||||
0x28, 0x4a, 0x2d, 0x8c, 0x2f, 0xce, 0xac, 0x4a, 0x95, 0x60, 0x54, 0x60, 0xd4, 0x60, 0x0d, 0x62,
|
||||
0x2f, 0x4a, 0x2d, 0x0c, 0xce, 0xac, 0x4a, 0x15, 0x92, 0xe6, 0xe2, 0x2c, 0x4a, 0x2d, 0x2e, 0x80,
|
||||
0xc8, 0x31, 0x81, 0xe5, 0x38, 0x40, 0x02, 0x20, 0x49, 0x25, 0x6f, 0x2e, 0xc1, 0xe0, 0xcc, 0xdc,
|
||||
0x82, 0x9c, 0xd4, 0x00, 0x90, 0x45, 0x14, 0x1a, 0x26, 0xc2, 0x25, 0xe4, 0x9c, 0x0f, 0x32, 0xac,
|
||||
0x02, 0xc9, 0x34, 0xa5, 0x6f, 0x8c, 0x5c, 0xbc, 0x01, 0x10, 0xff, 0x38, 0xe7, 0xe7, 0xa5, 0x65,
|
||||
0xa6, 0x0b, 0xb9, 0x73, 0xf1, 0x25, 0x55, 0x96, 0xa4, 0x26, 0x95, 0xa6, 0xc5, 0x17, 0x80, 0xd5,
|
||||
0x80, 0x6d, 0xe1, 0x36, 0x92, 0xd3, 0x43, 0xf6, 0xa7, 0x1e, 0xba, 0x27, 0x3d, 0x18, 0x82, 0x78,
|
||||
0xa1, 0xfa, 0xa0, 0x0e, 0x75, 0xe3, 0xe2, 0x2d, 0x06, 0xbb, 0x1e, 0x66, 0x0e, 0x13, 0xd8, 0x1c,
|
||||
0x79, 0x54, 0x73, 0x30, 0x3c, 0xe8, 0xc1, 0x10, 0xc4, 0x03, 0xd1, 0x07, 0x35, 0xc7, 0x93, 0x8b,
|
||||
0x2f, 0x19, 0xe2, 0x70, 0x98, 0x41, 0xcc, 0x60, 0x83, 0x14, 0x50, 0x0d, 0xc2, 0xf4, 0x1c, 0xc8,
|
||||
0x49, 0x50, 0x9d, 0x10, 0x01, 0x27, 0x4e, 0x2e, 0x76, 0x68, 0xe4, 0x25, 0xb1, 0x81, 0x23, 0xcf,
|
||||
0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x8c, 0x18, 0x4e, 0xce, 0x01, 0x00, 0x00,
|
||||
}
|
40
vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.proto
generated
vendored
Normal file
|
@@ -0,0 +1,40 @@
|
|||
// Copyright 2016 gRPC authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package grpc.testing;
|
||||
|
||||
message ByteBufferParams {
|
||||
int32 req_size = 1;
|
||||
int32 resp_size = 2;
|
||||
}
|
||||
|
||||
message SimpleProtoParams {
|
||||
int32 req_size = 1;
|
||||
int32 resp_size = 2;
|
||||
}
|
||||
|
||||
message ComplexProtoParams {
|
||||
// TODO (vpai): Fill this in once the details of complex, representative
|
||||
// protos are decided
|
||||
}
|
||||
|
||||
message PayloadConfig {
|
||||
oneof payload {
|
||||
ByteBufferParams bytebuf_params = 1;
|
||||
SimpleProtoParams simple_params = 2;
|
||||
ComplexProtoParams complex_params = 3;
|
||||
}
|
||||
}
|
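In the generated Go code earlier in this diff, each branch of the payload oneof becomes a wrapper struct (PayloadConfig_BytebufParams, PayloadConfig_SimpleParams, PayloadConfig_ComplexParams). As a minimal sketch, assuming the usual protoc-gen-go field naming (ReqSize, RespSize) for the messages above, building and reading back a config looks like this; the package and function names are illustrative:

package benchexample

import (
	"fmt"

	testpb "google.golang.org/grpc/benchmark/grpc_testing"
)

// describePayload builds a PayloadConfig that selects the simple_params
// branch of the oneof and then reads it back with a type switch.
func describePayload() {
	cfg := &testpb.PayloadConfig{
		Payload: &testpb.PayloadConfig_SimpleParams{
			SimpleParams: &testpb.SimpleProtoParams{ReqSize: 1024, RespSize: 1024},
		},
	}
	switch p := cfg.Payload.(type) {
	case *testpb.PayloadConfig_SimpleParams:
		fmt.Printf("proto payload: req=%d resp=%d\n", p.SimpleParams.GetReqSize(), p.SimpleParams.GetRespSize())
	case *testpb.PayloadConfig_BytebufParams:
		fmt.Printf("bytebuf payload: req=%d resp=%d\n", p.BytebufParams.GetReqSize(), p.BytebufParams.GetRespSize())
	default:
		fmt.Println("no payload configured")
	}
}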
448
vendor/google.golang.org/grpc/benchmark/grpc_testing/services.pb.go
generated
vendored
Normal file
|
@@ -0,0 +1,448 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: services.proto
|
||||
|
||||
package grpc_testing
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
import (
|
||||
context "golang.org/x/net/context"
|
||||
grpc "google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// BenchmarkServiceClient is the client API for BenchmarkService service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type BenchmarkServiceClient interface {
|
||||
// One request followed by one response.
|
||||
// The server returns the client payload as-is.
|
||||
UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error)
|
||||
// One request followed by one response.
|
||||
// The server returns the client payload as-is.
|
||||
StreamingCall(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingCallClient, error)
|
||||
}
|
||||
|
||||
type benchmarkServiceClient struct {
|
||||
cc *grpc.ClientConn
|
||||
}
|
||||
|
||||
func NewBenchmarkServiceClient(cc *grpc.ClientConn) BenchmarkServiceClient {
|
||||
return &benchmarkServiceClient{cc}
|
||||
}
|
||||
|
||||
func (c *benchmarkServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) {
|
||||
out := new(SimpleResponse)
|
||||
err := c.cc.Invoke(ctx, "/grpc.testing.BenchmarkService/UnaryCall", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *benchmarkServiceClient) StreamingCall(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingCallClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &_BenchmarkService_serviceDesc.Streams[0], "/grpc.testing.BenchmarkService/StreamingCall", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &benchmarkServiceStreamingCallClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type BenchmarkService_StreamingCallClient interface {
|
||||
Send(*SimpleRequest) error
|
||||
Recv() (*SimpleResponse, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type benchmarkServiceStreamingCallClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *benchmarkServiceStreamingCallClient) Send(m *SimpleRequest) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *benchmarkServiceStreamingCallClient) Recv() (*SimpleResponse, error) {
|
||||
m := new(SimpleResponse)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// BenchmarkServiceServer is the server API for BenchmarkService service.
|
||||
type BenchmarkServiceServer interface {
|
||||
// One request followed by one response.
|
||||
// The server returns the client payload as-is.
|
||||
UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error)
|
||||
// One request followed by one response.
|
||||
// The server returns the client payload as-is.
|
||||
StreamingCall(BenchmarkService_StreamingCallServer) error
|
||||
}
|
||||
|
||||
func RegisterBenchmarkServiceServer(s *grpc.Server, srv BenchmarkServiceServer) {
|
||||
s.RegisterService(&_BenchmarkService_serviceDesc, srv)
|
||||
}
|
||||
|
||||
func _BenchmarkService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(SimpleRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(BenchmarkServiceServer).UnaryCall(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/grpc.testing.BenchmarkService/UnaryCall",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(BenchmarkServiceServer).UnaryCall(ctx, req.(*SimpleRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _BenchmarkService_StreamingCall_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
return srv.(BenchmarkServiceServer).StreamingCall(&benchmarkServiceStreamingCallServer{stream})
|
||||
}
|
||||
|
||||
type BenchmarkService_StreamingCallServer interface {
|
||||
Send(*SimpleResponse) error
|
||||
Recv() (*SimpleRequest, error)
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type benchmarkServiceStreamingCallServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *benchmarkServiceStreamingCallServer) Send(m *SimpleResponse) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *benchmarkServiceStreamingCallServer) Recv() (*SimpleRequest, error) {
|
||||
m := new(SimpleRequest)
|
||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
var _BenchmarkService_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "grpc.testing.BenchmarkService",
|
||||
HandlerType: (*BenchmarkServiceServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "UnaryCall",
|
||||
Handler: _BenchmarkService_UnaryCall_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{
|
||||
{
|
||||
StreamName: "StreamingCall",
|
||||
Handler: _BenchmarkService_StreamingCall_Handler,
|
||||
ServerStreams: true,
|
||||
ClientStreams: true,
|
||||
},
|
||||
},
|
||||
Metadata: "services.proto",
|
||||
}
|
||||
|
||||
// WorkerServiceClient is the client API for WorkerService service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type WorkerServiceClient interface {
|
||||
// Start server with specified workload.
|
||||
// First request sent specifies the ServerConfig followed by ServerStatus
|
||||
// response. After that, a "Mark" can be sent anytime to request the latest
|
||||
// stats. Closing the stream will initiate shutdown of the test server
|
||||
// and once the shutdown has finished, the OK status is sent to terminate
|
||||
// this RPC.
|
||||
RunServer(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunServerClient, error)
|
||||
// Start client with specified workload.
|
||||
// First request sent specifies the ClientConfig followed by ClientStatus
|
||||
// response. After that, a "Mark" can be sent anytime to request the latest
|
||||
// stats. Closing the stream will initiate shutdown of the test client
|
||||
// and once the shutdown has finished, the OK status is sent to terminate
|
||||
// this RPC.
|
||||
RunClient(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunClientClient, error)
|
||||
// Just return the core count - unary call
|
||||
CoreCount(ctx context.Context, in *CoreRequest, opts ...grpc.CallOption) (*CoreResponse, error)
|
||||
// Quit this worker
|
||||
QuitWorker(ctx context.Context, in *Void, opts ...grpc.CallOption) (*Void, error)
|
||||
}
|
||||
|
||||
type workerServiceClient struct {
|
||||
cc *grpc.ClientConn
|
||||
}
|
||||
|
||||
func NewWorkerServiceClient(cc *grpc.ClientConn) WorkerServiceClient {
|
||||
return &workerServiceClient{cc}
|
||||
}
|
||||
|
||||
func (c *workerServiceClient) RunServer(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunServerClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &_WorkerService_serviceDesc.Streams[0], "/grpc.testing.WorkerService/RunServer", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &workerServiceRunServerClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type WorkerService_RunServerClient interface {
|
||||
Send(*ServerArgs) error
|
||||
Recv() (*ServerStatus, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type workerServiceRunServerClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *workerServiceRunServerClient) Send(m *ServerArgs) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *workerServiceRunServerClient) Recv() (*ServerStatus, error) {
|
||||
m := new(ServerStatus)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *workerServiceClient) RunClient(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunClientClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &_WorkerService_serviceDesc.Streams[1], "/grpc.testing.WorkerService/RunClient", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &workerServiceRunClientClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type WorkerService_RunClientClient interface {
|
||||
Send(*ClientArgs) error
|
||||
Recv() (*ClientStatus, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type workerServiceRunClientClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *workerServiceRunClientClient) Send(m *ClientArgs) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *workerServiceRunClientClient) Recv() (*ClientStatus, error) {
|
||||
m := new(ClientStatus)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *workerServiceClient) CoreCount(ctx context.Context, in *CoreRequest, opts ...grpc.CallOption) (*CoreResponse, error) {
|
||||
out := new(CoreResponse)
|
||||
err := c.cc.Invoke(ctx, "/grpc.testing.WorkerService/CoreCount", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *workerServiceClient) QuitWorker(ctx context.Context, in *Void, opts ...grpc.CallOption) (*Void, error) {
|
||||
out := new(Void)
|
||||
err := c.cc.Invoke(ctx, "/grpc.testing.WorkerService/QuitWorker", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// WorkerServiceServer is the server API for WorkerService service.
|
||||
type WorkerServiceServer interface {
|
||||
// Start server with specified workload.
|
||||
// First request sent specifies the ServerConfig followed by ServerStatus
|
||||
// response. After that, a "Mark" can be sent anytime to request the latest
|
||||
// stats. Closing the stream will initiate shutdown of the test server
|
||||
// and once the shutdown has finished, the OK status is sent to terminate
|
||||
// this RPC.
|
||||
RunServer(WorkerService_RunServerServer) error
|
||||
// Start client with specified workload.
|
||||
// First request sent specifies the ClientConfig followed by ClientStatus
|
||||
// response. After that, a "Mark" can be sent anytime to request the latest
|
||||
// stats. Closing the stream will initiate shutdown of the test client
|
||||
// and once the shutdown has finished, the OK status is sent to terminate
|
||||
// this RPC.
|
||||
RunClient(WorkerService_RunClientServer) error
|
||||
// Just return the core count - unary call
|
||||
CoreCount(context.Context, *CoreRequest) (*CoreResponse, error)
|
||||
// Quit this worker
|
||||
QuitWorker(context.Context, *Void) (*Void, error)
|
||||
}
|
||||
|
||||
func RegisterWorkerServiceServer(s *grpc.Server, srv WorkerServiceServer) {
|
||||
s.RegisterService(&_WorkerService_serviceDesc, srv)
|
||||
}
|
||||
|
||||
func _WorkerService_RunServer_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
return srv.(WorkerServiceServer).RunServer(&workerServiceRunServerServer{stream})
|
||||
}
|
||||
|
||||
type WorkerService_RunServerServer interface {
|
||||
Send(*ServerStatus) error
|
||||
Recv() (*ServerArgs, error)
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type workerServiceRunServerServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *workerServiceRunServerServer) Send(m *ServerStatus) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *workerServiceRunServerServer) Recv() (*ServerArgs, error) {
|
||||
m := new(ServerArgs)
|
||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func _WorkerService_RunClient_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
return srv.(WorkerServiceServer).RunClient(&workerServiceRunClientServer{stream})
|
||||
}
|
||||
|
||||
type WorkerService_RunClientServer interface {
|
||||
Send(*ClientStatus) error
|
||||
Recv() (*ClientArgs, error)
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type workerServiceRunClientServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *workerServiceRunClientServer) Send(m *ClientStatus) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *workerServiceRunClientServer) Recv() (*ClientArgs, error) {
|
||||
m := new(ClientArgs)
|
||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func _WorkerService_CoreCount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(CoreRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(WorkerServiceServer).CoreCount(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/grpc.testing.WorkerService/CoreCount",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(WorkerServiceServer).CoreCount(ctx, req.(*CoreRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _WorkerService_QuitWorker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(Void)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(WorkerServiceServer).QuitWorker(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/grpc.testing.WorkerService/QuitWorker",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(WorkerServiceServer).QuitWorker(ctx, req.(*Void))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
var _WorkerService_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "grpc.testing.WorkerService",
|
||||
HandlerType: (*WorkerServiceServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "CoreCount",
|
||||
Handler: _WorkerService_CoreCount_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "QuitWorker",
|
||||
Handler: _WorkerService_QuitWorker_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{
|
||||
{
|
||||
StreamName: "RunServer",
|
||||
Handler: _WorkerService_RunServer_Handler,
|
||||
ServerStreams: true,
|
||||
ClientStreams: true,
|
||||
},
|
||||
{
|
||||
StreamName: "RunClient",
|
||||
Handler: _WorkerService_RunClient_Handler,
|
||||
ServerStreams: true,
|
||||
ClientStreams: true,
|
||||
},
|
||||
},
|
||||
Metadata: "services.proto",
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("services.proto", fileDescriptor_services_bf68f4d7cbd0e0a1) }
|
||||
|
||||
var fileDescriptor_services_bf68f4d7cbd0e0a1 = []byte{
|
||||
// 255 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x91, 0xc1, 0x4a, 0xc4, 0x30,
|
||||
0x10, 0x86, 0xa9, 0x07, 0xa1, 0xc1, 0x2e, 0x92, 0x93, 0x46, 0x1f, 0xc0, 0x53, 0x91, 0xd5, 0x17,
|
||||
0x70, 0x8b, 0x1e, 0x05, 0xb7, 0xa8, 0xe7, 0x58, 0x87, 0x1a, 0x36, 0xcd, 0xd4, 0x99, 0x89, 0xe0,
|
||||
0x93, 0xf8, 0x0e, 0x3e, 0xa5, 0xec, 0x66, 0x57, 0xd6, 0x92, 0x9b, 0xc7, 0xf9, 0xbf, 0xe1, 0x23,
|
||||
0x7f, 0x46, 0xcd, 0x18, 0xe8, 0xc3, 0x75, 0xc0, 0xf5, 0x48, 0x28, 0xa8, 0x8f, 0x7a, 0x1a, 0xbb,
|
||||
0x5a, 0x80, 0xc5, 0x85, 0xde, 0xcc, 0x06, 0x60, 0xb6, 0xfd, 0x8e, 0x9a, 0xaa, 0xc3, 0x20, 0x84,
|
||||
0x3e, 0x8d, 0xf3, 0xef, 0x42, 0x1d, 0x2f, 0x20, 0x74, 0x6f, 0x83, 0xa5, 0x55, 0x9b, 0x44, 0xfa,
|
||||
0x4e, 0x95, 0x8f, 0xc1, 0xd2, 0x67, 0x63, 0xbd, 0xd7, 0x67, 0xf5, 0xbe, 0xaf, 0x6e, 0xdd, 0x30,
|
||||
0x7a, 0x58, 0xc2, 0x7b, 0x04, 0x16, 0x73, 0x9e, 0x87, 0x3c, 0x62, 0x60, 0xd0, 0xf7, 0xaa, 0x6a,
|
||||
0x85, 0xc0, 0x0e, 0x2e, 0xf4, 0xff, 0x74, 0x5d, 0x14, 0x97, 0xc5, 0xfc, 0xeb, 0x40, 0x55, 0xcf,
|
||||
0x48, 0x2b, 0xa0, 0xdd, 0x4b, 0x6f, 0x55, 0xb9, 0x8c, 0x61, 0x3d, 0x01, 0xe9, 0x93, 0x89, 0x60,
|
||||
0x93, 0xde, 0x50, 0xcf, 0xc6, 0xe4, 0x48, 0x2b, 0x56, 0x22, 0xaf, 0xc5, 0x5b, 0x4d, 0xe3, 0x1d,
|
||||
0x04, 0x99, 0x6a, 0x52, 0x9a, 0xd3, 0x24, 0xb2, 0xa7, 0x59, 0xa8, 0xb2, 0x41, 0x82, 0x06, 0x63,
|
||||
0x10, 0x7d, 0x3a, 0x59, 0x46, 0xfa, 0x6d, 0x6a, 0x72, 0x68, 0xfb, 0x67, 0xd7, 0x4a, 0x3d, 0x44,
|
||||
0x27, 0xa9, 0xa6, 0xd6, 0x7f, 0x37, 0x9f, 0xd0, 0xbd, 0x9a, 0x4c, 0xf6, 0x72, 0xb8, 0xb9, 0xe6,
|
||||
0xd5, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3b, 0x84, 0x02, 0xe3, 0x0c, 0x02, 0x00, 0x00,
|
||||
}
|
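The benchmark harness drives this generated client; as a rough sketch, a one-off unary call looks like the following, where the address and the insecure dial option are illustrative rather than taken from this package:

package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	testpb "google.golang.org/grpc/benchmark/grpc_testing"
)

func main() {
	// Dial a benchmark server; the address is a placeholder.
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := testpb.NewBenchmarkServiceClient(conn)
	if _, err := client.UnaryCall(context.Background(), &testpb.SimpleRequest{}); err != nil {
		log.Fatalf("UnaryCall: %v", err)
	}
	log.Println("UnaryCall round trip completed")
}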
56
vendor/google.golang.org/grpc/benchmark/grpc_testing/services.proto
generated
vendored
Normal file
|
@@ -0,0 +1,56 @@
|
|||
// Copyright 2016 gRPC authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// An integration test service that covers all the method signature permutations
|
||||
// of unary/streaming requests/responses.
|
||||
syntax = "proto3";
|
||||
|
||||
import "messages.proto";
|
||||
import "control.proto";
|
||||
|
||||
package grpc.testing;
|
||||
|
||||
service BenchmarkService {
|
||||
// One request followed by one response.
|
||||
// The server returns the client payload as-is.
|
||||
rpc UnaryCall(SimpleRequest) returns (SimpleResponse);
|
||||
|
||||
// One request followed by one response.
|
||||
// The server returns the client payload as-is.
|
||||
rpc StreamingCall(stream SimpleRequest) returns (stream SimpleResponse);
|
||||
}
|
||||
|
||||
service WorkerService {
|
||||
// Start server with specified workload.
|
||||
// First request sent specifies the ServerConfig followed by ServerStatus
|
||||
// response. After that, a "Mark" can be sent anytime to request the latest
|
||||
// stats. Closing the stream will initiate shutdown of the test server
|
||||
// and once the shutdown has finished, the OK status is sent to terminate
|
||||
// this RPC.
|
||||
rpc RunServer(stream ServerArgs) returns (stream ServerStatus);
|
||||
|
||||
// Start client with specified workload.
|
||||
// First request sent specifies the ClientConfig followed by ClientStatus
|
||||
// response. After that, a "Mark" can be sent anytime to request the latest
|
||||
// stats. Closing the stream will initiate shutdown of the test client
|
||||
// and once the shutdown has finished, the OK status is sent to terminate
|
||||
// this RPC.
|
||||
rpc RunClient(stream ClientArgs) returns (stream ClientStatus);
|
||||
|
||||
// Just return the core count - unary call
|
||||
rpc CoreCount(CoreRequest) returns (CoreResponse);
|
||||
|
||||
// Quit this worker
|
||||
rpc QuitWorker(Void) returns (Void);
|
||||
}
|
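StreamingCall above is fully bidirectional, which is what the generated BenchmarkService_StreamingCallClient exposes through its Send/Recv pair. A minimal sketch of one echo round trip over an existing *grpc.ClientConn (supplied by the caller, not created here; the function name is illustrative):

package benchexample

import (
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	testpb "google.golang.org/grpc/benchmark/grpc_testing"
)

// echoOnce opens the bidirectional stream, sends a single request, waits for
// the echoed response, and then half-closes the send side.
func echoOnce(ctx context.Context, conn *grpc.ClientConn) error {
	client := testpb.NewBenchmarkServiceClient(conn)
	stream, err := client.StreamingCall(ctx)
	if err != nil {
		return err
	}
	if err := stream.Send(&testpb.SimpleRequest{}); err != nil {
		return err
	}
	if _, err := stream.Recv(); err != nil {
		return err
	}
	return stream.CloseSend()
}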
302
vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.pb.go
generated
vendored
Normal file
|
@@ -0,0 +1,302 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: stats.proto
|
||||
|
||||
package grpc_testing
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type ServerStats struct {
|
||||
// wall clock time change in seconds since last reset
|
||||
TimeElapsed float64 `protobuf:"fixed64,1,opt,name=time_elapsed,json=timeElapsed,proto3" json:"time_elapsed,omitempty"`
|
||||
// change in user time (in seconds) used by the server since last reset
|
||||
TimeUser float64 `protobuf:"fixed64,2,opt,name=time_user,json=timeUser,proto3" json:"time_user,omitempty"`
|
||||
// change in server time (in seconds) used by the server process and all
|
||||
// threads since last reset
|
||||
TimeSystem float64 `protobuf:"fixed64,3,opt,name=time_system,json=timeSystem,proto3" json:"time_system,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ServerStats) Reset() { *m = ServerStats{} }
|
||||
func (m *ServerStats) String() string { return proto.CompactTextString(m) }
|
||||
func (*ServerStats) ProtoMessage() {}
|
||||
func (*ServerStats) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_stats_8ba831c0cb3c3440, []int{0}
|
||||
}
|
||||
func (m *ServerStats) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ServerStats.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ServerStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ServerStats.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *ServerStats) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ServerStats.Merge(dst, src)
|
||||
}
|
||||
func (m *ServerStats) XXX_Size() int {
|
||||
return xxx_messageInfo_ServerStats.Size(m)
|
||||
}
|
||||
func (m *ServerStats) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ServerStats.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ServerStats proto.InternalMessageInfo
|
||||
|
||||
func (m *ServerStats) GetTimeElapsed() float64 {
|
||||
if m != nil {
|
||||
return m.TimeElapsed
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ServerStats) GetTimeUser() float64 {
|
||||
if m != nil {
|
||||
return m.TimeUser
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ServerStats) GetTimeSystem() float64 {
|
||||
if m != nil {
|
||||
return m.TimeSystem
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Histogram params based on grpc/support/histogram.c
|
||||
type HistogramParams struct {
|
||||
Resolution float64 `protobuf:"fixed64,1,opt,name=resolution,proto3" json:"resolution,omitempty"`
|
||||
MaxPossible float64 `protobuf:"fixed64,2,opt,name=max_possible,json=maxPossible,proto3" json:"max_possible,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *HistogramParams) Reset() { *m = HistogramParams{} }
|
||||
func (m *HistogramParams) String() string { return proto.CompactTextString(m) }
|
||||
func (*HistogramParams) ProtoMessage() {}
|
||||
func (*HistogramParams) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_stats_8ba831c0cb3c3440, []int{1}
|
||||
}
|
||||
func (m *HistogramParams) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_HistogramParams.Unmarshal(m, b)
|
||||
}
|
||||
func (m *HistogramParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_HistogramParams.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *HistogramParams) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_HistogramParams.Merge(dst, src)
|
||||
}
|
||||
func (m *HistogramParams) XXX_Size() int {
|
||||
return xxx_messageInfo_HistogramParams.Size(m)
|
||||
}
|
||||
func (m *HistogramParams) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_HistogramParams.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_HistogramParams proto.InternalMessageInfo
|
||||
|
||||
func (m *HistogramParams) GetResolution() float64 {
|
||||
if m != nil {
|
||||
return m.Resolution
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *HistogramParams) GetMaxPossible() float64 {
|
||||
if m != nil {
|
||||
return m.MaxPossible
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Histogram data based on grpc/support/histogram.c
|
||||
type HistogramData struct {
|
||||
Bucket []uint32 `protobuf:"varint,1,rep,packed,name=bucket,proto3" json:"bucket,omitempty"`
|
||||
MinSeen float64 `protobuf:"fixed64,2,opt,name=min_seen,json=minSeen,proto3" json:"min_seen,omitempty"`
|
||||
MaxSeen float64 `protobuf:"fixed64,3,opt,name=max_seen,json=maxSeen,proto3" json:"max_seen,omitempty"`
|
||||
Sum float64 `protobuf:"fixed64,4,opt,name=sum,proto3" json:"sum,omitempty"`
|
||||
SumOfSquares float64 `protobuf:"fixed64,5,opt,name=sum_of_squares,json=sumOfSquares,proto3" json:"sum_of_squares,omitempty"`
|
||||
Count float64 `protobuf:"fixed64,6,opt,name=count,proto3" json:"count,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *HistogramData) Reset() { *m = HistogramData{} }
|
||||
func (m *HistogramData) String() string { return proto.CompactTextString(m) }
|
||||
func (*HistogramData) ProtoMessage() {}
|
||||
func (*HistogramData) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_stats_8ba831c0cb3c3440, []int{2}
|
||||
}
|
||||
func (m *HistogramData) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_HistogramData.Unmarshal(m, b)
|
||||
}
|
||||
func (m *HistogramData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_HistogramData.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *HistogramData) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_HistogramData.Merge(dst, src)
|
||||
}
|
||||
func (m *HistogramData) XXX_Size() int {
|
||||
return xxx_messageInfo_HistogramData.Size(m)
|
||||
}
|
||||
func (m *HistogramData) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_HistogramData.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_HistogramData proto.InternalMessageInfo
|
||||
|
||||
func (m *HistogramData) GetBucket() []uint32 {
|
||||
if m != nil {
|
||||
return m.Bucket
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *HistogramData) GetMinSeen() float64 {
|
||||
if m != nil {
|
||||
return m.MinSeen
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *HistogramData) GetMaxSeen() float64 {
|
||||
if m != nil {
|
||||
return m.MaxSeen
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *HistogramData) GetSum() float64 {
|
||||
if m != nil {
|
||||
return m.Sum
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *HistogramData) GetSumOfSquares() float64 {
|
||||
if m != nil {
|
||||
return m.SumOfSquares
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *HistogramData) GetCount() float64 {
|
||||
if m != nil {
|
||||
return m.Count
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type ClientStats struct {
|
||||
// Latency histogram. Data points are in nanoseconds.
|
||||
Latencies *HistogramData `protobuf:"bytes,1,opt,name=latencies,proto3" json:"latencies,omitempty"`
|
||||
// See ServerStats for details.
|
||||
TimeElapsed float64 `protobuf:"fixed64,2,opt,name=time_elapsed,json=timeElapsed,proto3" json:"time_elapsed,omitempty"`
|
||||
TimeUser float64 `protobuf:"fixed64,3,opt,name=time_user,json=timeUser,proto3" json:"time_user,omitempty"`
|
||||
TimeSystem float64 `protobuf:"fixed64,4,opt,name=time_system,json=timeSystem,proto3" json:"time_system,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ClientStats) Reset() { *m = ClientStats{} }
|
||||
func (m *ClientStats) String() string { return proto.CompactTextString(m) }
|
||||
func (*ClientStats) ProtoMessage() {}
|
||||
func (*ClientStats) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_stats_8ba831c0cb3c3440, []int{3}
|
||||
}
|
||||
func (m *ClientStats) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ClientStats.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ClientStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ClientStats.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *ClientStats) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ClientStats.Merge(dst, src)
|
||||
}
|
||||
func (m *ClientStats) XXX_Size() int {
|
||||
return xxx_messageInfo_ClientStats.Size(m)
|
||||
}
|
||||
func (m *ClientStats) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ClientStats.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ClientStats proto.InternalMessageInfo
|
||||
|
||||
func (m *ClientStats) GetLatencies() *HistogramData {
|
||||
if m != nil {
|
||||
return m.Latencies
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ClientStats) GetTimeElapsed() float64 {
|
||||
if m != nil {
|
||||
return m.TimeElapsed
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ClientStats) GetTimeUser() float64 {
|
||||
if m != nil {
|
||||
return m.TimeUser
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ClientStats) GetTimeSystem() float64 {
|
||||
if m != nil {
|
||||
return m.TimeSystem
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*ServerStats)(nil), "grpc.testing.ServerStats")
|
||||
proto.RegisterType((*HistogramParams)(nil), "grpc.testing.HistogramParams")
|
||||
proto.RegisterType((*HistogramData)(nil), "grpc.testing.HistogramData")
|
||||
proto.RegisterType((*ClientStats)(nil), "grpc.testing.ClientStats")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("stats.proto", fileDescriptor_stats_8ba831c0cb3c3440) }
|
||||
|
||||
var fileDescriptor_stats_8ba831c0cb3c3440 = []byte{
|
||||
// 341 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xc1, 0x4a, 0xeb, 0x40,
|
||||
0x14, 0x86, 0x49, 0xd3, 0xf6, 0xb6, 0x27, 0xed, 0xbd, 0x97, 0x41, 0x24, 0x52, 0xd0, 0x1a, 0x5c,
|
||||
0x74, 0x95, 0x85, 0xae, 0x5c, 0xab, 0xe0, 0xce, 0xd2, 0xe8, 0x3a, 0x4c, 0xe3, 0x69, 0x19, 0xcc,
|
||||
0xcc, 0xc4, 0x39, 0x33, 0x12, 0x1f, 0x49, 0x7c, 0x49, 0xc9, 0x24, 0x68, 0x55, 0xd0, 0x5d, 0xe6,
|
||||
0xfb, 0x7e, 0xe6, 0xe4, 0xe4, 0x0f, 0x44, 0x64, 0xb9, 0xa5, 0xb4, 0x32, 0xda, 0x6a, 0x36, 0xd9,
|
||||
0x9a, 0xaa, 0x48, 0x2d, 0x92, 0x15, 0x6a, 0x9b, 0x28, 0x88, 0x32, 0x34, 0x4f, 0x68, 0xb2, 0x26,
|
||||
0xc2, 0x8e, 0x61, 0x62, 0x85, 0xc4, 0x1c, 0x4b, 0x5e, 0x11, 0xde, 0xc7, 0xc1, 0x3c, 0x58, 0x04,
|
||||
0xab, 0xa8, 0x61, 0x57, 0x2d, 0x62, 0x33, 0x18, 0xfb, 0x88, 0x23, 0x34, 0x71, 0xcf, 0xfb, 0x51,
|
||||
0x03, 0xee, 0x08, 0x0d, 0x3b, 0x02, 0x9f, 0xcd, 0xe9, 0x99, 0x2c, 0xca, 0x38, 0xf4, 0x1a, 0x1a,
|
||||
0x94, 0x79, 0x92, 0xdc, 0xc2, 0xbf, 0x6b, 0x41, 0x56, 0x6f, 0x0d, 0x97, 0x4b, 0x6e, 0xb8, 0x24,
|
||||
0x76, 0x08, 0x60, 0x90, 0x74, 0xe9, 0xac, 0xd0, 0xaa, 0x9b, 0xb8, 0x43, 0x9a, 0x77, 0x92, 0xbc,
|
||||
0xce, 0x2b, 0x4d, 0x24, 0xd6, 0x25, 0x76, 0x33, 0x23, 0xc9, 0xeb, 0x65, 0x87, 0x92, 0xd7, 0x00,
|
||||
0xa6, 0xef, 0xd7, 0x5e, 0x72, 0xcb, 0xd9, 0x3e, 0x0c, 0xd7, 0xae, 0x78, 0x40, 0x1b, 0x07, 0xf3,
|
||||
0x70, 0x31, 0x5d, 0x75, 0x27, 0x76, 0x00, 0x23, 0x29, 0x54, 0x4e, 0x88, 0xaa, 0xbb, 0xe8, 0x8f,
|
||||
0x14, 0x2a, 0x43, 0x54, 0x5e, 0xf1, 0xba, 0x55, 0x61, 0xa7, 0x78, 0xed, 0xd5, 0x7f, 0x08, 0xc9,
|
||||
0xc9, 0xb8, 0xef, 0x69, 0xf3, 0xc8, 0x4e, 0xe0, 0x2f, 0x39, 0x99, 0xeb, 0x4d, 0x4e, 0x8f, 0x8e,
|
||||
0x1b, 0xa4, 0x78, 0xe0, 0xe5, 0x84, 0x9c, 0xbc, 0xd9, 0x64, 0x2d, 0x63, 0x7b, 0x30, 0x28, 0xb4,
|
||||
0x53, 0x36, 0x1e, 0x7a, 0xd9, 0x1e, 0x92, 0x97, 0x00, 0xa2, 0x8b, 0x52, 0xa0, 0xb2, 0xed, 0x47,
|
||||
0x3f, 0x87, 0x71, 0xc9, 0x2d, 0xaa, 0x42, 0x20, 0xf9, 0xfd, 0xa3, 0xd3, 0x59, 0xba, 0xdb, 0x52,
|
||||
0xfa, 0x69, 0xb7, 0xd5, 0x47, 0xfa, 0x5b, 0x5f, 0xbd, 0x5f, 0xfa, 0x0a, 0x7f, 0xee, 0xab, 0xff,
|
||||
0xb5, 0xaf, 0xf5, 0xd0, 0xff, 0x34, 0x67, 0x6f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xea, 0x75, 0x34,
|
||||
0x90, 0x43, 0x02, 0x00, 0x00,
|
||||
}
|
55
vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.proto
generated
vendored
Normal file
|
@@ -0,0 +1,55 @@
|
|||
// Copyright 2016 gRPC authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package grpc.testing;
|
||||
|
||||
message ServerStats {
|
||||
// wall clock time change in seconds since last reset
|
||||
double time_elapsed = 1;
|
||||
|
||||
// change in user time (in seconds) used by the server since last reset
|
||||
double time_user = 2;
|
||||
|
||||
// change in server time (in seconds) used by the server process and all
|
||||
// threads since last reset
|
||||
double time_system = 3;
|
||||
}
|
||||
|
||||
// Histogram params based on grpc/support/histogram.c
|
||||
message HistogramParams {
|
||||
double resolution = 1; // first bucket is [0, 1 + resolution)
|
||||
double max_possible = 2; // use enough buckets to allow this value
|
||||
}
|
||||
|
||||
// Histogram data based on grpc/support/histogram.c
|
||||
message HistogramData {
|
||||
repeated uint32 bucket = 1;
|
||||
double min_seen = 2;
|
||||
double max_seen = 3;
|
||||
double sum = 4;
|
||||
double sum_of_squares = 5;
|
||||
double count = 6;
|
||||
}
|
||||
|
||||
message ClientStats {
|
||||
// Latency histogram. Data points are in nanoseconds.
|
||||
HistogramData latencies = 1;
|
||||
|
||||
// See ServerStats for details.
|
||||
double time_elapsed = 2;
|
||||
double time_user = 3;
|
||||
double time_system = 4;
|
||||
}
|
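Since HistogramData carries running sums rather than precomputed statistics, a consumer derives the summary values itself. A small sketch, not taken from the benchmark tooling, that recovers mean and standard deviation from the fields above (latencies are in nanoseconds per ClientStats):

package benchexample

import (
	"math"

	testpb "google.golang.org/grpc/benchmark/grpc_testing"
)

// summarize derives mean and standard deviation from the running sums
// carried in HistogramData.
func summarize(h *testpb.HistogramData) (mean, stddev float64) {
	if h.GetCount() == 0 {
		return 0, 0
	}
	mean = h.GetSum() / h.GetCount()
	variance := h.GetSumOfSquares()/h.GetCount() - mean*mean
	if variance < 0 { // guard against floating-point rounding noise
		variance = 0
	}
	return mean, math.Sqrt(variance)
}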
316
vendor/google.golang.org/grpc/benchmark/latency/latency.go
generated
vendored
Normal file
|
@@ -0,0 +1,316 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package latency provides wrappers for net.Conn, net.Listener, and
|
||||
// net.Dialers, designed to interoperate to inject real-world latency into
|
||||
// network connections.
|
||||
package latency
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// Dialer is a function matching the signature of net.Dial.
|
||||
type Dialer func(network, address string) (net.Conn, error)
|
||||
|
||||
// TimeoutDialer is a function matching the signature of net.DialTimeout.
|
||||
type TimeoutDialer func(network, address string, timeout time.Duration) (net.Conn, error)
|
||||
|
||||
// ContextDialer is a function matching the signature of
|
||||
// net.Dialer.DialContext.
|
||||
type ContextDialer func(ctx context.Context, network, address string) (net.Conn, error)
|
||||
|
||||
// Network represents a network with the given bandwidth, latency, and MTU
|
||||
// (Maximum Transmission Unit) configuration, and can produce wrappers of
|
||||
// net.Listeners, net.Conn, and various forms of dialing functions. The
|
||||
// Listeners and Dialers/Conns on both sides of connections must come from this
|
||||
// package, but need not be created from the same Network. Latency is computed
|
||||
// when sending (in Write), and is injected when receiving (in Read). This
|
||||
// allows senders' Write calls to be non-blocking, as in real-world
|
||||
// applications.
|
||||
//
|
||||
// Note: Latency is injected by the sender specifying the absolute time data
|
||||
// should be available, and the reader delaying until that time arrives to
|
||||
// provide the data. This package attempts to counter-act the effects of clock
|
||||
// drift and existing network latency by measuring the delay between the
|
||||
// sender's transmission time and the receiver's reception time during startup.
|
||||
// No attempt is made to measure the existing bandwidth of the connection.
|
||||
type Network struct {
|
||||
Kbps int // Kilobits per second; if non-positive, infinite
|
||||
Latency time.Duration // One-way latency (sending); if non-positive, no delay
|
||||
MTU int // Bytes per packet; if non-positive, infinite
|
||||
}
|
||||
|
||||
var (
|
||||
// Local simulates a local network.
|
||||
Local = Network{0, 0, 0}
|
||||
// LAN simulates a local area network.
|
||||
LAN = Network{100 * 1024, 2 * time.Millisecond, 1500}
|
||||
// WAN simulates a wide area network.
|
||||
WAN = Network{20 * 1024, 30 * time.Millisecond, 1500}
|
||||
// Longhaul simulates a long-haul link with high bandwidth and high latency.
|
||||
Longhaul = Network{1000 * 1024, 200 * time.Millisecond, 9000}
|
||||
)
|
||||
|
||||
// Conn returns a net.Conn that wraps c and injects n's latency into that
|
||||
// connection. This function also imposes latency for connection creation.
|
||||
// If n's Latency is lower than the measured latency in c, an error is
|
||||
// returned.
|
||||
func (n *Network) Conn(c net.Conn) (net.Conn, error) {
|
||||
start := now()
|
||||
nc := &conn{Conn: c, network: n, readBuf: new(bytes.Buffer)}
|
||||
if err := nc.sync(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sleep(start.Add(nc.delay).Sub(now()))
|
||||
return nc, nil
|
||||
}
|
||||
|
||||
type conn struct {
|
||||
net.Conn
|
||||
network *Network
|
||||
|
||||
readBuf *bytes.Buffer // one packet worth of data received
|
||||
lastSendEnd time.Time // time the previous Write should be fully on the wire
|
||||
delay time.Duration // desired latency - measured latency
|
||||
}
|
||||
|
||||
// header is sent before all data transmitted by the application.
|
||||
type header struct {
|
||||
ReadTime int64 // Time the reader is allowed to read this packet (UnixNano)
|
||||
Sz int32 // Size of the data in the packet
|
||||
}
|
||||
|
||||
func (c *conn) Write(p []byte) (n int, err error) {
|
||||
tNow := now()
|
||||
if c.lastSendEnd.Before(tNow) {
|
||||
c.lastSendEnd = tNow
|
||||
}
|
||||
for len(p) > 0 {
|
||||
pkt := p
|
||||
if c.network.MTU > 0 && len(pkt) > c.network.MTU {
|
||||
pkt = pkt[:c.network.MTU]
|
||||
p = p[c.network.MTU:]
|
||||
} else {
|
||||
p = nil
|
||||
}
|
||||
if c.network.Kbps > 0 {
|
||||
if congestion := c.lastSendEnd.Sub(tNow) - c.delay; congestion > 0 {
|
||||
// The network is full; sleep until this packet can be sent.
|
||||
sleep(congestion)
|
||||
tNow = tNow.Add(congestion)
|
||||
}
|
||||
}
|
||||
c.lastSendEnd = c.lastSendEnd.Add(c.network.pktTime(len(pkt)))
|
||||
hdr := header{ReadTime: c.lastSendEnd.Add(c.delay).UnixNano(), Sz: int32(len(pkt))}
|
||||
if err := binary.Write(c.Conn, binary.BigEndian, hdr); err != nil {
|
||||
return n, err
|
||||
}
|
||||
x, err := c.Conn.Write(pkt)
|
||||
n += x
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (c *conn) Read(p []byte) (n int, err error) {
|
||||
if c.readBuf.Len() == 0 {
|
||||
var hdr header
|
||||
if err := binary.Read(c.Conn, binary.BigEndian, &hdr); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() { sleep(time.Unix(0, hdr.ReadTime).Sub(now())) }()
|
||||
|
||||
if _, err := io.CopyN(c.readBuf, c.Conn, int64(hdr.Sz)); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
// Read from readBuf.
|
||||
return c.readBuf.Read(p)
|
||||
}
|
||||
|
||||
// sync does a handshake and then measures the latency on the network in
|
||||
// coordination with the other side.
|
||||
func (c *conn) sync() error {
|
||||
const (
|
||||
pingMsg = "syncPing"
|
||||
warmup = 10 // minimum number of iterations to measure latency
|
||||
giveUp = 50 // maximum number of iterations to measure latency
|
||||
accuracy = time.Millisecond // req'd accuracy to stop early
|
||||
goodRun = 3 // stop early if latency within accuracy this many times
|
||||
)
|
||||
|
||||
type syncMsg struct {
|
||||
SendT int64 // Time sent. If zero, stop.
|
||||
RecvT int64 // Time received. If zero, fill in and respond.
|
||||
}
|
||||
|
||||
// A trivial handshake
|
||||
if err := binary.Write(c.Conn, binary.BigEndian, []byte(pingMsg)); err != nil {
|
||||
return err
|
||||
}
|
||||
var ping [8]byte
|
||||
if err := binary.Read(c.Conn, binary.BigEndian, &ping); err != nil {
|
||||
return err
|
||||
} else if string(ping[:]) != pingMsg {
|
||||
return fmt.Errorf("malformed handshake message: %v (want %q)", ping, pingMsg)
|
||||
}
|
||||
|
||||
// Both sides are alive and syncing. Calculate network delay / clock skew.
|
||||
att := 0
|
||||
good := 0
|
||||
var latency time.Duration
|
||||
localDone, remoteDone := false, false
|
||||
send := true
|
||||
for !localDone || !remoteDone {
|
||||
if send {
|
||||
if err := binary.Write(c.Conn, binary.BigEndian, syncMsg{SendT: now().UnixNano()}); err != nil {
|
||||
return err
|
||||
}
|
||||
att++
|
||||
send = false
|
||||
}
|
||||
|
||||
// Block until we get a syncMsg
|
||||
m := syncMsg{}
|
||||
if err := binary.Read(c.Conn, binary.BigEndian, &m); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if m.RecvT == 0 {
|
||||
// Message initiated from other side.
|
||||
if m.SendT == 0 {
|
||||
remoteDone = true
|
||||
continue
|
||||
}
|
||||
// Send response.
|
||||
m.RecvT = now().UnixNano()
|
||||
if err := binary.Write(c.Conn, binary.BigEndian, m); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
lag := time.Duration(m.RecvT - m.SendT)
|
||||
latency += lag
|
||||
avgLatency := latency / time.Duration(att)
|
||||
if e := lag - avgLatency; e > -accuracy && e < accuracy {
|
||||
good++
|
||||
} else {
|
||||
good = 0
|
||||
}
|
||||
if att < giveUp && (att < warmup || good < goodRun) {
|
||||
send = true
|
||||
continue
|
||||
}
|
||||
localDone = true
|
||||
latency = avgLatency
|
||||
// Tell the other side we're done.
|
||||
if err := binary.Write(c.Conn, binary.BigEndian, syncMsg{}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if c.network.Latency <= 0 {
|
||||
return nil
|
||||
}
|
||||
c.delay = c.network.Latency - latency
|
||||
if c.delay < 0 {
|
||||
return fmt.Errorf("measured network latency (%v) higher than desired latency (%v)", latency, c.network.Latency)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Listener returns a net.Listener that wraps l and injects n's latency in its
|
||||
// connections.
|
||||
func (n *Network) Listener(l net.Listener) net.Listener {
|
||||
return &listener{Listener: l, network: n}
|
||||
}
|
||||
|
||||
type listener struct {
|
||||
net.Listener
|
||||
network *Network
|
||||
}
|
||||
|
||||
func (l *listener) Accept() (net.Conn, error) {
|
||||
c, err := l.Listener.Accept()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return l.network.Conn(c)
|
||||
}
|
||||
|
||||
// Dialer returns a Dialer that wraps d and injects n's latency in its
|
||||
// connections. n's Latency is also injected to the connection's creation.
|
||||
func (n *Network) Dialer(d Dialer) Dialer {
|
||||
return func(network, address string) (net.Conn, error) {
|
||||
conn, err := d(network, address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return n.Conn(conn)
|
||||
}
|
||||
}
|
||||
|
||||
// TimeoutDialer returns a TimeoutDialer that wraps d and injects n's latency
|
||||
// in its connections. n's Latency is also injected to the connection's
|
||||
// creation.
|
||||
func (n *Network) TimeoutDialer(d TimeoutDialer) TimeoutDialer {
|
||||
return func(network, address string, timeout time.Duration) (net.Conn, error) {
|
||||
conn, err := d(network, address, timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return n.Conn(conn)
|
||||
}
|
||||
}
|
||||
|
||||
// ContextDialer returns a ContextDialer that wraps d and injects n's latency
|
||||
// in its connections. n's Latency is also injected to the connection's
|
||||
// creation.
|
||||
func (n *Network) ContextDialer(d ContextDialer) ContextDialer {
|
||||
return func(ctx context.Context, network, address string) (net.Conn, error) {
|
||||
conn, err := d(ctx, network, address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return n.Conn(conn)
|
||||
}
|
||||
}
|
||||
|
||||
// pktTime returns the time it takes to transmit one packet of data of size b
|
||||
// in bytes.
|
||||
func (n *Network) pktTime(b int) time.Duration {
|
||||
if n.Kbps <= 0 {
|
||||
return time.Duration(0)
|
||||
}
|
||||
return time.Duration(b) * time.Second / time.Duration(n.Kbps*(1024/8))
|
||||
}
|
||||
|
||||
// Wrappers for testing
|
||||
|
||||
var now = time.Now
|
||||
var sleep = time.Sleep
|
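Because the delay is negotiated during the sync handshake, both ends of a connection have to be wrapped before traffic flows, so the accept and the dial must proceed concurrently. A rough sketch of pairing two conns through the predefined LAN profile; the package and function names are illustrative:

package benchexample

import (
	"net"

	"google.golang.org/grpc/benchmark/latency"
)

// pairThroughLAN connects a client conn and a server conn over a real TCP
// listener, wrapping both ends with the LAN profile. The accept runs in a
// goroutine because the wrapped dial blocks until the peer completes the
// startup sync handshake.
func pairThroughLAN() (client, server net.Conn, err error) {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		return nil, nil, err
	}
	defer lis.Close()
	wrapped := latency.LAN.Listener(lis)

	srvCh := make(chan net.Conn, 1)
	errCh := make(chan error, 1)
	go func() {
		c, aerr := wrapped.Accept() // wraps the accepted conn and syncs
		if aerr != nil {
			errCh <- aerr
			return
		}
		srvCh <- c
	}()

	client, err = latency.LAN.Dialer(net.Dial)("tcp", lis.Addr().String())
	if err != nil {
		return nil, nil, err
	}
	select {
	case server = <-srvCh:
		return client, server, nil
	case aerr := <-errCh:
		client.Close()
		return nil, nil, aerr
	}
}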
353
vendor/google.golang.org/grpc/benchmark/latency/latency_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,353 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package latency
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// bufConn is a net.Conn implemented by a bytes.Buffer (which is a ReadWriter).
|
||||
type bufConn struct {
|
||||
*bytes.Buffer
|
||||
}
|
||||
|
||||
func (bufConn) Close() error { panic("unimplemented") }
|
||||
func (bufConn) LocalAddr() net.Addr { panic("unimplemented") }
|
||||
func (bufConn) RemoteAddr() net.Addr { panic("unimplemented") }
|
||||
func (bufConn) SetDeadline(t time.Time) error { panic("unimplemented") }
|
||||
func (bufConn) SetReadDeadline(t time.Time) error { panic("unimplemented") }
|
||||
func (bufConn) SetWriteDeadline(t time.Time) error { panic("unimplemented") }
|
||||
|
||||
func restoreHooks() func() {
|
||||
s := sleep
|
||||
n := now
|
||||
return func() {
|
||||
sleep = s
|
||||
now = n
|
||||
}
|
||||
}
|
||||
|
||||
func TestConn(t *testing.T) {
|
||||
defer restoreHooks()()
|
||||
|
||||
// Constant time.
|
||||
now = func() time.Time { return time.Unix(123, 456) }
|
||||
|
||||
// Capture sleep times for checking later.
|
||||
var sleepTimes []time.Duration
|
||||
sleep = func(t time.Duration) { sleepTimes = append(sleepTimes, t) }
|
||||
|
||||
wantSleeps := func(want ...time.Duration) {
|
||||
if !reflect.DeepEqual(want, sleepTimes) {
|
||||
t.Fatalf("sleepTimes = %v; want %v", sleepTimes, want)
|
||||
}
|
||||
sleepTimes = nil
|
||||
}
|
||||
|
||||
// Use a fairly high latency to cause a large BDP and avoid sleeps while
|
||||
// writing due to simulation of full buffers.
|
||||
latency := 1 * time.Second
|
||||
c, err := (&Network{Kbps: 1, Latency: latency, MTU: 5}).Conn(bufConn{&bytes.Buffer{}})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error creating connection: %v", err)
|
||||
}
|
||||
wantSleeps(latency) // Connection creation delay.
|
||||
|
||||
// 1 kbps = 128 Bps. Divides evenly by 1 second using nanos.
|
||||
byteLatency := time.Duration(time.Second / 128)
|
||||
|
||||
write := func(b []byte) {
|
||||
n, err := c.Write(b)
|
||||
if n != len(b) || err != nil {
|
||||
t.Fatalf("c.Write(%v) = %v, %v; want %v, nil", b, n, err, len(b))
|
||||
}
|
||||
}
|
||||
|
||||
write([]byte{1, 2, 3, 4, 5}) // One full packet
|
||||
pkt1Time := latency + byteLatency*5
|
||||
write([]byte{6}) // One partial packet
|
||||
pkt2Time := pkt1Time + byteLatency
|
||||
write([]byte{7, 8, 9, 10, 11, 12, 13}) // Two packets
|
||||
pkt3Time := pkt2Time + byteLatency*5
|
||||
pkt4Time := pkt3Time + byteLatency*2
|
||||
|
||||
// No reads, so no sleeps yet.
|
||||
wantSleeps()
|
||||
|
||||
read := func(n int, want []byte) {
|
||||
b := make([]byte, n)
|
||||
if rd, err := c.Read(b); err != nil || rd != len(want) {
|
||||
t.Fatalf("c.Read(<%v bytes>) = %v, %v; want %v, nil", n, rd, err, len(want))
|
||||
}
|
||||
if !reflect.DeepEqual(b[:len(want)], want) {
|
||||
t.Fatalf("read %v; want %v", b, want)
|
||||
}
|
||||
}
|
||||
|
||||
read(1, []byte{1})
|
||||
wantSleeps(pkt1Time)
|
||||
read(1, []byte{2})
|
||||
wantSleeps()
|
||||
read(3, []byte{3, 4, 5})
|
||||
wantSleeps()
|
||||
read(2, []byte{6})
|
||||
wantSleeps(pkt2Time)
|
||||
read(2, []byte{7, 8})
|
||||
wantSleeps(pkt3Time)
|
||||
read(10, []byte{9, 10, 11})
|
||||
wantSleeps()
|
||||
read(10, []byte{12, 13})
|
||||
wantSleeps(pkt4Time)
|
||||
}
|
||||
|
||||
func TestSync(t *testing.T) {
|
||||
defer restoreHooks()()
|
||||
|
||||
// Infinitely fast CPU: time doesn't pass unless sleep is called.
|
||||
tn := time.Unix(123, 0)
|
||||
now = func() time.Time { return tn }
|
||||
sleep = func(d time.Duration) { tn = tn.Add(d) }
|
||||
|
||||
// Simulate a 20ms latency network, then run sync across that and expect to
|
||||
// measure 20ms latency, or 10ms additional delay for a 30ms network.
|
||||
slowConn, err := (&Network{Kbps: 0, Latency: 20 * time.Millisecond, MTU: 5}).Conn(bufConn{&bytes.Buffer{}})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error creating connection: %v", err)
|
||||
}
|
||||
c, err := (&Network{Latency: 30 * time.Millisecond}).Conn(slowConn)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error creating connection: %v", err)
|
||||
}
|
||||
if c.(*conn).delay != 10*time.Millisecond {
|
||||
t.Fatalf("c.delay = %v; want 10ms", c.(*conn).delay)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSyncTooSlow(t *testing.T) {
|
||||
defer restoreHooks()()
|
||||
|
||||
// Infinitely fast CPU: time doesn't pass unless sleep is called.
|
||||
tn := time.Unix(123, 0)
|
||||
now = func() time.Time { return tn }
|
||||
sleep = func(d time.Duration) { tn = tn.Add(d) }
|
||||
|
||||
// Simulate a 10ms latency network, then attempt to simulate a 5ms latency
|
||||
// network and expect an error.
|
||||
slowConn, err := (&Network{Kbps: 0, Latency: 10 * time.Millisecond, MTU: 5}).Conn(bufConn{&bytes.Buffer{}})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error creating connection: %v", err)
|
||||
}
|
||||
|
||||
errWant := "measured network latency (10ms) higher than desired latency (5ms)"
|
||||
if _, err := (&Network{Latency: 5 * time.Millisecond}).Conn(slowConn); err == nil || err.Error() != errWant {
|
||||
t.Fatalf("Conn() = _, %q; want _, %q", err, errWant)
|
||||
}
|
||||
}
|
||||
|
||||
func TestListenerAndDialer(t *testing.T) {
|
||||
defer restoreHooks()()
|
||||
|
||||
tn := time.Unix(123, 0)
|
||||
startTime := tn
|
||||
mu := &sync.Mutex{}
|
||||
now = func() time.Time {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return tn
|
||||
}
|
||||
|
||||
// Use a fairly high latency to cause a large BDP and avoid sleeps while
|
||||
// writing due to simulation of full buffers.
|
||||
n := &Network{Kbps: 2, Latency: 1 * time.Second, MTU: 10}
|
||||
// 2 kbps = .25 kBps = 256 Bps
|
||||
byteLatency := func(n int) time.Duration {
|
||||
return time.Duration(n) * time.Second / 256
|
||||
}
|
||||
|
||||
// Create a real listener and wrap it.
|
||||
l, err := net.Listen("tcp", "localhost:0")
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error creating listener: %v", err)
|
||||
}
|
||||
defer l.Close()
|
||||
l = n.Listener(l)
|
||||
|
||||
var serverConn net.Conn
|
||||
var scErr error
|
||||
scDone := make(chan struct{})
|
||||
go func() {
|
||||
serverConn, scErr = l.Accept()
|
||||
close(scDone)
|
||||
}()
|
||||
|
||||
// Create a dialer and use it.
|
||||
clientConn, err := n.TimeoutDialer(net.DialTimeout)("tcp", l.Addr().String(), 2*time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error dialing: %v", err)
|
||||
}
|
||||
defer clientConn.Close()
|
||||
|
||||
// Block until server's Conn is available.
|
||||
<-scDone
|
||||
if scErr != nil {
|
||||
t.Fatalf("Unexpected error listening: %v", scErr)
|
||||
}
|
||||
defer serverConn.Close()
|
||||
|
||||
// sleep (only) advances tn. Installed after the connections are established so sync detects zero delay.
|
||||
sleep = func(d time.Duration) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
if d > 0 {
|
||||
tn = tn.Add(d)
|
||||
}
|
||||
}
|
||||
|
||||
seq := func(a, b int) []byte {
|
||||
buf := make([]byte, b-a)
|
||||
for i := 0; i < b-a; i++ {
|
||||
buf[i] = byte(i + a)
|
||||
}
|
||||
return buf
|
||||
}
|
||||
|
||||
pkt1 := seq(0, 10)
|
||||
pkt2 := seq(10, 30)
|
||||
pkt3 := seq(30, 35)
|
||||
|
||||
write := func(c net.Conn, b []byte) {
|
||||
n, err := c.Write(b)
|
||||
if n != len(b) || err != nil {
|
||||
t.Fatalf("c.Write(%v) = %v, %v; want %v, nil", b, n, err, len(b))
|
||||
}
|
||||
}
|
||||
|
||||
write(serverConn, pkt1)
|
||||
write(serverConn, pkt2)
|
||||
write(serverConn, pkt3)
|
||||
write(clientConn, pkt3)
|
||||
write(clientConn, pkt1)
|
||||
write(clientConn, pkt2)
|
||||
|
||||
if tn != startTime {
|
||||
t.Fatalf("unexpected sleep in write; tn = %v; want %v", tn, startTime)
|
||||
}
|
||||
|
||||
read := func(c net.Conn, n int, want []byte, timeWant time.Time) {
|
||||
b := make([]byte, n)
|
||||
if rd, err := c.Read(b); err != nil || rd != len(want) {
|
||||
t.Fatalf("c.Read(<%v bytes>) = %v, %v; want %v, nil (read: %v)", n, rd, err, len(want), b[:rd])
|
||||
}
|
||||
if !reflect.DeepEqual(b[:len(want)], want) {
|
||||
t.Fatalf("read %v; want %v", b, want)
|
||||
}
|
||||
if !tn.Equal(timeWant) {
|
||||
t.Errorf("tn after read(%v) = %v; want %v", want, tn, timeWant)
|
||||
}
|
||||
}
|
||||
|
||||
read(clientConn, len(pkt1)+1, pkt1, startTime.Add(n.Latency+byteLatency(len(pkt1))))
|
||||
read(serverConn, len(pkt3)+1, pkt3, tn) // tn was advanced by the above read; pkt3 is shorter than pkt1
|
||||
|
||||
read(clientConn, len(pkt2), pkt2[:10], startTime.Add(n.Latency+byteLatency(len(pkt1)+10)))
|
||||
read(clientConn, len(pkt2), pkt2[10:], startTime.Add(n.Latency+byteLatency(len(pkt1)+len(pkt2))))
|
||||
read(clientConn, len(pkt3), pkt3, startTime.Add(n.Latency+byteLatency(len(pkt1)+len(pkt2)+len(pkt3))))
|
||||
|
||||
read(serverConn, len(pkt1), pkt1, tn) // tn already past the arrival time due to prior reads
|
||||
read(serverConn, len(pkt2), pkt2[:10], tn)
|
||||
read(serverConn, len(pkt2), pkt2[10:], tn)
|
||||
|
||||
// Sleep awhile and make sure the read happens disregarding previous writes
|
||||
// (lastSendEnd handling).
|
||||
sleep(10 * time.Second)
|
||||
write(clientConn, pkt1)
|
||||
read(serverConn, len(pkt1), pkt1, tn.Add(n.Latency+byteLatency(len(pkt1))))
|
||||
|
||||
// Send, sleep longer than the network delay, then make sure the read happens
|
||||
// instantly.
|
||||
write(serverConn, pkt1)
|
||||
sleep(10 * time.Second)
|
||||
read(clientConn, len(pkt1), pkt1, tn)
|
||||
}
|
||||
|
||||
func TestBufferBloat(t *testing.T) {
|
||||
defer restoreHooks()()
|
||||
|
||||
// Infinitely fast CPU: time doesn't pass unless sleep is called.
|
||||
tn := time.Unix(123, 0)
|
||||
now = func() time.Time { return tn }
|
||||
// Capture sleep times for checking later.
|
||||
var sleepTimes []time.Duration
|
||||
sleep = func(d time.Duration) {
|
||||
sleepTimes = append(sleepTimes, d)
|
||||
tn = tn.Add(d)
|
||||
}
|
||||
|
||||
wantSleeps := func(want ...time.Duration) error {
|
||||
if !reflect.DeepEqual(want, sleepTimes) {
|
||||
return fmt.Errorf("sleepTimes = %v; want %v", sleepTimes, want)
|
||||
}
|
||||
sleepTimes = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
n := &Network{Kbps: 8 /* 1KBps */, Latency: time.Second, MTU: 8}
|
||||
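// The bandwidth-delay product of the simulated link: roughly how many bytes
// can be buffered in flight before further writes have to sleep.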
bdpBytes := (n.Kbps * 1024 / 8) * int(n.Latency/time.Second) // 1024
|
||||
c, err := n.Conn(bufConn{&bytes.Buffer{}})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error creating connection: %v", err)
|
||||
}
|
||||
wantSleeps(n.Latency) // Connection creation delay.
|
||||
|
||||
write := func(n int, sleeps ...time.Duration) {
|
||||
if wt, err := c.Write(make([]byte, n)); err != nil || wt != n {
|
||||
t.Fatalf("c.Write(<%v bytes>) = %v, %v; want %v, nil", n, wt, err, n)
|
||||
}
|
||||
if err := wantSleeps(sleeps...); err != nil {
|
||||
t.Fatalf("After writing %v bytes: %v", n, err)
|
||||
}
|
||||
}
|
||||
|
||||
read := func(n int, sleeps ...time.Duration) {
|
||||
if rd, err := c.Read(make([]byte, n)); err != nil || rd != n {
|
||||
t.Fatalf("c.Read(_) = %v, %v; want %v, nil", rd, err, n)
|
||||
}
|
||||
if err := wantSleeps(sleeps...); err != nil {
|
||||
t.Fatalf("After reading %v bytes: %v", n, err)
|
||||
}
|
||||
}
|
||||
|
||||
write(8) // No reads and buffer not full, so no sleeps yet.
|
||||
read(8, time.Second+n.pktTime(8))
|
||||
|
||||
write(bdpBytes) // Fill the buffer.
|
||||
write(1) // We can send one extra packet even when the buffer is full.
|
||||
write(n.MTU, n.pktTime(1)) // Make sure we sleep to clear the previous write.
|
||||
write(1, n.pktTime(n.MTU))
|
||||
write(n.MTU+1, n.pktTime(1), n.pktTime(n.MTU))
|
||||
|
||||
tn = tn.Add(10 * time.Second) // Wait long enough for the buffer to clear.
|
||||
write(bdpBytes) // No sleeps required.
|
||||
}
|
135
vendor/google.golang.org/grpc/benchmark/primitives/code_string_test.go
generated
vendored
Normal file
@@ -0,0 +1,135 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package primitives_test
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
type codeBench uint32
|
||||
|
||||
const (
|
||||
OK codeBench = iota
|
||||
Canceled
|
||||
Unknown
|
||||
InvalidArgument
|
||||
DeadlineExceeded
|
||||
NotFound
|
||||
AlreadyExists
|
||||
PermissionDenied
|
||||
ResourceExhausted
|
||||
FailedPrecondition
|
||||
Aborted
|
||||
OutOfRange
|
||||
Unimplemented
|
||||
Internal
|
||||
Unavailable
|
||||
DataLoss
|
||||
Unauthenticated
|
||||
)
|
||||
|
||||
// The following String() function was generated by stringer.
|
||||
const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated"
|
||||
|
||||
var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192}
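// For example, codeBench(1) ("Canceled") returns
// _Code_name[_Code_index[1]:_Code_index[2]], i.e. _Code_name[2:10].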
|
||||
|
||||
func (i codeBench) String() string {
|
||||
if i >= codeBench(len(_Code_index)-1) {
|
||||
return "Code(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
return _Code_name[_Code_index[i]:_Code_index[i+1]]
|
||||
}
|
||||
|
||||
var nameMap = map[codeBench]string{
|
||||
OK: "OK",
|
||||
Canceled: "Canceled",
|
||||
Unknown: "Unknown",
|
||||
InvalidArgument: "InvalidArgument",
|
||||
DeadlineExceeded: "DeadlineExceeded",
|
||||
NotFound: "NotFound",
|
||||
AlreadyExists: "AlreadyExists",
|
||||
PermissionDenied: "PermissionDenied",
|
||||
ResourceExhausted: "ResourceExhausted",
|
||||
FailedPrecondition: "FailedPrecondition",
|
||||
Aborted: "Aborted",
|
||||
OutOfRange: "OutOfRange",
|
||||
Unimplemented: "Unimplemented",
|
||||
Internal: "Internal",
|
||||
Unavailable: "Unavailable",
|
||||
DataLoss: "DataLoss",
|
||||
Unauthenticated: "Unauthenticated",
|
||||
}
|
||||
|
||||
func (i codeBench) StringUsingMap() string {
|
||||
if s, ok := nameMap[i]; ok {
|
||||
return s
|
||||
}
|
||||
return "Code(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
|
||||
func BenchmarkCodeStringStringer(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
c := codeBench(uint32(i % 17))
|
||||
_ = c.String()
|
||||
}
|
||||
b.StopTimer()
|
||||
}
|
||||
|
||||
func BenchmarkCodeStringMap(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
c := codeBench(uint32(i % 17))
|
||||
_ = c.StringUsingMap()
|
||||
}
|
||||
b.StopTimer()
|
||||
}
|
||||
|
||||
// codes.Code.String() does a switch.
|
||||
func BenchmarkCodeStringSwitch(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
c := codes.Code(uint32(i % 17))
|
||||
_ = c.String()
|
||||
}
|
||||
b.StopTimer()
|
||||
}
|
||||
|
||||
// Testing all codes (0<=c<=16) and also one overflow (17).
|
||||
func BenchmarkCodeStringStringerWithOverflow(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
c := codeBench(uint32(i % 18))
|
||||
_ = c.String()
|
||||
}
|
||||
b.StopTimer()
|
||||
}
|
||||
|
||||
// Testing all codes (0<=c<=16) and also one overflow (17).
|
||||
func BenchmarkCodeStringSwitchWithOverflow(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
c := codes.Code(uint32(i % 18))
|
||||
_ = c.String()
|
||||
}
|
||||
b.StopTimer()
|
||||
}
|
120
vendor/google.golang.org/grpc/benchmark/primitives/context_test.go
generated
vendored
Normal file
@@ -0,0 +1,120 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package primitives_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func BenchmarkCancelContextErrNoErr(b *testing.B) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
for i := 0; i < b.N; i++ {
|
||||
if err := ctx.Err(); err != nil {
|
||||
b.Fatal("error")
|
||||
}
|
||||
}
|
||||
cancel()
|
||||
}
|
||||
|
||||
func BenchmarkCancelContextErrGotErr(b *testing.B) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if err := ctx.Err(); err == nil {
|
||||
b.Fatal("error")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCancelContextChannelNoErr(b *testing.B) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
for i := 0; i < b.N; i++ {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
b.Fatal("error: ctx.Done():", ctx.Err())
|
||||
default:
|
||||
}
|
||||
}
|
||||
cancel()
|
||||
}
|
||||
|
||||
func BenchmarkCancelContextChannelGotErr(b *testing.B) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
for i := 0; i < b.N; i++ {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
if err := ctx.Err(); err == nil {
|
||||
b.Fatal("error")
|
||||
}
|
||||
default:
|
||||
b.Fatal("error: !ctx.Done()")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkTimerContextErrNoErr(b *testing.B) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 24*time.Hour)
|
||||
for i := 0; i < b.N; i++ {
|
||||
if err := ctx.Err(); err != nil {
|
||||
b.Fatal("error")
|
||||
}
|
||||
}
|
||||
cancel()
|
||||
}
|
||||
|
||||
func BenchmarkTimerContextErrGotErr(b *testing.B) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Microsecond)
|
||||
cancel()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if err := ctx.Err(); err == nil {
|
||||
b.Fatal("error")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkTimerContextChannelNoErr(b *testing.B) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 24*time.Hour)
|
||||
for i := 0; i < b.N; i++ {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
b.Fatal("error: ctx.Done():", ctx.Err())
|
||||
default:
|
||||
}
|
||||
}
|
||||
cancel()
|
||||
}
|
||||
|
||||
func BenchmarkTimerContextChannelGotErr(b *testing.B) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Microsecond)
|
||||
cancel()
|
||||
for i := 0; i < b.N; i++ {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
if err := ctx.Err(); err == nil {
|
||||
b.Fatal("error")
|
||||
}
|
||||
default:
|
||||
b.Fatal("error: !ctx.Done()")
|
||||
}
|
||||
}
|
||||
}
|
403
vendor/google.golang.org/grpc/benchmark/primitives/primitives_test.go
generated
vendored
Normal file
@@ -0,0 +1,403 @@
|
|||
// +build go1.7
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package primitives_test contains benchmarks for various synchronization primitives
|
||||
// available in Go.
|
||||
package primitives_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func BenchmarkSelectClosed(b *testing.B) {
|
||||
c := make(chan struct{})
|
||||
close(c)
|
||||
x := 0
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
select {
|
||||
case <-c:
|
||||
x++
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.StopTimer()
|
||||
if x != b.N {
|
||||
b.Fatal("error")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSelectOpen(b *testing.B) {
|
||||
c := make(chan struct{})
|
||||
x := 0
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
select {
|
||||
case <-c:
|
||||
default:
|
||||
x++
|
||||
}
|
||||
}
|
||||
b.StopTimer()
|
||||
if x != b.N {
|
||||
b.Fatal("error")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAtomicBool(b *testing.B) {
|
||||
c := int32(0)
|
||||
x := 0
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if atomic.LoadInt32(&c) == 0 {
|
||||
x++
|
||||
}
|
||||
}
|
||||
b.StopTimer()
|
||||
if x != b.N {
|
||||
b.Fatal("error")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAtomicValueLoad(b *testing.B) {
|
||||
c := atomic.Value{}
|
||||
c.Store(0)
|
||||
x := 0
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if c.Load().(int) == 0 {
|
||||
x++
|
||||
}
|
||||
}
|
||||
b.StopTimer()
|
||||
if x != b.N {
|
||||
b.Fatal("error")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAtomicValueStore(b *testing.B) {
|
||||
c := atomic.Value{}
|
||||
v := 123
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
c.Store(v)
|
||||
}
|
||||
b.StopTimer()
|
||||
}
|
||||
|
||||
func BenchmarkMutex(b *testing.B) {
|
||||
c := sync.Mutex{}
|
||||
x := 0
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
c.Lock()
|
||||
x++
|
||||
c.Unlock()
|
||||
}
|
||||
b.StopTimer()
|
||||
if x != b.N {
|
||||
b.Fatal("error")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkRWMutex(b *testing.B) {
|
||||
c := sync.RWMutex{}
|
||||
x := 0
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
c.RLock()
|
||||
x++
|
||||
c.RUnlock()
|
||||
}
|
||||
b.StopTimer()
|
||||
if x != b.N {
|
||||
b.Fatal("error")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkRWMutexW(b *testing.B) {
|
||||
c := sync.RWMutex{}
|
||||
x := 0
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
c.Lock()
|
||||
x++
|
||||
c.Unlock()
|
||||
}
|
||||
b.StopTimer()
|
||||
if x != b.N {
|
||||
b.Fatal("error")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMutexWithDefer(b *testing.B) {
|
||||
c := sync.Mutex{}
|
||||
x := 0
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
func() {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
x++
|
||||
}()
|
||||
}
|
||||
b.StopTimer()
|
||||
if x != b.N {
|
||||
b.Fatal("error")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMutexWithClosureDefer(b *testing.B) {
|
||||
c := sync.Mutex{}
|
||||
x := 0
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
func() {
|
||||
c.Lock()
|
||||
defer func() { c.Unlock() }()
|
||||
x++
|
||||
}()
|
||||
}
|
||||
b.StopTimer()
|
||||
if x != b.N {
|
||||
b.Fatal("error")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMutexWithoutDefer(b *testing.B) {
|
||||
c := sync.Mutex{}
|
||||
x := 0
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
func() {
|
||||
c.Lock()
|
||||
x++
|
||||
c.Unlock()
|
||||
}()
|
||||
}
|
||||
b.StopTimer()
|
||||
if x != b.N {
|
||||
b.Fatal("error")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAtomicAddInt64(b *testing.B) {
|
||||
var c int64
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
atomic.AddInt64(&c, 1)
|
||||
}
|
||||
b.StopTimer()
|
||||
if c != int64(b.N) {
|
||||
b.Fatal("error")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAtomicTimeValueStore(b *testing.B) {
|
||||
var c atomic.Value
|
||||
t := time.Now()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
c.Store(t)
|
||||
}
|
||||
b.StopTimer()
|
||||
}
|
||||
|
||||
func BenchmarkAtomic16BValueStore(b *testing.B) {
|
||||
var c atomic.Value
|
||||
t := struct {
|
||||
a int64
|
||||
b int64
|
||||
}{
|
||||
123, 123,
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
c.Store(t)
|
||||
}
|
||||
b.StopTimer()
|
||||
}
|
||||
|
||||
func BenchmarkAtomic32BValueStore(b *testing.B) {
|
||||
var c atomic.Value
|
||||
t := struct {
|
||||
a int64
|
||||
b int64
|
||||
c int64
|
||||
d int64
|
||||
}{
|
||||
123, 123, 123, 123,
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
c.Store(t)
|
||||
}
|
||||
b.StopTimer()
|
||||
}
|
||||
|
||||
func BenchmarkAtomicPointerStore(b *testing.B) {
|
||||
t := 123
|
||||
var up unsafe.Pointer
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
atomic.StorePointer(&up, unsafe.Pointer(&t))
|
||||
}
|
||||
b.StopTimer()
|
||||
}
|
||||
|
||||
func BenchmarkAtomicTimePointerStore(b *testing.B) {
|
||||
t := time.Now()
|
||||
var up unsafe.Pointer
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
atomic.StorePointer(&up, unsafe.Pointer(&t))
|
||||
}
|
||||
b.StopTimer()
|
||||
}
|
||||
|
||||
func BenchmarkStoreContentionWithAtomic(b *testing.B) {
|
||||
t := 123
|
||||
var c unsafe.Pointer
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
atomic.StorePointer(&c, unsafe.Pointer(&t))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkStoreContentionWithMutex(b *testing.B) {
|
||||
t := 123
|
||||
var mu sync.Mutex
|
||||
var c int
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
mu.Lock()
|
||||
c = t
|
||||
mu.Unlock()
|
||||
}
|
||||
})
|
||||
_ = c
|
||||
}
|
||||
|
||||
type dummyStruct struct {
|
||||
a int64
|
||||
b time.Time
|
||||
}
|
||||
|
||||
func BenchmarkStructStoreContention(b *testing.B) {
|
||||
d := dummyStruct{}
|
||||
dp := unsafe.Pointer(&d)
|
||||
t := time.Now()
|
||||
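// Each sub-benchmark pairs j (idle-loop iterations between updates, spacing
// out the contended operations) with i (passed to b.SetParallelism).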
for _, j := range []int{100000000, 10000, 0} {
|
||||
for _, i := range []int{100000, 10} {
|
||||
b.Run(fmt.Sprintf("CAS/%v/%v", j, i), func(b *testing.B) {
|
||||
b.SetParallelism(i)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
n := &dummyStruct{
|
||||
b: t,
|
||||
}
|
||||
for pb.Next() {
|
||||
for y := 0; y < j; y++ {
|
||||
}
|
||||
for {
|
||||
v := (*dummyStruct)(atomic.LoadPointer(&dp))
|
||||
n.a = v.a + 1
|
||||
if atomic.CompareAndSwapPointer(&dp, unsafe.Pointer(v), unsafe.Pointer(n)) {
|
||||
n = v
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
var mu sync.Mutex
|
||||
for _, j := range []int{100000000, 10000, 0} {
|
||||
for _, i := range []int{100000, 10} {
|
||||
b.Run(fmt.Sprintf("Mutex/%v/%v", j, i), func(b *testing.B) {
|
||||
b.SetParallelism(i)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
for y := 0; y < j; y++ {
|
||||
}
|
||||
mu.Lock()
|
||||
d.a++
|
||||
d.b = t
|
||||
mu.Unlock()
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type myFooer struct{}
|
||||
|
||||
func (myFooer) Foo() {}
|
||||
|
||||
type fooer interface {
|
||||
Foo()
|
||||
}
|
||||
|
||||
func BenchmarkInterfaceTypeAssertion(b *testing.B) {
|
||||
// Call a separate function to avoid compiler optimizations.
|
||||
runInterfaceTypeAssertion(b, myFooer{})
|
||||
}
|
||||
|
||||
func runInterfaceTypeAssertion(b *testing.B, fer interface{}) {
|
||||
x := 0
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if _, ok := fer.(fooer); ok {
|
||||
x++
|
||||
}
|
||||
}
|
||||
b.StopTimer()
|
||||
if x != b.N {
|
||||
b.Fatal("error")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkStructTypeAssertion(b *testing.B) {
|
||||
// Call a separate function to avoid compiler optimizations.
|
||||
runStructTypeAssertion(b, myFooer{})
|
||||
}
|
||||
|
||||
func runStructTypeAssertion(b *testing.B, fer interface{}) {
|
||||
x := 0
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if _, ok := fer.(myFooer); ok {
|
||||
x++
|
||||
}
|
||||
}
|
||||
b.StopTimer()
|
||||
if x != b.N {
|
||||
b.Fatal("error")
|
||||
}
|
||||
}
|
187
vendor/google.golang.org/grpc/benchmark/run_bench.sh
generated
vendored
Executable file
@@ -0,0 +1,187 @@
|
|||
#!/bin/bash
|
||||
|
||||
rpcs=(1)
|
||||
conns=(1)
|
||||
warmup=10
|
||||
dur=10
|
||||
reqs=(1)
|
||||
resps=(1)
|
||||
rpc_types=(unary)
|
||||
|
||||
# idx[0] = idx value for rpcs
|
||||
# idx[1] = idx value for conns
|
||||
# idx[2] = idx value for reqs
|
||||
# idx[3] = idx value for resps
|
||||
# idx[4] = idx value for rpc_types
|
||||
idx=(0 0 0 0 0)
|
||||
idx_max=(1 1 1 1 1)
|
||||
|
||||
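# inc advances idx like an odometer over (rpcs, conns, reqs, resps, rpc_types);
# once every index has wrapped back to 0, all combinations have run and we exit.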
inc()
|
||||
{
|
||||
for i in $(seq $((${#idx[@]}-1)) -1 0); do
|
||||
idx[${i}]=$((${idx[${i}]}+1))
|
||||
if [ ${idx[${i}]} == ${idx_max[${i}]} ]; then
|
||||
idx[${i}]=0
|
||||
else
|
||||
break
|
||||
fi
|
||||
done
|
||||
local fin
|
||||
fin=1
|
||||
# Check to see if we have looped back to the beginning.
|
||||
for v in ${idx[@]}; do
|
||||
if [ ${v} != 0 ]; then
|
||||
fin=0
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ ${fin} == 1 ]; then
|
||||
rm -Rf ${out_dir}
|
||||
clean_and_die 0
|
||||
fi
|
||||
}
|
||||
|
||||
clean_and_die() {
|
||||
rm -Rf ${out_dir}
|
||||
exit $1
|
||||
}
|
||||
|
||||
run(){
|
||||
local nr
|
||||
nr=${rpcs[${idx[0]}]}
|
||||
local nc
|
||||
nc=${conns[${idx[1]}]}
|
||||
req_sz=${reqs[${idx[2]}]}
|
||||
resp_sz=${resps[${idx[3]}]}
|
||||
r_type=${rpc_types[${idx[4]}]}
|
||||
# The following runs one benchmark
|
||||
base_port=50051
|
||||
delta=0
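# delta is bumped and the run retried on the next port whenever the client
# fails (e.g. the port is already in use), giving up after 10 attempts.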
|
||||
test_name="r_"${nr}"_c_"${nc}"_req_"${req_sz}"_resp_"${resp_sz}"_"${r_type}"_"$(date +%s)
|
||||
echo "================================================================================"
|
||||
echo ${test_name}
|
||||
while :
|
||||
do
|
||||
port=$((${base_port}+${delta}))
|
||||
|
||||
# Launch the server in background
|
||||
${out_dir}/server --port=${port} --test_name="Server_"${test_name}&
|
||||
server_pid=$(echo $!)
|
||||
|
||||
# Launch the client
|
||||
${out_dir}/client --port=${port} --d=${dur} --w=${warmup} --r=${nr} --c=${nc} --req=${req_sz} --resp=${resp_sz} --rpc_type=${r_type} --test_name="client_"${test_name}
|
||||
client_status=$(echo $?)
|
||||
|
||||
kill -INT ${server_pid}
|
||||
wait ${server_pid}
|
||||
|
||||
if [ ${client_status} == 0 ]; then
|
||||
break
|
||||
fi
|
||||
|
||||
delta=$((${delta}+1))
|
||||
if [ ${delta} == 10 ]; then
|
||||
echo "Continuous 10 failed runs. Exiting now."
|
||||
rm -Rf ${out_dir}
|
||||
clean_and_die 1
|
||||
fi
|
||||
done
|
||||
|
||||
}
|
||||
|
||||
set_param(){
|
||||
local argname=$1
|
||||
shift
|
||||
local idx=$1
|
||||
shift
|
||||
if [ $# -eq 0 ]; then
|
||||
echo "${argname} not specified"
|
||||
exit 1
|
||||
fi
|
||||
PARAM=($(echo $1 | sed 's/,/ /g'))
|
||||
if [ ${idx} -lt 0 ]; then
|
||||
return
|
||||
fi
|
||||
idx_max[${idx}]=${#PARAM[@]}
|
||||
}
|
||||
|
||||
while [ $# -gt 0 ]; do
|
||||
case "$1" in
|
||||
-r)
|
||||
shift
|
||||
set_param "number of rpcs" 0 $1
|
||||
rpcs=(${PARAM[@]})
|
||||
shift
|
||||
;;
|
||||
-c)
|
||||
shift
|
||||
set_param "number of connections" 1 $1
|
||||
conns=(${PARAM[@]})
|
||||
shift
|
||||
;;
|
||||
-w)
|
||||
shift
|
||||
set_param "warm-up period" -1 $1
|
||||
warmup=${PARAM}
|
||||
shift
|
||||
;;
|
||||
-d)
|
||||
shift
|
||||
set_param "duration" -1 $1
|
||||
dur=${PARAM}
|
||||
shift
|
||||
;;
|
||||
-req)
|
||||
shift
|
||||
set_param "request size" 2 $1
|
||||
reqs=(${PARAM[@]})
|
||||
shift
|
||||
;;
|
||||
-resp)
|
||||
shift
|
||||
set_param "response size" 3 $1
|
||||
resps=(${PARAM[@]})
|
||||
shift
|
||||
;;
|
||||
-rpc_type)
|
||||
shift
|
||||
set_param "rpc type" 4 $1
|
||||
rpc_types=(${PARAM[@]})
|
||||
shift
|
||||
;;
|
||||
-h|--help)
|
||||
echo "Following are valid options:"
|
||||
echo
|
||||
echo "-h, --help show brief help"
|
||||
echo "-w warm-up duration in seconds, default value is 10"
|
||||
echo "-d benchmark duration in seconds, default value is 60"
|
||||
echo ""
|
||||
echo "Each of the following can have multiple comma separated values."
|
||||
echo ""
|
||||
echo "-r number of RPCs, default value is 1"
|
||||
echo "-c number of Connections, default value is 1"
|
||||
echo "-req req size in bytes, default value is 1"
|
||||
echo "-resp resp size in bytes, default value is 1"
|
||||
echo "-rpc_type valid values are unary|streaming, default is unary"
|
||||
;;
|
||||
*)
|
||||
echo "Incorrect option $1"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Build server and client
|
||||
out_dir=$(mktemp -d oss_benchXXX)
|
||||
|
||||
go build -o ${out_dir}/server $GOPATH/src/google.golang.org/grpc/benchmark/server/main.go && go build -o ${out_dir}/client $GOPATH/src/google.golang.org/grpc/benchmark/client/main.go
|
||||
if [ $? != 0 ]; then
|
||||
clean_and_die 1
|
||||
fi
|
||||
|
||||
|
||||
while :
|
||||
do
|
||||
run
|
||||
inc
|
||||
done
|
81
vendor/google.golang.org/grpc/benchmark/server/main.go
generated
vendored
Normal file
@@ -0,0 +1,81 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"net"
|
||||
_ "net/http/pprof"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/benchmark"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/syscall"
|
||||
)
|
||||
|
||||
var (
|
||||
port = flag.String("port", "50051", "Localhost port to listen on.")
|
||||
testName = flag.String("test_name", "", "Name of the test used for creating profiles.")
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
if *testName == "" {
|
||||
grpclog.Fatalf("test name not set")
|
||||
}
|
||||
lis, err := net.Listen("tcp", ":"+*port)
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Failed to listen: %v", err)
|
||||
}
|
||||
defer lis.Close()
|
||||
|
||||
cf, err := os.Create("/tmp/" + *testName + ".cpu")
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Failed to create file: %v", err)
|
||||
}
|
||||
defer cf.Close()
|
||||
pprof.StartCPUProfile(cf)
|
||||
cpuBeg := syscall.GetCPUTime()
|
||||
// Launch server in a separate goroutine.
|
||||
stop := benchmark.StartServer(benchmark.ServerInfo{Type: "protobuf", Listener: lis})
|
||||
// Wait on OS terminate signal.
|
||||
ch := make(chan os.Signal, 1)
|
||||
signal.Notify(ch, os.Interrupt)
|
||||
<-ch
|
||||
cpu := time.Duration(syscall.GetCPUTime() - cpuBeg)
|
||||
stop()
|
||||
pprof.StopCPUProfile()
|
||||
mf, err := os.Create("/tmp/" + *testName + ".mem")
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Failed to create file: %v", err)
|
||||
}
|
||||
defer mf.Close()
|
||||
runtime.GC() // materialize all statistics
|
||||
if err := pprof.WriteHeapProfile(mf); err != nil {
|
||||
grpclog.Fatalf("Failed to write memory profile: %v", err)
|
||||
}
|
||||
fmt.Println("Server CPU utilization:", cpu)
|
||||
fmt.Println("Server CPU profile:", cf.Name())
|
||||
fmt.Println("Server Mem Profile:", mf.Name())
|
||||
}
|
222
vendor/google.golang.org/grpc/benchmark/stats/histogram.go
generated
vendored
Normal file
@@ -0,0 +1,222 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package stats
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Histogram accumulates values in the form of a histogram with
|
||||
// exponentially increased bucket sizes.
|
||||
type Histogram struct {
|
||||
// Count is the total number of values added to the histogram.
|
||||
Count int64
|
||||
// Sum is the sum of all the values added to the histogram.
|
||||
Sum int64
|
||||
// SumOfSquares is the sum of squares of all values.
|
||||
SumOfSquares int64
|
||||
// Min is the minimum of all the values added to the histogram.
|
||||
Min int64
|
||||
// Max is the maximum of all the values added to the histogram.
|
||||
Max int64
|
||||
// Buckets contains all the buckets of the histogram.
|
||||
Buckets []HistogramBucket
|
||||
|
||||
opts HistogramOptions
|
||||
logBaseBucketSize float64
|
||||
oneOverLogOnePlusGrowthFactor float64
|
||||
}
|
||||
|
||||
// HistogramOptions contains the parameters that define the histogram's buckets.
|
||||
// The first bucket of the created histogram (with index 0) contains [min, min+n)
|
||||
// where n = BaseBucketSize, min = MinValue.
|
||||
// Bucket i (i>=1) contains [min + n * m^(i-1), min + n * m^i), where m = 1+GrowthFactor.
|
||||
// The type of the values is int64.
|
||||
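// For example, with MinValue=0, BaseBucketSize=1 and GrowthFactor=1 (m=2),
// the buckets are [0,1), [1,2), [2,4), [4,8), ...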
type HistogramOptions struct {
|
||||
// NumBuckets is the number of buckets.
|
||||
NumBuckets int
|
||||
// GrowthFactor is the growth factor of the buckets. A value of 0.1
|
||||
// indicates that bucket N+1 will be 10% larger than bucket N.
|
||||
GrowthFactor float64
|
||||
// BaseBucketSize is the size of the first bucket.
|
||||
BaseBucketSize float64
|
||||
// MinValue is the lower bound of the first bucket.
|
||||
MinValue int64
|
||||
}
|
||||
|
||||
// HistogramBucket represents one histogram bucket.
|
||||
type HistogramBucket struct {
|
||||
// LowBound is the lower bound of the bucket.
|
||||
LowBound float64
|
||||
// Count is the number of values in the bucket.
|
||||
Count int64
|
||||
}
|
||||
|
||||
// NewHistogram returns a pointer to a new Histogram object that was created
|
||||
// with the provided options.
|
||||
func NewHistogram(opts HistogramOptions) *Histogram {
|
||||
if opts.NumBuckets == 0 {
|
||||
opts.NumBuckets = 32
|
||||
}
|
||||
if opts.BaseBucketSize == 0.0 {
|
||||
opts.BaseBucketSize = 1.0
|
||||
}
|
||||
h := Histogram{
|
||||
Buckets: make([]HistogramBucket, opts.NumBuckets),
|
||||
Min: math.MaxInt64,
|
||||
Max: math.MinInt64,
|
||||
|
||||
opts: opts,
|
||||
logBaseBucketSize: math.Log(opts.BaseBucketSize),
|
||||
oneOverLogOnePlusGrowthFactor: 1 / math.Log(1+opts.GrowthFactor),
|
||||
}
|
||||
m := 1.0 + opts.GrowthFactor
|
||||
delta := opts.BaseBucketSize
|
||||
h.Buckets[0].LowBound = float64(opts.MinValue)
|
||||
for i := 1; i < opts.NumBuckets; i++ {
|
||||
h.Buckets[i].LowBound = float64(opts.MinValue) + delta
|
||||
delta = delta * m
|
||||
}
|
||||
return &h
|
||||
}
|
||||
|
||||
// Print writes textual output of the histogram values.
|
||||
func (h *Histogram) Print(w io.Writer) {
|
||||
h.PrintWithUnit(w, 1)
|
||||
}
|
||||
|
||||
// PrintWithUnit writes textual output of the histogram values.
|
||||
// Data in the histogram is divided by unit before printing.
|
||||
func (h *Histogram) PrintWithUnit(w io.Writer, unit float64) {
|
||||
avg := float64(h.Sum) / float64(h.Count)
|
||||
fmt.Fprintf(w, "Count: %d Min: %5.1f Max: %5.1f Avg: %.2f\n", h.Count, float64(h.Min)/unit, float64(h.Max)/unit, avg/unit)
|
||||
fmt.Fprintf(w, "%s\n", strings.Repeat("-", 60))
|
||||
if h.Count <= 0 {
|
||||
return
|
||||
}
|
||||
|
||||
maxBucketDigitLen := len(strconv.FormatFloat(h.Buckets[len(h.Buckets)-1].LowBound, 'f', 6, 64))
|
||||
if maxBucketDigitLen < 3 {
|
||||
// For "inf".
|
||||
maxBucketDigitLen = 3
|
||||
}
|
||||
maxCountDigitLen := len(strconv.FormatInt(h.Count, 10))
|
||||
percentMulti := 100 / float64(h.Count)
|
||||
|
||||
accCount := int64(0)
|
||||
for i, b := range h.Buckets {
|
||||
fmt.Fprintf(w, "[%*f, ", maxBucketDigitLen, b.LowBound/unit)
|
||||
if i+1 < len(h.Buckets) {
|
||||
fmt.Fprintf(w, "%*f)", maxBucketDigitLen, h.Buckets[i+1].LowBound/unit)
|
||||
} else {
|
||||
fmt.Fprintf(w, "%*s)", maxBucketDigitLen, "inf")
|
||||
}
|
||||
|
||||
accCount += b.Count
|
||||
fmt.Fprintf(w, " %*d %5.1f%% %5.1f%%", maxCountDigitLen, b.Count, float64(b.Count)*percentMulti, float64(accCount)*percentMulti)
|
||||
|
||||
const barScale = 0.1
|
||||
barLength := int(float64(b.Count)*percentMulti*barScale + 0.5)
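// i.e. one '#' per 10 percentage points of this bucket's share, rounded.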
|
||||
fmt.Fprintf(w, " %s\n", strings.Repeat("#", barLength))
|
||||
}
|
||||
}
|
||||
|
||||
// String returns the textual output of the histogram values as string.
|
||||
func (h *Histogram) String() string {
|
||||
var b bytes.Buffer
|
||||
h.Print(&b)
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// Clear resets all the content of the histogram.
|
||||
func (h *Histogram) Clear() {
|
||||
h.Count = 0
|
||||
h.Sum = 0
|
||||
h.SumOfSquares = 0
|
||||
h.Min = math.MaxInt64
|
||||
h.Max = math.MinInt64
|
||||
for i := range h.Buckets {
|
||||
h.Buckets[i].Count = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Opts returns a copy of the options used to create the Histogram.
|
||||
func (h *Histogram) Opts() HistogramOptions {
|
||||
return h.opts
|
||||
}
|
||||
|
||||
// Add adds a value to the histogram.
|
||||
func (h *Histogram) Add(value int64) error {
|
||||
bucket, err := h.findBucket(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h.Buckets[bucket].Count++
|
||||
h.Count++
|
||||
h.Sum += value
|
||||
h.SumOfSquares += value * value
|
||||
if value < h.Min {
|
||||
h.Min = value
|
||||
}
|
||||
if value > h.Max {
|
||||
h.Max = value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *Histogram) findBucket(value int64) (int, error) {
|
||||
delta := float64(value - h.opts.MinValue)
|
||||
var b int
|
||||
if delta >= h.opts.BaseBucketSize {
|
||||
// b = log_{1+growthFactor} (delta / baseBucketSize) + 1
|
||||
// = log(delta / baseBucketSize) / log(1+growthFactor) + 1
|
||||
// = (log(delta) - log(baseBucketSize)) * (1 / log(1+growthFactor)) + 1
|
||||
b = int((math.Log(delta)-h.logBaseBucketSize)*h.oneOverLogOnePlusGrowthFactor + 1)
|
||||
}
|
||||
if b >= len(h.Buckets) {
|
||||
return 0, fmt.Errorf("no bucket for value: %d", value)
|
||||
}
|
||||
return b, nil
|
||||
}
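// Illustrative check of the formula above: with MinValue=0, BaseBucketSize=1
// and GrowthFactor=1, findBucket(5) yields int(log(5)/log(2)+1) = 3, and 5
// indeed lies in bucket 3, [4, 8).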
|
||||
|
||||
// Merge takes another histogram h2, and merges its content into h.
|
||||
// The two histograms must have been created with equivalent HistogramOptions.
|
||||
func (h *Histogram) Merge(h2 *Histogram) {
|
||||
if h.opts != h2.opts {
|
||||
log.Fatalf("failed to merge histograms, created by inequivalent options")
|
||||
}
|
||||
h.Count += h2.Count
|
||||
h.Sum += h2.Sum
|
||||
h.SumOfSquares += h2.SumOfSquares
|
||||
if h2.Min < h.Min {
|
||||
h.Min = h2.Min
|
||||
}
|
||||
if h2.Max > h.Max {
|
||||
h.Max = h2.Max
|
||||
}
|
||||
for i, b := range h2.Buckets {
|
||||
h.Buckets[i].Count += b.Count
|
||||
}
|
||||
}
|
302
vendor/google.golang.org/grpc/benchmark/stats/stats.go
generated
vendored
Normal file
@@ -0,0 +1,302 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package stats
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Features contains the configuration fields for a benchmark.
|
||||
type Features struct {
|
||||
NetworkMode string
|
||||
EnableTrace bool
|
||||
Latency time.Duration
|
||||
Kbps int
|
||||
Mtu int
|
||||
MaxConcurrentCalls int
|
||||
ReqSizeBytes int
|
||||
RespSizeBytes int
|
||||
EnableCompressor bool
|
||||
EnableChannelz bool
|
||||
}
|
||||
|
||||
// String returns the textual output of the Features as string.
|
||||
func (f Features) String() string {
|
||||
return fmt.Sprintf("traceMode_%t-latency_%s-kbps_%#v-MTU_%#v-maxConcurrentCalls_"+
|
||||
"%#v-reqSize_%#vB-respSize_%#vB-Compressor_%t", f.EnableTrace,
|
||||
f.Latency.String(), f.Kbps, f.Mtu, f.MaxConcurrentCalls, f.ReqSizeBytes, f.RespSizeBytes, f.EnableCompressor)
|
||||
}
|
||||
|
||||
// ConciseString returns the concise textual output of the Features as string, skipping
|
||||
// settings with default values.
|
||||
func (f Features) ConciseString() string {
|
||||
noneEmptyPos := []bool{f.EnableTrace, f.Latency != 0, f.Kbps != 0, f.Mtu != 0, true, true, true, f.EnableCompressor, f.EnableChannelz}
|
||||
return PartialPrintString(noneEmptyPos, f, false)
|
||||
}
|
||||
|
||||
// PartialPrintString can print certain features with different formats.
|
||||
func PartialPrintString(noneEmptyPos []bool, f Features, shared bool) string {
|
||||
s := ""
|
||||
var (
|
||||
prefix, suffix, linker string
|
||||
isNetwork bool
|
||||
)
|
||||
if shared {
|
||||
suffix = "\n"
|
||||
linker = ": "
|
||||
} else {
|
||||
prefix = "-"
|
||||
linker = "_"
|
||||
}
|
||||
if noneEmptyPos[0] {
|
||||
s += fmt.Sprintf("%sTrace%s%t%s", prefix, linker, f.EnableTrace, suffix)
|
||||
}
|
||||
if shared && f.NetworkMode != "" {
|
||||
s += fmt.Sprintf("Network: %s \n", f.NetworkMode)
|
||||
isNetwork = true
|
||||
}
|
||||
if !isNetwork {
|
||||
if noneEmptyPos[1] {
|
||||
s += fmt.Sprintf("%slatency%s%s%s", prefix, linker, f.Latency.String(), suffix)
|
||||
}
|
||||
if noneEmptyPos[2] {
|
||||
s += fmt.Sprintf("%skbps%s%#v%s", prefix, linker, f.Kbps, suffix)
|
||||
}
|
||||
if noneEmptyPos[3] {
|
||||
s += fmt.Sprintf("%sMTU%s%#v%s", prefix, linker, f.Mtu, suffix)
|
||||
}
|
||||
}
|
||||
if noneEmptyPos[4] {
|
||||
s += fmt.Sprintf("%sCallers%s%#v%s", prefix, linker, f.MaxConcurrentCalls, suffix)
|
||||
}
|
||||
if noneEmptyPos[5] {
|
||||
s += fmt.Sprintf("%sreqSize%s%#vB%s", prefix, linker, f.ReqSizeBytes, suffix)
|
||||
}
|
||||
if noneEmptyPos[6] {
|
||||
s += fmt.Sprintf("%srespSize%s%#vB%s", prefix, linker, f.RespSizeBytes, suffix)
|
||||
}
|
||||
if noneEmptyPos[7] {
|
||||
s += fmt.Sprintf("%sCompressor%s%t%s", prefix, linker, f.EnableCompressor, suffix)
|
||||
}
|
||||
if noneEmptyPos[8] {
|
||||
s += fmt.Sprintf("%sChannelz%s%t%s", prefix, linker, f.EnableChannelz, suffix)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
type percentLatency struct {
|
||||
Percent int
|
||||
Value time.Duration
|
||||
}
|
||||
|
||||
// BenchResults records features and result of a benchmark.
|
||||
type BenchResults struct {
|
||||
RunMode string
|
||||
Features Features
|
||||
Latency []percentLatency
|
||||
Operations int
|
||||
NsPerOp int64
|
||||
AllocedBytesPerOp int64
|
||||
AllocsPerOp int64
|
||||
SharedPosion []bool
|
||||
}
|
||||
|
||||
// SetBenchmarkResult sets the features and basic results of the benchmark.
|
||||
func (stats *Stats) SetBenchmarkResult(mode string, features Features, o int, allocdBytes, allocs int64, sharedPos []bool) {
|
||||
stats.result.RunMode = mode
|
||||
stats.result.Features = features
|
||||
stats.result.Operations = o
|
||||
stats.result.AllocedBytesPerOp = allocdBytes
|
||||
stats.result.AllocsPerOp = allocs
|
||||
stats.result.SharedPosion = sharedPos
|
||||
}
|
||||
|
||||
// GetBenchmarkResults returns the results of the benchmark, including its features.
|
||||
func (stats *Stats) GetBenchmarkResults() BenchResults {
|
||||
return stats.result
|
||||
}
|
||||
|
||||
// BenchString outputs the latency stats formatted as time + unit.
|
||||
func (stats *Stats) BenchString() string {
|
||||
stats.maybeUpdate()
|
||||
s := stats.result
|
||||
res := s.RunMode + "-" + s.Features.String() + ": \n"
|
||||
if len(s.Latency) != 0 {
|
||||
var statsUnit = s.Latency[0].Value
|
||||
var timeUnit = fmt.Sprintf("%v", statsUnit)[1:]
|
||||
for i := 1; i < len(s.Latency)-1; i++ {
|
||||
res += fmt.Sprintf("%d_Latency: %s %s \t", s.Latency[i].Percent,
|
||||
strconv.FormatFloat(float64(s.Latency[i].Value)/float64(statsUnit), 'f', 4, 64), timeUnit)
|
||||
}
|
||||
res += fmt.Sprintf("Avg latency: %s %s \t",
|
||||
strconv.FormatFloat(float64(s.Latency[len(s.Latency)-1].Value)/float64(statsUnit), 'f', 4, 64), timeUnit)
|
||||
}
|
||||
res += fmt.Sprintf("Count: %v \t", s.Operations)
|
||||
res += fmt.Sprintf("%v Bytes/op\t", s.AllocedBytesPerOp)
|
||||
res += fmt.Sprintf("%v Allocs/op\t", s.AllocsPerOp)
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
// Stats is a simple helper for gathering additional statistics like histograms
|
||||
// during benchmarks. It is not thread-safe.
|
||||
type Stats struct {
|
||||
numBuckets int
|
||||
unit time.Duration
|
||||
min, max int64
|
||||
histogram *Histogram
|
||||
|
||||
durations durationSlice
|
||||
dirty bool
|
||||
|
||||
sortLatency bool
|
||||
result BenchResults
|
||||
}
|
||||
|
||||
type durationSlice []time.Duration
|
||||
|
||||
// NewStats creates a new Stats instance. If numBuckets is not positive,
|
||||
// the default value (16) will be used.
|
||||
func NewStats(numBuckets int) *Stats {
|
||||
if numBuckets <= 0 {
|
||||
numBuckets = 16
|
||||
}
|
||||
return &Stats{
|
||||
// Use one more bucket for the last unbounded bucket.
|
||||
numBuckets: numBuckets + 1,
|
||||
durations: make(durationSlice, 0, 100000),
|
||||
}
|
||||
}
|
||||
|
||||
// Add adds an elapsed time per operation to the stats.
|
||||
func (stats *Stats) Add(d time.Duration) {
|
||||
stats.durations = append(stats.durations, d)
|
||||
stats.dirty = true
|
||||
}
|
||||
|
||||
// Clear resets the stats, removing all values.
|
||||
func (stats *Stats) Clear() {
|
||||
stats.durations = stats.durations[:0]
|
||||
stats.histogram = nil
|
||||
stats.dirty = false
|
||||
stats.result = BenchResults{}
|
||||
}
|
||||
|
||||
// Sort methods for durationSlice (sort.Interface implementation).
|
||||
func (a durationSlice) Len() int { return len(a) }
|
||||
func (a durationSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a durationSlice) Less(i, j int) bool { return a[i] < a[j] }
|
||||
func max(a, b int64) int64 {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// maybeUpdate updates the internal stat data if any new stats have been
|
||||
// added since the last update.
|
||||
func (stats *Stats) maybeUpdate() {
|
||||
if !stats.dirty {
|
||||
return
|
||||
}
|
||||
|
||||
if stats.sortLatency {
|
||||
sort.Sort(stats.durations)
|
||||
stats.min = int64(stats.durations[0])
|
||||
stats.max = int64(stats.durations[len(stats.durations)-1])
|
||||
}
|
||||
|
||||
stats.min = math.MaxInt64
|
||||
stats.max = 0
|
||||
for _, d := range stats.durations {
|
||||
if stats.min > int64(d) {
|
||||
stats.min = int64(d)
|
||||
}
|
||||
if stats.max < int64(d) {
|
||||
stats.max = int64(d)
|
||||
}
|
||||
}
|
||||
|
||||
// Use the largest unit that can represent the minimum time duration.
|
||||
stats.unit = time.Nanosecond
|
||||
for _, u := range []time.Duration{time.Microsecond, time.Millisecond, time.Second} {
|
||||
if stats.min <= int64(u) {
|
||||
break
|
||||
}
|
||||
stats.unit = u
|
||||
}
|
||||
|
||||
numBuckets := stats.numBuckets
|
||||
if n := int(stats.max - stats.min + 1); n < numBuckets {
|
||||
numBuckets = n
|
||||
}
|
||||
stats.histogram = NewHistogram(HistogramOptions{
|
||||
NumBuckets: numBuckets,
|
||||
// max - min (the lower bound of the last bucket) = (1 + growthFactor)^(numBuckets-2) * baseBucketSize.
|
||||
GrowthFactor: math.Pow(float64(stats.max-stats.min), 1/float64(numBuckets-2)) - 1,
|
||||
BaseBucketSize: 1.0,
|
||||
MinValue: stats.min})
|
||||
|
||||
for _, d := range stats.durations {
|
||||
stats.histogram.Add(int64(d))
|
||||
}
|
||||
|
||||
stats.dirty = false
|
||||
|
||||
if stats.durations.Len() != 0 {
|
||||
var percentToObserve = []int{50, 90, 99}
|
||||
// The first record stores the time unit used for the latency results.
|
||||
stats.result.Latency = append(stats.result.Latency, percentLatency{Percent: -1, Value: stats.unit})
|
||||
for _, position := range percentToObserve {
|
||||
stats.result.Latency = append(stats.result.Latency, percentLatency{Percent: position, Value: stats.durations[max(stats.histogram.Count*int64(position)/100-1, 0)]})
|
||||
}
|
||||
// The last record stores the average latency.
|
||||
avg := float64(stats.histogram.Sum) / float64(stats.histogram.Count)
|
||||
stats.result.Latency = append(stats.result.Latency, percentLatency{Percent: -1, Value: time.Duration(avg)})
|
||||
}
|
||||
}
|
||||
|
||||
// SortLatency enables sorting of the recorded durations when the stats are next updated.
|
||||
func (stats *Stats) SortLatency() {
|
||||
stats.sortLatency = true
|
||||
}
|
||||
|
||||
// Print writes textual output of the Stats.
|
||||
func (stats *Stats) Print(w io.Writer) {
|
||||
stats.maybeUpdate()
|
||||
if stats.histogram == nil {
|
||||
fmt.Fprint(w, "Histogram (empty)\n")
|
||||
} else {
|
||||
fmt.Fprintf(w, "Histogram (unit: %s)\n", fmt.Sprintf("%v", stats.unit)[1:])
|
||||
stats.histogram.PrintWithUnit(w, float64(stats.unit))
|
||||
}
|
||||
}
|
||||
|
||||
// String returns the textual output of the Stats as string.
|
||||
func (stats *Stats) String() string {
|
||||
var b bytes.Buffer
|
||||
stats.Print(&b)
|
||||
return b.String()
|
||||
}
|
208
vendor/google.golang.org/grpc/benchmark/stats/util.go
generated
vendored
Normal file
@@ -0,0 +1,208 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package stats
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
curB *testing.B
|
||||
curBenchName string
|
||||
curStats map[string]*Stats
|
||||
|
||||
orgStdout *os.File
|
||||
nextOutPos int
|
||||
|
||||
injectCond *sync.Cond
|
||||
injectDone chan struct{}
|
||||
)
|
||||
|
||||
// AddStats adds a new unnamed Stats instance to the current benchmark. You need
|
||||
// to run benchmarks by calling RunTestMain() to inject the stats to the
|
||||
// benchmark results. If numBuckets is not positive, the default value (16) will
|
||||
// be used. Please note that this calls b.ResetTimer() since it may be blocked
|
||||
// until the previous benchmark's stats are printed out. So AddStats() should
|
||||
// typically be called at the very beginning of each benchmark function.
|
||||
func AddStats(b *testing.B, numBuckets int) *Stats {
|
||||
return AddStatsWithName(b, "", numBuckets)
|
||||
}
|
||||
|
||||
// AddStatsWithName adds a new named Stats instance to the current benchmark.
|
||||
// With this, you can add multiple stats in a single benchmark. You need
|
||||
// to run benchmarks by calling RunTestMain() to inject the stats to the
|
||||
// benchmark results. If numBuckets is not positive, the default value (16) will
|
||||
// be used. Please note that this calls b.ResetTimer() since it may be blocked
|
||||
// until the previous benchmark's stats are printed out. So AddStatsWithName()
|
||||
// should typically be called at the very beginning of each benchmark function.
|
||||
func AddStatsWithName(b *testing.B, name string, numBuckets int) *Stats {
|
||||
var benchName string
|
||||
for i := 1; ; i++ {
|
||||
pc, _, _, ok := runtime.Caller(i)
|
||||
if !ok {
|
||||
panic("benchmark function not found")
|
||||
}
|
||||
p := strings.Split(runtime.FuncForPC(pc).Name(), ".")
|
||||
benchName = p[len(p)-1]
|
||||
if strings.HasPrefix(benchName, "run") {
|
||||
break
|
||||
}
|
||||
}
|
||||
procs := runtime.GOMAXPROCS(-1)
|
||||
if procs != 1 {
|
||||
benchName = fmt.Sprintf("%s-%d", benchName, procs)
|
||||
}
|
||||
|
||||
stats := NewStats(numBuckets)
|
||||
|
||||
if injectCond != nil {
|
||||
// We need to wait until the previous benchmark stats is printed out.
|
||||
injectCond.L.Lock()
|
||||
for curB != nil && curBenchName != benchName {
|
||||
injectCond.Wait()
|
||||
}
|
||||
|
||||
curB = b
|
||||
curBenchName = benchName
|
||||
curStats[name] = stats
|
||||
|
||||
injectCond.L.Unlock()
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
return stats
|
||||
}
|
||||
|
||||
// RunTestMain runs the tests with enabling injection of benchmark stats. It
|
||||
// returns an exit code to pass to os.Exit.
|
||||
func RunTestMain(m *testing.M) int {
|
||||
startStatsInjector()
|
||||
defer stopStatsInjector()
|
||||
return m.Run()
|
||||
}
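// An illustrative usage sketch (function names like BenchmarkFoo/runFoo are
// hypothetical; AddStats walks the stack looking for a caller whose name
// starts with "run"):
//
//	func TestMain(m *testing.M) {
//		os.Exit(stats.RunTestMain(m))
//	}
//
//	func BenchmarkFoo(b *testing.B) { runFoo(b) }
//
//	func runFoo(b *testing.B) {
//		s := stats.AddStats(b, 0) // non-positive numBuckets -> default of 16
//		for i := 0; i < b.N; i++ {
//			start := time.Now()
//			// ... operation under test ...
//			s.Add(time.Since(start))
//		}
//	}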
|
||||
|
||||
// startStatsInjector starts injecting stats into benchmark results.
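// It swaps os.Stdout for a pipe, scans the pipe line by line, tees the output
// through to the real stdout, and prints the collected Stats right after each
// completed benchmark result line.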
|
||||
func startStatsInjector() {
|
||||
orgStdout = os.Stdout
|
||||
r, w, _ := os.Pipe()
|
||||
os.Stdout = w
|
||||
nextOutPos = 0
|
||||
|
||||
resetCurBenchStats()
|
||||
|
||||
injectCond = sync.NewCond(&sync.Mutex{})
|
||||
injectDone = make(chan struct{})
|
||||
go func() {
|
||||
defer close(injectDone)
|
||||
|
||||
scanner := bufio.NewScanner(r)
|
||||
scanner.Split(splitLines)
|
||||
for scanner.Scan() {
|
||||
injectStatsIfFinished(scanner.Text())
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// stopStatsInjector stops stats injection and restores os.Stdout.
|
||||
func stopStatsInjector() {
|
||||
os.Stdout.Close()
|
||||
<-injectDone
|
||||
injectCond = nil
|
||||
os.Stdout = orgStdout
|
||||
}
|
||||
|
||||
// splitLines is a split function for a bufio.Scanner that returns each line
|
||||
// of text, teeing the text to the original stdout even before each line ends.
|
||||
func splitLines(data []byte, eof bool) (advance int, token []byte, err error) {
|
||||
if eof && len(data) == 0 {
|
||||
return 0, nil, nil
|
||||
}
|
||||
|
||||
if i := bytes.IndexByte(data, '\n'); i >= 0 {
|
||||
orgStdout.Write(data[nextOutPos : i+1])
|
||||
nextOutPos = 0
|
||||
return i + 1, data[0:i], nil
|
||||
}
|
||||
|
||||
orgStdout.Write(data[nextOutPos:])
|
||||
nextOutPos = len(data)
|
||||
|
||||
if eof {
|
||||
// This is a final, non-terminated line. Return it.
|
||||
return len(data), data, nil
|
||||
}
|
||||
|
||||
return 0, nil, nil
|
||||
}
|
||||
|
||||
// injectStatsIfFinished prints out the stats if the current benchmark finishes.
|
||||
func injectStatsIfFinished(line string) {
|
||||
injectCond.L.Lock()
|
||||
defer injectCond.L.Unlock()
|
||||
// We assume that the benchmark results start with "Benchmark".
|
||||
if curB == nil || !strings.HasPrefix(line, "Benchmark") {
|
||||
return
|
||||
}
|
||||
|
||||
if !curB.Failed() {
|
||||
// Output all stats in alphabetical order.
|
||||
names := make([]string, 0, len(curStats))
|
||||
for name := range curStats {
|
||||
names = append(names, name)
|
||||
}
|
||||
sort.Strings(names)
|
||||
for _, name := range names {
|
||||
stats := curStats[name]
|
||||
// The output of stats starts with a header like "Histogram (unit: ms)"
|
||||
// followed by statistical properties and the buckets. Add the stats name
|
||||
// if it is a named stats instance and indent the lines as Go testing output does.
|
||||
lines := strings.Split(stats.String(), "\n")
|
||||
if n := len(lines); n > 0 {
|
||||
if name != "" {
|
||||
name = ": " + name
|
||||
}
|
||||
fmt.Fprintf(orgStdout, "--- %s%s\n", lines[0], name)
|
||||
for _, line := range lines[1 : n-1] {
|
||||
fmt.Fprintf(orgStdout, "\t%s\n", line)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resetCurBenchStats()
|
||||
injectCond.Signal()
|
||||
}
|
||||
|
||||
// resetCurBenchStats resets the current benchmark stats.
|
||||
func resetCurBenchStats() {
|
||||
curB = nil
|
||||
curBenchName = ""
|
||||
curStats = make(map[string]*Stats)
|
||||
}
|
386
vendor/google.golang.org/grpc/benchmark/worker/benchmark_client.go
generated
vendored
Normal file
@@ -0,0 +1,386 @@
|
|||
/*
 *
 * Copyright 2016 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package main

import (
	"flag"
	"math"
	"runtime"
	"sync"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/benchmark"
	testpb "google.golang.org/grpc/benchmark/grpc_testing"
	"google.golang.org/grpc/benchmark/stats"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/syscall"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/testdata"
)

var caFile = flag.String("ca_file", "", "The file containing the CA root cert file")

type lockingHistogram struct {
	mu        sync.Mutex
	histogram *stats.Histogram
}

func (h *lockingHistogram) add(value int64) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.histogram.Add(value)
}

// swap sets h.histogram to o and returns its old value.
func (h *lockingHistogram) swap(o *stats.Histogram) *stats.Histogram {
	h.mu.Lock()
	defer h.mu.Unlock()
	old := h.histogram
	h.histogram = o
	return old
}

func (h *lockingHistogram) mergeInto(merged *stats.Histogram) {
	h.mu.Lock()
	defer h.mu.Unlock()
	merged.Merge(h.histogram)
}
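
swap exists so that a snapshot can be taken cheaply: under the lock the live histogram is exchanged for a fresh one, and the old values are merged outside the lock (see getStats further down). A small self-contained sketch of the same pattern with a plain counter map instead of a stats.Histogram; the lockingCounts type and its names are illustrative only:

package main

import (
	"fmt"
	"sync"
)

// lockingCounts guards a map the same way lockingHistogram guards a histogram.
type lockingCounts struct {
	mu sync.Mutex
	m  map[string]int
}

func (c *lockingCounts) add(k string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.m[k]++
}

// swap installs a fresh map and returns the old one, so the caller can
// aggregate the snapshot without holding the lock.
func (c *lockingCounts) swap() map[string]int {
	c.mu.Lock()
	defer c.mu.Unlock()
	old := c.m
	c.m = make(map[string]int)
	return old
}

func main() {
	c := &lockingCounts{m: make(map[string]int)}
	c.add("rpc")
	c.add("rpc")
	snapshot := c.swap()
	fmt.Println(snapshot["rpc"], len(c.m)) // 2 0
}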

type benchmarkClient struct {
	closeConns        func()
	stop              chan bool
	lastResetTime     time.Time
	histogramOptions  stats.HistogramOptions
	lockingHistograms []lockingHistogram
	rusageLastReset   *syscall.Rusage
}

func printClientConfig(config *testpb.ClientConfig) {
	// Some config options are ignored:
	// - client type:
	//     will always create sync client
	// - async client threads.
	// - core list
	grpclog.Infof(" * client type: %v (ignored, always creates sync client)", config.ClientType)
	grpclog.Infof(" * async client threads: %v (ignored)", config.AsyncClientThreads)
	// TODO: use cores specified by CoreList when setting list of cores is supported in go.
	grpclog.Infof(" * core list: %v (ignored)", config.CoreList)

	grpclog.Infof(" - security params: %v", config.SecurityParams)
	grpclog.Infof(" - core limit: %v", config.CoreLimit)
	grpclog.Infof(" - payload config: %v", config.PayloadConfig)
	grpclog.Infof(" - rpcs per chann: %v", config.OutstandingRpcsPerChannel)
	grpclog.Infof(" - channel number: %v", config.ClientChannels)
	grpclog.Infof(" - load params: %v", config.LoadParams)
	grpclog.Infof(" - rpc type: %v", config.RpcType)
	grpclog.Infof(" - histogram params: %v", config.HistogramParams)
	grpclog.Infof(" - server targets: %v", config.ServerTargets)
}

func setupClientEnv(config *testpb.ClientConfig) {
	// Use all cpu cores available on machine by default.
	// TODO: Revisit this for the optimal default setup.
	if config.CoreLimit > 0 {
		runtime.GOMAXPROCS(int(config.CoreLimit))
	} else {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}
}

// createConns creates connections according to given config.
// It returns the connections and corresponding function to close them.
// It returns non-nil error if there is anything wrong.
func createConns(config *testpb.ClientConfig) ([]*grpc.ClientConn, func(), error) {
	var opts []grpc.DialOption

	// Sanity check for client type.
	switch config.ClientType {
	case testpb.ClientType_SYNC_CLIENT:
	case testpb.ClientType_ASYNC_CLIENT:
	default:
		return nil, nil, status.Errorf(codes.InvalidArgument, "unknown client type: %v", config.ClientType)
	}

	// Check and set security options.
	if config.SecurityParams != nil {
		if *caFile == "" {
			*caFile = testdata.Path("ca.pem")
		}
		creds, err := credentials.NewClientTLSFromFile(*caFile, config.SecurityParams.ServerHostOverride)
		if err != nil {
			return nil, nil, status.Errorf(codes.InvalidArgument, "failed to create TLS credentials %v", err)
		}
		opts = append(opts, grpc.WithTransportCredentials(creds))
	} else {
		opts = append(opts, grpc.WithInsecure())
	}

	// Use byteBufCodec if it is required.
	if config.PayloadConfig != nil {
		switch config.PayloadConfig.Payload.(type) {
		case *testpb.PayloadConfig_BytebufParams:
			opts = append(opts, grpc.WithDefaultCallOptions(grpc.CallCustomCodec(byteBufCodec{})))
		case *testpb.PayloadConfig_SimpleParams:
		default:
			return nil, nil, status.Errorf(codes.InvalidArgument, "unknown payload config: %v", config.PayloadConfig)
		}
	}

	// Create connections.
	connCount := int(config.ClientChannels)
	conns := make([]*grpc.ClientConn, connCount)
	for connIndex := 0; connIndex < connCount; connIndex++ {
		conns[connIndex] = benchmark.NewClientConn(config.ServerTargets[connIndex%len(config.ServerTargets)], opts...)
	}

	return conns, func() {
		for _, conn := range conns {
			conn.Close()
		}
	}, nil
}
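
Outside this harness, the TLS-or-plaintext choice that createConns makes maps onto ordinary grpc.Dial options. A hedged sketch under that assumption; the target address and empty CA path are placeholders, and benchmark.NewClientConn is replaced by a plain grpc.Dial:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func dial(target, caFile string) (*grpc.ClientConn, error) {
	var opts []grpc.DialOption
	if caFile != "" {
		creds, err := credentials.NewClientTLSFromFile(caFile, "")
		if err != nil {
			return nil, err
		}
		opts = append(opts, grpc.WithTransportCredentials(creds))
	} else {
		opts = append(opts, grpc.WithInsecure())
	}
	return grpc.Dial(target, opts...)
}

func main() {
	conn, err := dial("localhost:50051", "") // placeholder target, plaintext
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}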

func performRPCs(config *testpb.ClientConfig, conns []*grpc.ClientConn, bc *benchmarkClient) error {
	// Read payload size and type from config.
	var (
		payloadReqSize, payloadRespSize int
		payloadType                     string
	)
	if config.PayloadConfig != nil {
		switch c := config.PayloadConfig.Payload.(type) {
		case *testpb.PayloadConfig_BytebufParams:
			payloadReqSize = int(c.BytebufParams.ReqSize)
			payloadRespSize = int(c.BytebufParams.RespSize)
			payloadType = "bytebuf"
		case *testpb.PayloadConfig_SimpleParams:
			payloadReqSize = int(c.SimpleParams.ReqSize)
			payloadRespSize = int(c.SimpleParams.RespSize)
			payloadType = "protobuf"
		default:
			return status.Errorf(codes.InvalidArgument, "unknown payload config: %v", config.PayloadConfig)
		}
	}

	// TODO add open loop distribution.
	switch config.LoadParams.Load.(type) {
	case *testpb.LoadParams_ClosedLoop:
	case *testpb.LoadParams_Poisson:
		return status.Errorf(codes.Unimplemented, "unsupported load params: %v", config.LoadParams)
	default:
		return status.Errorf(codes.InvalidArgument, "unknown load params: %v", config.LoadParams)
	}

	rpcCountPerConn := int(config.OutstandingRpcsPerChannel)

	switch config.RpcType {
	case testpb.RpcType_UNARY:
		bc.doCloseLoopUnary(conns, rpcCountPerConn, payloadReqSize, payloadRespSize)
		// TODO open loop.
	case testpb.RpcType_STREAMING:
		bc.doCloseLoopStreaming(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, payloadType)
		// TODO open loop.
	default:
		return status.Errorf(codes.InvalidArgument, "unknown rpc type: %v", config.RpcType)
	}

	return nil
}

func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) {
	printClientConfig(config)

	// Set running environment like how many cores to use.
	setupClientEnv(config)

	conns, closeConns, err := createConns(config)
	if err != nil {
		return nil, err
	}

	rpcCountPerConn := int(config.OutstandingRpcsPerChannel)
	bc := &benchmarkClient{
		histogramOptions: stats.HistogramOptions{
			NumBuckets:     int(math.Log(config.HistogramParams.MaxPossible)/math.Log(1+config.HistogramParams.Resolution)) + 1,
			GrowthFactor:   config.HistogramParams.Resolution,
			BaseBucketSize: (1 + config.HistogramParams.Resolution),
			MinValue:       0,
		},
		lockingHistograms: make([]lockingHistogram, rpcCountPerConn*len(conns)),

		stop:            make(chan bool),
		lastResetTime:   time.Now(),
		closeConns:      closeConns,
		rusageLastReset: syscall.GetRusage(),
	}

	if err = performRPCs(config, conns, bc); err != nil {
		// Close all connections if performRPCs failed.
		closeConns()
		return nil, err
	}

	return bc, nil
}
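
The NumBuckets expression above sizes an exponential histogram: with growth factor r and maximum observable value M, roughly log(M)/log(1+r) buckets (plus one) cover the range. A tiny standalone sketch evaluating the same formula; the resolution and maximum below are made-up values, not defaults of this worker:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Illustrative inputs: 1% bucket growth, 60 seconds expressed in nanoseconds.
	resolution := 0.01
	maxPossible := 60e9

	numBuckets := int(math.Log(maxPossible)/math.Log(1+resolution)) + 1
	fmt.Println("buckets:", numBuckets) // 2495 exponentially sized buckets for these inputs
}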

func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int) {
	for ic, conn := range conns {
		client := testpb.NewBenchmarkServiceClient(conn)
		// For each connection, create rpcCountPerConn goroutines to do rpc.
		for j := 0; j < rpcCountPerConn; j++ {
			// Create histogram for each goroutine.
			idx := ic*rpcCountPerConn + j
			bc.lockingHistograms[idx].histogram = stats.NewHistogram(bc.histogramOptions)
			// Start goroutine on the created mutex and histogram.
			go func(idx int) {
				// TODO: do warm up if necessary.
				// Now relying on worker client to reserve time to do warm up.
				// The worker client needs to wait for some time after client is created,
				// before starting benchmark.
				done := make(chan bool)
				for {
					go func() {
						start := time.Now()
						if err := benchmark.DoUnaryCall(client, reqSize, respSize); err != nil {
							select {
							case <-bc.stop:
							case done <- false:
							}
							return
						}
						elapse := time.Since(start)
						bc.lockingHistograms[idx].add(int64(elapse))
						select {
						case <-bc.stop:
						case done <- true:
						}
					}()
					select {
					case <-bc.stop:
						return
					case <-done:
					}
				}
			}(idx)
		}
	}
}
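
Each goroutine above runs a closed loop: exactly one RPC in flight, timed with time.Since, recorded, then the next call, until bc.stop is closed. A stripped-down, self-contained sketch of that loop in which the gRPC call is replaced by a hypothetical doCall that just sleeps:

package main

import (
	"fmt"
	"time"
)

// doCall stands in for benchmark.DoUnaryCall; here it only sleeps.
func doCall() error {
	time.Sleep(time.Millisecond)
	return nil
}

func main() {
	stop := make(chan struct{})
	latencies := make(chan time.Duration, 1024)

	go func() {
		for {
			start := time.Now()
			if err := doCall(); err != nil {
				return
			}
			select {
			case <-stop:
				return // benchmark over; drop out of the closed loop
			case latencies <- time.Since(start):
			}
		}
	}()

	time.Sleep(50 * time.Millisecond)
	close(stop) // releases the worker, like bc.stop does above
	fmt.Println("samples collected:", len(latencies))
}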

func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, payloadType string) {
	var doRPC func(testpb.BenchmarkService_StreamingCallClient, int, int) error
	if payloadType == "bytebuf" {
		doRPC = benchmark.DoByteBufStreamingRoundTrip
	} else {
		doRPC = benchmark.DoStreamingRoundTrip
	}
	for ic, conn := range conns {
		// For each connection, create rpcCountPerConn goroutines to do rpc.
		for j := 0; j < rpcCountPerConn; j++ {
			c := testpb.NewBenchmarkServiceClient(conn)
			stream, err := c.StreamingCall(context.Background())
			if err != nil {
				grpclog.Fatalf("%v.StreamingCall(_) = _, %v", c, err)
			}
			// Create histogram for each goroutine.
			idx := ic*rpcCountPerConn + j
			bc.lockingHistograms[idx].histogram = stats.NewHistogram(bc.histogramOptions)
			// Start goroutine on the created mutex and histogram.
			go func(idx int) {
				// TODO: do warm up if necessary.
				// Now relying on worker client to reserve time to do warm up.
				// The worker client needs to wait for some time after client is created,
				// before starting benchmark.
				for {
					start := time.Now()
					if err := doRPC(stream, reqSize, respSize); err != nil {
						return
					}
					elapse := time.Since(start)
					bc.lockingHistograms[idx].add(int64(elapse))
					select {
					case <-bc.stop:
						return
					default:
					}
				}
			}(idx)
		}
	}
}

// getStats returns the stats for benchmark client.
// It resets lastResetTime and all histograms if argument reset is true.
func (bc *benchmarkClient) getStats(reset bool) *testpb.ClientStats {
	var wallTimeElapsed, uTimeElapsed, sTimeElapsed float64
	mergedHistogram := stats.NewHistogram(bc.histogramOptions)

	if reset {
		// Merging histogram may take some time.
		// Put all histograms aside and merge later.
		toMerge := make([]*stats.Histogram, len(bc.lockingHistograms))
		for i := range bc.lockingHistograms {
			toMerge[i] = bc.lockingHistograms[i].swap(stats.NewHistogram(bc.histogramOptions))
		}

		for i := 0; i < len(toMerge); i++ {
			mergedHistogram.Merge(toMerge[i])
		}

		wallTimeElapsed = time.Since(bc.lastResetTime).Seconds()
		latestRusage := syscall.GetRusage()
		uTimeElapsed, sTimeElapsed = syscall.CPUTimeDiff(bc.rusageLastReset, latestRusage)

		bc.rusageLastReset = latestRusage
		bc.lastResetTime = time.Now()
	} else {
		// Merge only, not reset.
		for i := range bc.lockingHistograms {
			bc.lockingHistograms[i].mergeInto(mergedHistogram)
		}

		wallTimeElapsed = time.Since(bc.lastResetTime).Seconds()
		uTimeElapsed, sTimeElapsed = syscall.CPUTimeDiff(bc.rusageLastReset, syscall.GetRusage())
	}

	b := make([]uint32, len(mergedHistogram.Buckets))
	for i, v := range mergedHistogram.Buckets {
		b[i] = uint32(v.Count)
	}
	return &testpb.ClientStats{
		Latencies: &testpb.HistogramData{
			Bucket:       b,
			MinSeen:      float64(mergedHistogram.Min),
			MaxSeen:      float64(mergedHistogram.Max),
			Sum:          float64(mergedHistogram.Sum),
			SumOfSquares: float64(mergedHistogram.SumOfSquares),
			Count:        float64(mergedHistogram.Count),
		},
		TimeElapsed: wallTimeElapsed,
		TimeUser:    uTimeElapsed,
		TimeSystem:  sTimeElapsed,
	}
}

func (bc *benchmarkClient) shutdown() {
	close(bc.stop)
	bc.closeConns()
}
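
shutdown closes bc.stop instead of sending on it because a close is observed by every receiver at once, releasing all per-RPC goroutines in one step. A minimal sketch of that close-as-broadcast idiom (the worker count is arbitrary):

package main

import (
	"fmt"
	"sync"
)

func main() {
	stop := make(chan bool) // closed, never sent on, mirroring benchmarkClient.stop
	var wg sync.WaitGroup

	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-stop // a receive on a closed channel returns immediately
			fmt.Println("worker", id, "stopped")
		}(i)
	}

	close(stop) // one close releases all three workers
	wg.Wait()
}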
184
vendor/google.golang.org/grpc/benchmark/worker/benchmark_server.go
generated
vendored
Normal file
@@ -0,0 +1,184 @@
/*
 *
 * Copyright 2016 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package main

import (
	"flag"
	"fmt"
	"net"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/benchmark"
	testpb "google.golang.org/grpc/benchmark/grpc_testing"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/syscall"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/testdata"
)

var (
	certFile = flag.String("tls_cert_file", "", "The TLS cert file")
	keyFile  = flag.String("tls_key_file", "", "The TLS key file")
)

type benchmarkServer struct {
	port            int
	cores           int
	closeFunc       func()
	mu              sync.RWMutex
	lastResetTime   time.Time
	rusageLastReset *syscall.Rusage
}

func printServerConfig(config *testpb.ServerConfig) {
	// Some config options are ignored:
	// - server type:
	//     will always start sync server
	// - async server threads
	// - core list
	grpclog.Infof(" * server type: %v (ignored, always starts sync server)", config.ServerType)
	grpclog.Infof(" * async server threads: %v (ignored)", config.AsyncServerThreads)
	// TODO: use cores specified by CoreList when setting list of cores is supported in go.
	grpclog.Infof(" * core list: %v (ignored)", config.CoreList)

	grpclog.Infof(" - security params: %v", config.SecurityParams)
	grpclog.Infof(" - core limit: %v", config.CoreLimit)
	grpclog.Infof(" - port: %v", config.Port)
	grpclog.Infof(" - payload config: %v", config.PayloadConfig)
}

func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchmarkServer, error) {
	printServerConfig(config)

	// Use all cpu cores available on machine by default.
	// TODO: Revisit this for the optimal default setup.
	numOfCores := runtime.NumCPU()
	if config.CoreLimit > 0 {
		numOfCores = int(config.CoreLimit)
	}
	runtime.GOMAXPROCS(numOfCores)

	var opts []grpc.ServerOption

	// Sanity check for server type.
	switch config.ServerType {
	case testpb.ServerType_SYNC_SERVER:
	case testpb.ServerType_ASYNC_SERVER:
	case testpb.ServerType_ASYNC_GENERIC_SERVER:
	default:
		return nil, status.Errorf(codes.InvalidArgument, "unknown server type: %v", config.ServerType)
	}

	// Set security options.
	if config.SecurityParams != nil {
		if *certFile == "" {
			*certFile = testdata.Path("server1.pem")
		}
		if *keyFile == "" {
			*keyFile = testdata.Path("server1.key")
		}
		creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile)
		if err != nil {
			grpclog.Fatalf("failed to generate credentials %v", err)
		}
		opts = append(opts, grpc.Creds(creds))
	}

	// Priority: config.Port > serverPort > default (0).
	port := int(config.Port)
	if port == 0 {
		port = serverPort
	}
	lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
	if err != nil {
		grpclog.Fatalf("Failed to listen: %v", err)
	}
	addr := lis.Addr().String()

	// Create different benchmark server according to config.
	var closeFunc func()
	if config.PayloadConfig != nil {
		switch payload := config.PayloadConfig.Payload.(type) {
		case *testpb.PayloadConfig_BytebufParams:
			opts = append(opts, grpc.CustomCodec(byteBufCodec{}))
			closeFunc = benchmark.StartServer(benchmark.ServerInfo{
				Type:     "bytebuf",
				Metadata: payload.BytebufParams.RespSize,
				Listener: lis,
			}, opts...)
		case *testpb.PayloadConfig_SimpleParams:
			closeFunc = benchmark.StartServer(benchmark.ServerInfo{
				Type:     "protobuf",
				Listener: lis,
			}, opts...)
		case *testpb.PayloadConfig_ComplexParams:
			return nil, status.Errorf(codes.Unimplemented, "unsupported payload config: %v", config.PayloadConfig)
		default:
			return nil, status.Errorf(codes.InvalidArgument, "unknown payload config: %v", config.PayloadConfig)
		}
	} else {
		// Start protobuf server if payload config is nil.
		closeFunc = benchmark.StartServer(benchmark.ServerInfo{
			Type:     "protobuf",
			Listener: lis,
		}, opts...)
	}

	grpclog.Infof("benchmark server listening at %v", addr)
	addrSplitted := strings.Split(addr, ":")
	p, err := strconv.Atoi(addrSplitted[len(addrSplitted)-1])
	if err != nil {
		grpclog.Fatalf("failed to get port number from server address: %v", err)
	}

	return &benchmarkServer{
		port:            p,
		cores:           numOfCores,
		closeFunc:       closeFunc,
		lastResetTime:   time.Now(),
		rusageLastReset: syscall.GetRusage(),
	}, nil
}
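
startBenchmarkServer recovers the actual bound port by splitting the listener address on ":", which matters when the listener was requested on port 0 and the OS chose one. net.SplitHostPort is an equivalent, IPv6-safe way to do the same; a small standalone sketch under that assumption:

package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	lis, err := net.Listen("tcp", ":0") // port 0: let the OS pick a free port
	if err != nil {
		panic(err)
	}
	defer lis.Close()

	_, portStr, err := net.SplitHostPort(lis.Addr().String())
	if err != nil {
		panic(err)
	}
	port, err := strconv.Atoi(portStr)
	if err != nil {
		panic(err)
	}
	fmt.Println("listening on port", port)
}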

// getStats returns the stats for benchmark server.
// It resets lastResetTime if argument reset is true.
func (bs *benchmarkServer) getStats(reset bool) *testpb.ServerStats {
	bs.mu.RLock()
	defer bs.mu.RUnlock()
	wallTimeElapsed := time.Since(bs.lastResetTime).Seconds()
	rusageLatest := syscall.GetRusage()
	uTimeElapsed, sTimeElapsed := syscall.CPUTimeDiff(bs.rusageLastReset, rusageLatest)

	if reset {
		bs.lastResetTime = time.Now()
		bs.rusageLastReset = rusageLatest
	}
	return &testpb.ServerStats{
		TimeElapsed: wallTimeElapsed,
		TimeUser:    uTimeElapsed,
		TimeSystem:  sTimeElapsed,
	}
}
230
vendor/google.golang.org/grpc/benchmark/worker/main.go
generated
vendored
Normal file
@@ -0,0 +1,230 @@
/*
 *
 * Copyright 2016 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package main

import (
	"flag"
	"fmt"
	"io"
	"net"
	"net/http"
	_ "net/http/pprof"
	"runtime"
	"strconv"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	testpb "google.golang.org/grpc/benchmark/grpc_testing"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/status"
)

var (
	driverPort    = flag.Int("driver_port", 10000, "port for communication with driver")
	serverPort    = flag.Int("server_port", 0, "port for benchmark server if not specified by server config message")
	pprofPort     = flag.Int("pprof_port", -1, "Port for pprof debug server to listen on. Pprof server doesn't start if unset")
	blockProfRate = flag.Int("block_prof_rate", 0, "fraction of goroutine blocking events to report in blocking profile")
)

type byteBufCodec struct {
}

func (byteBufCodec) Marshal(v interface{}) ([]byte, error) {
	b, ok := v.(*[]byte)
	if !ok {
		return nil, fmt.Errorf("failed to marshal: %v is not type of *[]byte", v)
	}
	return *b, nil
}

func (byteBufCodec) Unmarshal(data []byte, v interface{}) error {
	b, ok := v.(*[]byte)
	if !ok {
		return fmt.Errorf("failed to unmarshal: %v is not type of *[]byte", v)
	}
	*b = data
	return nil
}

func (byteBufCodec) String() string {
	return "bytebuffer"
}
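
byteBufCodec satisfies the three-method grpc.Codec interface so raw byte slices can bypass proto marshaling. The sketch below shows how such a codec is wired in on both sides, mirroring grpc.CallCustomCodec in benchmark_client.go and grpc.CustomCodec in benchmark_server.go; rawCodec and the dial target are illustrative stand-ins, not part of this worker:

package main

import (
	"fmt"
	"log"

	"google.golang.org/grpc"
)

// rawCodec mirrors byteBufCodec above: it passes *[]byte through untouched.
type rawCodec struct{}

func (rawCodec) Marshal(v interface{}) ([]byte, error) {
	b, ok := v.(*[]byte)
	if !ok {
		return nil, fmt.Errorf("not a *[]byte: %T", v)
	}
	return *b, nil
}

func (rawCodec) Unmarshal(data []byte, v interface{}) error {
	b, ok := v.(*[]byte)
	if !ok {
		return fmt.Errorf("not a *[]byte: %T", v)
	}
	*b = data
	return nil
}

func (rawCodec) String() string { return "raw" }

func main() {
	// Client side: every RPC on this connection uses rawCodec (placeholder target).
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(grpc.CallCustomCodec(rawCodec{})),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Server side mirrors this with grpc.CustomCodec, as startBenchmarkServer does.
	_ = grpc.NewServer(grpc.CustomCodec(rawCodec{}))
}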

// workerServer implements WorkerService rpc handlers.
// It can create benchmarkServer or benchmarkClient on demand.
type workerServer struct {
	stop       chan<- bool
	serverPort int
}

func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) error {
	var bs *benchmarkServer
	defer func() {
		// Close benchmark server when stream ends.
		grpclog.Infof("closing benchmark server")
		if bs != nil {
			bs.closeFunc()
		}
	}()
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}

		var out *testpb.ServerStatus
		switch argtype := in.Argtype.(type) {
		case *testpb.ServerArgs_Setup:
			grpclog.Infof("server setup received:")
			if bs != nil {
				grpclog.Infof("server setup received when server already exists, closing the existing server")
				bs.closeFunc()
			}
			bs, err = startBenchmarkServer(argtype.Setup, s.serverPort)
			if err != nil {
				return err
			}
			out = &testpb.ServerStatus{
				Stats: bs.getStats(false),
				Port:  int32(bs.port),
				Cores: int32(bs.cores),
			}

		case *testpb.ServerArgs_Mark:
			grpclog.Infof("server mark received:")
			grpclog.Infof(" - %v", argtype)
			if bs == nil {
				return status.Error(codes.InvalidArgument, "server does not exist when mark received")
			}
			out = &testpb.ServerStatus{
				Stats: bs.getStats(argtype.Mark.Reset_),
				Port:  int32(bs.port),
				Cores: int32(bs.cores),
			}
		}

		if err := stream.Send(out); err != nil {
			return err
		}
	}
}

func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) error {
	var bc *benchmarkClient
	defer func() {
		// Shut down benchmark client when stream ends.
		grpclog.Infof("shutting down benchmark client")
		if bc != nil {
			bc.shutdown()
		}
	}()
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}

		var out *testpb.ClientStatus
		switch t := in.Argtype.(type) {
		case *testpb.ClientArgs_Setup:
			grpclog.Infof("client setup received:")
			if bc != nil {
				grpclog.Infof("client setup received when client already exists, shutting down the existing client")
				bc.shutdown()
			}
			bc, err = startBenchmarkClient(t.Setup)
			if err != nil {
				return err
			}
			out = &testpb.ClientStatus{
				Stats: bc.getStats(false),
			}

		case *testpb.ClientArgs_Mark:
			grpclog.Infof("client mark received:")
			grpclog.Infof(" - %v", t)
			if bc == nil {
				return status.Error(codes.InvalidArgument, "client does not exist when mark received")
			}
			out = &testpb.ClientStatus{
				Stats: bc.getStats(t.Mark.Reset_),
			}
		}

		if err := stream.Send(out); err != nil {
			return err
		}
	}
}

func (s *workerServer) CoreCount(ctx context.Context, in *testpb.CoreRequest) (*testpb.CoreResponse, error) {
	grpclog.Infof("core count: %v", runtime.NumCPU())
	return &testpb.CoreResponse{Cores: int32(runtime.NumCPU())}, nil
}

func (s *workerServer) QuitWorker(ctx context.Context, in *testpb.Void) (*testpb.Void, error) {
	grpclog.Infof("quitting worker")
	s.stop <- true
	return &testpb.Void{}, nil
}

func main() {
	grpc.EnableTracing = false

	flag.Parse()
	lis, err := net.Listen("tcp", ":"+strconv.Itoa(*driverPort))
	if err != nil {
		grpclog.Fatalf("failed to listen: %v", err)
	}
	grpclog.Infof("worker listening at port %v", *driverPort)

	s := grpc.NewServer()
	stop := make(chan bool)
	testpb.RegisterWorkerServiceServer(s, &workerServer{
		stop:       stop,
		serverPort: *serverPort,
	})

	go func() {
		<-stop
		// Wait for 1 second before stopping the server to make sure the return value of QuitWorker is sent to client.
		// TODO revise this once server graceful stop is supported in gRPC.
		time.Sleep(time.Second)
		s.Stop()
	}()

	runtime.SetBlockProfileRate(*blockProfRate)

	if *pprofPort >= 0 {
		go func() {
			grpclog.Infoln("Starting pprof server on port " + strconv.Itoa(*pprofPort))
			grpclog.Infoln(http.ListenAndServe("localhost:"+strconv.Itoa(*pprofPort), nil))
		}()
	}

	s.Serve(lis)
}
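
A worker by itself does nothing until a driver connects on -driver_port and issues WorkerService RPCs. A minimal driver-side sketch that queries CoreCount, assuming the generated client in benchmark/grpc_testing and a worker already running with the default -driver_port=10000:

package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	testpb "google.golang.org/grpc/benchmark/grpc_testing"
)

func main() {
	// Placeholder address: a worker started locally with the default driver port.
	conn, err := grpc.Dial("localhost:10000", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial worker: %v", err)
	}
	defer conn.Close()

	worker := testpb.NewWorkerServiceClient(conn)
	resp, err := worker.CoreCount(context.Background(), &testpb.CoreRequest{})
	if err != nil {
		log.Fatalf("CoreCount: %v", err)
	}
	log.Printf("worker reports %d cores", resp.Cores)
}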