JinXiaozhao opened a new issue, #1770:
URL: https://github.com/apache/incubator-brpc/issues/1770
**Describe the bug**
Server side: brpc
Client side: brpc (FLAGS_protocol=h2:grpc)
`./echo_image_client --h2_client_stream_window_size=536870912 --h2_client_connection_window_size=536870912`
Issuing the same request 100 times in a row, only the first request succeeds;
every subsequent request fails with:
E2004]remote_window_left is not enough, data_size=4915254
[R1][E2004]remote_window_left is not enough, data_size=4915254
[R2][E2004]remote_window_left is not enough, data_size=4915254
[R3][E2004]remote_window_left is not enough, data_size=4915254
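For reference, a minimal sketch of the HTTP/2 flow-control accounting that produces this kind of error; the 8 MiB peer window below is hypothetical (chosen so that the first 4915254-byte request fits and later ones do not), and none of the names are brpc internals:

```cpp
#include <cstdint>
#include <iostream>

int main() {
    int64_t remote_window_left = 8 * 1024 * 1024;  // hypothetical window advertised by the peer
    const int64_t data_size = 4915254;             // payload size per request, as in the log

    for (int req = 0; req < 3; ++req) {
        if (remote_window_left < data_size) {
            // The situation behind E2004: without WINDOW_UPDATE frames from
            // the peer, the window is never replenished.
            std::cout << "[R" << req << "][E2004]remote_window_left is not enough, data_size="
                      << data_size << "\n";
            continue;
        }
        remote_window_left -= data_size;  // each DATA frame consumes window
        std::cout << "request " << req << " sent, window left=" << remote_window_left << "\n";
    }
    return 0;
}
```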
**To Reproduce**
The gRPC payload per request is about 4.69 MiB (4915254 bytes, the `data_size` in the error above).
**Expected behavior**
How can the maximum size of the data transferred over gRPC be raised on the client side?
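One thing worth trying (a sketch, not a confirmed fix: it assumes the `h2_client_stream_window_size` and `h2_client_connection_window_size` flags that brpc itself defines, which is presumably also why the local `DEFINE_int32` lines in the client below stay commented out) is setting the flags from code through gflags instead of the command line:

```cpp
#include <gflags/gflags.h>

int main(int argc, char* argv[]) {
    GFLAGS_NS::ParseCommandLineFlags(&argc, &argv, true);
    // Set brpc's flags by name after parsing. SetCommandLineOption returns
    // an empty string if no flag with that name is linked into the binary.
    GFLAGS_NS::SetCommandLineOption("h2_client_stream_window_size", "536870912");
    GFLAGS_NS::SetCommandLineOption("h2_client_connection_window_size", "536870912");
    // ... then initialize the brpc::Channel exactly as in the client below ...
    return 0;
}
```

That said, if the error is read literally, `remote_window_left` is the window advertised by the remote end, so raising only the client-side windows (which govern what the client is willing to receive) may not change how much the client is allowed to send; the server's HTTP/2 window settings may be the actual limit.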
**Versions**
OS:
Compiler:
brpc:
protobuf:
**Additional context/screenshots**
Client code:
```cpp
#include <algorithm>
#include <chrono>
#include <iostream>
#include <iterator>
#include <vector>
#include <gflags/gflags.h>
#include <butil/logging.h>
#include <butil/time.h>
#include <brpc/channel.h>
#include "echo.pb.h"

using namespace std::chrono;
using google::protobuf::int32;

DEFINE_string(attachment, "", "Carry this along with requests");
// DEFINE_string(protocol, "baidu_std", "Protocol type. Defined in src/brpc/options.proto");
DEFINE_string(protocol, "h2:grpc", "Protocol type. Defined in src/brpc/options.proto");
DEFINE_string(connection_type, "", "Connection type. Available values: single, pooled, short");
DEFINE_string(server, "0.0.0.0:8000", "IP Address of server");
DEFINE_string(load_balancer, "", "The algorithm for load balancing");
DEFINE_int32(timeout_ms, 100, "RPC timeout in milliseconds");
DEFINE_int32(max_retry, 3, "Max retries(not including the first RPC)");
DEFINE_int32(interval_ms, 1000, "Milliseconds between consecutive requests");
// brpc itself defines these flags, so redefining them here would collide:
// DEFINE_int32(h2_client_stream_window_size, 256*1024*1024, "256M");
// DEFINE_int32(h2_client_connection_window_size, 256*1024*1024, "256M");

// Build a TensorProto of shape [maxBatchSize, dims[0], dims[1], dims[2]]
// filled with a constant value.
tensorflow::TensorProto make_tensor_proto(int dims[3],
                                          const int32 maxBatchSize,
                                          const tensorflow::DataType dataType) {
    tensorflow::TensorProto result;
    result.set_dtype(dataType);
    result.mutable_tensor_shape()->add_dim()->set_size(maxBatchSize);
    auto numPixels = maxBatchSize;
    for (int i = 0; i < 3; ++i) {
        int dim_i = dims[i];
        result.mutable_tensor_shape()->add_dim()->set_size(dim_i);
        numPixels *= dim_i;
    }
    // Here should be your true input data.
    std::vector<float> fakeData(numPixels, 0.5);
    for (auto it = fakeData.begin(); it != fakeData.end(); ++it) {
        // Reinterpret each float as raw bytes and append them to tensor_content.
        union {
            float source;
            char target[sizeof(float)];
        } converter;
        converter.source = *it;
        std::copy(std::begin(converter.target), std::end(converter.target),
                  std::back_inserter(*result.mutable_tensor_content()));
    }
    return result;
}

int main(int argc, char* argv[]) {
    // Parse gflags. We recommend you to use gflags as well.
    GFLAGS_NS::ParseCommandLineFlags(&argc, &argv, true);
    std::cout << "protocol: " << FLAGS_protocol << std::endl;

    // A Channel represents a communication line to a Server. Notice that
    // Channel is thread-safe and can be shared by all threads in your program.
    brpc::Channel channel;

    // Initialize the channel, NULL means using default options.
    brpc::ChannelOptions options;
    options.protocol = FLAGS_protocol;
    options.connection_type = FLAGS_connection_type;
    options.timeout_ms = FLAGS_timeout_ms /*milliseconds*/;
    options.max_retry = FLAGS_max_retry;
    if (channel.Init(FLAGS_server.c_str(), FLAGS_load_balancer.c_str(),
                     &options) != 0) {
        LOG(ERROR) << "Fail to initialize channel";
        return -1;
    }

    // Normally, you should not call a Channel directly, but instead construct
    // a stub Service wrapping it. stub can be shared by all threads as well.
    example::EchoService_Stub stub(&channel);

    // Send requests in a loop (the 1s sleep between requests is commented out below).
    int log_id = 0;
    int num = 100;
    int time_us = 0;

    // We will receive response synchronously, safe to put variables on stack.
    example::EchoRequest request;
    int dims[3] = {3, 640, 640};
    const tensorflow::DataType dataType = tensorflow::DataType::DT_FLOAT;
    int batchSize = 1;
    tensorflow::TensorProto inputProto = make_tensor_proto(dims, batchSize, dataType);
    request.mutable_frame()->CopyFrom(inputProto);
    // request.set_allocated_frame(&inputProto);
    request.set_model_name("hello-world");
    request.set_model_version("v1");
    std::cout << "input_proto size:" << inputProto.ByteSize() << std::endl;
    // std::string inputProtoStr;
    // google::protobuf::util::MessageToJsonString(request, &inputProtoStr);
    // std::cout << "input_proto :" << inputProtoStr << std::endl;

    time_point<high_resolution_clock> timeStart = high_resolution_clock::now();
    while (!brpc::IsAskedToQuit() && num) {
        std::cout << num << std::endl;
        brpc::Controller cntl;
        example::EchoResponse response;
        cntl.set_log_id(log_id++);  // set by user
        // Set attachment which is wired to network directly instead of
        // being serialized into protobuf messages.
        cntl.request_attachment().append(FLAGS_attachment);
        // Because `done' (last parameter) is NULL, this function waits until
        // the response comes back or an error occurs (including timeout).
        stub.Echo(&cntl, &request, &response, NULL);
        if (!cntl.Failed()) {
            time_us += cntl.latency_us();
            LOG(INFO) << "Received response from " << cntl.remote_side()
                      << " to " << cntl.local_side()
                      << ": model_name: " << response.model_name()
                      << " , model_version: " << response.model_version()
                      << " , tensor_size: " << response.image().ByteSize()
                      << " (attached=" << cntl.response_attachment() << ")"
                      << " latency=" << cntl.latency_us() << "us";
        } else {
            LOG(WARNING) << cntl.ErrorText();
        }
        num -= 1;
        // usleep(FLAGS_interval_ms * 1000L);
    }
    time_point<high_resolution_clock> timeEnd = high_resolution_clock::now();
    float timeMs = duration_cast<milliseconds>(timeEnd - timeStart).count();
    std::cout << "average over 100 requests: " << timeMs / 100 << "ms" << std::endl;
    std::cout << "accumulated latency of successful requests: " << time_us << "us" << std::endl;
    LOG(INFO) << "EchoClient is going to quit";
    return 0;
}
```
Server code: https://github.com/apache/incubator-brpc/blob/master/example/echo_c%2B%2B/server.cpp