This article collects typical usage examples of the C++ Connection::Recv method. If you are wondering how exactly to use C++ Connection::Recv, how to call it, or what real code that uses it looks like, the hand-picked examples below may help. You can also explore further usage examples of the enclosing Connection class.
The following shows 4 code examples of the Connection::Recv method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.
Example 1: main
int main(void)
{
    Connection *cnxn = Connection::CreateService("namer");
    Message msg, reply;

    // Register the namer service itself under its well-known port.
    do_namer_register(NAMER_PORT, "namer");

    // Serve requests until Recv() reports an error (non-zero return).
    while (cnxn->Recv(&msg) == 0) {
        int32 op = -1;
        int32 port = -1;
        const char *name = 0;
        int32 res = -1;

        msg.GetInt32('code', &op);
        msg.GetInt32('port', &port);
        msg.GetString('name', &name);

        switch (op) {
        case NAMER_FIND:
            res = do_namer_find(name);
            break;
        case NAMER_REGISTER:
            res = do_namer_register(port, name);
            break;
        }

        // Send the result back to the caller.
        reply.Empty();
        reply.PutInt32('resp', res);
        msg.Reply(&reply);
    }
    return 0;
}
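The sample set only shows the server side of this namer protocol. Purely as a sketch of what a caller might look like, the fragment below builds the same 'code'/'port'/'name' message and waits for the 'resp' reply. Connection::FindService() and Message::PutString() do not appear in any of the examples on this page and are assumed counterparts of CreateService() and GetString(); whether Example 1's Connection offers a Send(Message*) like Example 2's is likewise an assumption.

// Hypothetical client for the namer service above (sketch only).
// FindService(), PutString() and Send() are assumptions; Recv(Message*),
// PutInt32() and GetInt32() mirror the calls shown in Example 1.
int32 namer_register(const char *name, int32 port)
{
    Connection *cnxn = Connection::FindService("namer"); // assumed client-side factory
    if (!cnxn)
        return -1;

    Message msg, reply;
    msg.PutInt32('code', NAMER_REGISTER);
    msg.PutInt32('port', port);
    msg.PutString('name', name);           // assumed counterpart of GetString()

    cnxn->Send(&msg);                      // assumed; Example 2 shows Send() on a Connection
    if (cnxn->Recv(&reply) != 0)           // same Recv(Message*) convention as Example 1
        return -1;

    int32 res = -1;
    reply.GetInt32('resp', &res);
    return res;
}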
Example 2: Execute
void Service::Execute(const std::string& host, int port, Command& cmd)
{
    cmd.status() = Command::EXECUTING;

    Connection *con = new TCPConnection(host, port);
    if (!con->Open())
    {
        delete con;
        throw "Could not connect to " + host;
    }

    // Send the command's message and block until the peer replies.
    con->Send(&cmd.Msg());
    Message *rsp = con->Recv();
    cmd.ReadMessage(*rsp);

    delete rsp;
    delete con;   // release the connection once the reply has been consumed
}
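Example 2 manages the connection and the reply with bare new/delete, which still leaks both objects if Recv() or ReadMessage() throws. A possible RAII variant is sketched below; it assumes Connection has an accessible virtual destructor and that Message can be owned by std::unique_ptr, neither of which the excerpt shows.

#include <memory>
#include <string>

// Sketch of an exception-safe variant of Service::Execute, reusing the
// TCPConnection / Message / Command interfaces from Example 2 (assumed).
void Service::Execute(const std::string& host, int port, Command& cmd)
{
    cmd.status() = Command::EXECUTING;

    std::unique_ptr<Connection> con(new TCPConnection(host, port));
    if (!con->Open())
        throw "Could not connect to " + host;   // con is released automatically

    con->Send(&cmd.Msg());
    std::unique_ptr<Message> rsp(con->Recv());  // reply freed even if ReadMessage() throws
    cmd.ReadMessage(*rsp);
}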
Example 3: main
//......... part of the code is omitted here .........
        goto Exit;
    }

    result = audio_client_render->GetMixFormat(&wf_render);
    if (FAILED(result))
    {
        printf("Failed to get mix format\n");
        goto Exit;
    }

    result = audio_client_render->Initialize(AUDCLNT_SHAREMODE_SHARED, 0, timeIntervalForBuffer, 0, wf_render, NULL);
    if (FAILED(result))
    {
        printf("Failed to initialize audio client\n");
        goto Exit;
    }

    printf("Sample rate: %u Hz\n", wf_render->nSamplesPerSec);
    printf("Sample size: %u bits\n", wf_render->wBitsPerSample);
    printf("Size of audio frame: %u bytes\n", wf_render->nBlockAlign);
    printf("Number of channels: %u\n", wf_render->nChannels);

    result = audio_client_render->Start();
    if (FAILED(result))
    {
        printf("Failed to start recording\n");
        goto Exit;
    }

    result = audio_client_render->GetService(__uuidof(IAudioRenderClient), (void**)&render_client);
    if (FAILED(result))
    {
        printf("Failed to get render service\n");
        goto Exit;
    }

    // Exchange capture formats with the peer over the two sockets.
    int partner_format_received_size;
    SendingSocket.Send((const char*)wf_capture, sizeof(*wf_capture));
    ReceivingSocket.Recv((char*)&partner_format, sizeof(partner_format), partner_format_received_size);
    if (last_error != _NO_ERROR)
    {
        printf("Connection failed\n");
        goto Exit;
    }

    if (partner_format.nSamplesPerSec != wf_render->nSamplesPerSec)
    {
        /*cout << "Partner format:\n";
        cout << partner_format.wBitsPerSample << endl;
        cout << partner_format.nSamplesPerSec << endl;
        cout << "My format:\n";
        cout << wf_render->wBitsPerSample << endl;
        cout << wf_render->nSamplesPerSec << endl;*/
        printf("Partner capture format unsupported\n");
        //goto Exit;
    }

    // Play back incoming audio on a separate thread while this loop captures.
    LPTHREAD_START_ROUTINE StartRoutine = (LPTHREAD_START_ROUTINE)RenderAudio;
    DWORD threadID;
    HANDLE hThread = CreateThread(NULL, 0, StartRoutine, NULL, 0, &threadID);

    while (true)
    {
        Sleep(timeIntervalInMilliseconds);
        result = capture_client->GetNextPacketSize(&packSize);
        if (FAILED(result))
        {
            printf("Failed to get next pack size\n");
            break;
        }
        while (packSize)
        {
            result = capture_client->GetBuffer(&pData, &availableFrames, &flags, NULL, NULL);
            if (FAILED(result))
//......... part of the code is omitted here .........
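The listing cuts Example 3 off right after the capture-side GetBuffer() call. Purely as a guess at the shape of the omitted tail, a shared-mode WASAPI capture iteration usually finishes along the lines below; the Send() call is an assumption, not recovered code, and the fragment reuses the variable names from the excerpt.

// Sketch of how such a capture iteration typically completes
// (not the omitted original code).
if (FAILED(result))
{
    printf("Failed to get capture buffer\n");
    break;
}
// Assumed: forward the captured frames to the peer, then hand the buffer back.
SendingSocket.Send((const char*)pData, availableFrames * wf_capture->nBlockAlign);
capture_client->ReleaseBuffer(availableFrames);
result = capture_client->GetNextPacketSize(&packSize);
if (FAILED(result))
{
    break;
}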
Example 4: RenderAudio
void RenderAudio(void*)
{
    UINT32 buffer_size;
    int frame_size = wf_render->nBlockAlign;
    audio_client_render->GetBufferSize(&buffer_size);

    char* receive_buffer = new char[buffer_size * frame_size * 16]; // Big enough
    char* preproccess_buffer = NULL;
    char* used_buffer = receive_buffer;
    int size;
    int offset = 0;
    UINT32 padding;
    UINT32 frames_available;
    UINT32 request_frames;
    BYTE* audio_buffer;
    bool scale_samples = false;
    float sample_coeff;
    float one_over_sample_coeff;

    /*if (wf_render->nSamplesPerSec != partner_format.nSamplesPerSec)
    {
        sample_coeff = (float)wf_render->nSamplesPerSec / (float)partner_format.nSamplesPerSec;
        one_over_sample_coeff = 1.f / sample_coeff;
        preproccess_buffer = new char[buffer_size * frame_size * 16];
        scale_samples = true;
        used_buffer = preproccess_buffer;
    }
    else
    {
        used_buffer = receive_buffer;
    }*/

    while (true)
    {
        // Block until a chunk of audio arrives from the peer; stop on socket failure.
        if (!ReceivingSocket.Recv(receive_buffer, buffer_size * frame_size * 16, size))
        {
            break;
        }

        /*if (scale_samples)
        {
            size = size * sample_coeff;
            for (int i = 0; i < size; i += 4)
            {
                ((float*)preproccess_buffer)[i] = ((float*)receive_buffer)[(int)(i * one_over_sample_coeff)];
            }
        }*/
        //printf("Received %d bytes\n", size);

        // Feed the received bytes to the render client in as many chunks as the
        // device buffer has room for.
        while (size - offset > 0)
        {
            audio_client_render->GetCurrentPadding(&padding);
            frames_available = buffer_size - padding;
            request_frames = frames_available < (size - offset) / frame_size ? frames_available : (size - offset) / frame_size;
            if (!request_frames)
            {
                // No room left in the device buffer: the rest of this chunk is dropped.
                //printf("[DEBUG] Audio loss: %d\n", size - offset);
                break;
            }

            render_client->GetBuffer(request_frames, &audio_buffer);
            memcpy(audio_buffer, used_buffer + offset, request_frames * frame_size);
            offset += request_frames * frame_size;
            render_client->ReleaseBuffer(request_frames, 0);

            //Sleep(request_frames * wf_render->nBlockAlign * 1000 / wf_render->nAvgBytesPerSec);
            //printf("Cycle: %d, %d, %d\n", size, offset, frames_available);
        }
        offset = 0;
    }
}
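When request_frames comes back as zero in Example 4, the remaining size - offset bytes of the received chunk are simply discarded, which keeps latency bounded at the cost of audible gaps. A hedged alternative, reusing the same variables, is to wait for the device to drain part of its padding and retry instead of dropping; the retry policy below is an assumption, not part of the original sample, and whether the trade-off is acceptable depends on how fast the peer keeps sending.

// Sketch of a wait-and-retry policy in place of the break above
// (same variables as Example 4; the policy itself is an assumption).
if (!request_frames)
{
    DWORD wait_ms = (DWORD)((UINT64)padding * 1000 / wf_render->nSamplesPerSec / 2);
    Sleep(wait_ms ? wait_ms : 1);   // let the device drain roughly half its queued frames
    continue;                       // re-read GetCurrentPadding() instead of discarding the chunk
}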