I have a GTKmm application on Windows (built with MinGW) that receives UDP packets (it never sends any). The socket is a native winsock socket, and I use a glibmm IOChannel to hook it into the main application loop. The socket is read with recvfrom().
My problem: this setup consumes 25% of the CPU time on a 3 GHz workstation. Can someone tell me why?
The application is otherwise idle, and if I remove the UDP code the CPU load drops to almost zero. Since the application will eventually have to do some CPU-intensive work, I can think of better ways to spend that 25%.
Here are some code snippets (sorry for the printf's ;)):
void UDPInterface::bindToPort(unsigned short port)
{
    struct sockaddr_in target;
    WSADATA wsaData;

    target.sin_family = AF_INET;
    target.sin_port = htons(port);
    target.sin_addr.s_addr = INADDR_ANY;

    if ( WSAStartup(0x0202, &wsaData) )
    {
        printf("WSAStartup failed!\n");
        WSACleanup();
        exit(0);
    }

    sock = socket(AF_INET, SOCK_DGRAM, 0);
    if (sock == INVALID_SOCKET)
    {
        printf("invalid socket!\n");
        exit(0);
    }

    if (bind(sock, (struct sockaddr*)&target, sizeof(struct sockaddr_in)) == SOCKET_ERROR)
    {
        printf("failed to bind to port!\n");
        exit(0);
    }

    printf("[UDPInterface::bindToPort] listening on port %i\n", port);
}
bool UDPInterface::UDPEvent(Glib::IOCondition io_condition)
{
    recvfrom(sock, (char*)buf, BUF_SIZE*4, 0, NULL, NULL);
    return true; // keep the I/O source connected
}
Glib::RefPtr<Glib::IOChannel> channel = Glib::IOChannel::create_from_win32_socket(udp.sock);
Glib::signal_io().connect( sigc::mem_fun(udp, &UDPInterface::UDPEvent), channel, Glib::IO_IN );
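For context, here is roughly how these pieces are wired together; this is a simplified sketch, not the exact code (it assumes a UDPInterface class with a public sock member and the bindToPort()/UDPEvent() methods shown above; the port number and the empty Gtk::Window are placeholders):

#include <gtkmm.h>
#include <winsock2.h>

int main(int argc, char* argv[])
{
    Gtk::Main kit(argc, argv);
    Gtk::Window window; // placeholder window, the real UI is irrelevant here

    UDPInterface udp;
    udp.bindToPort(5000); // placeholder port

    // Wrap the winsock socket in an IOChannel and watch it from the main loop
    Glib::RefPtr<Glib::IOChannel> channel =
        Glib::IOChannel::create_from_win32_socket(udp.sock);
    Glib::signal_io().connect(
        sigc::mem_fun(udp, &UDPInterface::UDPEvent), channel, Glib::IO_IN);

    // CPU usage sits around 25% as soon as the loop starts, even with no packets arriving
    kit.run(window);
    return 0;
}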
The glib docs for g_io_channel_win32_new_socket() do mention some caveats about socket channels on Windows, but I can't tell from the description whether that explains the load. Could it be related?
Whether glib itself calls recvfrom() on the socket internally, I don't know, but if it does I don't see the point of the application calling recvfrom() again in the handler. The glibmm docs don't mention recvfrom() at all, and neither does the documentation for Glib::IOChannel::create_from_win32_socket().
I've also built the program with -pg and profiled it with gprof, but the profile doesn't show where the time goes; apparently it is spent inside the glib/glibmm DLLs.