After I close a socket, the thread keeps getting an endless stream of "successful" FD_CONNECT events and spins in an infinite loop.
Why?
unsigned __stdcall CMinClient::ThreadSocket(void *param)
{
    int Index = 0;
    int i = 0;
    int iRet = 0;
    int iServiceID = 0;
    CString strAddr, strServiceID;
    DWORD dwRet = 0;
    WSANETWORKEVENTS NetworkEvents;
    while (1)
    {
        dwRet = WaitForSingleObject(m_hThreadEvent, INFINITE);
        if (dwRet != WAIT_OBJECT_0)
        {
            break;
        }
        // Wait on all socket events; return as soon as any one of them is signaled
        Index = WSAWaitForMultipleEvents(m_iSocketCount, m_EventArray, FALSE, WSA_INFINITE, FALSE);
        if (Index == WSA_WAIT_FAILED)
        {
            Sleep(100);
            continue;
        }
        Index = Index - WSA_WAIT_EVENT_0;
        ::EnterCriticalSection(&m_CS);
        for (i = Index; i < m_iSocketCount; i++)
        { // for begin
            if (i < 0)
            {
                break;
            }
            Index = WSAWaitForMultipleEvents(1, &m_EventArray[i], TRUE, 0, FALSE);
            if (Index == WSA_WAIT_FAILED)
            {
                ::LeaveCriticalSection(&m_CS);
                continue;
            }
            Index = i;
            WSAEnumNetworkEvents(m_SocketArray[Index], m_EventArray[Index], &NetworkEvents);
            // Connect event
            if (NetworkEvents.lNetworkEvents & FD_CONNECT)
            {
                iRet = NetworkEvents.iErrorCode[FD_CONNECT_BIT];
                // Connect failed: remove this socket and its event from the arrays
                if (iRet != 0)
                {
                    // Test this spot: is the entry really removed from the arrays when the connect fails?
                    DelSocket(m_SocketArray[Index]);
                    i--;
                }
                else
                {
                    iServiceID = GetServiceID(m_SocketArray[Index]);
                    if (iServiceID != 0)
                    {
                        m_TranRule.Lookup(iServiceID, strAddr);
                        strServiceID.Format("%d", iServiceID);
                        ::SendMessage(g_hMessageDlg, MSG_PRINTMINCONN, (WPARAM)&strServiceID, (LPARAM)&strAddr);
                    }
                }
            }
            // Write event
            if (NetworkEvents.lNetworkEvents & FD_WRITE)
            {
                iRet = NetworkEvents.iErrorCode[FD_WRITE_BIT];
                if (iRet != 0)
                {
                    // block the sending thread
                }
                else
                {
                    // schedule the sending thread
                }
            }
            if (NetworkEvents.lNetworkEvents & FD_READ)
            {
                iRet = NetworkEvents.iErrorCode[FD_READ_BIT];
                if (iRet != 0)
                {
                    // block the reading thread
                }
                else
                {
                    // schedule the reading thread
                }
            }
            if (NetworkEvents.lNetworkEvents & FD_CLOSE)
            {
                iRet = NetworkEvents.iErrorCode[FD_CLOSE_BIT];
                if (iRet != 0)
                {}
                DelSocket(m_SocketArray[Index]);
                i -= 2; // ????????????????? something is wrong here, it loops
                // continue;
            }
        } // for end
        ::LeaveCriticalSection(&m_CS);
    }
    return 0;
}
// Look up the socket handle by service number, then close it.
void CMinClient::CloseClient(int iServiceID)
{
    SOCKET hSocket = NULL;
    hSocket = GetSocket(iServiceID);
    shutdown(hSocket, SD_RECEIVE);
    closesocket(hSocket);
}
12 replies
#1
Index=WSAWaitForMultipleEvents(1,&m_EventArray[i],TRUE,0,FALSE);
Is this really necessary? And why the extra for loop?
Index=WSAWaitForMultipleEvents(m_iSocketCount,m_EventArray,FALSE,WSA_INFINITE,FALSE);
Doesn't this already give you the Index you need?
And shouldn't it be shutdown(hSocket,SD_BOTH)?
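In other words, something like this minimal sketch (my illustration, not code from the thread; m_SocketArray and m_EventArray are the arrays from the original post, dispatch is omitted): use the index the outer wait already returned and enumerate only that socket. Any other simultaneously signaled events are simply picked up on the next pass, since WSAEnumNetworkEvents resets only the one event it is given.

    // Sketch: handle only the first signaled event, whose index the outer wait returns.
    DWORD dwWait = WSAWaitForMultipleEvents(m_iSocketCount, m_EventArray,
                                            FALSE, WSA_INFINITE, FALSE);
    if (dwWait != WSA_WAIT_FAILED)
    {
        int idx = (int)(dwWait - WSA_WAIT_EVENT_0);   // index of the first signaled event
        WSANETWORKEVENTS ne;
        if (WSAEnumNetworkEvents(m_SocketArray[idx], m_EventArray[idx], &ne) == 0)
        {
            // inspect ne.lNetworkEvents / ne.iErrorCode[...] for m_SocketArray[idx]
        }
    }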
#2
up
#3
Index=WSAWaitForMultipleEvents(1,&m_EventArray[i],TRUE,0,FALSE);
should not be used like that inside the loop;
use WSAWaitForSingleObject(m_EventArray[i],0) instead.
#4
Correction to the above:
Index=WSAWaitForMultipleEvents(1,&m_EventArray[i],TRUE,0,FALSE);
should not be used like that inside the loop;
use WaitForSingleObject(m_EventArray[i],0) instead.
#5
DWORD WINAPI Tcp_Server_EVENT::WorkThread(LPVOID lpParam)
{
    Thread_Obj *ThreadObj = (Thread_Obj*)lpParam;
    WSANETWORKEVENTS NEvents;
    WaitForSingleObject(ThreadObj->RunEvent, INFINITE);
    while (true)
    {
        if (ThreadObj->SocketCount <= 0)
        {
            ThreadObj->TcpServerEVENT->Lock();
            ThreadObj->TcpServerEVENT->FreeThreadObj(ThreadObj);
            ThreadObj->TcpServerEVENT->UnLock();
            return 0;
        }
        int rc = WaitForMultipleObjects(ThreadObj->SocketCount, ThreadObj->Events, false, INFINITE);
        if (rc == WAIT_FAILED)
        {
            ThreadObj->TcpServerEVENT->Lock();
            ThreadObj->TcpServerEVENT->OnError(GetLastError());
            ThreadObj->TcpServerEVENT->FreeThreadObj(ThreadObj);
            ThreadObj->TcpServerEVENT->UnLock();
            return 0;
        }
        EnterCriticalSection(&ThreadObj->Locked);
        for (int i = 0; i < ThreadObj->SocketCount; i++)
        {
            rc = WaitForSingleObject(ThreadObj->Events[i], 0);
            if (rc == WAIT_FAILED)
            {
                ThreadObj->TcpServerEVENT->Lock();
                ThreadObj->TcpServerEVENT->OnError(GetLastError());
                ThreadObj->TcpServerEVENT->FreeSocketObj(ThreadObj, i);
                i--;
                ThreadObj->TcpServerEVENT->UnLock();
                continue;
            }
            if (rc == WAIT_TIMEOUT)
            {
                continue;
            }
            rc = WSAEnumNetworkEvents(ThreadObj->SocketObjs[i]->ClientSocket, ThreadObj->Events[i], &NEvents);
            if (rc == SOCKET_ERROR)
            {
                ThreadObj->TcpServerEVENT->Lock();
                ThreadObj->TcpServerEVENT->OnError(WSAGetLastError());
                ThreadObj->TcpServerEVENT->FreeSocketObj(ThreadObj, i);
                i--;
                ThreadObj->TcpServerEVENT->UnLock();
                continue;
            }
            if (NEvents.lNetworkEvents & FD_READ)
            {
                if (NEvents.iErrorCode[FD_READ_BIT] == 0)
                {
                    ThreadObj->SocketObjs[i]->Lock();
                    ThreadObj->SocketObjs[i]->OnRecv();
                    ThreadObj->SocketObjs[i]->UnLock();
                }
                else
                {
                    ThreadObj->TcpServerEVENT->Lock();
                    ThreadObj->TcpServerEVENT->OnError(WSAGetLastError());
                    ThreadObj->TcpServerEVENT->FreeSocketObj(ThreadObj, i);
                    i--;
                    ThreadObj->TcpServerEVENT->UnLock();
                }
            }
            if (NEvents.lNetworkEvents & FD_WRITE)
            {
                if (NEvents.iErrorCode[FD_WRITE_BIT] == 0)
                {
                    ThreadObj->SocketObjs[i]->Lock();
                    ThreadObj->SocketObjs[i]->OnSend();
                    ThreadObj->SocketObjs[i]->UnLock();
                }
                else
                {
                    ThreadObj->TcpServerEVENT->Lock();
                    ThreadObj->TcpServerEVENT->OnError(WSAGetLastError());
                    ThreadObj->TcpServerEVENT->FreeSocketObj(ThreadObj, i);
                    i--;
                    ThreadObj->TcpServerEVENT->UnLock();
                }
            }
            if (NEvents.lNetworkEvents & FD_CLOSE)
            {
                ThreadObj->TcpServerEVENT->Lock();
                ThreadObj->SocketObjs[i]->OnDisconnect();
                ThreadObj->TcpServerEVENT->OnDisconnect(ThreadObj->SocketObjs[i]);
                ThreadObj->TcpServerEVENT->FreeSocketObj(ThreadObj, i);
                i--;
                ThreadObj->TcpServerEVENT->UnLock();
            }
        }
        LeaveCriticalSection(&ThreadObj->Locked);
    }
    return 0;
}
#6
This one is simple. I haven't read your code, but I guess you may be using blocking mode.
The event keeps being regenerated because the data has not been fully received yet and the socket is still in a readable state.
The best way to test this kind of thing is to inspect the packets.
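As an aside, a minimal sketch of draining a readable socket (my own illustration, not code from this thread; hSocket and the buffer size are placeholders): with WSAEventSelect the socket is non-blocking, so on FD_READ it is usual to keep calling recv() until it reports WSAEWOULDBLOCK, otherwise leftover data keeps the socket readable and the event keeps coming back.

    char buf[4096];
    for (;;)
    {
        int n = recv(hSocket, buf, sizeof(buf), 0);
        if (n > 0)
        {
            // hand the n received bytes to the protocol layer here
            continue;
        }
        if (n == 0)
        {
            break;      // peer closed the connection gracefully
        }
        if (WSAGetLastError() == WSAEWOULDBLOCK)
        {
            break;      // drained for now; FD_READ will be signaled again when more data arrives
        }
        break;          // real socket error: report it and close the socket
    }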
#7
to hehou:
It is not blocking mode.
Let me take a look at rwdx's code first.
Below is how I create my sockets.
int CMinClient::AddClient(CString strServiceID, CString strIP, CString strPort)
{
    SOCKET hSocket;
    WSAEVENT hEvent;
    int iRet;
    int iServiceID;
    CString strAddr;
    SOCKADDR_IN ServerAddr;
    iServiceID = atoi(strServiceID);
    strAddr = strIP;
    strAddr += ":";
    strAddr += strPort;
    hSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
    if (hSocket == INVALID_SOCKET)
    {
        return -1;
    }
    hEvent = WSACreateEvent();
    // Associate the socket with the event and register the network events
    iRet = WSAEventSelect(hSocket, hEvent, FD_CONNECT | FD_READ | FD_WRITE | FD_CLOSE);
    if (iRet == SOCKET_ERROR)
    {
        return -1;
    }
    // Fill in the socket address structure
    ServerAddr.sin_family = AF_INET;
    ServerAddr.sin_addr.S_un.S_addr = inet_addr(strIP);
    ServerAddr.sin_port = htons(atoi(strPort));
    // Post the asynchronous connect
    iRet = connect(hSocket, (SOCKADDR *)&ServerAddr, sizeof(ServerAddr));
    EnterCriticalSection(&m_CS);
    m_EventArray[m_iSocketCount] = hEvent;
    m_SocketArray[m_iSocketCount] = hSocket;
    m_SocketMap.SetAt(iServiceID, hSocket);
    m_TranRule.SetAt(atoi(strServiceID), strAddr);
    m_iSocketCount++;
    if (m_iSocketCount == 1)
    {   // when the count reaches 1, the thread runs WaitForMultipleEvents for the first time
        SetEvent(m_hThreadEvent);
    }
    else if (m_iSocketCount > 1)
    {   // wake the waiting thread so it picks up the enlarged event array
        WSASetEvent(m_EventArray[0]);
    }
    LeaveCriticalSection(&m_CS);
    return 0;
}
#8
1. I added a memset of the WSANETWORKEVENTS structure before WSAEnumNetworkEvents; now the confusing connect event no longer shows up, there is simply no event (0).
2. I think my real mistake is that I did not shrink the event array when calling closesocket (I naively wanted to remove the element from the event array only after receiving the close event). As a result, WaitForMultipleEvents was waiting on an event whose associated socket was already invalid, and that is probably what caused the infinite loop.
3. hehou's source code is very nice (it also seems to release the other resources allocated for a socket only after receiving the close event). Could you send me a copy? wd@zibo.com.cn. Posting it here would be even better.
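Point 2 is the key observation. A minimal sketch of what removing the entry at close time might look like (my guess; the real DelSocket body is not shown in this thread), assuming it compacts m_SocketArray and m_EventArray under m_CS and closes the per-socket event:

    void CMinClient::DelSocket(SOCKET hSocket)   // sketch only, signature assumed from the calls above
    {
        ::EnterCriticalSection(&m_CS);           // critical sections are recursive, so the worker thread may re-enter
        for (int i = 0; i < m_iSocketCount; i++)
        {
            if (m_SocketArray[i] != hSocket)
                continue;
            WSACloseEvent(m_EventArray[i]);      // release the event object tied to this socket
            for (int j = i; j < m_iSocketCount - 1; j++)
            {
                m_SocketArray[j] = m_SocketArray[j + 1];   // shift the tail down to keep both arrays dense
                m_EventArray[j]  = m_EventArray[j + 1];
            }
            m_iSocketCount--;
            break;
        }
        ::LeaveCriticalSection(&m_CS);
    }

Calling something like this from CloseClient before closesocket would keep the wait array and the set of live sockets in step.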
#9
up
#10
This program has a problem in its very structure. If
WaitForSingleObject(m_hThreadEvent,INFINITE);
is waiting forever, how can the thread ever reach the event wait below?
WSAWaitForMultipleEvents(1,&m_EventArray[i],TRUE,0,FALSE);
And if you change that first wait to a 0 timeout, how would it ever get out again, except when a network event happens to arrive? So
my suggestion is to merge that first event into the WSAWaitForMultipleEvents() call and wait on everything together.
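A minimal sketch of what this reply recommends (again my illustration, not code from the thread): put m_hThreadEvent in slot 0 of the array handed to WSAWaitForMultipleEvents and the per-socket events after it, so a single wait covers both the control event and the network events (the total must stay within WSA_MAXIMUM_WAIT_EVENTS).

    WSAEVENT waitArray[WSA_MAXIMUM_WAIT_EVENTS];
    waitArray[0] = m_hThreadEvent;                        // control event in slot 0
    for (int k = 0; k < m_iSocketCount; k++)
        waitArray[k + 1] = m_EventArray[k];               // socket events follow

    DWORD dwWait = WSAWaitForMultipleEvents(m_iSocketCount + 1, waitArray,
                                            FALSE, WSA_INFINITE, FALSE);
    if (dwWait == WSA_WAIT_FAILED)
    {
        // handle the wait error
    }
    else if (dwWait - WSA_WAIT_EVENT_0 == 0)
    {
        // control event signaled: rebuild the array or shut the thread down
    }
    else
    {
        int idx = (int)(dwWait - WSA_WAIT_EVENT_0) - 1;   // index into m_SocketArray
        // WSAEnumNetworkEvents(m_SocketArray[idx], m_EventArray[idx], ...) and dispatch as before
    }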
#11
up
#12
mark