clean up shm and sem objects more reliably at shutdown.
authorGengbin Zheng <gzheng@illinois.edu>
Thu, 8 Mar 2012 06:04:49 +0000 (00:04 -0600)
committerGengbin Zheng <gzheng@illinois.edu>
Thu, 8 Mar 2012 06:04:49 +0000 (00:04 -0600)
src/arch/util/machine-pxshm.c
src/arch/util/machine-xpmem.c

index a368bf661df29fe9a911db3a5281893f8469b0cd..183fdfa971f352ec7d2ed7967e2eba2d8fecad85 100644 (file)
@@ -292,13 +292,16 @@ void CmiInitPxshm(char **argv){
  * shutdown shmem objects and semaphores
  *
  * *******************/
+static int pxshm_freed = 0;
 void tearDownSharedBuffers();
+void freeSharedBuffers();
 
 void CmiExitPxshm(){
         if (pxshmContext == NULL) return;
        if(pxshmContext->nodesize != 1){
                 int i;
-               tearDownSharedBuffers();
+               if (!pxshm_freed)
+                    tearDownSharedBuffers();
        
                for(i=0;i<pxshmContext->nodesize;i++){
                        if(i != pxshmContext->noderank){
@@ -557,6 +560,11 @@ void setupSharedBuffers(){
                        pxshmContext->sendBufs[i].header->bytes = 0;
                }
        }
+
+        if (CmiBarrier() == 0) {
+            freeSharedBuffers();
+            pxshm_freed = 1;
+        }
 }
 
 void allocBufNameStrings(char ***bufName){
@@ -595,7 +603,6 @@ void createShmObjectsAndSems(sharedBufData **bufs,char **bufNames){
                        (*bufs)[i].header->lock = 0; // by convention(see man page) 0 means unlocked
 #elif PXSHM_LOCK
                        (*bufs)[i].mutex = sem_open(bufNames[i],O_CREAT, S_IRUSR | S_IWUSR,1);
-//                        sem_unlink(bufNames[i]);
 #endif
                }else{
                        (*bufs)[i].header = NULL;
@@ -633,9 +640,23 @@ void createShmObject(char *name,int size,char **pPtr){
        CmiAssert(*pPtr != NULL);
 
        close(fd);
-        unlink(name);
 }
 
+void freeSharedBuffers(){
+       int i;
+       for(i= 0;i<pxshmContext->nodesize;i++){
+           if(i != pxshmContext->noderank){
+               if(shm_unlink(pxshmContext->recvBufNames[i]) < 0){
+                   fprintf(stderr,"Error from shm_unlink %s \n",strerror(errno));
+               }
+#if PXSHM_LOCK
+               sem_unlink(pxshmContext->sendBufNames[i]);
+               sem_unlink(pxshmContext->recvBufNames[i]);
+#endif
+           }
+       }
+}
+
 void tearDownSharedBuffers(){
        int i;
        for(i= 0;i<pxshmContext->nodesize;i++){
index ffce1b0fdaeea982eb98e21b50504427d8660c51..6de6b561c171abd2410bc2d7b79208213e4cc510 100644 (file)
@@ -261,7 +261,9 @@ void CmiInitXpmem(char **argv){
  * shutdown shmem objects and semaphores
  *
  * *******************/
+static int pxshm_freed = 0;
 void tearDownSharedBuffers();
+void freeSharedBuffers();
 
 void CmiExitXpmem(){
        int i=0;
@@ -269,7 +271,7 @@ void CmiExitXpmem(){
         if (xpmemContext == NULL) return;
 
        if(xpmemContext->nodesize != 1) {
-               tearDownSharedBuffers();
+               if (!pxshm_freed) tearDownSharedBuffers();
        
                for(i=0;i<xpmemContext->nodesize;i++){
                        if(i != xpmemContext->noderank){
@@ -513,6 +515,8 @@ void setupSharedBuffers(){
        createSendXpmemAndSems(&(xpmemContext->sendBufs),xpmemContext->sendBufNames);
         CmiBarrier();
         removeXpmemFiles();
+        freeSharedBuffers();
+        pxshm_freed = 1;
        for(i=0;i<xpmemContext->nodesize;i++){
                if(i != xpmemContext->noderank){
@@ -688,6 +691,18 @@ void removeXpmemFiles()
         unlink(fname);
 }
 
+void freeSharedBuffers(){
+       int i;
+       for(i= 0;i<xpmemContext->nodesize;i++){
+           if(i != xpmemContext->noderank){
+#if XPMEM_LOCK
+               sem_unlink(xpmemContext->sendBufNames[i]);
+               sem_unlink(xpmemContext->recvBufNames[i]);
+#endif
+           }
+       }
+}
+
 void tearDownSharedBuffers(){
        int i;
        for(i= 0;i<xpmemContext->nodesize;i++){
@@ -837,7 +852,7 @@ inline void flushAllSendQs(){
         for(i=0;i<xpmemContext->nodesize;i++) {
                 if (i == xpmemContext->noderank) continue;
                 XpmemSendQ *sendQ = xpmemContext->sendQs[i];
-                if(SendQ->numEntries > 0) {
+                if(sendQ->numEntries > 0) {
 #endif
        
 #if XPMEM_OSSPINLOCK