mirror of https://github.com/rapid7/metasploit-payloads synced 2025-04-18 07:11:12 +02:00

Fix up POSIX to match channel changes

POSIX was out of whack with Windows as a result of the changes made
around channels. The scheduler in POSIX was very different, and this
commit brings it into line.

Other than the obvious issues, a non-obvious issue with the changes
was that the channel was being freed up on close prior to the thread
terminating. This doesn't appear to be an issue on Windows, but was
causing crashes on close in POSIX.

The changes go quite deep. This changeset requires a lot of testing.
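
To make the non-obvious crash described above concrete, the following is a minimal, self-contained sketch of the same failure mode in plain pthreads. It is illustrative only and not code from this changeset: the Meterpreter THREAD/EVENT wrappers are replaced by a pthread and an atomic flag, and every name in it is made up.

/*
 * Illustrative only, not part of this changeset: the close-time race the
 * commit message describes, reduced to plain pthreads. A context is freed
 * while the thread that polls it is still running; the fix is to signal
 * and join the worker before releasing its context.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

typedef struct { char name[32]; } channel_ctx;

static atomic_int stop_requested;

static void *notify_worker(void *arg)
{
    channel_ctx *ctx = arg;
    while (!atomic_load(&stop_requested)) {
        printf("polling %s\n", ctx->name);   /* use-after-free if ctx was freed early */
        usleep(1000);
    }
    return NULL;
}

int main(void)
{
    pthread_t worker;
    channel_ctx *ctx = malloc(sizeof *ctx);

    strcpy(ctx->name, "demo-channel");
    pthread_create(&worker, NULL, notify_worker, ctx);
    usleep(10 * 1000);

    /* Buggy ordering (intermittent crash): free(ctx) before the worker stops. */
    /* Correct ordering: */
    atomic_store(&stop_requested, 1);   /* ask the worker to terminate           */
    pthread_join(worker, NULL);         /* wait until it has actually stopped    */
    free(ctx);                          /* only now is the context safe to free  */
    return 0;
}

The same ordering (signal, join, then free) is what the reworked scheduler now guarantees before a channel's context is released.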
commit 2f200d4fa1 (parent a89d79d139)
Author: OJ
Date: 2013-10-22 16:43:22 +10:00

11 changed files with 368 additions and 340 deletions
c/meterpreter

@@ -43,6 +43,12 @@ source/jpeg-8/Backup/*
 # ignore posix temp stuff
 posix-meterp-build-tmp/*
+data/meterpreter/*
+source/server/rtld/elf2bin
+source/server/rtld/lib*
+source/server/rtld/msflinker
+source/server/rtld/msflinker.bin
+source/server/rtld/rtldtest
 # Doxygen output
 docs/*

@@ -1,317 +1,310 @@
-#include "queue.h"
-#include "common.h"
-#include <poll.h>
-#include <pthread.h>
-
-typedef struct _WaitableEntry
-{
-    HANDLE waitable;
-    LPVOID context;
-    WaitableNotifyRoutine routine;
-    LIST_ENTRY(_WaitableEntry) link;
-} WaitableEntry;
-
-int nentries = 0;
-int ntableentries = 0;
-struct pollfd *polltable;
-LIST_HEAD(_WaitableEntryHead, _WaitableEntry) WEHead;
-
-THREAD *scheduler_thread;
-
-/*
- * If there are no waitables in the queue, we wait
- * for a conditional broadcast to start it.
- */
-pthread_mutex_t scheduler_mutex;
-pthread_cond_t scheduler_cond;
-
-DWORD scheduler_run(THREAD *thread);
-
-DWORD scheduler_destroy( VOID )
-{
-    WaitableEntry *current, *tmp;
-
-    dprintf("Shutdown of scheduler requested");
-
-    if(scheduler_thread)
-    {
-        dprintf("sigterm'ing thread");
-        thread_sigterm(scheduler_thread);
-
-        // wake up the thread if needed
-        pthread_cond_signal(&scheduler_cond);
-
-        // can delay execution up to 2 sec give or take
-        thread_join(scheduler_thread);
-    }
-
-    // free up memory
-    thread_destroy(scheduler_thread);
-    scheduler_thread = NULL;
-
-    dprintf("thread joined .. going for polltable");
-
-    if(polltable)
-    {
-        free(polltable);
-        polltable = NULL;
-        nentries = ntableentries = 0;
-    }
-
-    dprintf("Now for the fun part, iterating through list and removing items");
-
-    LIST_FOREACH_SAFE(current, &WEHead, link, tmp)
-    {
-        // can't call close function due to no remote struct
-        // will segfault if we try
-        // XXX could steal from scheduler_thread->parameter1 ?
-
-        dprintf("current: %08x, current->routine: %08x", current, current->routine);
-
-        LIST_REMOVE(current, link);
-        close(current->waitable);
-        free(current->context);
-        free(current);
-    }
-
-    dprintf("All done. Leaving");
-
-    return ERROR_SUCCESS;
-}
-
-DWORD scheduler_initialize( Remote * remote )
-{
-    if(scheduler_thread) {
-        dprintf("Hmmm. scheduler_initialize() called twice?");
-        return ERROR_SUCCESS;
-    }
-
-    pthread_mutex_init(&scheduler_mutex, NULL);
-    pthread_cond_init(&scheduler_cond, NULL);
-
-    scheduler_thread = thread_create(scheduler_run, remote, NULL);
-    if(! scheduler_thread) {
-        return ENOMEM;
-    }
-
-    thread_run(scheduler_thread);
-
-    dprintf("Initialized scheduler thread and started it running");
-
-    return ERROR_SUCCESS;
-}
-
-/*
- * Insert a waitable object for checking and processing
- */
-DWORD
-scheduler_insert_waitable(HANDLE waitable, LPVOID context,
-    WaitableNotifyRoutine routine)
-{
-    DWORD retcode = ERROR_SUCCESS;
-    WaitableEntry *current;
-    struct pollfd *polltableprev;
-
-    pthread_mutex_lock(&scheduler_mutex);
-
-    //dprintf("Handle: %d, context: 0x%08x, routine: 0x%08x. nentries = %d, polltable = 0x%08x",
-    //    waitable, context, routine, nentries, polltable);
-
-    do {
-        if ((current = malloc(sizeof(WaitableEntry))) == NULL) {
-            retcode = ENOMEM;
-            break;
-        }
-
-        nentries++;
-
-        if (nentries > ntableentries) {
-            polltableprev = polltable;
-
-            // We do *2 because reallocating every scheduler_insert_waitable
-            // is slower than need be.
-
-            polltable = malloc((nentries*2)*sizeof(struct pollfd));
-
-            if (polltable == NULL) {
-                nentries--;
-                polltable = polltableprev;
-                free(current);
-
-                retcode = ENOMEM;
-                break;
-            }
-
-            if (polltableprev != NULL)
-                free(polltableprev);
-
-            ntableentries = (nentries*2);
-        }
-
-        current->waitable = waitable;
-        current->context = context;
-        current->routine = routine;
-
-        LIST_INSERT_HEAD(&WEHead, current, link);
-
-    } while(0);
-
-    dprintf("WEHead: %08x, Now nentries = %d, and polltable = 0x%08x. LIST_EMPTY: %d", &WEHead, nentries, polltable, LIST_EMPTY(&WEHead));
-    /*
-    LIST_FOREACH(current, &WEHead, link)
-        dprintf("current->waitable: %d, current->context: %08x, current->routine: %08x",
-            current->waitable, current->context, current->routine);
-    */
-
-    pthread_mutex_unlock(&scheduler_mutex);
-
-    // wake up scheduler if needed.
-    pthread_cond_signal(&scheduler_cond);
-
-    return retcode;
-}
-
-/*
- * Remove a waitable object
- */
-DWORD
-scheduler_remove_waitable(HANDLE waitable)
-{
-    DWORD retcode = ERROR_SUCCESS;
-    WaitableEntry *current;
-
-    dprintf("Handle: %d", waitable);
-
-    pthread_mutex_lock(&scheduler_mutex);
-
-    do {
-        LIST_FOREACH(current, &WEHead, link)
-            if (current->waitable == waitable)
-                break;
-
-        if (current == NULL) {
-            retcode = ENOENT;
-            break;
-        }
-
-        LIST_REMOVE(current, link);
-        free(current);
-        nentries--;
-
-    } while(0);
-
-    pthread_mutex_unlock(&scheduler_mutex);
-
-    return retcode;
-}
-
-/*
- * Runs the scheduler, checking waitable objects for data
- */
-DWORD
-scheduler_run(THREAD *thread)
-{
-    Remote *remote;
-    remote = (Remote *) thread->parameter1;
-    WaitableEntry *current, *tmp;
-    int ret, i, found, idx;
-    int timeout;
-
-    timeout = 1000;
-
-    // see if we can modify this code to use waitable as the index into polltable
-    // and waitable events. saves time looking up in exchange for more memory use.
-
-    pthread_mutex_lock(&scheduler_mutex);
-
-    dprintf("Beginning loop");
-
-    while( event_poll(thread->sigterm, 0) == FALSE )
-    {
-        // scheduler_mutex is held upon entry and execution of the loop
-
-        idx = 0;
-
-        while(event_poll(thread->sigterm, 0) == FALSE && (LIST_EMPTY(&WEHead) || polltable == NULL)) {
-            // XXX I'd prefer to use pthread_cond_timedwait, but it's broken in bionic and just
-            // chews cpu
-
-            //dprintf("    Waiting for conditional (%08x). %d vs %d",
-            //    &scheduler_cond, LIST_EMPTY(&WEHead), polltable == NULL);
-
-            pthread_cond_wait(&scheduler_cond, &scheduler_mutex);
-
-            // pthread_cond_wait still chews CPU in some cases, usleep to yield
-            // processor so we don't just spin.
-            usleep(1000);
-        }
-
-        LIST_FOREACH(current, &WEHead, link) {
-            dprintf("current->waitable: %d, current->context: %08x, current->routine: %08x",
-                current->waitable, current->context, current->routine);
-            polltable[idx].fd = current->waitable;
-            polltable[idx].events = POLLRDNORM;
-            polltable[idx].revents = 0;
-            idx++;
-        }
-
-        dprintf("Created a polltable of %d", idx);
-
-        pthread_mutex_unlock(&scheduler_mutex);
-
-        ret = poll(polltable, idx, timeout);
-
-        pthread_mutex_lock(&scheduler_mutex);
-
-        if(ret == 0) continue;
-        if(ret == -1) {
-            if(errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) {
-                continue;
-            }
-            dprintf("poll() failed, errno: %d (%s). Sleeping 1 second and retrying", errno, strerror(errno));
-            sleep(1);
-            continue;
-        }
-
-        for (found = i = 0; i < idx && found < ret; i++)
-        {
-            if (polltable[i].revents)
-            {
-                LIST_FOREACH(current, &WEHead, link)
-                    if (current->waitable == polltable[i].fd)
-                        break;
-
-                if(current)
-                {
-                    ret = current->routine(remote, current->context);
-                    if(ret != ERROR_SUCCESS)
-                    {
-                        // could call close due to remote, but it would deadlock
-                        // if it calls remove waitable
-                        // could make a separate list to handle when we are not locking
-                        // unlink and let rest deal with it ?
-
-                        dprintf("current->routine (%08x / %08x) returned an error message. destroying", current->routine, current->context);
-
-                        LIST_REMOVE(current, link);
-                        close(current->waitable);
-                        channel_close((Channel *)current->context, remote, NULL, 0, NULL);
-                        free(current);
-
-                        nentries--;
-                    }
-                }
-            }
-        }
-    }
-
-    dprintf("Ending loop");
-    pthread_mutex_unlock(&scheduler_mutex);
-}
+#include "common.h"
+#include <poll.h>
+
+typedef struct _WaitableEntry
+{
+    Remote *               remote;
+    HANDLE                 waitable;
+    EVENT *                pause;
+    EVENT *                resume;
+    LPVOID                 context;
+    BOOL                   running;
+    WaitableNotifyRoutine  routine;
+    WaitableDestroyRoutine destroy;
+} WaitableEntry;
+
+/*
+ * The list of all currently running threads in the scheduler subsystem.
+ */
+LIST * schedulerThreadList = NULL;
+
+/*
+ * The Remote that is associated with the scheduler subsystem
+ */
+Remote * schedulerRemote = NULL;
+
+/*
+ * Initialize the scheduler subsystem. Must be called before any calls to scheduler_insert_waitable.
+ */
+DWORD scheduler_initialize( Remote * remote )
+{
+    DWORD result = ERROR_SUCCESS;
+
+    dprintf( "[SCHEDULER] entering scheduler_initialize." );
+
+    if( remote == NULL )
+        return ERROR_INVALID_HANDLE;
+
+    schedulerThreadList = list_create();
+    if( schedulerThreadList == NULL )
+        return ERROR_INVALID_HANDLE;
+
+    schedulerRemote = remote;
+
+    dprintf( "[SCHEDULER] leaving scheduler_initialize." );
+
+    return result;
+}
+
+/*
+ * Destroy the scheduler subsystem. All waitable threads are signaled to terminate.
+ * This function blocks until all waitable threads have terminated.
+ */
+DWORD scheduler_destroy( VOID )
+{
+    DWORD result          = ERROR_SUCCESS;
+    DWORD index           = 0;
+    DWORD count           = 0;
+    LIST * jlist          = list_create();
+    THREAD * thread       = NULL;
+    WaitableEntry * entry = NULL;
+
+    dprintf( "[SCHEDULER] entering scheduler_destroy." );
+
+    lock_acquire( schedulerThreadList->lock );
+
+    count = list_count( schedulerThreadList );
+
+    for( index=0 ; index < count ; index++ )
+    {
+        thread = (THREAD *)list_get( schedulerThreadList, index );
+        if( thread == NULL )
+            continue;
+
+        list_push( jlist, thread );
+
+        entry = (WaitableEntry *)thread->parameter1;
+
+        if( !entry->running )
+            event_signal( entry->resume );
+
+        thread_sigterm( thread );
+    }
+
+    lock_release( schedulerThreadList->lock );
+
+    dprintf( "[SCHEDULER] scheduler_destroy, joining all waitable threads..." );
+
+    while( TRUE )
+    {
+        dprintf( "[SCHEDULER] scheduler_destroy, popping off another item from thread list..." );
+
+        thread = (THREAD *)list_pop( jlist );
+        if( thread == NULL )
+            break;
+
+        dprintf( "[SCHEDULER] scheduler_destroy, joining thread 0x%08X...", thread );
+
+        thread_join( thread );
+    }
+
+    dprintf( "[SCHEDULER] scheduler_destroy, destroying lists..." );
+
+    list_destroy( jlist );
+    list_destroy( schedulerThreadList );
+
+    schedulerThreadList = NULL;
+
+    dprintf( "[SCHEDULER] leaving scheduler_destroy." );
+
+    return result;
+}
+
+/*
+ * Insert a new waitable thread for checking and processing.
+ */
+DWORD scheduler_insert_waitable( HANDLE waitable, LPVOID entryContext, LPVOID threadContext, WaitableNotifyRoutine routine, WaitableDestroyRoutine destroy )
+{
+    DWORD result = ERROR_SUCCESS;
+    THREAD * swt = NULL;
+
+    WaitableEntry * entry = (WaitableEntry *)malloc( sizeof( WaitableEntry ) );
+    if( entry == NULL )
+        return ERROR_NOT_ENOUGH_MEMORY;
+
+    dprintf( "[SCHEDULER] entering scheduler_insert_waitable( 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X )",
+        waitable, entryContext, threadContext, routine, destroy );
+
+    memset( entry, 0, sizeof( WaitableEntry ) );
+
+    entry->remote   = schedulerRemote;
+    entry->waitable = waitable;
+    entry->destroy  = destroy;
+    entry->context  = entryContext;
+    entry->routine  = routine;
+    entry->pause    = event_create();
+    entry->resume   = event_create();
+
+    swt = thread_create( scheduler_waitable_thread, entry, threadContext );
+    if( swt != NULL )
+    {
+        dprintf( "[SCHEDULER] created scheduler_waitable_thread 0x%08X", swt );
+        thread_run( swt );
+    }
+    else
+    {
+        free( entry );
+        result = ERROR_INVALID_HANDLE;
+    }
+
+    dprintf( "[SCHEDULER] leaving scheduler_insert_waitable" );
+
+    return result;
+}
+
+/*
+ * Signal a waitable object.
+ */
+DWORD scheduler_signal_waitable( HANDLE waitable, SchedularSignal signal )
+{
+    DWORD index           = 0;
+    DWORD count           = 0;
+    THREAD * thread       = NULL;
+    WaitableEntry * entry = NULL;
+    DWORD result          = ERROR_NOT_FOUND;
+
+    dprintf( "[SCHEDULER] entering scheduler_signal_waitable( 0x%08X )", waitable );
+
+    if( schedulerThreadList == NULL || waitable == NULL )
+        return ERROR_INVALID_HANDLE;
+
+    lock_acquire( schedulerThreadList->lock );
+
+    count = list_count( schedulerThreadList );
+
+    for( index=0 ; index < count ; index++ )
+    {
+        thread = (THREAD *)list_get( schedulerThreadList, index );
+        if( thread == NULL )
+            continue;
+
+        entry = (WaitableEntry *)thread->parameter1;
+        if( entry == NULL )
+            continue;
+
+        if( entry->waitable == waitable )
+        {
+            dprintf( "[SCHEDULER] scheduler_signal_waitable: signaling waitable = 0x%08X, thread = 0x%08X", waitable, thread );
+            if( signal == Pause )
+            {
+                if( entry->running ) {
+                    dprintf( "[SCHEDULER] scheduler_signal_waitable: thread running, pausing. waitable = 0x%08X, thread = 0x%08X, handle = 0x%X", waitable, thread, entry->pause->handle );
+                    event_signal( entry->pause );
+                } else {
+                    dprintf( "[SCHEDULER] scheduler_signal_waitable: thread already paused. waitable = 0x%08X, thread = 0x%08X", waitable, thread );
+                }
+            }
+            else
+            {
+                if( !entry->running ) {
+                    dprintf( "[SCHEDULER] scheduler_signal_waitable: thread paused, resuming. waitable = 0x%08X, thread = 0x%08X, handle = 0x%X", waitable, thread, entry->resume->handle );
+                    event_signal( entry->resume );
+                }
+
+                if( signal == Stop ) {
+                    dprintf( "[SCHEDULER] scheduler_signal_waitable: stopping thread. waitable = 0x%08X, thread = 0x%08X, handle = 0x%X", waitable, thread, thread->sigterm->handle );
+                    thread_sigterm( thread );
+                } else {
+                    dprintf( "[SCHEDULER] scheduler_signal_waitable: thread already running. waitable = 0x%08X, thread = 0x%08X", waitable, thread );
+                }
+            }
+
+            result = ERROR_SUCCESS;
+            break;
+        }
+    }
+
+    lock_release( schedulerThreadList->lock );
+
+    dprintf( "[SCHEDULER] leaving scheduler_signal_waitable" );
+
+    return result;
+}
+
+/*
+ * The scheduler's waitable thread. Each scheduled item will have its own thread which
+ * waits for either data to process or the thread's signal to terminate.
+ */
+DWORD THREADCALL scheduler_waitable_thread( THREAD * thread )
+{
+    struct pollfd pollDetail = {0};
+    WaitableEntry * entry    = NULL;
+    DWORD result             = 0;
+    BOOL terminate           = FALSE;
+    UINT signalIndex         = 0;
+
+    if( thread == NULL )
+        return ERROR_INVALID_HANDLE;
+
+    entry = (WaitableEntry *)thread->parameter1;
+    if( entry == NULL )
+        return ERROR_INVALID_HANDLE;
+
+    if( entry->routine == NULL )
+        return ERROR_INVALID_HANDLE;
+
+    if( schedulerThreadList == NULL )
+        return ERROR_INVALID_HANDLE;
+
+    list_add( schedulerThreadList, thread );
+
+    pollDetail.fd      = entry->waitable;
+    pollDetail.events  = POLLRDNORM;
+    pollDetail.revents = 0;
+
+    dprintf( "[SCHEDULER] entering scheduler_waitable_thread( 0x%08X )", thread );
+
+    entry->running = TRUE;
+
+    while( !terminate )
+    {
+        if( event_poll( thread->sigterm, 0 ) ) {
+            dprintf( "[SCHEDULER] scheduler_waitable_thread( 0x%08X ), signaled to terminate...", thread );
+            terminate = TRUE;
+        }
+        else if( event_poll( entry->pause, 0 ) ) {
+            dprintf( "[SCHEDULER] scheduler_waitable_thread( 0x%08X ), signaled to pause...", thread );
+            entry->running = FALSE;
+            while( !event_poll( entry->resume, 1000 ) );
+            entry->running = TRUE;
+            dprintf( "[SCHEDULER] scheduler_waitable_thread( 0x%08X ), signaled to resume...", thread );
+        }
+        else if( poll( &pollDetail, 1, 100 ) == POLLIN ) {
+            //dprintf( "[SCHEDULER] scheduler_waitable_thread( 0x%08X ), signaled on waitable...", thread );
+            entry->routine( entry->remote, entry->context, (LPVOID)thread->parameter2 );
+        }
+    }
+
+    dprintf( "[SCHEDULER] leaving scheduler_waitable_thread( 0x%08X )", thread );
+
+    // we acquire the lock for this block as we are freeing 'entry' which may be accessed
+    // in a second call to scheduler_signal_waitable for this thread (unlikely but best practice).
+    dprintf( "[SCHEDULER] attempting to remove thread( 0x%08X )", thread );
+    lock_acquire( schedulerThreadList->lock );
+    if( list_remove( schedulerThreadList, thread ) )
+    {
+        if( entry->destroy ) {
+            dprintf( "[SCHEDULER] destroying thread( 0x%08X )", thread );
+            entry->destroy( entry->waitable, entry->context, (LPVOID)thread->parameter2 );
+            dprintf( "[SCHEDULER] destroyed thread( 0x%08X )", thread );
+        }
+        else if( entry->waitable ) {
+            dprintf( "[SCHEDULER] scheduler_waitable_thread( 0x%08X ) closing handle 0x%08X", thread, entry->waitable );
+            close( entry->waitable );
+        }

+        dprintf( "[SCHEDULER] cleaning up resume thread( 0x%08X )", thread );
+        event_destroy( entry->resume );
+        dprintf( "[SCHEDULER] cleaning up pause thread( 0x%08X )", thread );
+        event_destroy( entry->pause );
+        dprintf( "[SCHEDULER] cleaning up thread( 0x%08X )", thread );
+        thread_destroy( thread );
+        dprintf( "[SCHEDULER] cleaning up entry( 0x%08X )", thread );
+        free( entry );
+    }
+    lock_release( schedulerThreadList->lock );
+
+    return ERROR_SUCCESS;
+}

@@ -565,9 +565,10 @@ DWORD remote_request_core_channel_interact(Remote *remote, Packet *packet)
 		NativeChannelOps *native = (NativeChannelOps *)&channel->ops;

 		// Check to see if this channel has a registered interact handler
-		if (native->interact)
-			result = native->interact(channel, packet, native->context,
-					interact);
+		dprintf( "[DISPATCH] attempting to set interactive: %d context 0x%p", interact, native->context );
+		if (native->interact) {
+			result = native->interact(channel, packet, native->context, interact);
+		}
 	}

 	// Set the channel's interactive state

@@ -159,6 +159,7 @@ VOID channel_destroy(Channel *channel, Packet *request)
 	lock_destroy( channel->lock );

 	// Destroy the channel context
+	dprintf( "[CHANNEL] Free up the channel context 0x%p", channel );
 	free(channel);
 }
@@ -445,13 +446,16 @@ DWORD _channel_packet_completion_routine(Remote *remote, Packet *packet,
 				length);
 	}
 	else if ((!strcmp(method, "core_channel_close")) &&
-			(comp->routine.close))
+			(comp->routine.close)) {
+		dprintf( "[CHANNEL] freeing up the completion context" );
 		res = comp->routine.close(remote, channel, comp->context, result);
+	}
 	else if ((!strcmp(method, "core_channel_interact")) &&
 			(comp->routine.interact))
 		res = comp->routine.interact(remote, channel, comp->context, result);

 	// Deallocate the completion context
+	dprintf( "[CHANNEL] freeing up the completion context" );
 	free(comp);

 	return res;

@@ -44,7 +44,7 @@ int current_unix_timestamp(void) {
 #ifndef _WIN32

-int debugging_enabled;
+int debugging_enabled = 1;

 /*
  */

@@ -11,12 +11,12 @@ typedef enum
 	Stop = 3
 } SchedularSignal;

-typedef DWORD (*WaitableNotifyRoutine)(Remote *remote, LPVOID context);
-typedef DWORD (*WaitableDestroyRoutine)(HANDLE waitable, LPVOID context);
+typedef DWORD (*WaitableNotifyRoutine)(Remote *remote, LPVOID entryContext, LPVOID threadContext);
+typedef DWORD (*WaitableDestroyRoutine)(HANDLE waitable, LPVOID entryContext, LPVOID threadContext);

 LINKAGE DWORD scheduler_initialize( Remote * remote );
 LINKAGE DWORD scheduler_destroy( VOID );
-LINKAGE DWORD scheduler_insert_waitable( HANDLE waitable, LPVOID context, WaitableNotifyRoutine routine, WaitableDestroyRoutine destroy );
+LINKAGE DWORD scheduler_insert_waitable( HANDLE waitable, LPVOID entryContext, LPVOID threadContext, WaitableNotifyRoutine routine, WaitableDestroyRoutine destroy );
 LINKAGE DWORD scheduler_signal_waitable( HANDLE waitable, SchedularSignal signal );
 LINKAGE DWORD THREADCALL scheduler_waitable_thread( THREAD * thread );
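
For reference, a registration against the new declarations above would look roughly like the following. This is a sketch only, not code from the changeset: my_notify, my_destroy, MyEntryCtx, MyThreadCtx and register_my_waitable are hypothetical names, and the Remote/HANDLE/DWORD/LPVOID types come from the Meterpreter common headers.

/* Sketch only: hypothetical routines matching the new three-argument typedefs. */
typedef struct { int fd; } MyEntryCtx;   /* hypothetical per-waitable state           */
typedef struct { int id; } MyThreadCtx;  /* hypothetical state shared with the thread */

static DWORD my_notify( Remote * remote, LPVOID entryContext, LPVOID threadContext )
{
	MyEntryCtx * entry   = (MyEntryCtx *)entryContext;   /* delivered as entry->context in the scheduler     */
	MyThreadCtx * shared = (MyThreadCtx *)threadContext; /* delivered via thread->parameter2 in the scheduler */
	/* ... read from entry->fd and push the data up the channel ... */
	return ERROR_SUCCESS;
}

static DWORD my_destroy( HANDLE waitable, LPVOID entryContext, LPVOID threadContext )
{
	/* Runs on the waitable's own thread once it has been signaled to stop, so
	   tearing the contexts down here cannot race with my_notify. */
	return ERROR_SUCCESS;
}

static DWORD register_my_waitable( HANDLE waitable, MyEntryCtx * entryCtx, MyThreadCtx * threadCtx )
{
	/* The insert call now takes both contexts plus an optional destroy routine. */
	return scheduler_insert_waitable( waitable, entryCtx, threadCtx, my_notify, my_destroy );
}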

@@ -156,21 +156,26 @@ BOOL event_signal( EVENT * event )
  */
 BOOL event_poll( EVENT * event, DWORD timeout )
 {
+#ifdef _WIN32
 	if( event == NULL )
 		return FALSE;

-#ifdef _WIN32
 	if( WaitForSingleObject( event->handle, timeout ) == WAIT_OBJECT_0 )
 		return TRUE;

 	return FALSE;
 #else
+	BOOL result = FALSE;
+
 	// DWORD WINAPI WaitForSingleObject(
 	//   __in  HANDLE hHandle,
 	//   __in  DWORD dwMilliseconds
 	// );
 	// http://msdn.microsoft.com/en-us/library/ms687032(VS.85).aspx
+
+	if( event == NULL )
+		return FALSE;

 	if(timeout) {
 		struct timespec ts;
@@ -191,7 +196,12 @@ BOOL event_poll( EVENT * event, DWORD timeout )
 		__futex_wait(&(event->handle), 0, &ts);
 	}

-	return event->handle ? TRUE : FALSE;
+	// We should behave like an auto-reset event
+	result = event->handle ? TRUE : FALSE;
+	if( result )
+		event->handle = (HANDLE)0;
+
+	return result;
 #endif
 }

@@ -328,7 +328,7 @@ DWORD create_tcp_client_channel(Remote *remote, LPCSTR remoteHost, USHORT remote
 			WSAEventSelect(ctx->fd, ctx->notify, FD_READ|FD_CLOSE);
 			dprintf( "[TCP] create_tcp_client_channel. host=%s, port=%d created the notify %.8x", remoteHost, remotePort, ctx->notify );

-			scheduler_insert_waitable( ctx->notify, ctx, (WaitableNotifyRoutine)tcp_channel_client_local_notify, NULL);
+			scheduler_insert_waitable( ctx->notify, ctx, NULL, (WaitableNotifyRoutine)tcp_channel_client_local_notify, NULL);
 		}
 	} while (0);

@@ -105,7 +105,7 @@ TcpClientContext * tcp_channel_server_create_client( TcpServerContext * serverctx )
 		if( !clientctx->channel )
 			BREAK_WITH_ERROR( "[TCP-SERVER] tcp_channel_server_create_client. clientctx->channel == NULL", ERROR_INVALID_HANDLE );

-		dwResult = scheduler_insert_waitable( clientctx->notify, clientctx, (WaitableNotifyRoutine)tcp_channel_client_local_notify, NULL );
+		dwResult = scheduler_insert_waitable( clientctx->notify, clientctx, NULL, (WaitableNotifyRoutine)tcp_channel_client_local_notify, NULL );

 	} while( 0 );
@@ -266,7 +266,7 @@ DWORD request_net_tcp_server_channel_open( Remote * remote, Packet * packet )
 		if( !ctx->channel )
 			BREAK_WITH_ERROR( "[TCP-SERVER] request_net_tcp_server_channel_open. channel_create_stream failed", ERROR_INVALID_HANDLE );

-		scheduler_insert_waitable( ctx->notify, ctx, (WaitableNotifyRoutine)tcp_channel_server_notify, NULL );
+		scheduler_insert_waitable( ctx->notify, ctx, NULL, (WaitableNotifyRoutine)tcp_channel_server_notify, NULL );

 		packet_add_tlv_uint( response, TLV_TYPE_CHANNEL_ID, channel_get_id(ctx->channel) );

@@ -323,7 +323,7 @@ DWORD request_net_udp_channel_open( Remote * remote, Packet * packet )
 		if( !ctx->sock.channel )
 			BREAK_WITH_ERROR( "[UDP] request_net_udp_channel_open. channel_create_stream failed", ERROR_INVALID_HANDLE );

-		scheduler_insert_waitable( ctx->sock.notify, ctx, (WaitableNotifyRoutine)udp_channel_notify, NULL );
+		scheduler_insert_waitable( ctx->sock.notify, ctx, NULL, (WaitableNotifyRoutine)udp_channel_notify, NULL );

 		packet_add_tlv_uint( response, TLV_TYPE_CHANNEL_ID, channel_get_id(ctx->sock.channel) );

@@ -270,6 +270,7 @@ DWORD request_sys_process_execute(Remote *remote, Packet *packet)
 		memset(&chops, 0, sizeof(PoolChannelOps));

 		// Initialize the channel operations
+		dprintf( "[PROCESS] context address 0x%p", ctx );
 		chops.native.context = ctx;
 		chops.native.write = process_channel_write;
 		chops.native.close = process_channel_close;
@@ -570,6 +571,7 @@ DWORD request_sys_process_execute(Remote *remote, Packet *packet)
 	int idx, i;
 	pid_t pid;
 	int have_pty = -1;
+	ProcessChannelContext * ctx = NULL;

 	int hidden = (flags & PROCESS_EXECUTE_FLAG_HIDDEN);
@@ -620,7 +622,6 @@ DWORD request_sys_process_execute(Remote *remote, Packet *packet)
 	// such that input can be directed to and from the remote endpoint
 	if (flags & PROCESS_EXECUTE_FLAG_CHANNELIZED)
 	{
-		ProcessChannelContext * ctx = NULL;
 		PoolChannelOps chops;
 		Channel *newChannel;
@@ -634,6 +635,7 @@ DWORD request_sys_process_execute(Remote *remote, Packet *packet)
 		memset(&chops, 0, sizeof(PoolChannelOps));

 		// Initialize the channel operations
+		dprintf( "[PROCESS] context address 0x%p", ctx );
 		chops.native.context = ctx;
 		chops.native.write = process_channel_write;
 		chops.native.close = process_channel_close;
@@ -758,6 +760,7 @@ DWORD request_sys_process_execute(Remote *remote, Packet *packet)
 	if(have_pty) {
 		dprintf("child channelized\n");
 		close(slave);
+		ctx->pProcess = (HANDLE)pid;
 	} else {
 		close(in[0]);
 		close(out[1]);
@@ -1073,11 +1076,14 @@ DWORD process_channel_close( Channel *channel, Packet *request, LPVOID context )
 	return result;
 }

-DWORD process_channel_interact_destroy( HANDLE waitable, Channel* channel )
+DWORD process_channel_interact_destroy( HANDLE waitable, LPVOID entryContext, LPVOID threadContext )
 {
+	ProcessChannelContext *ctx = (ProcessChannelContext *)threadContext;
 	DWORD dwResult = ERROR_SUCCESS;

+	dprintf( "[PROCESS] terminating context 0x%p", ctx );
+
 #ifdef _WIN32
-	ProcessChannelContext *ctx = (ProcessChannelContext *)channel->ops.stream.native.context;

 	CloseHandle( ctx->pStdin );
 	CloseHandle( ctx->pStdout );
@@ -1087,8 +1093,16 @@ DWORD process_channel_interact_destroy( HANDLE waitable, Channel* channel )
 		TerminateProcess( ctx->pProcess, 0 );
 	}
 #else
-	close( ctx->pStdin );
-	close( ctx->pStdout );
+	//dprintf( "[PROCESS] closing stdin 0x%x", ctx->pStdin );
+	//close( ctx->pStdin );
+	//dprintf( "[PROCESS] closing stdout 0x%x", ctx->pStdout );
+	//close( ctx->pStdout );
+
+	dprintf( "[PROCESS] pid %u", ctx->pProcess );
+	if( ctx->pProcess ) {
+		dprintf( "[PROCESS] terminating pid %u", ctx->pProcess );
+		kill( (pid_t)ctx->pProcess, 9 );
+	}
 #endif

 	free( ctx );
@@ -1100,10 +1114,10 @@ DWORD process_channel_interact_destroy( HANDLE waitable, Channel* channel )
 /*
  * Callback for when data is available on the standard output handle of
  * a process channel that is interactive mode
  */
-DWORD process_channel_interact_notify(Remote *remote, Channel *channel)
+DWORD process_channel_interact_notify(Remote *remote, LPVOID entryContext, LPVOID threadContext)
 {
-	ProcessChannelContext *ctx = (ProcessChannelContext *)channel->ops.stream.native.context;
+	Channel *channel = (Channel*)entryContext;
+	ProcessChannelContext *ctx = (ProcessChannelContext *)threadContext;
 	DWORD bytesRead, bytesAvail = 0;
 	CHAR buffer[16384];
 	DWORD result = ERROR_SUCCESS;
@@ -1174,7 +1188,7 @@ DWORD process_channel_interact(Channel *channel, Packet *request, LPVOID context
 	if (interact) {
 		// try to resume it first, if it's not there, we can create a new entry
 		if( (result = scheduler_signal_waitable( ctx->pStdout, Resume )) == ERROR_NOT_FOUND ) {
-			result = scheduler_insert_waitable( ctx->pStdout, channel,
+			result = scheduler_insert_waitable( ctx->pStdout, channel, context,
 					(WaitableNotifyRoutine)process_channel_interact_notify,
 					(WaitableDestroyRoutine)process_channel_interact_destroy );
 		}