significant improvements to the server, now has timeouts and buffer cleanup
parent 75e29a6f66
commit 640de64f1d
@@ -17,6 +17,8 @@ void Server::launch_threads()
 {
     threads.push_back( Glib::Thread::create( sigc::mem_fun(this, &Server::listen), false ) );
     threads.push_back( Glib::Thread::create( sigc::mem_fun(this, &Server::mix), false ) );
+    threads.push_back( Glib::Thread::create( sigc::mem_fun(this, &Server::console), false) );
+    threads.push_back( Glib::Thread::create( sigc::mem_fun(this, &Server::expire), false) );
 }
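Note on the thread setup: all four workers are created with the joinable flag set to false, so the Glib::Thread pointers collected in threads can never be join()ed; a clean shutdown would need the joinable variant. A minimal sketch under that assumption (glibmm 2.4 threading API; worker() is a placeholder):

#include <glibmm/thread.h>
#include <sigc++/sigc++.h>

void worker() { /* placeholder workload */ }

int main()
{
    Glib::thread_init();  // must run once before any Glib::Thread use

    // 'true' = joinable: the returned pointer stays valid until join()
    Glib::Thread* t = Glib::Thread::create( sigc::ptr_fun(&worker), true );
    t->join();  // blocks until worker() returns
    return 0;
}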
@@ -42,10 +44,9 @@ void Server::listen()
     udp::socket socket(io_service, udp::endpoint(udp::v4(), port));

     cout << "listening" << endl;
+    frame_t frame;
     while (1)
     {
-        frame_t frame;
-
         // creating the buffer each time is faster than zeroing it out
         boost::array<char, BUFLEN> recv_buf;
         udp::endpoint remote_endpoint;
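The "creating the buffer each time is faster" comment works because boost::array is an aggregate: a fresh local instance does no per-byte initialization, while reusing one buffer would mean clearing it on every pass. A sketch of the trade-off (572, the old BUFLEN value, stands in for the define):

#include <boost/array.hpp>
#include <cstring>

void receive_one_packet()
{
    // What the commit does: a brand-new, uninitialized array per packet.
    // receive_from() overwrites the bytes it uses, so no clearing is needed.
    boost::array<char, 572> recv_buf;
    (void)recv_buf;
}

void receive_with_reuse()
{
    // The alternative: one long-lived buffer must be zeroed each pass,
    // which touches all 572 bytes every time.
    static boost::array<char, 572> recv_buf;
    std::memset(recv_buf.data(), 0, recv_buf.size());
}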
@@ -54,12 +55,15 @@ void Server::listen()
         socket.receive_from(boost::asio::buffer(recv_buf),
                             remote_endpoint, 0, error);

+        // bufnum is used further down
+        int bufnum = 0;
         /* the buffer is locked for a long time; however, we need
            to make sure that none of the buffers expires while we're about
            to write to it */
         {
             Glib::Mutex::Lock lock(mutex_);
-            int size = endpoints.size();
+            int size = buffers.size();
+            time(&currenttime);
-            int bufnum = 0;
             bool known = false;
             for(bufnum = 0; bufnum < size; bufnum++)
             {
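The extra braces around each Glib::Mutex::Lock are the whole synchronization scheme: the lock is scoped RAII, so the mutex is released exactly at the closing brace. A minimal sketch of the pattern, assuming a member Glib::Mutex mutex_ as declared in the header further down:

#include <glibmm/thread.h>
#include <vector>

class Example
{
public:
    void touch_shared()
    {
        {   // lock scope: mutex_ is held only between these braces
            Glib::Mutex::Lock lock(mutex_);
            shared_.push_back(42);
        }   // Lock's destructor releases mutex_ here

        // lock-free work continues outside the braces
    }

private:
    Glib::Mutex      mutex_;
    std::vector<int> shared_;
};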
@@ -79,6 +83,7 @@ void Server::listen()
                 cout << "adding new buffer for " << remote_endpoint << endl;
                 buffers.push_back( new Buffer( endpointstring.str() ) );
                 endpoints.push_back( remote_endpoint );
+                times.push_back( currenttime );
             }

             // discard packet, we're not accepting any more sources!
@@ -87,7 +92,6 @@ void Server::listen()
                 cout << "no more buffers left! " << bufnum << endl;
                 continue;
             }
         }
-
         if( packetcounter % 10000 == 0 )
         {
@@ -122,17 +126,15 @@ void Server::listen()
             }
         }

         // this part needs to be made threadsafe because buffers will be accessed
         // by the mixer and the udp listener
         {
             Glib::Mutex::Lock lock(mutex_);
-            // convert ascii to integer value
             // be extra certain that we're not writing into wild memory
             if( bufnum < buffers.size() )
             {
                 buffers[ bufnum ]->set(frame);
             }
-            }

+            // this is accurate enough for the purpose of expiring unused buffers
+            times[bufnum] = currenttime;
         }
-        } // lock is released

         if (error && error != boost::asio::error::message_size)
             throw boost::system::system_error(error);
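The bufnum < buffers.size() re-check matters because the mutex is dropped between the scope where bufnum was chosen and this one, and expire() can shrink buffers in that window. A condensed sketch of the interaction (Buffer is stubbed; the real class lives elsewhere in the repo):

#include <glibmm/thread.h>
#include <vector>

struct Buffer { void set(int) {} };  // stub for the project's Buffer

static Glib::Mutex          mutex_;
static std::vector<Buffer*> buffers;

void write_frame(int frame)
{
    int bufnum = 0;
    {
        Glib::Mutex::Lock lock(mutex_);
        // ... bufnum is chosen while the lock is held ...
    }   // lock released: expire() may erase entries now

    {
        Glib::Mutex::Lock lock(mutex_);
        // without this re-check, buffers[bufnum] could index erased storage
        if( bufnum < (int)buffers.size() )
            buffers[bufnum]->set(frame);
    }
}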
@@ -165,17 +167,15 @@ void Server::mix()
     {
         counter++;
         frame_t frame, temp_frame;

+        // we lock the buffers for a long time, but we need to make sure
+        // that none of the buffers is allowed to expire while we're working on it!
         {
             Glib::Mutex::Lock lock(mutex_);
             size = buffers.size();
         }

         for(int x = 0; x < size; x++)
         {
             {
                 Glib::Mutex::Lock lock(mutex_);
                 temp_frame = buffers[x]->get();
             }

             for(int i = 0; i < HEIGHT; i++)
             {
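One caveat the comment glosses over: size is sampled under the lock, but the per-iteration lock below does not re-check it, so expire() shrinking buffers mid-loop would leave buffers[x] out of range. A hedged fix sketch (an editorial suggestion, not part of the commit), shaped like the loop above:

for(int x = 0; x < size; x++)
{
    Glib::Mutex::Lock lock(mutex_);
    if( x >= (int)buffers.size() )   // expire() may have shrunk the vector
        break;
    temp_frame = buffers[x]->get();

    // ... mix temp_frame into frame as before ...
}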
@@ -200,7 +200,7 @@ void Server::mix()
             }
         }

-        if( counter % 100 == 0 )
+        /*if( counter % 100 == 0 )
         {
             cout << counter << endl;
             for(int i = 0; i < HEIGHT; i++)
@@ -224,11 +224,51 @@ void Server::mix()
                 cout << endl << endl;
-            }
+            } //*/
         }

         usleep( 25000 );
     }
 }

-void Server::control()
+void Server::console()
 {
     while(1)
     {
         usleep( 100000 );
     }
 }

+int Server::get_size()
+{
+    Glib::Mutex::Lock lock(mutex_);
+    return buffers.size();
+}
+
+/* this expires buffers if they haven't been updated in a long time,
+ * therefore allowing a new source to be added */
+void Server::expire()
+{
+    while(1)
+    {
+        {
+            Glib::Mutex::Lock lock(mutex_);
+            time(&currenttime);
+            for(int i = 0; i < buffers.size(); i++)
+            {
+                if( difftime( currenttime, times[i] ) > BUFTIMEOUT )
+                {
+                    cout << "buffer " << i << " will now expire\n";
+                    delete buffers[i];
+                    buffers.erase(buffers.begin()+i);
+                    times.erase(times.begin()+i);
+                    endpoints.erase(endpoints.begin()+i);
+
+                    // element i has been deleted, i-- is required
+                    i--;
+                }
+            }
+        }
+        usleep( 1000000 );
+    }
+}
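expire() relies on the classic erase-while-iterating pattern: after vector::erase at index i, the next element slides into slot i, so the i-- keeps the scan complete. A self-contained sketch with plain seconds-since-update values standing in for the Buffer entries:

#include <iostream>
#include <vector>
using namespace std;

#define BUFTIMEOUT 240

int main()
{
    // seconds since each source last sent a packet (illustrative values)
    vector<int> idle;
    idle.push_back(10); idle.push_back(300);
    idle.push_back(20); idle.push_back(500);

    for(int i = 0; i < (int)idle.size(); i++)
    {
        if( idle[i] > BUFTIMEOUT )
        {
            cout << "entry " << i << " will now expire\n";
            idle.erase(idle.begin() + i);

            // element i has been deleted, i-- is required
            i--;
        }
    }

    for(int i = 0; i < (int)idle.size(); i++)
        cout << idle[i] << ' ';      // prints: 10 20
    cout << endl;
    return 0;
}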
@@ -5,6 +5,9 @@
 #include <string>
 #include <vector>
+#include <list>
+
+#include <glibmm/timeval.h>

 #include <boost/array.hpp>
 #include <boost/asio.hpp>
@@ -24,17 +27,21 @@ public:

     void listen();
     void mix();
+    void expire();
     void launch_threads();
-    void control();
+    void console();
+    int get_size();

 private:
     Glib::Mutex mutex_;

     vector<Glib::Thread*> threads;

     vector<Buffer*> buffers;
     vector<udp::endpoint> endpoints;
+    vector<time_t> times;

     int numbufs;
+    time_t currenttime;

     int port;
 };
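buffers, endpoints, and times are parallel vectors: index i refers to the same source in all three, which is why listen() and expire() must push and erase in lockstep under mutex_. A hypothetical regrouping (not what this commit does) that makes the invariant structural:

#include <ctime>
#include <string>
#include <vector>

struct Buffer {};                // stub for the project's Buffer
typedef std::string endpoint_t;  // stub for udp::endpoint

// One record per source: a single push_back or erase can never leave
// the buffer, endpoint, and timestamp arrays out of step.
struct Source
{
    Buffer*    buffer;
    endpoint_t endpoint;
    time_t     last_update;
};

std::vector<Source> sources;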
@@ -1,8 +1,10 @@
 #ifndef __DEFINES_H_
 #define __DEFINES_H_

-#define BUFLEN 572
-#define NUMBUFS 100
+// four minutes should be enough
+#define BUFTIMEOUT 240
+
+#define NUMBUFS 200

 // one number + newline
 #define HEADEROFFSET 2
@@ -19,6 +21,8 @@
 #define SEGWIDTH 12
 #define SEGCHANNELS 4

+#define BUFLEN HEADEROFFSET+WINDOWOFFSET+ (SEGNUM*SEGCHANNELS+1)*SEGWIDTH
+
 // not used for simplicity
 //#define SEGHEIGHT 1
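BUFLEN is now derived from the packet layout instead of the old hard-coded 572, but the macro body is unparenthesized, so an expression like 2*BUFLEN would expand to 2*HEADEROFFSET + ... and bind wrong. A sketch of the safer form (WINDOWOFFSET and SEGNUM get placeholder values here; their real definitions are outside this diff):

#define HEADEROFFSET 2
#define WINDOWOFFSET 2    // placeholder -- real value not shown in the diff
#define SEGNUM 10         // placeholder -- real value not shown in the diff
#define SEGWIDTH 12
#define SEGCHANNELS 4

// Fully parenthesized: safe to use inside any larger expression.
#define BUFLEN (HEADEROFFSET + WINDOWOFFSET + (SEGNUM*SEGCHANNELS + 1)*SEGWIDTH)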