map and cloud fix, add cloud debug
@@ -3,7 +3,7 @@ cmake_policy(SET CMP0017 NEW) # need include() with .cmake
 project(PIP)
 set(PIP_MAJOR 3)
 set(PIP_MINOR 15)
-set(PIP_REVISION 1)
+set(PIP_REVISION 2)
 set(PIP_SUFFIX )
 set(PIP_COMPANY SHS)
 set(PIP_DOMAIN org.SHS)
@@ -34,12 +34,12 @@ PICloudServer::PICloudServer(const PIString & path, PIIODevice::DeviceMode mode)
         opened_ = true;
         cvar.notifyOne();
         open_mutex.unlock();
-        // piCoutObj << "connected";
+        piCoutObj << "connected";
         tcp.sendStart();
     });
     CONNECTL(&eth, disconnected, [this](bool) {
         if (is_deleted) return;
-        // piCoutObj << "disconnected";
+        piCoutObj << "disconnected";
         clients_mutex.lock();
         for (auto c: clients_) {
             c->is_connected = false;
@@ -89,7 +89,7 @@ PIVector<PICloudServer::Client *> PICloudServer::clients() const {
 
 
 bool PICloudServer::openDevice() {
-    // piCout << "PICloudServer open device" << path();
+    piCoutObj << "open device" << path();
     if (is_deleted) return false;
     bool op = eth.connect(PINetworkAddress::resolve(path()), false);
     if (op) {
@@ -293,12 +293,6 @@ void PICloudServer::clientDeleted(PIObject * o) {
     clients_mutex.lock();
     clients_.removeOne(c);
     removed_clients_.removeAll(c);
-    auto it = index_clients.makeIterator();
-    while (it.next()) {
-        if (it.value() == c) {
-            index_clients.remove(it.key());
-            break;
-        }
-    }
+    index_clients.removeWhere([c](uint, Client * v) { return v == c; });
     clients_mutex.unlock();
 }
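
// Aside: a minimal sketch of the predicate-based removal the hunk above switches
// to. It uses std::unordered_map and C++20 std::erase_if as stand-ins for the
// PIP container's removeWhere() (assumed analogous); the old iterator loop
// erased at most one matching entry and then broke out of the scan.
#include <unordered_map>

struct Client;

void dropClient(std::unordered_map<unsigned, Client *> & index, const Client * c) {
    // Remove every entry whose value points at the deleted client.
    std::erase_if(index, [c](const auto & kv) { return kv.second == c; });
}
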
@@ -425,6 +425,7 @@ public:
                 --i;
             }
         }
+        return *this;
     }
 
     //! \~english Same as \a remove().
@@ -357,8 +357,7 @@ void PIEthernet::applyTimeout(int fd, int opt, double ms) {
 
 
 void PIEthernet::applyOptInt(int level, int opt, int val) {
-    if (sock < 0) return;
-    ethSetsockoptInt(sock, level, opt, val);
+    if (sock != -1) ethSetsockoptInt(sock, level, opt, val);
     if (sock_s != sock && sock_s != -1) ethSetsockoptInt(sock_s, level, opt, val);
 }
 
@@ -1335,7 +1334,7 @@ void PIEthernet::ethClosesocket(int sock, bool shutdown) {
 
 int PIEthernet::ethSetsockopt(int sock, int level, int optname, const void * optval, int optlen) {
     if (sock < 0) return -1;
-    return setsockopt(sock,
+    auto ret = setsockopt(sock,
                       level,
                       optname,
 #ifdef WINDOWS
@@ -1343,6 +1342,8 @@ int PIEthernet::ethSetsockopt(int sock, int level, int optname, const void * opt
 #endif
                       optval,
                       optlen);
+    if (ret != 0) piCout << "setsockopt error:" << ethErrorString();
+    return ret;
 }
 
 
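
// Aside: a minimal sketch of the error reporting the two hunks above add around
// setsockopt(). It uses plain BSD sockets with errno/strerror() in place of
// PIEthernet's ethErrorString(); the function name here is illustrative only.
#include <cerrno>
#include <cstdio>
#include <cstring>
#include <sys/socket.h>

int setSockOptChecked(int sock, int level, int optname, const void * optval, socklen_t optlen) {
    if (sock < 0) return -1;
    int ret = setsockopt(sock, level, optname, optval, optlen);
    if (ret != 0) std::fprintf(stderr, "setsockopt error: %s\n", std::strerror(errno));
    return ret;
}
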
@@ -28,7 +28,7 @@ void DispatcherServer::start() {
 
 
 void DispatcherServer::picoutStatus() {
-    map_mutex.lock();
+    PIMutexLocker locker(map_mutex);
     piCout << PICoutManipulators::NewLine;
     piCout << "Connections:";
     for (auto c: clients) {
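
// Aside: from here on the hunks replace paired map_mutex.lock()/unlock() calls
// with a scope-bound PIMutexLocker. A minimal sketch of the same RAII idiom,
// using std::mutex and std::lock_guard as stand-ins (assumed analogous):
#include <mutex>

std::mutex map_mutex_sketch;   // illustrative; stands in for the DispatcherServer member

void picoutStatusSketch() {
    std::lock_guard<std::mutex> locker(map_mutex_sketch);  // locks immediately
    // ... read shared state; every return path is covered ...
}                                                          // unlocks when locker leaves scope
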
@@ -40,16 +40,12 @@ void DispatcherServer::picoutStatus() {
         piCout << " " << it.key();
         it.value()->printStatus();
     }
-    map_mutex.unlock();
 }
 
 
 void DispatcherServer::cleanClients() {
-    map_mutex.lock();
-    for (auto c: rmrf_clients) {
-        delete c;
-    }
-    rmrf_clients.clear();
+    PIMutexLocker locker(map_mutex);
+    piDeleteAllAndClear(rmrf_clients);
     for (auto c: clients) {
         if (!index_c_servers.contains(c) && !index_c_clients.contains(c)) {
             if (!rm_clients.contains(c)) rm_clients << c;
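
// Aside: a sketch of what piDeleteAllAndClear() in the hunk above is assumed to
// do for a container of owning raw pointers, written here against std::vector:
#include <vector>

template <typename T>
void deleteAllAndClear(std::vector<T *> & v) {
    for (T * p : v) delete p;  // destroy every element
    v.clear();                 // then drop the now-dangling pointers
}
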
@@ -88,12 +84,11 @@ void DispatcherServer::cleanClients() {
         index_c_clients.remove(c);
         rm_clients.removeAll(c);
     }
-    map_mutex.unlock();
 }
 
 
 void DispatcherServer::updateConnectionsTile(TileList * tl) {
-    map_mutex.lock();
+    PIMutexLocker locker(map_mutex);
     tl->content.clear();
     for (auto c: clients) {
         PIString role = "Invalid";
@@ -118,12 +113,11 @@ void DispatcherServer::updateConnectionsTile(TileList * tl) {
     for (auto c: rmrf_clients) {
         tl->content << TileList::Row("[NULL]" + c->address(), PIScreenTypes::CellFormat());
     }
-    map_mutex.unlock();
 }
 
 
 void DispatcherServer::updateServersTile(TileList * tl, PISet<const DispatcherClient *> servers) {
-    map_mutex.lock();
+    PIMutexLocker locker(map_mutex);
     tl->content.clear();
     auto mi = c_servers.makeIterator();
     while (mi.next()) {
@@ -132,12 +126,11 @@ void DispatcherServer::updateServersTile(TileList * tl, PISet<const DispatcherCl
                                      PIScreenTypes::CellFormat());
         if (servers.contains(mi.value()->getConnection())) tl->selected << (tl->content.size_s() - 1);
     }
-    map_mutex.unlock();
 }
 
 
 void DispatcherServer::updateClientsTile(TileList * tl, PISet<const DispatcherClient *> servers) {
-    map_mutex.lock();
+    PIMutexLocker locker(map_mutex);
     tl->content.clear();
     auto mi = c_servers.makeIterator();
     while (mi.next()) {
@@ -146,26 +139,23 @@ void DispatcherServer::updateClientsTile(TileList * tl, PISet<const DispatcherCl
             tl->content << TileList::Row(c->address(), PIScreenTypes::CellFormat());
         }
     }
-    map_mutex.unlock();
 }
 
 
 const DispatcherClient * DispatcherServer::getConnection(int index) {
+    PIMutexLocker locker(map_mutex);
     const DispatcherClient * ret = nullptr;
-    map_mutex.lock();
     if (index >= 0 && index < clients.size_s()) ret = clients[index];
-    map_mutex.unlock();
     return ret;
 }
 
 
 const DispatcherClient * DispatcherServer::getServer(int index) {
+    PIMutexLocker locker(map_mutex);
     const DispatcherClient * ret = nullptr;
-    map_mutex.lock();
     if (index >= 0 && index < clients.size_s()) {
         if (index_c_servers.contains(clients[index])) ret = clients[index];
     }
-    map_mutex.unlock();
     return ret;
 }
 
@@ -173,14 +163,13 @@ const DispatcherClient * DispatcherServer::getServer(int index) {
 PISet<const DispatcherClient *> DispatcherServer::getServers(PISet<int> ids) {
     PISet<const DispatcherClient *> ret;
     if (ids.isEmpty()) return ret;
-    map_mutex.lock();
+    PIMutexLocker locker(map_mutex);
     int i = 0;
     auto mi = c_servers.makeIterator();
     while (mi.next()) {
         if (ids.contains(i)) ret << mi.value()->getConnection();
         i++;
     }
-    map_mutex.unlock();
     return ret;
 }
 
@@ -196,7 +185,7 @@ void DispatcherServer::disconnectClient(DispatcherClient * client) {
         return;
     }
     piCoutObj << "remove ..." << client->clientId();
-    map_mutex.lock();
+    PIMutexLocker locker(map_mutex);
     clients.removeAll(client);
     rm_clients.removeAll(client);
     CloudServer * cs = index_c_servers.value(client, nullptr);
@@ -221,8 +210,7 @@ void DispatcherServer::disconnectClient(DispatcherClient * client) {
         index_c_clients.remove(client);
     }
     // client->close();
-    rmrf_clients << client;
-    map_mutex.unlock();
+    if (!rmrf_clients.contains(client)) rmrf_clients << client;
     piCoutObj << "remove done" << client->clientId();
 }
 
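
// Aside: the hunk above also guards the append so a client is queued for
// deletion at most once. A sketch of the same guard with standard containers
// (the helper name is illustrative only):
#include <algorithm>
#include <vector>

template <typename T>
void pushUnique(std::vector<T> & v, const T & value) {
    if (std::find(v.begin(), v.end(), value) == v.end()) v.push_back(value);
}
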
@@ -235,7 +223,7 @@ void DispatcherServer::newConnection(PIEthernet * cl) {
     DispatcherClient * client = new DispatcherClient(cl, client_gid++);
     CONNECTU(client, disconnectEvent, this, disconnectClient);
     CONNECTL(client, registerServer, [this](const PIByteArray & sname, DispatcherClient * c) {
-        map_mutex.lock();
+        PIMutexLocker locker(map_mutex);
         CloudServer * cs = c_servers.value(sname, nullptr);
         if (cs) {
             rm_clients << c;
@@ -247,10 +235,9 @@ void DispatcherServer::newConnection(PIEthernet * cl) {
             index_c_servers.insert(c, cs);
             c->authorise(true);
         }
-        map_mutex.unlock();
     });
     CONNECTL(client, registerClient, [this](const PIByteArray & sname, DispatcherClient * c) {
-        map_mutex.lock();
+        PIMutexLocker locker(map_mutex);
         CloudServer * cs = c_servers.value(sname, nullptr);
         if (cs) {
             piCoutObj << "add new Client to Server ->" << sname.toHex();
@@ -261,11 +248,9 @@ void DispatcherServer::newConnection(PIEthernet * cl) {
             rm_clients << c;
             piCoutObj << "Client can't connect to Server ->" << sname.toHex();
         }
-        map_mutex.unlock();
     });
     // piCoutObj << "add client" << client;
-    map_mutex.lock();
+    PIMutexLocker locker(map_mutex);
     clients.push_back(client);
-    map_mutex.unlock();
     client->start();
 }