get rid of piForeach

- apply some code analyzer recommendations
- ICU flag now checks whether libicu exists
- prepare for more accurate growth of containers (limited power-of-two doubling, then constant-size increases)
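
Most of this diff mechanically replaces the PIP piForeach/piForeachC iteration macros with C++11 range-based for loops: piForeachC becomes a loop over a const reference, piForeach over a mutable one. A minimal self-contained sketch of the mapping, using std::vector/std::string as stand-ins for the PIP container types:

#include <iostream>
#include <string>
#include <vector>

int main() {
    std::vector<std::string> al = {"eth0", "eth1"};

    // Was: piForeachC(PIString & a, al) { ... } -- read-only iteration macro.
    // Now: a range-based for over a const reference.
    for (const auto & a: al)
        std::cout << a << "\n";

    // Was: piForeach(PIString & a, al) { ... } -- mutable iteration macro.
    // Now: a mutable reference, so the loop can modify elements in place.
    for (auto & a: al)
        a += "_seen";

    return 0;
}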
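The container-growth note describes a capacity policy: double the capacity (power of two) while a container is small, then switch to constant-size increments. The commit only announces the preparation, so the sketch below is an assumption of how such a policy typically looks; nextCapacity, POT_LIMIT, and GROW_STEP are hypothetical names and values, not code from this repository:

#include <cstddef>

// Hypothetical growth policy: limited power-of-two doubling, then
// constant-size steps. Threshold and step are illustrative values.
static const std::size_t POT_LIMIT = 4096; // stop doubling past this capacity
static const std::size_t GROW_STEP = 4096; // fixed increment afterwards

std::size_t nextCapacity(std::size_t needed) {
    std::size_t cap = 1;
    while (cap < needed && cap < POT_LIMIT)
        cap *= 2;         // PoT phase: amortized O(1) growth for small sizes
    while (cap < needed)
        cap += GROW_STEP; // linear phase: bounds overshoot for large containers
    return cap;
}

Doubling keeps reallocation cheap while containers are small; capping it avoids wasting up to half the allocation once they grow large.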
@@ -130,7 +130,7 @@ PIPeer::PeerInfo::PeerAddress::PeerAddress(const PINetworkAddress & a, const PIN
 int PIPeer::PeerInfo::ping() const {
     int ret = -1;
-    piForeachC(PeerAddress & a, addresses)
+    for (const auto & a: addresses)
         if (a.ping > 0.) {
             if (ret < 0)
                 ret = piRoundd(a.ping);
@@ -155,7 +155,7 @@ void PIPeer::PeerInfo::destroy() {
 PINetworkAddress PIPeer::PeerInfo::fastestAddress() const {
     double mp = -1.;
     PINetworkAddress ret;
-    piForeachC(PeerAddress & a, addresses) {
+    for (const auto & a: addresses) {
         if (a.ping <= 0.) continue;
         if ((mp < 0) || (mp > a.ping)) {
             mp = a.ping;
@@ -166,6 +166,27 @@ PINetworkAddress PIPeer::PeerInfo::fastestAddress() const {
 }
 
 
+void PIPeer::PeerInfo::addNeighbour(const PIString & n) {
+    if (!neighbours.contains(n)) neighbours << n;
+}
+
+
+void PIPeer::PeerInfo::addNeighbours(const PIStringList & l) {
+    for (const auto & n: l)
+        if (!neighbours.contains(n)) neighbours << n;
+}
+
+void PIPeer::PeerInfo::removeNeighbour(const PIString & n) {
+    neighbours.removeAll(n);
+}
+
+
+void PIPeer::PeerInfo::resetPing() {
+    for (auto & a: addresses)
+        a.ping = -1;
+}
+
+
 REGISTER_DEVICE(PIPeer)
 
 PIPeer::PIPeer(const PIString & n)
@@ -226,7 +247,7 @@ void PIPeer::initEths(PIStringList al) {
     // piCoutObj << "initEths start";
     PIEthernet * ce;
     const PIEthernet::Interface * cint = 0;
-    piForeachC(PIString & a, al) {
+    for (const auto & a: al) {
         ce = new PIEthernet();
         ce->setDebug(false);
         ce->setName("_S.PIPeer.traf_rec_" + a);
@@ -261,7 +282,7 @@ void PIPeer::initMBcasts(PIStringList al) {
     PIString nm;
     al << _PIPEER_MULTICAST_IP;
     // piCoutObj << "initMBcasts start" << al;
-    piForeachC(PIString & a, al) {
+    for (const auto & a: al) {
         // piCout << "mcast try" << a;
         ce = new PIEthernet();
         ce->setDebug(false);
@@ -282,7 +303,7 @@ void PIPeer::initMBcasts(PIStringList al) {
         }
     }
     al.removeAll(_PIPEER_MULTICAST_IP);
-    piForeachC(PIString & a, al) {
+    for (const auto & a: al) {
         ce = new PIEthernet();
         ce->setDebug(false);
         ce->setName("_S.PIPeer.bcast_" + a);
@@ -429,6 +450,42 @@ bool PIPeer::send(const PIString & to, const void * data, int size) {
 }
 
 
+bool PIPeer::send(const PeerInfo * to, const PIByteArray & data) {
+    if (!to) return false;
+    return send(to->name, data.data(), data.size_s());
+}
+
+
+bool PIPeer::send(const PeerInfo * to, const PIString & data) {
+    if (!to) return false;
+    return send(to->name, data.data(), data.size_s());
+}
+
+
+bool PIPeer::send(const PeerInfo * to, const void * data, int size) {
+    if (!to) return false;
+    return send(to->name, data, size);
+}
+
+
+void PIPeer::sendToAll(const PIByteArray & data) {
+    for (const auto & i: peers)
+        send(i.name, data.data(), data.size_s());
+}
+
+
+void PIPeer::sendToAll(const PIString & data) {
+    for (const auto & i: peers)
+        send(i.name, data.data(), data.size_s());
+}
+
+
+void PIPeer::sendToAll(const void * data, int size) {
+    for (const auto & i: peers)
+        send(i.name, data, size);
+}
+
+
 bool PIPeer::sendInternal(const PIString & to, const PIByteArray & data) {
     PIMutexLocker mlocker(peers_mutex);
     PeerInfo * dp = quickestPeer(to);
@@ -484,7 +541,7 @@ bool PIPeer::dataRead(const uchar * readed, ssize_t size) {
             sba << int(6) << to << from << addr << time;
             // piCout << " ping from" << from << addr << ", send back to" << pi->name;
             send_mutex.lock();
-            piForeachC(PeerInfo::PeerAddress & a, pi->addresses) {
+            for (const auto & a: pi->addresses) {
                 if (eth_send.send(a.address, sba)) diag_s.received(sba.size_s());
             }
             send_mutex.unlock();
@@ -501,10 +558,10 @@ bool PIPeer::dataRead(const uchar * readed, ssize_t size) {
         // piCout << "ping reply" << to << from << addr;
         PIMutexLocker plocker(peers_mutex);
         if (to == self_info.name) { // ping echo
-            piForeach(PeerInfo & p, peers) {
+            for (auto & p: peers) {
                 if (!p.isNeighbour()) continue;
                 if (p.name != from) continue;
-                piForeach(PeerInfo::PeerAddress & a, p.addresses) {
+                for (auto & a: p.addresses) {
                     if (a.address != addr) continue;
                     if (a.last_ping >= time) break;
                     ptime = ctime - time;
@@ -662,11 +719,11 @@ bool PIPeer::mbcastRead(const uchar * data, ssize_t size) {
             }
             ch = true;
         }
-        piForeach(PeerInfo & rpeer, rpeers) {
+        for (auto & rpeer: rpeers) {
             // piCout << " to sync " << rpeer.name;
             if (rpeer.name == self_info.name) continue;
             bool exist = false;
-            piForeach(PeerInfo & peer, peers) {
+            for (auto & peer: peers) {
                 if (peer.name == rpeer.name) {
                     exist = true;
                     if (isPeerRecent(peer, rpeer)) {
@@ -706,7 +763,7 @@ bool PIPeer::mbcastRead(const uchar * data, ssize_t size) {
     }
     // piCout << "***";
     // piCout << self_info.name << self_info.neighbours;
-    piForeach(PeerInfo & i, peers) {
+    for (auto & i: peers) {
        if (i.dist == 0) {
            self_info.addNeighbour(i.name);
            i.addNeighbour(self_info.name);
@@ -737,11 +794,11 @@ bool PIPeer::sendToNeighbour(PIPeer::PeerInfo * peer, const PIByteArray & ba) {
 void PIPeer::sendMBcast(const PIByteArray & ba) {
     send_mc_mutex.lock();
     // piCout << "sendMBcast" << ba.size() << "bytes ...";
-    piForeach(PIEthernet * e, eths_mcast) {
+    for (auto * e: eths_mcast) {
         if (e->isOpened())
             if (e->send(ba)) diag_s.sended(ba.size_s());
     }
-    piForeach(PIEthernet * e, eths_bcast) {
+    for (auto * e: eths_bcast) {
         if (e->isOpened())
             if (e->send(ba)) diag_s.sended(ba.size_s());
     }
@@ -750,7 +807,7 @@ void PIPeer::sendMBcast(const PIByteArray & ba) {
         if (eth_lo.send(ba)) diag_s.sended(ba.size_s());
     }
     PIVector<PIEthernet *> cl = eth_tcp_srv.clients();
-    piForeach(PIEthernet * e, cl) {
+    for (auto * e: cl) {
         if (e->isOpened() && e->isConnected())
             if (e->send(ba)) diag_s.sended(ba.size_s());
     }
@@ -763,7 +820,7 @@ void PIPeer::sendMBcast(const PIByteArray & ba) {
 
 
 void PIPeer::removeNeighbour(const PIString & name) {
-    piForeach(PeerInfo & p, peers)
+    for (auto & p: peers)
        p.neighbours.removeOne(name);
    self_info.removeNeighbour(name);
 }
@@ -809,11 +866,11 @@ void PIPeer::pingNeighbours() {
     PIByteArray ba, sba;
     ba << int(5) << self_info.name;
     // piCoutObj << "*** pingNeighbours" << peers.size() << "...";
-    piForeach(PeerInfo & p, peers) {
+    for (auto & p: peers) {
         if (!p.isNeighbour()) continue;
         // piCout << " ping neighbour" << p.name << p.ping();
         send_mutex.lock();
-        piForeach(PeerInfo::PeerAddress & a, p.addresses) {
+        for (auto & a: p.addresses) {
             // piCout << "  address" << a.address << a.wait_ping;
             if (a.wait_ping) {
                 if ((PISystemTime::current(true) - a.last_ping).abs().toSeconds() <= _PIPEER_PING_TIMEOUT) continue;
@@ -891,7 +948,7 @@ void PIPeer::syncPeers() {
     ba << int(3) << self_info.name << self_info << peers;
     peers_mutex.unlock();
     sendMBcast(ba);
-    piForeachC(PIString & p, dpeers) {
+    for (const auto & p: dpeers) {
         peerDisconnected(p);
         peerDisconnectedEvent(p);
     }
@@ -930,6 +987,12 @@ void PIPeer::changeName(const PIString & new_name) {
 }
 
 
+void PIPeer::setTcpServerIP(const PIString & ip) {
+    server_ip = ip;
+    tcpClientReconnect();
+}
+
+
 ssize_t PIPeer::bytesAvailable() const {
     ssize_t ret = 0;
     read_buffer_mutex.lock();
@@ -1054,7 +1117,7 @@ void PIPeer::buildMap() {
     // piCout << "[PIPeer \"" + name_ + "\"] buildMap";
     peers_map.clear();
     addresses_map.clear();
-    piForeach(PeerInfo & i, peers) {
+    for (auto & i: peers) {
         i.trace = -1;
         peers_map[i.name] = &i;
     }
@@ -1065,8 +1128,8 @@ void PIPeer::buildMap() {
     while (!cwave.isEmpty()) {
         nwave.clear();
         ++cwi;
-        piForeachC(PeerInfo * p, cwave) {
-            piForeachC(PIString & nn, p->neighbours) {
+        for (const auto * p: cwave) {
+            for (const auto & nn: p->neighbours) {
                 PeerInfo * np = peers_map.value(nn);
                 if (!np) continue;
                 if (np->trace >= 0) continue;
@@ -1077,14 +1140,14 @@ void PIPeer::buildMap() {
         cwave = nwave;
     }
     PIVector<PeerInfo *> cpath;
-    piForeach(PeerInfo & c, peers) {
+    for (auto & c: peers) {
         cpath.clear();
         cpath << &c;
         cwave << &c;
         for (int w = c.trace - 1; w >= 0; --w) {
             nwave.clear();
-            piForeachC(PeerInfo * p, cwave) {
-                piForeachC(PIString & nn, p->neighbours) {
+            for (const auto * p: cwave) {
+                for (const auto & nn: p->neighbours) {
                     PeerInfo * np = peers_map.value(nn);
                     if (!np) continue;
                     if (np->trace != w) continue;
@@ -1103,3 +1166,10 @@ void PIPeer::buildMap() {
 void PIPeer::tcpClientReconnect() {
     eth_tcp_cli.connect(server_ip, _PIPEER_TCP_PORT);
 }
+
+
+bool PIPeer::hasPeer(const PIString & name) {
+    for (const auto & i: peers)
+        if (i.name == name) return true;
+    return false;
+}