2019-03-16 18:53:20 +00:00
|
|
|
#!/usr/bin/env python3
|
2020-09-22 17:20:20 +00:00
|
|
|
import base64
|
|
|
|
import json
|
|
|
|
from lxml import etree
|
2020-06-23 19:28:00 +00:00
|
|
|
from urllib.parse import quote
|
2018-06-20 18:54:31 +00:00
|
|
|
|
2019-03-16 18:59:02 +00:00
|
|
|
# Web root the generated stats.json (and the blacklist file) live under.
WORK_DIR = "/var/www/tilde.chat"

# Accumulates the JSON document that is written out at the end of the script.
out = {}

# Keep CDATA sections intact: InspIRCd base64-encodes some values inside
# CDATA markers, and unsanitize() relies on seeing the CDATA wrapper in the
# serialized element.
parser = etree.XMLParser(strip_cdata=False)

# Fetch the live stats XML from the local InspIRCd m_httpd_stats endpoint.
root = etree.parse("http://localhost:8081/stats", parser)

# Sanity-check that we actually received the stats document.
# NOTE(review): `assert` is stripped under `python -O`; an explicit raise
# would be more robust if this ever runs optimized.
assert root.getroot().tag == "inspircdstats"
|
2018-10-15 22:29:34 +00:00
|
|
|
|
2019-03-16 18:59:02 +00:00
|
|
|
# Channel names to hide from the public listing, one per line.
with open(f"{WORK_DIR}/blacklist") as blacklist_file:
    BLACKLIST = blacklist_file.read().splitlines()
|
2018-10-15 22:29:34 +00:00
|
|
|
|
2019-03-16 19:26:15 +00:00
|
|
|
|
2018-10-15 22:29:34 +00:00
|
|
|
def define(name, xps, vfilter=lambda x: x):
    """Store the text of the first element matching *xps* under out[name].

    name: key to set in the module-level ``out`` dict.
    xps: XPath-style expression evaluated against the stats tree ``root``.
    vfilter: conversion applied to the raw text (e.g. ``int``); identity
        by default.
    """
    # Mutating (not rebinding) the module-level dict needs no `global`
    # declaration, so the original `global out` was removed.
    out[name] = vfilter(root.findall(xps)[0].text)
|
|
|
|
|
|
|
|
|
|
|
|
def unsanitize(node, default=""):
    """Return the text of *node*, undoing InspIRCd's CDATA/base64 wrapping.

    m_httpd_stats emits some values base64-encoded inside a CDATA section,
    and it strips the trailing "=" padding, so both are handled here:
    https://github.com/inspircd/inspircd/blob/v3.7.0/src/modules/m_httpd_stats.cpp#L55

    node: an etree element whose .text may be None/empty, plain text, or
        unpadded base64 wrapped in CDATA.
    default: value returned when the node has no text at all.
    """
    if node.text is None or node.text == "":
        return default
    # Detect the CDATA wrapper on the raw serialized bytes directly,
    # instead of string-matching the repr() of a bytes object.
    elif etree.tostring(node).startswith(f"<{node.tag}><![CDATA[".encode()):
        # Restore the "=" padding InspIRCd strips, so the stdlib decoder
        # accepts the value; (-len) % 4 is 0 when no padding is needed.
        padded = node.text + "=" * (-len(node.text) % 4)
        return base64.b64decode(padded).decode("utf-8")
    return node.text
|
2018-10-15 22:29:34 +00:00
|
|
|
|
|
|
|
|
|
|
|
# Network-wide totals from the <general> section of the stats document.
define("usercount", "./general/usercount", int)
define("channelcount", "./general/channelcount", int)

# One <channel> element per channel currently on the network.
schannels = root.findall("./channellist/channel")
|
2019-03-16 18:53:20 +00:00
|
|
|
|
2018-06-20 18:54:31 +00:00
|
|
|
# Build the public channel list, skipping secret (+s) and blacklisted
# channels before doing any further work on them.
channels = []
for schannel in schannels:
    name = unsanitize(schannel.find("channelname"))
    # The first whitespace-separated token of <channelmodes> is the mode
    # letters (any mode parameters follow after a space).
    modes = schannel.find("./channelmodes").text.split()[0]
    if "s" in modes or name in BLACKLIST:
        continue
    channels.append(
        {
            "name": name,
            "topic": unsanitize(schannel.find("./channeltopic/topictext")),
            "usercount": int(schannel.find("usercount").text),
            "webchatlink": "https://web.tilde.chat/?join=" + quote(name),
        }
    )

# Case-insensitive alphabetical ordering for display.
channels.sort(key=lambda x: x["name"].lower())
out["channels"] = channels
|
2019-03-16 18:53:20 +00:00
|
|
|
|
2019-03-16 18:59:02 +00:00
|
|
|
# Serialize everything collected above for the website to serve.
with open(f"{WORK_DIR}/stats.json", "w") as stats_file:
    json.dump(out, stats_file)
|