Compare commits

...
This repository has been archived on 2024-02-23. You can view files and clone it, but cannot push or open issues or pull requests.

1 Commit

Author SHA1 Message Date
450bee6a0d WIP: improve events retrieval and add some statistics 2019-12-25 17:16:04 -05:00

103
events.py
View File

@ -1,16 +1,23 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
from collections import defaultdict import datetime
from lxml import etree import itertools
import os
import re import re
import requests
from common import * import numpy as np
import pandas as pd
import requests
from lxml import etree
from common import doors
from hid.DoorController import ROOT, E
def getStrings(door): def getStrings(door):
"""Parses out the message strings from source.""" """Parses out the message strings from source."""
r = requests.get('https://' + door.ip + '/html/en_EN/en_EN.js', r = requests.get('https://' + door.ip + '/html/en_EN/en_EN.js',
auth=requests.auth.HTTPDigestAuth(door.username, door.password), auth=requests.auth.HTTPDigestAuth(door.username,
door.password),
verify=False) verify=False)
regex = re.compile(r'([0-9]+)="([^"]*)') regex = re.compile(r'([0-9]+)="([^"]*)')
strings = [regex.search(s) for s in r.text.split(';') strings = [regex.search(s) for s in r.text.split(';')
@ -18,46 +25,39 @@ def getStrings(door):
print({int(g.group(1)): g.group(2) for g in strings}) print({int(g.group(1)): g.group(2) for g in strings})
def getMessages(door): def getMessages(door):
# get parameters for messages to get? events = None
# honestly not really sure why this is required, their API is confusing recordCount = 0
parXMLIn = E_plain.VertXMessage( moreRecords = True
E.EventMessages({"action": "LR"}))
parXMLOut = door.doXMLRequest(parXMLIn)
etree.dump(parXMLOut)
if os.path.exists("logs/" + door.name + ".xml"): while moreRecords:
# read last log res = door.doXMLRequest(ROOT(
tree = etree.ElementTree(file="logs/" + door.name + ".xml") E.EventMessages({
root = tree.getroot() "action": "LR",
recordCount = int(parXMLOut[0].attrib["historyRecordMarker"]) - \ "recordCount": str(1000 - recordCount),
int(root[0][0].attrib["recordMarker"]) "recordOffset": str(recordCount),
})))
if events is None:
events = res[0]
else: else:
# first run for this door for event in res[0]:
root = None events.append(event)
recordCount = 1000
if recordCount == 0: recordCount += int(res[0].get('recordCount'))
print("No records to get!") moreRecords = res[0].get('moreRecords') == 'true'
return
print("Getting", recordCount, "records")
# get the actual messages
eventsXMLIn = E_plain.VertXMessage(
E.EventMessages({"action": "LR",
"recordCount": str(recordCount),
"historyRecordMarker": parXMLOut[0].attrib["historyRecordMarker"],
"historyTimestamp": parXMLOut[0].attrib["historyTimestamp"]}))
eventsXMLOut = door.doXMLRequest(eventsXMLIn)
#TODO: handle moreRecords=true
for index, event in enumerate(eventsXMLOut[0]): print(recordCount, moreRecords)
event.attrib["recordMarker"] = str(int(parXMLOut[0].attrib["historyRecordMarker"]) - index)
if root is None: etree.dump(events, pretty_print=True)
tree = etree.ElementTree(eventsXMLOut)
else: return events
for event in reversed(eventsXMLOut[0]):
root[0].insert(0, event) # def stats(events):
tree.write("logs/" + doorName + ".xml") # eventsByDay = {k: list(v) for k, v in
# itertools.groupby(sorted(events, key=get_day), key=get_day)}
# print({k: len(v) for k, v in eventsByDay.items()})
# #print([get_day(e) for e in events])
def main(): def main():
for door in doors.values(): for door in doors.values():
@ -65,3 +65,24 @@ def main():
if __name__ == '__main__': if __name__ == '__main__':
main() main()
events = getMessages(doors["Studio Space"])
# stats(events)
df = pd.DataFrame([dict(e.attrib) for e in events])
idx = pd.to_datetime(df['timestamp'], format='%Y-%m-%dT%H:%M:%S')
df = df.set_index(pd.DatetimeIndex(idx.values)).drop('timestamp', axis=1)
print()
print(df[df.eventType == '2020'].dropna(axis=1, how='all').head())
entriesPerDay = df[df.eventType == '2020'] \
.dropna(axis=1, how='all') \
.resample('1D') \
.count()['eventType']
entriesPerDay.index = entriesPerDay.index.map(lambda t: t.strftime('%Y-%m-%d'))
print(df.groupby(by=['forename', 'surname']).size().sort_values())
entriesPerDay.plot(kind='bar')