Toy workflow addition

This commit is contained in:
NikolajDanger
2023-06-09 10:57:08 +02:00
parent 52ac5b6576
commit e4b07c385c
10 changed files with 198 additions and 63 deletions

View File

@ -138,7 +138,8 @@ class BaseHandler:
self.handle(reply)
except Exception as e:
# TODO some error reporting here
pass
if not isinstance(e, TypeError):
raise e
def valid_handle_criteria(self, event:Dict[str,Any])->Tuple[bool,str]:
"""Function to determine given an event defintion, if this handler can
@ -158,7 +159,7 @@ class BaseHandler:
yaml_dict[var] = val
for var, val in rule.pattern.outputs.items():
yaml_dict[var] = val
yaml_dict[rule.pattern.triggering_file] = event[EVENT_PATH]
# yaml_dict[rule.pattern.triggering_file] = event[EVENT_PATH]
# If no parameter sweeps, then one job will suffice
if not rule.pattern.sweep:
@ -253,7 +254,7 @@ class BaseHandler:
"# Check hash of input file to avoid race conditions",
"actual_hash=$(sha256sum $event_path | cut -c -64)",
"echo actual_hash: $actual_hash",
"if [ $given_hash != $actual_hash ]; then",
"if [ \"$given_hash\" != \"$actual_hash\" ]; then",
" echo Job was skipped as triggering file has been modified since scheduling",
" exit 134",
"fi",

View File

@ -0,0 +1,21 @@
#!/bin/bash
# Job wrapper: verify the triggering file is unchanged since the job was
# scheduled (by comparing sha256 hashes), then run the job's recipe script.
# Exits 134 if the file was modified; otherwise propagates the recipe's
# exit status.

# Read job params from the job.yml sitting next to this script.
# tail -n1 takes the last match; cut strips the "key: " prefix.
# All expansions are double-quoted so paths with spaces or glob
# characters do not word-split.
job_dir=$(dirname "$0")
given_hash=$(grep 'file_hash: *' "$job_dir/job.yml" | tail -n1 | cut -c 14-)
event_path=$(grep 'event_path: *' "$job_dir/job.yml" | tail -n1 | cut -c 15-)
echo event_path: "$event_path"
echo given_hash: "$given_hash"

# Check hash of input file to avoid race conditions
actual_hash=$(sha256sum "$event_path" | cut -c -64)
echo actual_hash: "$actual_hash"
if [ "$given_hash" != "$actual_hash" ]; then
  echo Job was skipped as triggering file has been modified since scheduling
  exit 134
fi

# Call actual job script
python3 job_queue/job_FQQUQMTGtqUw/recipe.py >>job_queue/job_FQQUQMTGtqUw/output.log 2>&1
exit $?

View File

@ -0,0 +1,38 @@
# MEOW job metadata record for job_FQQUQMTGtqUw (final status: failed).
# NOTE(review): indentation appears to have been stripped when this file was
# captured — the original is presumably nested YAML; confirm before reuse.
create: 2023-06-07 11:54:48.117191
end: 2023-06-07 11:54:53.231092
# Human-readable failure reason recorded for this job.
error: Job execution returned non-zero.
# The triggering event, including the rule/pattern/recipe serialized with
# !!python/object tags — reading these back requires full (unsafe)
# yaml.load, so this file should only ever be loaded by trusted MEOW code.
event:
event_path: /tmp/tmp3q4q94ee
event_rule: !!python/object:meow_base.core.rule.Rule
name: rule_xjGHQxaaray
pattern: !!python/object:meow_base.patterns.network_event_pattern.NetworkEventPattern
name: echo_pattern
outputs: {}
parameters: {}
recipe: echo_recipe
sweep: {}
triggering_port: 8080
recipe: !!python/object:meow_base.recipes.python_recipe.PythonRecipe
name: echo_recipe
parameters: {}
# Recipe source lines, with {PATH} as an unsubstituted placeholder.
recipe:
- path = {PATH}
- print(path)
requirements: &id001 {}
event_time: 1686131685.3071685
event_type: network
# sha256 of the triggering file; job.sh compares this against the file's
# current hash to detect modification between scheduling and execution.
file_hash: f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2
monitor_base: ''
triggering_port: 8080
id: job_FQQUQMTGtqUw
job_type: python
parameters: {}
pattern: echo_pattern
recipe: echo_recipe
requirements: *id001
start: 2023-06-07 11:54:53.168581
status: failed
# NOTE(review): this command value appears line-wrapped by the capture.
tmp recipe command: python3 job_queue/job_FQQUQMTGtqUw/recipe.py >>job_queue/job_FQQUQMTGtqUw/output.log
2>&1
tmp script command: ./job.sh

View File

@ -0,0 +1,2 @@
# Recipe template for the echo job: {PATH} is a placeholder that is
# presumably substituted with the triggering file's path before the script
# is executed — TODO confirm against the job parameterization code.
path = {PATH}
print(path)

View File

@ -0,0 +1,53 @@
from time import sleep
from meow_base.core.runner import MeowRunner
from meow_base.patterns.network_event_pattern import NetworkMonitor, NetworkEventPattern
from meow_base.recipes.python_recipe import PythonRecipe, PythonHandler
from meow_base.conductors.local_python_conductor import LocalPythonConductor
PORTS = [8080,8181]
def main():
    """Run a toy two-runner MEOW workflow.

    One runner is started per entry in PORTS. Each runner's recipe opens
    a TCP connection to the *other* runner's port and sends a small
    payload, so a single external trigger makes events bounce back and
    forth between the runners. The workflow runs for two minutes before
    all runners are stopped.
    """
    runners = []
    for i, port in enumerate(PORTS):
        # Target the next port in PORTS, wrapping around. Using
        # len(PORTS) instead of a hard-coded 2 keeps this correct if
        # more ports are ever added to the list.
        other_port = PORTS[(i + 1) % len(PORTS)]

        # Gets the script ready: each triggered job pings the other
        # runner's port, re-triggering the cycle.
        script = [
            "import socket",
            "sender = socket.socket(socket.AF_INET, socket.SOCK_STREAM)",
            f"sender.connect(('127.0.0.1', {other_port}))",
            "sender.sendall(b'test')",
            "sender.close()"
        ]

        # Initialize the network monitor: one pattern listening on this
        # runner's port, bound to the echo recipe above.
        patterns = {
            "echo_pattern": NetworkEventPattern(
                "echo_pattern",
                port,
                "echo_recipe"
            )
        }
        recipes = {"echo_recipe": PythonRecipe("echo_recipe", script)}
        monitors = [NetworkMonitor(patterns, recipes)]

        # Initialize the handler and conductor
        handlers = [PythonHandler()]
        conductors = [LocalPythonConductor()]

        # Start the runner
        runner = MeowRunner(monitors, handlers, conductors)
        runner.start()
        runners.append(runner)

    # Let events bounce between the runners for two minutes.
    sleep(120)

    for runner in runners:
        runner.stop()

if __name__ == "__main__":
    main()

View File

@ -2,6 +2,7 @@ import sys
import socket
import threading
import tempfile
import hashlib
from os import unlink
from time import time
@ -16,10 +17,11 @@ from meow_base.core.base_pattern import BasePattern
from meow_base.functionality.meow import create_event
from meow_base.functionality.debug import setup_debugging, print_debug
from meow_base.core.meow import EVENT_KEYS
from meow_base.patterns.file_event_pattern import WATCHDOG_BASE, WATCHDOG_HASH
# network events
EVENT_TYPE_NETWORK = "network"
TRIGGERING_PORT = "triggering port"
TRIGGERING_PORT = "triggering_port"
NETWORK_EVENT_KEYS = {
TRIGGERING_PORT: int,
@ -27,7 +29,8 @@ NETWORK_EVENT_KEYS = {
}
def create_network_event(temp_path:str, rule:Any, time:float,
port: int, extras:Dict[Any,Any]={})->Dict[Any,Any]:
port: int, file_hash: str,
extras:Dict[Any,Any]={})->Dict[Any,Any]:
"""Function to create a MEOW event dictionary."""
return create_event(
EVENT_TYPE_NETWORK,
@ -36,6 +39,8 @@ def create_network_event(temp_path:str, rule:Any, time:float,
time,
extras={
TRIGGERING_PORT: port,
WATCHDOG_HASH: file_hash,
WATCHDOG_BASE: "",
**extras
}
)
@ -120,7 +125,8 @@ class NetworkMonitor(BaseMonitor):
event["tmp file"],
rule,
event["time stamp"],
event["triggering port"]
event["triggering port"],
event["file hash"]
)
print_debug(self._print_target, self.debug_level,
f"Event at {event['triggering port']} hit rule {rule.name}",
@ -206,10 +212,14 @@ class Listener():
tmp_name = tmp.name
with open(tmp_name, "rb") as file_pointer:
file_hash = hashlib.sha256(file_pointer.read()).hexdigest()
event = {
"triggering port": self.port,
"tmp file": tmp_name,
"time stamp": time_stamp
"time stamp": time_stamp,
"file hash": file_hash
}
self.monitor.match(event)

View File

@ -2,6 +2,7 @@ import socket
from multiprocessing import Pipe
from threading import Thread
from time import time, sleep
from numpy import std, floor, log10
from meow_base.patterns.network_event_pattern import NetworkMonitor, \
NetworkEventPattern
@ -65,6 +66,7 @@ def test_network(monitor_count: int, patterns_per_monitor: int,
start_time = time()
for p in range(start_port, port):
for _ in range(events_per_pattern):
send(p)
@ -79,13 +81,18 @@ def test_network(monitor_count: int, patterns_per_monitor: int,
return duration
def sigfigs(num):
    """Format a benchmark duration for display.

    Values of 10 or more are truncated to an int; smaller values are
    rounded to two significant figures. Zero is returned unchanged,
    since log10(0) is undefined (the original crashed on 0).
    """
    if num == 0:
        # Guard: log10(0) raises/returns -inf; zero needs no rounding.
        return 0
    if num < 10:
        # floor(log10(|num|) - 1) is one below the leading digit's
        # exponent, so negating it rounds to two significant figures.
        return round(num, -int(floor(log10(abs(num)) - 1)))
    else:
        return int(num)
def main():
monitors = 1000
monitors = 1
patterns = 1
events = 1
events = 1000
n = 50
n = 100
durations = []
for i in range(n):
@ -93,7 +100,10 @@ def main():
durations.append(test_network(monitors,patterns,events,1024))
sleep(0.5)
print(f"({monitors}, {patterns}, {events}) min: {min(durations)}, max: {max(durations)}, avg: {sum(durations)/n}")
print(f"({monitors}, {patterns}, {events}) min: {min(durations)}, max: {max(durations)}, avg: {sum(durations)/n}, std: {std(durations)}")
# print(f"{sigfigs(min(durations)*1000)}ms & {sigfigs((min(durations)*1000)/events)}ms & {sigfigs(max(durations)*1000)}ms & {sigfigs((max(durations)*1000)/events)}ms & {sigfigs((sum(durations)/n)*1000)}ms & {sigfigs(((sum(durations)/n)*1000)/events)}ms & {sigfigs(std(durations)*1000)}ms")
print(f"{sigfigs(min(durations)*1000)}ms & {sigfigs(max(durations)*1000)}ms & {sigfigs((sum(durations)/n)*1000)}ms & {sigfigs(std(durations)*1000)}ms")
if __name__ == "__main__":
main()

View File

@ -87,8 +87,8 @@ class PythonHandler(BaseHandler):
msg = ""
if type(event[EVENT_RULE].recipe) != PythonRecipe:
msg = "Recipe is not a PythonRecipe. "
if event[EVENT_TYPE] != EVENT_TYPE_WATCHDOG:
msg += f"Event type is not {EVENT_TYPE_WATCHDOG}."
# if event[EVENT_TYPE] != EVENT_TYPE_WATCHDOG:
# msg += f"Event type is not {EVENT_TYPE_WATCHDOG}."
if msg:
return False, msg
else: