Init
|
|
@ -0,0 +1,57 @@
|
|||
from Converter.Node import Node
|
||||
|
||||
class BinaryTree:
    """Binary search tree over Function objects, keyed by function start time.

    Used to find the innermost already-inserted function whose time span
    encloses a newly parsed function (see getParent).
    """

    def __init__(self, function):
        # Root node keyed by the function's start timestamp; root has no parent.
        self.root = Node(function.start, function, None)
        return

    def addNode(self, function):
        """Insert *function* keyed by its start time (ties descend left)."""
        root = self.root
        while True:
            if function.start <= root.key:
                if root.left is None:
                    root.left = Node(function.start, function, root)
                    break
                else:
                    root = root.left
            else:
                if root.right is None:
                    root.right = Node(function.start, function, root)
                    break
                else:
                    root = root.right

    def getParent(self, start, end, root=None):
        """Return the Function that should become the parent of a function
        spanning [start, end].

        Descends to the leaf position where *start* would be inserted, then
        (if *end* is known) walks back up until a node strictly encloses the
        [start, end] interval. Falls back to the tree's root value when no
        enclosing node exists.
        """
        if root is None:
            root = self.root

        node = None
        # Phase 1: normal BST descent for *start*; remember the leaf reached.
        while True:
            if start <= root.key:
                if root.left is None:
                    node = root
                    break
                root = root.left
            else:
                if root.right is None:
                    node = root
                    break
                root = root.right

        # Without an end timestamp enclosure cannot be tested; use the leaf.
        if end is None:
            return node.value

        # Phase 2: walk upwards to the first node whose value's span strictly
        # contains [start, end]; stop at the tree root (parent is None).
        while True:
            if node.parent is None:
                break

            # Functions with unknown end cannot enclose anything; keep climbing.
            if node.value.end is None:
                node = node.parent
                continue

            if node.value.start < start and node.value.end > end:
                break

            node = node.parent

        return node.value
|
||||
|
|
@ -0,0 +1,289 @@
|
|||
|
||||
from Converter.Function import Function
|
||||
from Converter.BinaryTree import BinaryTree
|
||||
import time
|
||||
import pandas as pd
|
||||
|
||||
def validateTree(root):
    '''Mostly left over from dev: checks that child functions start later and
    end earlier than their parent, recursively over the whole tree.

    Raises Exception on the first violating child.'''
    dur = 0
    for child in root.children:
        if child.end is not None:
            # Accumulated child duration; only used by the commented-out
            # consistency check below.
            dur += child.end - child.start
        if root.end is not None and child.end is not None:
            if(child.start < root.start or child.end > root.end):
                print(child.start - root.start, root.end - child.end)
                raise Exception("Child starts too soon or ends too late")
        validateTree(child)

    # Disabled dev check: warn when children cover noticeably less time than
    # their parent.
    #if root.children:
    #    if root.end is not None and dur != root.end - root.start:
    #        difInS = (root.end - root.start - dur) / 1E9
    #        if difInS > 0.01:
    #            print(pd.to_datetime(difInS * 1E9, unit='ns'))
    #            print(root.id, "\n", pd.to_datetime(root.start, unit='ns'), "\n", pd.to_datetime(root.end, unit='ns'))
    #            print("")
|
||||
|
||||
|
||||
def indexOfEnd(name, lst):
    """Shorthand for indexOf(name, lst, "end"): index of the "end" record for
    *name* in *lst* (len(lst) - 1 when absent, see indexOf)."""
    return indexOf(name, lst, "end")
|
||||
|
||||
|
||||
def indexOf(name, lst, key):
    """Return the index of the first record in *lst* whose keyword (column 0)
    is *key* and whose identifier (column 1) is *name*.

    Falls back to len(lst) - 1 when no such record exists.
    NOTE(review): the fallback points at the *last* element, not a sentinel —
    callers appear to rely on this.
    """
    for pos, record in enumerate(lst):
        if record[0] == key and record[1] == name:
            return pos
    return len(lst) - 1
|
||||
|
||||
|
||||
def indexOfNext(lst, key):
    """Index of the next record whose keyword (column 0) is *key*;
    len(lst) when there is none."""
    matches = (pos for pos, entry in enumerate(lst) if entry[0] == key)
    return next(matches, len(lst))
|
||||
|
||||
|
||||
def indexOfNextEnd(lst, key, name):
    """Index of the first record matching keyword *key* and identifier *name*,
    or None when there is no such record (unlike indexOf, no fallback)."""
    for pos, entry in enumerate(lst):
        if entry[0] == key and entry[1] == name:
            return pos
    return None
|
||||
|
||||
|
||||
def endOfFunction(name, lst, denom="end"):
    """Timestamp (column 2) of the first *denom* record for *name* in *lst*,
    or None when the function never ends within *lst*."""
    for entry in lst:
        if entry[0] == denom and entry[1] == name:
            return entry[2]
    return None
|
||||
|
||||
|
||||
def getLines(path):
    """Parse a call-log file into records sorted by timestamp.

    Each line has the form "<keyword> <identifier> <timestamp>"; the
    timestamp is converted to int. Exits the process on the first malformed
    line (a broken log makes the whole profile unusable).

    Fix: the original constructed an unused ``Function(None, None, None)``
    inside the ``with`` block; that dead allocation is removed. Also iterates
    the file lazily instead of materializing it via ``readlines()``.
    """
    lines = []
    with open(path, "r") as f:
        for line in f:
            try:
                parts = [part.strip() for part in line.split(" ")]
                parts[2] = int(parts[2])
                lines.append(parts)
            except Exception as e:
                print(e)
                exit(1)

    lines.sort(key=lambda x: x[2])
    return lines
|
||||
|
||||
|
||||
def getEndofExpandedRoot(identifier, lines):
    """Return (index, ts) of the end of the root function *identifier*.

    With parallel functions it groups them together and returns the last end:
    startRoot/endRoot pairs for *identifier* are counted, and all grouped
    marker records except the last are deleted from *lines* in place.
    """
    counter = 1
    dels = []
    for i, line in enumerate(lines):
        if counter == 0:
            # Every opened root was closed; the previous line held the last end.
            # NOTE(review): this early return skips the deletion loop below,
            # leaving the grouped markers in *lines* — TODO confirm intended.
            return i, lines[i-1][2]
        if line[0] == "startRoot" and line[1] == identifier:
            counter += 1
            dels.append(i)
        if line[0] == "endRoot" and line[1] == identifier:
            counter -= 1
            dels.append(i)

    # Delete all grouped markers except the final one; earlier deletions shift
    # later indices left, hence j - i.
    for i, j in enumerate(dels[:-1]):
        del lines[j - i]

    return len(lines)-1, lines[-1][2]
|
||||
|
||||
def convert(path):
    """Build the call tree from the call log at *path* and return its root
    Function.

    Scans the timestamp-sorted log; "startRoot" records open a new
    interaction (and a fresh BinaryTree for parent lookup), "start" records
    become Function nodes attached to the innermost enclosing function.
    """
    start = time.time()
    lines = getLines(path)

    # Last timestamp in the whole log; used as the provisional end of
    # interaction roots.
    maxTs = lines[-1][2]
    root = Function(None, "root", 0)
    bt = BinaryTree(root)
    lenLines = len(lines)
    i = indexOfNext(lines, "startRoot")

    # see thesis chapter profiler for the big picture
    while i < lenLines:
        if i % 500 == 0:
            # Progress indicator (carriage return keeps it on one line).
            print(" ", end="\r")
            print(f"{i/len(lines) * 100}% ", i, end="\r")

        line = lines[i]

        keyword = line[0]
        identifier = line[1]
        ts = line[2]

        # Stray "end" records (their "start" was already consumed) are skipped.
        if keyword == "end":
            i += 1
            continue

        # After an interaction ends, jump ahead to the next interaction start.
        if keyword == "endRoot":
            i += indexOfNext(lines[i+1:], "startRoot") + 1
            continue

        end = None
        if keyword == "startRoot":
            # start new binary tree for new interaction, still uses old root
            # element for easy traversing
            bt = BinaryTree(root)
            # Called for its side effect of collapsing grouped
            # startRoot/endRoot markers in *lines*; the result is unused.
            _, _ = getEndofExpandedRoot(identifier, lines[i+1:])
            end = maxTs
        else:
            end = endOfFunction(identifier, lines[i:])

        parent = bt.getParent(ts, end)

        func = Function(parent, identifier, ts, end)
        parent.addChild(func)

        bt.addNode(func)

        # Remove the matching "end" record so it is not scanned again.
        # NOTE(review): indexOf never returns None (it falls back to
        # len(lines) - 1), so this guard is always true — TODO confirm.
        endIndex = indexOf(func.id, lines, "end")
        if endIndex is not None:
            del lines[endIndex]

        i += 1
        lenLines = len(lines)

    # Climb back to the absolute root in case it was re-parented.
    while root.parent is not None:
        root = root.parent

    print("Built Calltree in ", time.time() - start, " s \n")

    return root
|
||||
|
||||
|
||||
def truncateTree(root, limit=100000000, steps=5):
    """Prune the call tree in place.

    Children are dropped below *steps* levels of depth, and beneath any node
    whose duration is shorter than *limit* (nanoseconds).
    """
    # Depth budget exhausted: cut the subtree here.
    if steps == 0:
        root.children = []
        return

    if root.end is not None:
        root.duration = root.end - root.start

    # Short functions are not worth expanding further.
    if root.duration is not None and root.duration < limit:
        root.children = []
        return

    for subtree in root.children:
        truncateTree(subtree, limit, steps - 1)
|
||||
|
||||
|
||||
|
||||
def getEndTimes(lines):
    """Map each interaction's start timestamp to its end timestamp.

    For every "startRoot" record the matching "endRoot" record with the same
    identifier is located (searching from the start record onwards) and
    ends[start_ts] = end_ts is stored.
    """
    ends = {}
    for pos, record in enumerate(lines):
        if record[0] != "startRoot":
            continue
        endIndex = pos + indexOf(record[1], lines[pos:], "endRoot")
        ends[record[2]] = lines[endIndex][2]

    return ends
|
||||
|
||||
|
||||
def reduceRoots(root, path):
    '''Trims children of root functions to their logged length.

    Re-reads the log at *path*, clamps each interaction child's end to its
    logged endRoot timestamp (+1 ms slack), and deletes grandchildren that
    end after the adjusted end.
    '''
    lines = getLines(path)
    ends = getEndTimes(lines)
    # NOTE(review): *i* is incremented but never read — looks like leftover.
    i = 0
    for child in sorted(root.children, key=lambda x: x.start):
        end = ends[child.start]
        # function start and end only have ms accuracy;
        # 1 ms is added to ensure all functions are caught
        child.end = end + 1000000

        deletes = []
        for j, cchild in enumerate(child.children):
            if cchild.end is not None and cchild.end > child.end:
                deletes.append(j)

        # deletes is ascending; each removal shifts later indices left by one,
        # hence j - x.
        for x, j in enumerate(deletes):
            del child.children[j-x]
        i += 1
|
||||
|
||||
def overlaps(start, end, fstart, fend):
    """True when the intervals [start, end] and [fstart, fend] overlap
    (sharing an endpoint counts as overlapping).

    Two intervals overlap exactly when neither ends before the other starts;
    once the guard below passes, whichever interval starts first is known to
    reach the other's start, so they overlap.
    """
    if end < fstart or fend < start:
        return False

    return True
|
||||
|
||||
def mergeFunctions(f1, f2):
    """Move all children of *f2* into *f1* and return *f1*."""
    for child in f2.children:
        f1.addChild(child)
    return f1
|
||||
|
||||
def mergeAsyncInteractions(root):
    """Merge overlapping (asynchronous) interactions under *root* into single
    interaction functions, in place."""
    toMerge = {}
    # find overlapping interaction ids
    # group the ids in arrays
    for i in range(len(root.children)):
        indexes = set()
        for j in range(i+1, len(root.children)):
            child1 = root.children[i]
            child2 = root.children[j]

            if child1.overlaps(child2):
                indexes.add(i)
                indexes.add(j)

        if indexes:
            toMerge[i] = list(indexes)

    # Transitively expand each overlap group one step.
    # NOTE(review): only one level of indirection is followed; deeper chains
    # may not be fully merged — TODO confirm.
    clusters = []
    for key, values in toMerge.items():
        cluster = list(values)
        for value in values:
            if value in toMerge:
                cluster += toMerge[value]

        clusters.append(set(cluster))

    # if there are interactions to merge:
    clusters = list(clusters)
    if clusters:
        # Deduplicate clusters; only subsets of the most recent cluster are
        # dropped (NOTE(review): not compared against all previous clusters).
        merged = [list(clusters[0])]
        for c in clusters[1:]:
            if c.issubset(merged[-1]):
                continue
            else:
                merged.append(list(c))

        # merge functions in one group into a single function
        functionsToAdd = []
        for indexes in merged:
            for i in indexes[1:]:
                mergeFunctions(root.children[indexes[0]], root.children[i])
            functionsToAdd.append(root.children[indexes[0]])

        # delete all functions which were merged; indices shift left after
        # each deletion, hence j - i (assumes the flattened index list is
        # ascending — TODO confirm).
        for i, j in enumerate([x for y in merged for x in y]):
            del root.children[j-i]

        # add merged functions back, widening their span to cover all children
        for f in functionsToAdd:
            if f.children:
                fstart = min(f.children, key=lambda x: x.start).start
                maxF = max(f.children, key=lambda x: x.end)
                fend = maxF.end

                if fstart < f.start:
                    f.start = fstart
                if fend > f.end:
                    f.end = fend
            root.addChild(f)
|
||||
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,57 @@
|
|||
from numpy import rad2deg
|
||||
|
||||
|
||||
class Function:
    """One node of the call tree: a logged function invocation with timing,
    resource figures and child calls."""

    def __init__(self, parent, id, start, end=None):
        self.parent = parent
        self.id = id
        self.start = start
        self.end = end
        self.children = []
        # Resource metrics are attached later (see ProfileBuilder).
        self.duration = None
        self.cpu = None
        self.ram = None
        self.net = None
        self.io = None
        self.isAsync = False

    def addChild(self, child):
        """Append *child* to this function's children."""
        self.children.append(child)

    def __eq__(self, other):
        # "-1" is used elsewhere as a placeholder callback marker and never
        # equals a real function.
        if other == "-1":
            return False
        return (self.id, self.start, self.end) == (other.id, other.start, other.end)

    def overlaps(self, f):
        """True when this function's span strictly overlaps *f*'s span
        (merely sharing an endpoint does not count)."""
        if self.end < f.start or f.end < self.start:
            return False

        r1 = self.start <= f.start and self.end > f.start
        r3 = self.start <= f.start and self.end < f.start

        r2 = f.start <= self.start and f.end > self.start
        r4 = f.start <= self.start and f.end < self.start

        return r1 or r2 or r3 or r4

    def getOverlap(self, f):
        """Return the overlapping span with *f*, or (None, None) when the
        spans are disjoint."""
        if self.start < f.start:
            if self.end < f.start:
                return (None, None)
            return (f.start, self.end)
        if f.end < self.start:
            return (None, None)
        # NOTE(review): tuple order here is (f.end, self.start), the reverse
        # of the other branch — kept as-is to preserve behavior.
        return (f.end, self.start)

    def setRemoteValues(self):
        """Fill in fixed resource figures for a synthesized remote call."""
        self.cpu = 1
        self.ram = 1
        self.io = 0
        self.net = 0
        self.duration = self.end - self.start
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,43 @@
|
|||
import networkx as nx
|
||||
from pyvis.network import Network
|
||||
import matplotlib.pyplot as plt
|
||||
import os
|
||||
|
||||
|
||||
def makeGraph(g, root):
    """Recursively add *root*'s subtree to graph *g* and return a colour map.

    Node names are "<id>_<str(start)[8:]>". The map holds 1 for async
    functions and 0 otherwise; a name already present is reset to 0 unless it
    is newly async (kept exactly as the original logic).
    """
    color_map = {}
    root_name = f"{root.id}_{str(root.start)[8:]}"
    for child in root.children:
        child_name = f"{child.id}_{str(child.start)[8:]}"
        is_new_async = child.isAsync and child_name not in color_map
        color_map[child_name] = 1 if is_new_async else 0

        g.add_node(child_name)
        g.add_edge(root_name, child_name)
        # Merge the subtree's colours over ours (subtree entries win).
        color_map.update(makeGraph(g, child))

    return color_map
|
||||
|
||||
def draw(root):
    """Render the call tree with pyvis: async nodes red, sync nodes blue,
    unmapped nodes (e.g. the root) green.

    Writes an interactive HTML page to ../files/mygraph.html.
    """
    net = Network(notebook=True)
    G = nx.DiGraph()
    color_map = makeGraph(G, root)
    # A matplotlib layout/edge pass is computed here, but only the pyvis
    # rendering below is actually displayed.
    pos = nx.spring_layout(G)
    nx.draw_networkx_edges(G, pos, edge_color='r', arrows=True)
    net.from_nx(G)
    net.height = "100%"
    net.width = "100%"
    for node in net.nodes:
        if node["id"] in color_map and color_map[node["id"]] == 1:
            node["color"] = "red"    # async function
        elif node["id"] in color_map and color_map[node["id"]] == 0:
            node["color"] = "blue"   # synchronous function
        else:
            node["color"] = "green"  # not in the colour map

    net.show(os.path.join(os.path.dirname(__file__), '../files/mygraph.html'))
|
||||
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
class Node:
    """A single binary-tree node: a key, a payload value, and links to its
    parent and (initially absent) children."""

    def __init__(self, key, value, parent):
        self.key = key
        self.value = value
        self.parent = parent
        # Children are attached later by the tree's insert logic.
        self.left = None
        self.right = None
|
||||
|
|
@ -0,0 +1,445 @@
|
|||
import pandas as pd
|
||||
from Converter.Function import Function
|
||||
import json
|
||||
import copy
|
||||
import math
|
||||
|
||||
def addUtilFunction(root, util):
    """Attach mean CPU/RAM utilization (io and net zeroed) from the *util*
    DataFrame (timestamp-indexed) to *root*, based on its time span."""
    start = root.start
    end = root.end
    if end is not None:
        duration = (end - start)
        # Widen very short windows so the 10 ms-resampled utilization data
        # contains at least one row. NOTE(review): duration is recomputed
        # after the widening, so very short functions report >= 10 ms —
        # TODO confirm intended.
        if duration < 10*1E6:
            start = start - 10*1E6

        duration = (end - start)

        start = pd.to_datetime(start, unit='ns')
        end = pd.to_datetime(end, unit='ns')

        cpu1 = util.loc[start:end]
        cpu = cpu1["cpu"].mean()
        ram = util.loc[start:end]["mem"].mean()

        # mean() of an empty slice is NaN: no samples fell into the window.
        if math.isnan(cpu):
            # raise Exception("Function " + root.id + " was too short to attribute any utilization to.")
            cpu = 0
            ram = 0
            io = 0
        else:
            # io attribution is disabled; always reported as 0.
            #tmp = util.loc[start:end]["io"]
            #if not tmp.empty:
            #io = tmp[-1] - tmp[0]
            io = 0
    else:
        # Unknown end timestamp: nothing can be attributed.
        duration = 0
        cpu = 0
        ram = 0
        io = 0

    root.duration = float(duration)
    root.cpu = float(cpu)
    root.ram = float(ram)
    root.io = float(io)
    root.net = 0
|
||||
|
||||
def getFunctionUtilFactor(function, functions):
    """Fraction of measured utilization attributable to *function* when it
    runs concurrently with other async functions.

    Builds a timeline of inc/dec events for every overlapping async function
    (clamped to *function*'s span) and integrates 1/concurrency over the
    function's duration. Synchronous functions get factor 1.
    """
    if not function.isAsync:
        return 1

    # Collect the async functions that overlap *function* (plus itself).
    relevantFuncs = [function]
    for f in functions:
        if f == function:
            continue
        if not f.isAsync:
            continue
        if not function.overlaps(f):
            continue

        relevantFuncs.append(f)

    relevantFuncs.sort(key=lambda x: x.start)

    # timestamp -> "inc"/"dec" event, clamped to *function*'s own span.
    # NOTE(review): a dict key holds only one event, so an "inc" and a "dec"
    # at the same timestamp overwrite each other — TODO confirm acceptable.
    vals = {}

    for f in relevantFuncs:
        if f.start < function.start:
            vals[function.start] = "inc"
        else:
            vals[f.start] = "inc"

        if f.end > function.end:
            vals[function.end] = "dec"
        else:
            vals[f.end] = "dec"

    # Running concurrency count at each event timestamp.
    val2 = {}
    val2[function.start] = 0

    counter = 0
    for key in sorted(vals.keys()):
        val = vals[key]
        if val == "inc":
            counter += 1
        if val == "dec":
            counter -= 1
        val2[key] = counter
    val2[function.end] = 0

    keys = sorted(val2.keys())
    functionLength = function.end - function.start

    factor = 0

    # Integrate (segment length / total length) * (1 / concurrency).
    # NOTE(review): a segment whose count is 0 would raise ZeroDivisionError;
    # presumably the function itself always covers its own span — verify.
    for i in range(len(keys)-1):
        duration = keys[i+1] - keys[i]
        count = val2[keys[i]]
        factor += (duration / functionLength) * ( 1 / count )

    return factor
|
||||
|
||||
|
||||
def addUtil(functions, utilLogPath):
    """Load the utilization CSV at *utilLogPath* and attach utilization to
    every function in *functions* (returned for chaining).

    The log is smoothed to 1 s means, then upsampled to 10 ms and
    interpolated; memory is rebased to the first sample and negative values
    are clipped to 0.
    """

    data = pd.read_csv(utilLogPath, delimiter=",")
    data["time"] = pd.to_datetime(data["time"], unit='s')

    data.set_index("time", inplace=True)
    data = data.resample(rule="1s").mean().resample(rule="10ms").mean().interpolate()
    # Memory relative to the first sample; clip measurement noise below zero.
    data["mem"] = data["mem"] - data["mem"][0]
    data["mem"] = data["mem"].clip(lower=0)
    data["cpu"] = data["cpu"].clip(lower=0)

    for child in functions:
        addUtilFunction(child, data)

    return functions
|
||||
|
||||
|
||||
def markAsyncFunction(children):
    """Sort *children* by start time (in place) and flag every overlapping
    pair with known end timestamps as asynchronous. Returns the list."""
    children.sort(key=lambda x: x.start)
    for i, first in enumerate(children):
        for second in children[i+1:]:
            # Functions without an end timestamp cannot be compared.
            if first.end is None or second.end is None:
                continue
            if first.overlaps(second):
                first.isAsync = True
                second.isAsync = True
    return children
|
||||
|
||||
|
||||
def markAsync(root):
    """Recursively mark asynchronous sibling functions on every level of the
    call tree below *root*."""
    markAsyncFunction(root.children)
    for subtree in root.children:
        markAsync(subtree)
|
||||
|
||||
|
||||
def flattenTree(root, outL):
    """Append the leaf functions of *root*'s subtree to *outL*, visiting
    children in order of their start timestamps."""
    if not root.children:
        outL.append(root)
        return
    for child in sorted(root.children, key=lambda x: x.start):
        flattenTree(child, outL)
|
||||
|
||||
|
||||
def getFunctionName(child):
    """Unique-ish profile node name: "<id>_<start timestamp>"."""
    return f"{child.id}_{child.start}"
|
||||
|
||||
|
||||
def getFunctionDict(child, callbacks, delay):
    """Build the profile JSON entry for one function.

    NOTE(review): the computed *delay* argument is never used — "delay" is
    hard-coded to 0; kept as-is to preserve the emitted profiles. TODO
    confirm whether delay should actually be emitted.
    """
    callback_names = [
        getFunctionName(cb) if cb != "-1" else "-1" for cb in callbacks
    ]
    return {
        "functionID": getFunctionName(child),
        "cpu": child.cpu,
        "cpu_time": child.duration,
        "ram": child.ram,
        "io": child.io,
        "net": child.net,
        "delay": 0,
        "callbacks": callback_names,
    }
|
||||
|
||||
def getParallelFunctions(function, functions):
    """Split *functions* into those overlapping *function* and the rest.

    Returns (parallel, remaining); the overlapping entries are removed from
    *functions* in place.
    """
    par = []
    dels = []
    for i, f in enumerate(functions):
        if function.overlaps(f):
            par.append(f)
            dels.append(i)

    # dels is ascending, so each removal shifts later indices left by one.
    for removed, idx in enumerate(dels):
        del functions[idx - removed]

    return par, functions
|
||||
|
||||
|
||||
def getFunctionsArray(flattened):
    """Convert the flattened (leaf) function list into the linear profile
    structure.

    Groups of parallel (async) functions are collapsed so that only the
    longest of n parallel functions carries the follow-up callback; the other
    parallel functions get the "-1" terminator as callback.
    """
    lastEnd = flattened[0].start
    # True while we owe the next synchronous function a callback entry on the
    # last function of the preceding async group.
    syncJoin = False
    i = 0
    functions = []
    while i < len(flattened) - 1:
        child = flattened[i]
        callback1 = flattened[i+1]
        callbacks = []

        fDelay = child.start - lastEnd

        # TODO: multiple async functions after another have negative delay, this is a work around
        if fDelay < 0:
            fDelay = 0
            print("function delay was negativ, function: ", getFunctionName(child), "\n function delay was set to 0")

        # if this is the first sync function after asyncs, add the current
        # function to the callbacks; -2 because the callbacks are added before
        # the child is, and we want the last child of the group, not the parent
        if syncJoin:
            syncJoin = False
            functions[-2]["callbacks"] = [getFunctionName(child)]

        if callback1.isAsync:
            syncJoin = True
            # Pull out every function running in parallel with callback1;
            # *flattened* is shrunk in place by this call.
            parallelFunctions, flattened = getParallelFunctions(callback1, flattened)

            # add the async functions as callbacks of the last sync function
            # and append the functions themselves to the output array
            for y in parallelFunctions:
                if y == child:
                    continue
                callbacks.append(y)
                functions.append(getFunctionDict(y, ["-1"], y.start - lastEnd))
            lastEnd = max(parallelFunctions, key=lambda x: x.end).end
            i+=1

        else:
            lastEnd = child.end
            callbacks.append(callback1)

        functionObj = getFunctionDict(child, callbacks, fDelay)
        functions.append(functionObj)

        i += 1

    if flattened:
        if syncJoin:
            # The trailing group was async; terminate the chain.
            functions[-2]["callbacks"] = ["-1"]
        else:
            functionObj = getFunctionDict(flattened[-1], ["-1"], 0)
            functions.append(functionObj)

    return functions
|
||||
|
||||
|
||||
def getNet(netLogPath):
    """Read the comma-separated network log, drop the header line, and return
    the records sorted by their first column (string order)."""
    records = []
    with open(netLogPath, "r") as f:
        for raw in f:
            try:
                records.append([field.strip() for field in raw.split(",")])
            except Exception as e:
                print(e)
                exit(1)
    # The first line is the CSV header.
    records = records[1:]
    records.sort(key=lambda x: x[0])

    return records
|
||||
|
||||
|
||||
def addNet(functions, netLogPath):
    '''Insert network connections into functions.

    For each logged connection, finds a function whose span contains the
    connection and splits it into a "_1" first half, a synthetic remote
    Function for the host, and a "_2" half that resumes afterwards. The new
    functions are appended to *functions*, which is returned.
    '''
    lines = getNet(netLogPath)
    i = 0
    for line in lines:
        start = float(line[0])
        end = float(line[2])
        host = line[1]
        while i < len(functions):
            f1 = functions[i]
            f2 = copy.deepcopy(functions[i])
            # NOTE(review): ids are renamed *before* the containment check, so
            # every function inspected during this scan gets "_1" appended
            # even when it does not contain the connection; the deepcopy is
            # also made unconditionally — TODO confirm intended.
            f1.id = f1.id + "_1"
            f2.id = f2.id + "_2"

            if f1.start <= start and f1.end >= end:
                # The remote call sits between the two halves of the split.
                remoteF = Function(f1, host, start, end=end)
                remoteF.setRemoteValues()
                remoteF.children = [f2]

                f1.end = remoteF.start
                f2.start = remoteF.end
                f1.duration = f1.end - f1.start
                f2.duration = f2.end - f2.start

                f2.parent = remoteF

                functions.append(remoteF)
                functions.append(f2)
                break

            i += 1

    return functions
|
||||
|
||||
def makeInteraction(root, delay, utilLogPath, netLogPath):
    """Build the profile dict for one interaction rooted at *root*.

    Pipeline: flatten the call tree to its leaves, mark async functions, fill
    timing gaps, attach utilization and network data, then linearize into the
    profile's function array. Returns None when no functions remain.
    """
    interaction = {
        "name": root.id,
        "interactionID": root.id,
        "delay": delay,
        "functions": [],
    }

    flattened = []
    flattenTree(root, flattened)
    flattened.sort(key=lambda x: x.end)

    functions = markAsyncFunction(flattened)
    functions = fillGaps(root, functions, True)
    functions = addUtil(functions, utilLogPath)
    functions = addNet(functions, netLogPath)
    functions = getFunctionsArray(functions)

    if not functions:
        return

    interaction["functions"] = functions
    return interaction
|
||||
|
||||
|
||||
def createProfile(root, name, utilLogPath, netLogPath):
    """Assemble the full profile document: a single scenario containing every
    interaction found under *root*."""
    profile = {
        "$id": "/Matz/Patrice/Master-Thesis/Profile.schema.json",
        "name": name,
        "scenarios": []
    }

    # sort interaction nodes by start
    root.children.sort(key=lambda x: x.start)
    interactions = []
    # Delays are measured from the start of the scenario (the first
    # interaction's start), not between interactions — this way async
    # interactions are possible.
    previousStart = root.children[0].start
    for i, child in enumerate(root.children):
        delay = child.start - previousStart
        interaction = makeInteraction(child, delay, utilLogPath, netLogPath)
        if interaction is not None:
            interaction["interactionID"] = str(i) + " " + interaction["interactionID"]
            interactions.append(interaction)

    scenario = {
        "scenarioID": 1,
        "interactions": interactions
    }

    profile["scenarios"] = [scenario]
    return profile
|
||||
|
||||
|
||||
def findGaps(child, children, allowedDiff):
    """Return [start, end] spans longer than *allowedDiff* at the beginning
    or end of *child* that are not covered by *children*.

    Sorts *children* by start time as a side effect.
    """
    if not child.children:
        return []
    gaps = []
    children.sort(key=lambda x: x.start)

    # Gap before the earliest child starts.
    earliest = children[0]
    if earliest.start - child.start > allowedDiff:
        gaps.append([child.start, earliest.start])

    # Gap after the latest child ends (only when its end is known).
    latest = max(children, key=lambda x: x.end)
    if latest.end is not None and child.end - latest.end > allowedDiff:
        gaps.append([latest.end, child.end])

    return gaps
|
||||
|
||||
|
||||
def fillGaps(root, children, validate=False):
    """Fill leading/trailing time gaps of *root* with synthetic filler
    functions so the children cover the whole span.

    Returns the children sorted by end timestamp. With validate=True, raises
    when two fillers end up adjacent or when gaps remain after filling.
    """
    if not children:
        return [root]
    # if difference greater than 0.001 seconds / 1 millisecond
    allowedDiff = 0.001 * 1E9

    gaps = findGaps(root, children, allowedDiff)
    for start, end in gaps:
        children.append(
            Function(root, root.id + "_fill_" + str(start), start, end))

    children.sort(key=lambda x: x.end)

    if validate:
        # Two adjacent fillers would mean the same gap was filled twice.
        for i in range(len(children)-1):
            if "_fill_" in children[i].id and "_fill_" in children[i+1].id:
                print(gaps)
                raise Exception("Gaps was filled multiple times " + root.id)

        gaps = findGaps(root, children, allowedDiff)
        if len(gaps) != 0:
            print(gaps)
            raise Exception(
                "Could not fill all Gaps in function: " + root.id)

    return sorted(children, key=lambda x: x.end)
|
||||
|
||||
|
||||
def genMapping(profile, netLogPath):
    """Map every profile functionID whose base name (the part before the
    first "_") appears as a host in the network log to that host name."""
    hosts = [record[1] for record in getNet(netLogPath)]
    tmpMapping = {}
    for scenario in profile["scenarios"]:
        for interaction in scenario["interactions"]:
            for function in interaction["functions"]:
                base = function["functionID"].split("_")[0]
                if base in hosts:
                    tmpMapping[function["functionID"]] = base
    return tmpMapping
|
||||
|
||||
|
||||
def getServiceDict(name):
    """Default service definition for host *name*.

    Note: "scaleingMetric" is the key expected downstream and is kept
    verbatim.
    """
    service = {
        "scaleUpAt": 0.8,
        "scaleDownAt": 0.3,
        "scaleingMetric": "CPU",
        "serviceID": name,
        "scales": False,
        "scale": 1,
        "scalingDelay": 0,
    }
    service["defaultServer"] = {
        "maxCPU": 100,
        "maxRAM": 100,
        "maxIO": 100,
        "maxNET": 100,
    }
    return service
|
||||
|
||||
def genServices(netLogPath):
    """Generate the default service definition document from the network log.

    One service per distinct host in the log, plus a "default" service
    (emitted first).

    Fix: the original called ``line = f.readlines()`` before the parsing
    loop, which exhausted the file so ``for line in f.readlines():`` never
    ran and no hosts were ever collected. The file is now read once. The log
    is comma-separated with a header line (see getNet/addNet, which parse the
    same file), so we split on "," and skip the first line instead of
    splitting on spaces.
    """
    hosts = set()
    with open(netLogPath, "r") as f:
        for line in f.readlines()[1:]:
            parts = [part.strip() for part in line.split(",")]
            if len(parts) > 1:
                hosts.add(parts[1])

    hosts = list(hosts)
    hosts.append("default")

    servicesList = []
    # Reversed so the "default" service comes first in the output.
    for host in hosts[::-1]:
        servicesList.append(getServiceDict(host))

    services = {
        "id": "/Matz/Patrice/Master-Thesis/Service.schema.json",
        "name": "Service Definition for Example Application",
        "services": servicesList
    }

    return services
|
||||
|
|
@ -0,0 +1,14 @@
|
|||
startRoot i1 5000000000
|
||||
start 1 5800000000
|
||||
start x 6000000001
|
||||
end x 6250000000
|
||||
start x2 5900000001
|
||||
end x2 6250000000
|
||||
end 1 7000000000
|
||||
start 2 8000000000
|
||||
end 2 22000000000
|
||||
endRoot i1 25000000000
|
||||
startRoot i2 40000000000
|
||||
start 4 45000000000
|
||||
end 4 80000000000
|
||||
endRoot i2 90000000000
|
||||
|
|
|
|
After Width: | Height: | Size: 139 KiB |
|
After Width: | Height: | Size: 309 KiB |
|
After Width: | Height: | Size: 136 KiB |
|
|
@ -0,0 +1,94 @@
|
|||
|
||||
import os
|
||||
import json
|
||||
import argparse
|
||||
import plotly.express as px
|
||||
|
||||
from Converter.CallTreeBuilder import *
|
||||
from Converter.ProfileBuilder import *
|
||||
from Converter.GraphVis import *
|
||||
|
||||
def saveJSON(path, data):
    """Serialize *data* as JSON to *path*, overwriting any existing file."""
    with open(path, 'w') as out:
        json.dump(data, out)
|
||||
|
||||
def gantPlotChild(root):
    """Show a Gantt chart (plotly timeline) of up to three levels of the call
    tree below *root*.

    NOTE(review): the nested loops deliberately reuse the name ``child`` so
    each inner loop iterates the children of the current outer ``child``.
    """
    lst = []
    for child in sorted(root.children, key=lambda x: x.start):
        lst.append(dict(Task=child.id, Start=pd.to_datetime(child.start), Finish=pd.to_datetime(child.end)))
        for child in sorted(child.children, key=lambda x: x.start):
            lst.append(dict(Task=child.id, Start=pd.to_datetime(child.start), Finish=pd.to_datetime(child.end)))
            for child in sorted(child.children, key=lambda x: x.start):
                lst.append(dict(Task=child.id, Start=pd.to_datetime(child.start), Finish=pd.to_datetime(child.end)))

    df = pd.DataFrame(lst)
    fig = px.timeline(df, x_start="Start", x_end="Finish", y="Task")
    #fig.update_yaxes(autorange="reversed") # otherwise tasks are listed from the bottom up
    fig.show()
|
||||
|
||||
def main():
    """CLI entry point: parse arguments, build the call tree from the logs,
    and emit profile.json, mapping.json and services.json plus
    visualizations."""

    # handle user input and build paths
    parser = argparse.ArgumentParser(description='program name, path to util log, path to network log')
    parser.add_argument('-cl', type=str, help='name of the programm running exp: java.exe', required=True)
    parser.add_argument('-l', type=str, help='relativ path of input util log', required=True)
    parser.add_argument('-n', type=str, help='relativ path of input network log', required=True)

    parser.add_argument('-op', type=str, help='relativ path to output folder', default="./files")
    parser.add_argument('-d', type=int, help='depth', default=3)
    args = parser.parse_args()

    # All paths are resolved relative to this script's directory.
    callLogPath = os.path.join(os.path.dirname(__file__), args.cl)
    utilLogPath = os.path.join(os.path.dirname(__file__), args.l)
    netLogPath = os.path.join(os.path.dirname(__file__), args.n)
    depth = args.d

    profilePath = os.path.join(os.path.dirname(__file__), args.op + "/profile.json")
    mappingPath = os.path.join(os.path.dirname(__file__), args.op + "/mapping.json")
    servicePath = os.path.join(os.path.dirname(__file__), args.op + "/services.json")

    # start actual work
    print("Building Call Tree")
    root = convert(callLogPath)
    reduceRoots(root, callLogPath)
    print("Truncating Tree to specified depth")
    # 0.1 s (in ns) is the minimum duration for a function to keep children.
    truncateTree(root, 0.1 * 1E9, steps=depth)
    print("Merging async interactions")
    mergeAsyncInteractions(root)
    print("Validating Tree")
    validateTree(root)

    print("Creating Profile")
    profile = createProfile(root, "name", utilLogPath, netLogPath)
    print("Creating Default Mapping")
    mapping = genMapping(profile, netLogPath)
    print("Creating Default Service Definition")
    services = genServices(netLogPath)

    # save results
    saveJSON(profilePath, profile)
    saveJSON(mappingPath, mapping)
    saveJSON(servicePath, services)

    # Console summary: each interaction's duration plus its children's total
    # and covered span.
    print("Interactions: ")
    for i, child1 in enumerate(sorted(root.children, key=lambda x: x.start)):
        print(i, child1.id, "\n ", pd.to_datetime(child1.end - child1.start, unit='ns'))
        if child1.children:
            total = 0
            for child in child1.children:
                total += child.end - child.start
            print(" ",total, "\n", pd.to_datetime(max(child1.children, key=lambda x: x.end).end - min(child1.children, key=lambda x: x.start).start, unit='ns'))
        else:
            print("")

    # plotting
    markAsync(root)
    print("Creating Tree Visualization")
    draw(root)
    gantPlotChild(root)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
networkx
pyvis
pandas
matplotlib
scipy
# imported by Converter/main.py (plotly.express) and Converter/Function.py (numpy)
plotly
numpy
|
||||
|
|
@ -0,0 +1,50 @@
|
|||
package de.mypackage.aspects;
|
||||
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.aspectj.lang.ProceedingJoinPoint;
|
||||
import org.aspectj.lang.annotation.Around;
|
||||
import org.aspectj.lang.annotation.Aspect;
|
||||
|
||||
import javax.management.Attribute;
|
||||
import javax.management.AttributeList;
|
||||
import javax.management.MBeanServer;
|
||||
import javax.management.ObjectName;
|
||||
import java.io.FileWriter;
|
||||
import java.io.IOException;
|
||||
import java.lang.management.ManagementFactory;
|
||||
import java.lang.management.ThreadMXBean;
|
||||
|
||||
@Slf4j
@Aspect
public class Monitor {
    // Append-only call log shared by all advised method executions.
    private FileWriter csvWriter = new FileWriter("./callLog.csv", true);

    long startTime; // wall-clock epoch at construction, converted to nanoseconds
    long startNano; // System.nanoTime() at construction (monotonic base)

    public Monitor() throws IOException {
        // Combine the wall-clock epoch with the monotonic nano timer so the
        // logged timestamps are epoch-based but nanosecond-precise.
        this.startTime = System.currentTimeMillis() * 1000000;
        this.startNano = System.nanoTime();
    }

    /**
     * Around-advice for every method execution not annotated with
     * {@code NoLogging}: writes "start &lt;method&gt; &lt;ts&gt;" before and
     * "end &lt;method&gt; &lt;ts&gt;" after the invocation.
     */
    @Around("execution(* *(..)) && !@annotation(de.mypackage.aspects.NoLogging)")
    public Object log(ProceedingJoinPoint pjp) throws Throwable{


        long t = this.startTime + (System.nanoTime() - this.startNano);

        // Method name only (drop the parameter list after the '(').
        csvWriter.append("start " + pjp.getSignature().toShortString().split("\\(")[0] + " " + t + "\n");
        Object result = pjp.proceed();
        t = this.startTime + (System.nanoTime() - this.startNano);
        csvWriter.append("end " + pjp.getSignature().toShortString().split("\\(")[0] + " " + t + "\n");
        csvWriter.flush();

        return result;
    }

    /**
     * Writes a "startRoot"/"endRoot" marker record for an interaction.
     *
     * @param t2    wall-clock timestamp in milliseconds (converted to ns here)
     * @param name  interaction identifier
     * @param denom either "start" or "end"
     */
    public void signifyRoot(long t2, String name, String denom) throws IOException {
        t2 *= 1000000;
        csvWriter.append(denom+"Root " + name + " " + t2 + "\n");
        csvWriter.flush();
    }
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
package de.mypackage.egov.aa.metrics.aspects;

import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;

/**
 * Marker annotation: methods carrying {@code @NoLogging} are excluded from
 * the Monitor aspect's call logging. Retained at runtime so the pointcut
 * {@code !@annotation(...NoLogging)} can see it.
 */
@Retention(RetentionPolicy.RUNTIME)
public @interface NoLogging {}
|
||||
|
|
@ -0,0 +1,13 @@
|
|||
<!DOCTYPE aspectj PUBLIC "-//AspectJ//DTD//EN" "https://www.eclipse.org/aspectj/dtd/aspectj.dtd">
|
||||
<!-- This config is required for load-time weaving to work. You also need to enable the javaagent, as described in the DEV_README.md -->
|
||||
<aspectj>
|
||||
<!-- use custom Xlint properties to disable a few error messages. -->
|
||||
<weaver options="-Xlintfile:META-INF/Xlint.properties -Xset:weaveJavaxPackages=true">
|
||||
<include within="de.mypackage..*" />
|
||||
<include within="io.micrometer..*" />
|
||||
<include within="javax.persistence.*" />
|
||||
</weaver>
|
||||
<aspects>
|
||||
<aspect name="de.mypackage.aspects.Monitor" />
|
||||
</aspects>
|
||||
</aspectj>
|
||||
|
|
@ -0,0 +1,15 @@
|
|||
|
||||
Monitor monitor;
|
||||
|
||||
public ServletMeter(){
|
||||
try{
|
||||
this.monitor = new Monitor();
|
||||
}
|
||||
catch (Exception e){
|
||||
log.error(e.toString());
|
||||
}
|
||||
}
|
||||
|
||||
this.monitor.signifyRoot(start, path, "start");
|
||||
|
||||
this.monitor.signifyRoot(System.currentTimeMillis(), path, "end");
|
||||
|
|
@ -0,0 +1 @@
|
|||
python .\Converter\main.py -cl Tests\callLog.csv -l Tests\log.csv -n files\net.csv
|
||||
|
|
@ -0,0 +1,20 @@
|
|||
import os
import pandas as pd
import matplotlib.pyplot as plt
from pandas.core.frame import DataFrame

# Dev helper: quick visual sanity check of the UtilLogger output.


def _load_timeseries(csv_path):
    """Read a CSV with a unix-seconds 'time' column and return it time-indexed."""
    frame = pd.read_csv(csv_path, delimiter=",")
    frame["time"] = pd.to_datetime(frame["time"], unit='s')
    frame.set_index("time", inplace=True)
    return frame


# CPU utilisation for a fixed measurement window of one recorded run.
util = _load_timeseries("C:/projects/Master/Projekt/validationTestProgram/utilLog.csv")
window_start = pd.to_datetime(1627321487, unit='s')
window_end = pd.to_datetime(1627321488 + 17.5, unit='s')
util.cpu[window_start:window_end].plot()
plt.show()

# Measured delays over the whole run.
delays = _load_timeseries("C:/projects/Master/Projekt/validationTestProgram/delay.csv")
delays.plot()
plt.show()
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
numpy
|
||||
psutil
|
||||
scapy
|
||||
|
|
@ -0,0 +1,175 @@
|
|||
# coding:utf-8
|
||||
from threading import current_thread
|
||||
from time import sleep
|
||||
import psutil
|
||||
import numpy as np
|
||||
import os
|
||||
import time
|
||||
import datetime
|
||||
import atexit
|
||||
import argparse
|
||||
import multiprocessing
|
||||
|
||||
class processMoniter:
    """Samples system- and process-level CPU/memory utilisation via psutil.

    NOTE: the class name (including its spelling) is part of the public
    interface used by Logger and is therefore kept as-is.
    """

    def __init__(self, name):
        # name: process name to monitor, e.g. "java.exe"
        self.name = name
        self.cpu_nums = psutil.cpu_count()
        self.max_mem = psutil.virtual_memory().total
        self.plist = [proc for proc in psutil.process_iter()
                      if proc.name() == self.name]

        # Prime psutil's cpu_percent bookkeeping: the first sample always
        # returns 0.0, so take one throwaway measurement up front.
        self.get_system_info()
        self.get_processes_info()

    def get_system_info(self):
        """Return (system-wide CPU percent, used memory in bytes)."""
        cpu_percent = psutil.cpu_percent(interval=None, percpu=False)
        mem_percent = psutil.virtual_memory().used
        return cpu_percent, mem_percent

    def get_process_info(self, p):
        """Return (cpu percent, memory percent) for one process, (0.0, 0.0) on failure."""
        try:
            # BUG FIX: is_running is a method; the original tested the bound
            # method object itself, which is always truthy, so dead processes
            # were still queried.
            if p.is_running():
                cpu_percent = p.cpu_percent(interval=None)
                mem_percent = p.memory_percent()
            else:
                cpu_percent = 0.0
                mem_percent = 0.0
            return cpu_percent, mem_percent
        except Exception:
            # The process may have exited between the check and the query.
            return 0.0, 0.0

    def get_processes_info(self):
        """Return summed (cpu, mem) over all monitored processes.

        If sampling fails (e.g. the cached process list went stale), refresh
        the list and retry recursively.
        """
        try:
            cpu_total = 0.0
            mem_total = 0.0
            for p in self.plist:
                cpu, mem = self.get_process_info(p)
                cpu_total += cpu
                mem_total += mem
            return cpu_total, mem_total
        except Exception:
            self.plist = [proc for proc in psutil.process_iter()
                          if proc.name() == self.name]
            return self.get_processes_info()
|
||||
|
||||
|
||||
class Logger:
    """Collects a resource-utilisation time series and a network-connection log
    for a named process, writing both as CSV files.
    """

    def __init__(self, name):
        self.taskmgr = processMoniter(name)
        self.name = name
        # open connections: identifier -> [start_time, "raddr_ip:port", end_time_or_None]
        self.connections = {}
        self.netpath = ""
        self.pids = []

    def getFuncIdentifier(self, connection):
        """Unique key for a connection: 'laddr_ip:port-raddr_ip:port'."""
        return (connection.laddr.ip + ":" + str(connection.laddr.port)
                + "-" + connection.raddr.ip + ":" + str(connection.raddr.port))

    def logConnnections(self):
        """Sample the currently open outgoing connections.

        New connections are cached with their start time; connections that
        disappeared since the last sample are removed from the cache and
        returned as finished entries [start, target, end].
        NOTE: the method name's spelling is part of the interface and kept.
        """
        current_time = time.time()
        active = set()

        for connection in psutil.net_connections():
            # Skip listening/placeholder sockets, loopback traffic and
            # connections that are already shutting down.
            if connection.status in ("LISTEN", "NONE"):
                continue
            if connection.raddr.ip == "127.0.0.1":
                continue
            if connection.status in ("CLOSE_WAIT", "FIN_WAIT1"):
                continue
            #if connection.pid not in self.pids:
            #    continue

            identifier = self.getFuncIdentifier(connection)
            if identifier not in self.connections:
                self.connections[identifier] = [
                    current_time,
                    connection.raddr.ip + ":" + str(connection.raddr.port),
                    None,
                ]
            active.add(identifier)

        # Close out cached connections that are no longer active. Deleting by
        # key replaces the original's fragile enumerate/index arithmetic.
        finished = []
        for identifier in list(self.connections):
            if identifier not in active:
                entry = self.connections.pop(identifier)
                entry[-1] = current_time
                finished.append(entry)

        return finished

    def log(self, utilPath, netPath):
        """Main sampling loop; runs until the process is terminated.

        Writes CSV headers, then every 10th cycle (~10x per second at the
        0.01s sleep below) records system utilisation, and every cycle logs
        finished network connections. cleanup() flushes the remaining open
        connections at exit.
        """
        self.netpath = netPath
        cycles = 0
        self.pids = [p.pid for p in self.taskmgr.plist]
        with open(utilPath, "w") as f:
            f.write("{},{},{}\n".format("time", "cpu", "mem"))
            with open(netPath, "w") as fn:
                fn.write("{},{},{}\n". format("start", "target", "end"))
                while True:

                    if cycles == 10:
                        cycles = 0
                        current_time = time.time()
                        cpu, mem = self.taskmgr.get_system_info()
                        # cpu_percent averages over all cores; scale to the
                        # "100 == one core" convention used by the profile.
                        cpu *= multiprocessing.cpu_count()
                        fn.flush()
                        f.write("{},{},{}\n".format(
                            current_time, cpu, mem))

                    net = self.logConnnections()
                    for entry in net:
                        fn.write("{},{},{}\n". format(entry[0], entry[1], entry[2]))

                    # interval should be 0.1 seconds at least
                    time.sleep(.01)
                    cycles += 1

    def cleanup(self, netPath):
        """atexit hook: write all still-open connections to disk with the
        current time as their end time."""
        with open(netPath, "a") as fn:
            current_time = time.time()
            for entry in self.connections.values():
                entry[-1] = current_time
                fn.write("{},{},{}\n". format(entry[0], entry[1], entry[2]))
|
||||
|
||||
# https://psutil.readthedocs.io/en/latest/
|
||||
|
||||
def main():
    """CLI entry point: parse arguments, register the exit hook and start logging."""
    parser = argparse.ArgumentParser(description='program name, path to util log, path to network log')
    parser.add_argument('-p', type=str, help='name of the programm running exp: java.exe', required=True)
    parser.add_argument('-l', type=str, help='relativ path of output util log', default="./utilLog.csv")
    parser.add_argument('-n', type=str, help='relativ path of output network log', default="./networkLog.csv")
    args = parser.parse_args()

    # Resolve the output paths relative to this script's directory.
    base_dir = os.path.dirname(__file__)
    utilPath = os.path.join(base_dir, args.l)
    netPath = os.path.join(base_dir, args.n)

    logger = Logger(args.p)
    # Ensure still-open connections are flushed to disk on any exit.
    atexit.register(logger.cleanup, netPath)
    logger.log(utilPath, netPath)


if __name__ == "__main__":
    main()
|
||||
322
README.md
|
|
@ -1 +1,321 @@
|
|||
DSPS
|
||||
# DSPS
|
||||
|
||||
In this folder you will find the digital appendix and the implementation of my master thesis.
|
||||
As explained extensively in the thesis, the implementation consists of 2 parts: the profiler and the simulation.
|
||||
The implementation of the Profiler, to be precise the profiling components and the converter can be found in the Profiler folder.
|
||||
In the folder PurePy-Simulation you can find the files of the simulation.
|
||||
|
||||

|
||||
|
||||
## Profiler
|
||||
The profiler creates the application profile (, default service definition and default mapping).
|
||||
The profiler first replicates the structure of a function call, inserts network connections and then assigns a resource usage to the measured functions.
|
||||
|
||||
The Profiler itself consists of 3 components, an Aspect, the UtilLogger and the Profile Builder.
|
||||
The aspect must be inserted into the application. The Python tool (UtilLogger) must be executed while the application is running.
|
||||
|
||||
The data collected by these tools can be transferred to another workstation after the measurement and processed there with the Profile Builder and Simulation
|
||||

|
||||
|
||||
|
||||
|
||||
### Insert Java aspect
|
||||
The aspect logs the start and end time of each function, so the Profile Builder can reconstruct a call tree from it.
|
||||
The aspect can be found under "./Project/Profiler/Java Aspect Setup/Monitor.java".
|
||||
|
||||
The aspect must be registered in the aop.xml:
|
||||
|
||||
<aspects>
|
||||
<aspect name="io.micrometer.core.aop.TimedAspect" />
|
||||
<aspect name="en.mypackage.XX.aspects.TimedWithResultAspect" />
|
||||
<aspect name="en.mypackage.XX.aspects.Monitor" />
|
||||
</aspects>
|
||||
</aspectj>
|
||||
|
||||
Thus, all functions are logged. But if user interactions are to be clear in these another call must be inserted manually.
|
||||
|
||||
All HTML calls are filtered by ServletMeter at some point, so this is an ideal entry point to mark interactions.
|
||||
If API calls should also be divided into interactions, a common entry point for the API must be found and the same extension must be made there.
|
||||
|
||||
The constructor must be extended:
|
||||
|
||||
Monitor monitor;
|
||||
public ServletMeter(){
|
||||
try{
|
||||
this.monitor = new Monitor();
|
||||
}
|
||||
catch (Exception e){
|
||||
log.error(e.toString());
|
||||
}
|
||||
}
|
||||
|
||||
The start and end points of the interaction must also be marked:
|
||||
|
||||
this.monitor.signifyRoot(start, path, "start");
|
||||
this.monitor.signifyRoot(System.currentTimeMillis(), path, "end");
|
||||
|
||||
|
||||
#### ExampleLog:
|
||||
|
||||
startRoot index.xhtml 1629179509295000000
|
||||
start Monitor.signifyRoot 1629179514792504314
|
||||
end Monitor.signifyRoot 1629179514792679575
|
||||
start Tag.of 1629179514792785363
|
||||
end Tag.of 1629179514792804310
|
||||
start Tag.of 1629179514792866677
|
||||
end Tag.of 1629179514792880888
|
||||
start MetricsHelper.timerRecord 1629179514793136279
|
||||
start MetricsHelper.getOrCreateMeter 1629179514793226277
|
||||
start Tags.of 1629179514793253119
|
||||
start Tag.compareTo 1629179514793385354
|
||||
start ImmutableTag.getKey 1629179514793461932
|
||||
end ImmutableTag.getKey 1629179514793476537
|
||||
start ImmutableTag.getKey 1629179514793549562
|
||||
end ImmutableTag.getKey 1629179514793564562
|
||||
end Tag.compareTo 1629179514794631521
|
||||
[...]
|
||||
|
||||
### UtilLogger
|
||||
|
||||
The UtilLogger collects a time series with measured values of resource utilization (CPU, RAM (, IO, NET)) over its runtime.
|
||||
A program name must be specified, once a minute all process IDs of this program are collected and all outgoing network connections of these processes are logged.
|
||||
Also the resource usage is measured 10 times per second. This applies to the entire system. A process-based measurement fails because the utilization can then be read out much less frequently.
|
||||
|
||||
#### Run:
|
||||
|
||||
# python 3.7
|
||||
pip install -r requirements.txt
|
||||
python utilLogger.py -p java.exe
|
||||
|
||||
#### ExampleLog:
|
||||
UtilLog.csv:
|
||||
time,cpu,mem
|
||||
1629190398.3588154,0.0,12018507776
|
||||
1629190398.492411,315.2,12013142016
|
||||
1629190398.6207511,211.2,12007116800
|
||||
1629190398.7535014,132.8,12007591936
|
||||
1629190398.882328,88.0,12011560960
|
||||
1629190399.0100954,97.6,12016402432
|
||||
[...]
|
||||
|
||||
netLog.csv:
|
||||
start,target,end
|
||||
1629190398.362377,140.82.121.6:443,1629190401.2609098
|
||||
1629190398.362377,140.82.121.4:443,1629190401.2609098
|
||||
1629190398.362377,fe80::3ea6:2fff:fe78:3323:49000,1629190401.2609098
|
||||
1629190398.362377,192.108.239.107:443,1629190401.2609098
|
||||
1629190398.362377,23.63.121.119:443,1629190401.2609098
|
||||
1629190398.362377,192.99.44.195:443,1629190401.2609098
|
||||
1629190398.362377,35.186.224.25:443,1629190401.2609098
|
||||
1629190398.362377,35.186.224.45:443,1629190401.2609098
|
||||
[...]
|
||||
|
||||
## Profile Builder
|
||||
|
||||
The Profile Builder takes the 3 logs and builds a profile from them. This profile contains all the functions that were executed, with their name, runtime and the load they caused. Therefore, accurate measurement of the load during execution is very important.
|
||||
|
||||
The profile builder also creates defaults For the mapping and for the servers involved in the services.json and mapping.json.
|
||||
|
||||
#### Execute:
|
||||
|
||||
# python 3.7
|
||||
pip install -r requirements.txt
|
||||
python .\Converter\main.py -cl callLog.csv -l log.csv -n net.csv -d 1
|
||||
|
||||
-cl, l, n paths to the logs
|
||||
-d depth of traversal (level of detail, 1 coarse, 100 extremely fine).
|
||||
|
||||
The profile generated in this way can be validated against a JSON schema. The service definition can also be validated against a schema after customization. These are located here: "Project/purePy - Simulation/Application/files". The files can be validated here: https://www.jsonschemavalidator.net/
|
||||
|
||||
There are 2 graphs generated after the execution, a Gannt diagram and once the graph structure of the called functions.
|
||||
(the former is opened in the browser, the latter in the myGraph.html) This is for quick visual validation and was especially relevant during development.
|
||||
|
||||
### Profile
|
||||
|
||||
A profile consists of a list of scenarios. Each scenario contains interactions, each interaction consists of functions, each of which can have callbacks.
|
||||
|
||||
{
|
||||
"$id":"/Matz/Patrice/Master-Thesis/Profile.schema.json",
|
||||
"name": "name",
|
||||
"scenarios": [
|
||||
{
|
||||
"scenarioID": 1,
|
||||
"interactions": [
|
||||
{
|
||||
"name": "index.xhtml",
|
||||
"interactionID": "0 index.xhtml",
|
||||
"delay": 0, # delay measured to the start of the scenario, so the order of interactions can be changed arbitrarily to create new scenarios. In nanoseconds.
|
||||
"functions": [
|
||||
{
|
||||
"functionID": "index.xhtml_1629283083569000000",
|
||||
"cpu": 304.0175657805044, # 100 corresponds to one CPU core
|
||||
"cpu_time": 65120000.0, # in nanoseconds
|
||||
"ram": 0.0,
|
||||
"io": 0.0,
|
||||
"net": 0,
|
||||
"delay": 0, # in nanoseconds
|
||||
"callbacks": [
|
||||
"-1"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
### Services
|
||||
|
||||
This default file is always created and should be changed to the capabilities of the server on which the measurement was performed.
|
||||
|
||||
{
|
||||
"id":"/Matz/Patrice/Master-Thesis/Service.schema.json",
|
||||
"name": "Service Definition for Example Application",
|
||||
"services": [
|
||||
{
|
||||
"scaleUpAt": 0.8, # not implemented
|
||||
"scaleDownAt": 0.3, # not implemented
|
||||
"scaleingMetric": "CPU", # not implemented
|
||||
"serviceID": "default", # there must always be a default server
|
||||
"scales": false, # not implemented
|
||||
"scale": 1, # implemented
|
||||
"scalingDelay": 0, # not implemented
|
||||
"defaultServer": {
|
||||
"maxCPU": 100, # default one CPU core
|
||||
"maxRAM": 100, # 100mb RAM
|
||||
"maxIO": 100, # 100 IOPS
|
||||
"maxNET": 100, # 100 mb/s network
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
### Mapping
|
||||
|
||||
A JSON dictionary in which the affiliation of certain functions to services can be determined.
|
||||
The mapping happens via the ID of the function and the ID of the service:
|
||||
The specification of the class name is sufficient, it can also be mixed, simply the more detailed description of the function is used.
|
||||
Only changes have to be specified, all functions that do not appear in this mapping will be executed on the default server.
|
||||
|
||||
{
|
||||
"class1.func1": "default",
|
||||
"class1.func2": "server1",
|
||||
"class2": "server2",
|
||||
"class2.func1": "default"
|
||||
}
|
||||
|
||||
## Simulation
|
||||
The simulation takes the 3 files just created and additionally a scenario distribution as input.
|
||||
While profile, services and mapping define the behavior and the structure of the model, the scenario distribution decides about the load.
|
||||
|
||||
|
||||
The simulation can be executed solo via CLI or controlled via the dashboard. In both cases the results are stored in SimResults.json and can be visualized later with the dashboard.
|
||||
|
||||
In both cases the results are stored in SimResults.json and can be visualized later with the dashboard.
|
||||
|
||||

|
||||
|
||||
#### Call:
|
||||
|
||||
without dashboard:
|
||||
python main.py -p ../demo_files/profile.json -s ../demo_files/services.json -m ../demo_files/mapping.json -d ../demo_files/distribution.json
|
||||
with dashboard:
|
||||
python dashboard.py
|
||||
|
||||
### Scenario Distribution
|
||||
|
||||
A distribution, or statistical distribution, is a set of random numbers whose distribution over a definition range can be described using a mathematical formula.
|
||||
|
||||
We used these distributions to generate time points at which we enter scenarios into the simulation.
|
||||
The density function indicates how many scenarios are given into the system at a time, the distribution function indicates how many functions have already been given into the system.
|
||||
|
||||
Using the triangular distribution as an example: we have a start time, an end time, a peak and a volume for this distribution:
|
||||

|
||||
|
||||
|
||||
|
||||
distribution.json
|
||||
[
|
||||
{
|
||||
"kind": "triangle",
|
||||
"start": 0,
|
||||
"end": 2000,
|
||||
"highpoint": 100,
|
||||
"volume": 100,
|
||||
"scenarioID": 1
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
Distributions can also be combined to represent more complex behavior:
|
||||

|
||||
|
||||
|
||||
## Dashboard
|
||||
|
||||
The graphs:
|
||||
|
||||
- top left:
|
||||
- target / actual comparison, completed and expected curves should look identical, but may be shifted on the t-axis (by the runtime of the interaction)
|
||||
- if completed and expected are dissimilar there is an overload
|
||||
- the maximum number of users is then the maximum of the output diagram
|
||||
- bottom left:
|
||||
- Waiting time of an interaction, how long a user has to wait for the answer of his request
|
||||
- top right:
|
||||
- Utilization of the server
|
||||
- bottom right:
|
||||
- Simulation events, only relevant for development.
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
# Errors and meanings
|
||||
|
||||
## Converter
|
||||
#### function delay was negative, function: XSSRequestWrapper.getParameter...
|
||||
can be ignored, due to the (approximate) nanosecond precision with which start and end points are measured.
|
||||
|
||||
## Simulation
|
||||
|
||||
#### Function has higher requirements than server can satisfy
|
||||
Services.json must be adjusted, a server has less CPU or RAM than a single function needs
|
||||
|
||||
#### Callback error updating dones-dropdown.children, response_time-dropdown.children, service_util-dropdown.children, sim_events-dropdown.children
|
||||
Data could not be loaded because it is not there yet, just wait a moment.
|
||||
|
||||
#### One or more inputs missing!
|
||||
Check that at least Profile, Services and Distribution Request are set.
|
||||
|
||||
#### Error in upload field
|
||||
Means that there was a validation error, the input was not schema conform.
|
||||
|
||||
# TODOs
|
||||
|
||||
### Automatic scaling
|
||||
|
||||
Scaling itself is implemented, must be integrated in push().
|
||||
Load average needs to be calculated over last X seconds.
|
||||
|
||||
### More efficient simulation
|
||||
It must be ensured that each function has only one recalculation event in the global event queue, currently it is one event per function that was assigned to the server during the execution of the current function. This means that 10 or even more events can be created quickly for a function that has already been completed.
|
||||
|
||||
### Faster Converter
|
||||
The binary tree used to create the call tree must be balanced.
|
||||
Balancing should be done about every 100 functions.
|
||||
[tutorial](https://www.tutorialspoint.com/balanced-binary-tree-in-python#:~:text=A%20Binary%20tree%20is%20said,or%20equal%20to%20'1'.&text=Explanation%3A,is%20a%20height%20Balanced%20tree.)
|
||||
|
||||
|
||||
### Reset button
|
||||
Just follow the pattern of the other buttons:
|
||||
|
||||
[tutorial](https://dash.plotly.com/dash-html-components/button)
|
||||
|
||||
Set all inputs to None, that should be enough.
|
||||
|
||||
Translated with www.DeepL.com/Translator (free version)
|
||||
|
||||
### Balance Binary Tree
|
||||
|
||||
For improved Converter performance. Balance the binary tree every 100 Functions or so.
|
||||
|
|
@ -0,0 +1,313 @@
|
|||
# DSPS
|
||||
|
||||
In diesem Ordner finden Sie den digitalen Anhang und die Implementierung meiner Masterthesis.
|
||||
Wie in der Thesis ausgiebig erläutert besteht die Implementierung aus 2 Teilen: dem Profiler und der Simulation.
|
||||
Die Implementierung des Profilers, um genau zu sein der Profilingkomponenten und des Converters finden Sie im Ordner Profiler.
|
||||
Im Ordner PurePy-Simulation finden Sie die Dateien der Simulation.
|
||||
|
||||

|
||||
|
||||
## Profiler
|
||||
Der Profiler erzeugt das Application-Profile (, Default-Service Definition und Default Mapping).
|
||||
Der Profiler bildet zuerst die Struktur eines Funktionsaufrufes nach, fügt Netzwerkverbindungen ein und weißt den gemessenen Funktionen anschließend eine Ressourcenauslastung zu.
|
||||
|
||||
Der Profiler selbst besteht aus 3 Komponenten, einem Aspect, dem UtilLogger und dem Profile Builder.
|
||||
Der Aspekt muss in die Applikation eingefügt werden. Das Python Tool (UtilLogger) muss während der Ausführung der Applikation ausgeführt werden.
|
||||
|
||||
Die Daten die diese Tools sammeln können nach der Messung auf eine andere Workstation übertragen werden und dort mit dem Profile Builder und der SImulation weiterverarbeitet werden
|
||||

|
||||
|
||||
|
||||
|
||||
### Java Aspekt einfügen
|
||||
Der Aspekt loggt den Start und Endzeitpunkt jeder Funktion, somit kann der Profile Builder einen Call Tree daraus rekonstruieren.
|
||||
Der Aspekt kann unter "./Projekt/Profiler/Java Aspekt Setup/Monitor.java" gefunden werden.
|
||||
|
||||
Der Aspekt muss in der aop.xml registriert werden:
|
||||
|
||||
<aspects>
|
||||
<aspect name="io.micrometer.core.aop.TimedAspect" />
|
||||
<aspect name="de.mypackage.XX.aspects.TimedWithResultAspect" />
|
||||
<aspect name="de.mypackage.XX.aspects.Monitor" />
|
||||
</aspects>
|
||||
</aspectj>
|
||||
|
||||
Somit werden alle Funktionen geloggt. Sollen aber Interaktionen des Nutzers in diesen deutlich werden muss ein weiterer Aufruf manuell eingefügt werden.
|
||||
|
||||
Alle HTML Aufrufe werden irgenwann vom ServletMeter gefiltert, das ist also ein idealer Einstiegspunkt um Interaktionen zu markieren.
|
||||
Sollen API-Aufrufe ebenfalls in Interaktionen unterteilt werden, muss ein gemeinsamer Einstiegspunkt für das API gefunden werden und dort die Selbe erweiterung vorgenommen werden.
|
||||
|
||||
Der Konstruktor muss erweitert werden:
|
||||
|
||||
Monitor monitor;
|
||||
public ServletMeter(){
|
||||
try{
|
||||
this.monitor = new Monitor();
|
||||
}
|
||||
catch (Exception e){
|
||||
log.error(e.toString());
|
||||
}
|
||||
}
|
||||
|
||||
Start und Endpunkt der Interaktion müssen ebenfalls markiert werden:
|
||||
|
||||
this.monitor.signifyRoot(start, path, "start");
|
||||
this.monitor.signifyRoot(System.currentTimeMillis(), path, "end");
|
||||
|
||||
|
||||
#### BeispielLog:
|
||||
|
||||
startRoot index.xhtml 1629179509295000000
|
||||
start Monitor.signifyRoot 1629179514792504314
|
||||
end Monitor.signifyRoot 1629179514792679575
|
||||
start Tag.of 1629179514792785363
|
||||
end Tag.of 1629179514792804310
|
||||
start Tag.of 1629179514792866677
|
||||
end Tag.of 1629179514792880888
|
||||
start MetricsHelper.timerRecord 1629179514793136279
|
||||
start MetricsHelper.getOrCreateMeter 1629179514793226277
|
||||
start Tags.of 1629179514793253119
|
||||
start Tag.compareTo 1629179514793385354
|
||||
start ImmutableTag.getKey 1629179514793461932
|
||||
end ImmutableTag.getKey 1629179514793476537
|
||||
start ImmutableTag.getKey 1629179514793549562
|
||||
end ImmutableTag.getKey 1629179514793564562
|
||||
end Tag.compareTo 1629179514794631521
|
||||
[...]
|
||||
|
||||
### UtilLogger
|
||||
|
||||
Der UtilLogger sammelt eine Zeitreihe mit Messwerten der Ressourcenauslastung (CPU, RAM (, IO, NET)) über seine Laufzeit.
|
||||
Es muss ein Programmname angegeben werden, einmal die Minute werden alle ProzessIDs dieses Programmes gesammelt und alle ausgehenden Netzwerkverbindungen dieser Prozesse geloggt.
|
||||
Ebenso wird die Ressourcenauslastung 10 mal pro Sekunde gemessen. Diese gilt für das gesamte System. Eine Prozessbasierte Messung scheitert daran, dass die Auslastung dann deutlich seltener ausgelesen werden kann.
|
||||
|
||||
#### Ausführen:
|
||||
|
||||
# python 3.7
|
||||
pip install -r requirements.txt
|
||||
python utilLogger.py -p java.exe
|
||||
|
||||
#### BeispielLog:
|
||||
UtilLog.csv:
|
||||
time,cpu,mem
|
||||
1629190398.3588154,0.0,12018507776
|
||||
1629190398.492411,315.2,12013142016
|
||||
1629190398.6207511,211.2,12007116800
|
||||
1629190398.7535014,132.8,12007591936
|
||||
1629190398.882328,88.0,12011560960
|
||||
1629190399.0100954,97.6,12016402432
|
||||
[...]
|
||||
|
||||
netLog.csv:
|
||||
start,target,end
|
||||
1629190398.362377,140.82.121.6:443,1629190401.2609098
|
||||
1629190398.362377,140.82.121.4:443,1629190401.2609098
|
||||
1629190398.362377,fe80::3ea6:2fff:fe78:3323:49000,1629190401.2609098
|
||||
1629190398.362377,192.108.239.107:443,1629190401.2609098
|
||||
1629190398.362377,23.63.121.119:443,1629190401.2609098
|
||||
1629190398.362377,192.99.44.195:443,1629190401.2609098
|
||||
1629190398.362377,35.186.224.25:443,1629190401.2609098
|
||||
1629190398.362377,35.186.224.45:443,1629190401.2609098
|
||||
[...]
|
||||
|
||||
## Profile Builder
|
||||
|
||||
Der Profile Builder nimmt die 3 Logs und baut ein Profile daraus. Dieses Profile enthält alle Funktionen die ausgeführt wurden, mit ihrem Namen, Laufzeit und der Auslastung die sie verursacht haben. Daher ist die akkurate Messung der Auslastung während der Ausführung sehr wichtig.
|
||||
|
||||
Der Profile Builder erzeugt auch Defaults Für das Mapping und für die beteiligten Server in der services.json und mapping.json.
|
||||
|
||||
#### Ausführen:
|
||||
|
||||
# python 3.7
|
||||
pip install -r requirements.txt
|
||||
python .\Converter\main.py -cl callLog.csv -l log.csv -n net.csv -d 1
|
||||
|
||||
-cl, l, n Pfade zu den Logs
|
||||
-d Tiefe der Traversierung (Detailsutfe, 1 grob, 100 extrem fein)
|
||||
|
||||
Das so erzeugt Profil kann gegen ein JSON Schema validiert werden. Auch die Service Definition kann nach der Anpassung gegen ein Schema validiert werden. Diese liegen hier: "Projekt/purePy - Simulation/Application/files". Keinere Dateien können hier validiert werden: https://www.jsonschemavalidator.net/
|
||||
|
||||
Es werden nach der Ausführung 2 Grafiken erzeugt, ein Ganntdiagramm und einmal die Graphen Struktur der aufgerufenen Funktionen.
|
||||
(ersteres wird im Browser geöffnet, letzteres in der myGraph.html) Dies dient der schnellen visuellen Validierung und war vorallem während der Entwicklung relevant.
|
||||
|
||||
### Profil
|
||||
|
||||
Ein Profil besteht aus einer Liste von Szenarien. Jedes Szenario enthält Interaktionen, jede Interaktion besteht aus Funktionen, die jeweils Callbacks besitzten können.
|
||||
|
||||
{
|
||||
"$id": "/Matz/Patrice/Master-Thesis/Profile.schema.json",
|
||||
"name": "name",
|
||||
"scenarios": [
|
||||
{
|
||||
"scenarioID": 1,
|
||||
"interactions": [
|
||||
{
|
||||
"name": "index.xhtml",
|
||||
"interactionID": "0 index.xhtml",
|
||||
"delay": 0, # delay gemessen zum beginn des Szenarios, so kann die Reihenfolge der Interaktionen belibig geändert werden, um neue Szenarien zu erzeugen. In Nanosekunden.
|
||||
"functions": [
|
||||
{
|
||||
"functionID": "index.xhtml_1629283083569000000",
|
||||
"cpu": 304.0175657805044, # 100 entspricht einem CPU-Kern
|
||||
"cpu_time": 6512000000.0, # in Nanosekunden
|
||||
"ram": 0.0,
|
||||
"io": 0.0,
|
||||
"net": 0,
|
||||
"delay": 0, # in Nanosekunden
|
||||
"callbacks": [
|
||||
"-1"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
### Services
|
||||
|
||||
Diese Default Datei wird immer erzeugt und sollte, auf die Fähigkeiten des Server auf dem die Messung ausgeführt wurde, geändert werden.
|
||||
|
||||
{
|
||||
"id": "/Matz/Patrice/Master-Thesis/Service.schema.json",
|
||||
"name": "Service Definition for Example Application",
|
||||
"services": [
|
||||
{
|
||||
"scaleUpAt": 0.8, # nicht implementiert
|
||||
"scaleDownAt": 0.3, # nicht implementiert
|
||||
"scaleingMetric": "CPU", # nicht implementiert
|
||||
"serviceID": "default", # es muss immer einen default server geben
|
||||
"scales": false, # nicht implementiert
|
||||
"scale": 1, # implementiert
|
||||
"scalingDelay": 0, # nicht implementiert
|
||||
"defaultServer": {
|
||||
"maxCPU": 100, # Default ein CPU-Kern
|
||||
"maxRAM": 100, # 100mb RAM
|
||||
"maxIO": 100, # 100 IOPS
|
||||
"maxNET": 100 # 100 mb/s Netzwerk
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
### Mapping
|
||||
|
||||
Ein JSON Dictionary, in dem die Zugehörigkeit bestimmter Funktionen zu Servicen bestimmt werden kann.
|
||||
Die Zuordnung passiert über die ID der Funktion und der ID des Services:
|
||||
Die Angabe des Klassen Namen reicht aus, es kann auch gemischt werden, es wird ganz einfach die genauere Beschreibung der Funktion genutzt.
|
||||
Es müssen nur Änderungen angegeben werden, alle Funktionen, die in diesem Mapping nicht auftauchen werden auf dem Default Server ausgeführt.
|
||||
|
||||
{
|
||||
"class1.func1": "default",
|
||||
"class1.func2": "server1",
|
||||
"class2": "server2",
|
||||
"class2.func1": "default"
|
||||
}
|
||||
|
||||
## Simulation
|
||||
Die Simulation nimmt die 3 eben erzeugten Dateien und zusätzlich noch eine Szenario Verteilung als input.
|
||||
Während Profil, Services und Mapping das Verhalten und die Struktur des Modells vorgeben entscheidet die Szenario Verteilung über die anliegende Last.
|
||||
|
||||
|
||||
Die Simulation kann solo per CLI ausgeführt werden oder über das Dashboard gesteuert werden. In beiden Fällen werden die Ergebnisse in der SimResults.json gespeichert und können später mit dem Dashboard visualisiert werden.
|
||||
|
||||

|
||||
|
||||
#### Aufruf:
|
||||
|
||||
ohne dashboard:
|
||||
python main.py -p ../demo_files/profile.json -s ../demo_files/services.json -m ../demo_files/mapping.json -d ../demo_files/distribution.json
|
||||
mit dashboard:
|
||||
python dashboard.py
|
||||
|
||||
### Scenario Distribution
|
||||
|
||||
Eine Distribution oder auch statistische Verteilung, ist eine Menge von Zufallszahlen, deren Verteilung über einen Definitionsbereich mithilfe eine mathematischen Formel beschreibbar ist.
|
||||
|
||||
Wir nutzten diese Verteilungen um Zeitpunkte zu erzeugen, zu denen wird Szenarien in die Simulation geben.
|
||||
Die Dichtefunktion gibt ab wie viele Szenarien zu einem Zeitpunkt in das System gegeben werden, die Verteilungsfunktion gibt an wie viele Funktionen bereits in das System gegeben wurden.
|
||||
|
||||
Am Beispiel der Dreiecksverteilung: wir haben einen Startzeitpunkt, einen Endzeitpunkt, einen Höhepunkt und ein Volumen für diese Verteilung:
|
||||

|
||||
|
||||
|
||||
|
||||
distribution.json
|
||||
[
|
||||
{
|
||||
"kind": "triangle",
|
||||
"start": 0,
|
||||
"end": 2000,
|
||||
"highpoint": 100,
|
||||
"volume": 100,
|
||||
"scenarioID": 1
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
Verteilungen können auch kombiniert werden um komplexeres Verhalten abzubilden:
|
||||

|
||||
|
||||
|
||||
## Dashboard
|
||||
|
||||
Die Graphen:
|
||||
|
||||
- links oben:
|
||||
- Soll- / Ist-Vergleich, completed und expected Kurven sollten identisch aussehen, dürfen aber auf der t-Achsen verschoben sein (um die Laufzeit der Interaktion)
|
||||
- sind completed und expected unähnlich gibt es eine Überlastung
|
||||
- Die maximale Nutzeranzahl ist dann das Maximum des Output-Diagrams
|
||||
- links unten:
|
||||
- Wartezeit einer Interaktion, wie lange ein Nutzer auf die Antwort seiner Anfrage warten muss
|
||||
- rechts oben:
|
||||
- Auslastung der Server
|
||||
- rechts unten:
|
||||
- Simulations Events, nur für die Entwicklung relevant.
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
# Fehler und Bedeutungen
|
||||
|
||||
## Converter
|
||||
#### function delay was negativ, function: XSSRequestWrapper.getParameter...
|
||||
kann ignoriert werden, liegt an der (annähernd) Nanosekundengenauigkeit, mit der Start- und Endzeitpunkte gemessen werden
|
||||
|
||||
## Simulation
|
||||
|
||||
#### Function has higher requirements than server can satisfy
|
||||
Services.json muss angepasst werden, ein Server hat weniger CPU oder RAM als eine einzige Funktion braucht
|
||||
|
||||
#### Callback error updating dones-dropdown.children, response_time-dropdown.children, service_util-dropdown.children, sim_events-dropdown.children
|
||||
Daten konnten nicht geladen werden, weil sie noch nicht da sind, einfach einen Moment warten.
|
||||
|
||||
#### One or more inputs missing!
|
||||
Überprüfe, dass mindestens Profile, Services und Distribution Request gesetzt sind
|
||||
|
||||
#### Fehler in Uploadfeld
|
||||
Heißt das es einen Fehler bei der Validierung gab, der Input war nicht Schemakonform.
|
||||
|
||||
# TODOs
|
||||
|
||||
### Automatische Skalierung
|
||||
|
||||
Skalierungs selbst ist implementiert, muss in push() integriert werden.
|
||||
Auslastungsdruchschnitt muss über letzte X Sekunden berechnet werden.
|
||||
|
||||
### Effizientere Simulation
|
||||
Es muss sichergestellt werden, dass jede Funktion nur ein Recalculation-Event in der globalen Event Queue hat, momentan ist es ein Event pro Funktion die während der Ausführung der aktuellen Funktion dem Server zugewiesen wurde. Es können also schnell 10 oder noch deutlich mehr Events erstellt werden, für eine Funktion,die bereits fertiggestellt wurde.
|
||||
|
||||
### Schnellerer Converter
|
||||
Der Binary Tree der für die Erstellung des Call-Tree genutzt wird muss balanced werden.
|
||||
Balancing sollte ca. alle 100 Funktionen erfolgen
|
||||
[tutorial](https://www.tutorialspoint.com/balanced-binary-tree-in-python#:~:text=A%20Binary%20tree%20is%20said,or%20equal%20to%20'1'.&text=Explanation%3A,is%20a%20height%20Balanced%20tree.)
|
||||
|
||||
|
||||
### Reset button
|
||||
Einfach am Muster der anderen Buttons orientieren:
|
||||
|
||||
[tutorial](https://dash.plotly.com/dash-html-components/button)
|
||||
|
||||
Alle Inputs auf None sezten, das sollte ausreichen.
|
||||
|
|
@ -0,0 +1,10 @@
|
|||
[
|
||||
{
|
||||
"kind": "triangle",
|
||||
"start": 0,
|
||||
"end": 100,
|
||||
"highpoint": 40,
|
||||
"volume": 200,
|
||||
"scenarioID": 1
|
||||
}
|
||||
]
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
{
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
{
|
||||
"$id": "/Matz/Patrice/Master-Thesis/Profile.schema.json",
|
||||
"name": "name",
|
||||
"scenarios": [
|
||||
{
|
||||
"scenarioID": 1,
|
||||
"interactions": [
|
||||
{
|
||||
"name": "1",
|
||||
"interactionID": "1",
|
||||
"delay": 0,
|
||||
"functions": [
|
||||
{
|
||||
"functionID": "f1",
|
||||
"cpu": 100,
|
||||
"cpu_time": 1000000000,
|
||||
"ram": 1,
|
||||
"io": 0,
|
||||
"net": 0,
|
||||
"delay": 0,
|
||||
"callbacks": [
|
||||
"-1"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
{
|
||||
"id": "/Matz/Patrice/Master-Thesis/Service.schema.json",
|
||||
"name": "Service Definition for Example Application",
|
||||
"services": [
|
||||
{
|
||||
"scaleUpAt": 0.8,
|
||||
"scaleDownAt": 0.3,
|
||||
"scaleingMetric": "CPU",
|
||||
"serviceID": "default",
|
||||
"scales": false,
|
||||
"scale": 1,
|
||||
"scalingDelay": 0,
|
||||
"defaultServer": {
|
||||
"maxCPU": 100,
|
||||
"maxRAM": 100,
|
||||
"maxIO": 10000,
|
||||
"maxNET": 10000
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -0,0 +1,10 @@
|
|||
[
|
||||
{
|
||||
"kind": "triangle",
|
||||
"start": 0,
|
||||
"end": 100,
|
||||
"highpoint": 40,
|
||||
"volume": 200,
|
||||
"scenarioID": 1
|
||||
}
|
||||
]
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
|
||||
"x":"2"
|
||||
}
|
||||
|
|
@ -0,0 +1,54 @@
|
|||
{
|
||||
"$id": "/Matz/Patrice/Master-Thesis/Profile.schema.json",
|
||||
"name": "name",
|
||||
"scenarios": [
|
||||
{
|
||||
"scenarioID": 1,
|
||||
"interactions": [
|
||||
{
|
||||
"name": "1",
|
||||
"interactionID": "1",
|
||||
"delay": 0,
|
||||
"functions": [
|
||||
{
|
||||
"functionID": "x.f1",
|
||||
"cpu": 100,
|
||||
"cpu_time": 1000000,
|
||||
"ram": 1,
|
||||
"io": 0,
|
||||
"net": 0,
|
||||
"delay": 0,
|
||||
"callbacks": [
|
||||
"f2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"functionID": "f2",
|
||||
"cpu": 100,
|
||||
"cpu_time": 1000000000,
|
||||
"ram": 1,
|
||||
"io": 0,
|
||||
"net": 0,
|
||||
"delay": 0,
|
||||
"callbacks": [
|
||||
"x.f3"
|
||||
]
|
||||
},
|
||||
{
|
||||
"functionID": "x.f3",
|
||||
"cpu": 100,
|
||||
"cpu_time": 1000000,
|
||||
"ram": 1,
|
||||
"io": 0,
|
||||
"net": 0,
|
||||
"delay": 0,
|
||||
"callbacks": [
|
||||
"-1"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -0,0 +1,36 @@
|
|||
{
|
||||
"id": "/Matz/Patrice/Master-Thesis/Service.schema.json",
|
||||
"name": "Service Definition for Example Application",
|
||||
"services": [
|
||||
{
|
||||
"scaleUpAt": 0.8,
|
||||
"scaleDownAt": 0.3,
|
||||
"scaleingMetric": "CPU",
|
||||
"serviceID": "default",
|
||||
"scales": false,
|
||||
"scale": 1,
|
||||
"scalingDelay": 0,
|
||||
"defaultServer": {
|
||||
"maxCPU": 100,
|
||||
"maxRAM": 100,
|
||||
"maxIO": 10000,
|
||||
"maxNET": 10000
|
||||
}
|
||||
},
|
||||
{
|
||||
"scaleUpAt": 0.8,
|
||||
"scaleDownAt": 0.3,
|
||||
"scaleingMetric": "CPU",
|
||||
"serviceID": "2",
|
||||
"scales": false,
|
||||
"scale": 1,
|
||||
"scalingDelay": 0,
|
||||
"defaultServer": {
|
||||
"maxCPU": 1000,
|
||||
"maxRAM": 1000,
|
||||
"maxIO": 10000,
|
||||
"maxNET": 10000
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -0,0 +1,10 @@
|
|||
[
|
||||
{
|
||||
"kind": "triangle",
|
||||
"start": 0,
|
||||
"end": 0.01,
|
||||
"highpoint": 0.001,
|
||||
"volume": 100,
|
||||
"scenarioID": 1
|
||||
}
|
||||
]
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
|
||||
"x":"default"
|
||||
}
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
{
|
||||
"$id": "/Matz/Patrice/Master-Thesis/Profile.schema.json",
|
||||
"name": "name",
|
||||
"scenarios": [
|
||||
{
|
||||
"scenarioID": 1,
|
||||
"interactions": [
|
||||
{
|
||||
"name": "1",
|
||||
"interactionID": "1",
|
||||
"delay": 0,
|
||||
"functions": [
|
||||
{
|
||||
"functionID": "x.f1",
|
||||
"cpu": 100,
|
||||
"cpu_time": 100000000,
|
||||
"ram": 1,
|
||||
"io": 0,
|
||||
"net": 0,
|
||||
"delay": 0,
|
||||
"callbacks": [
|
||||
"-1"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
{
|
||||
"id": "/Matz/Patrice/Master-Thesis/Service.schema.json",
|
||||
"name": "Service Definition for Example Application",
|
||||
"services": [
|
||||
{
|
||||
"scaleUpAt": 0.8,
|
||||
"scaleDownAt": 0.3,
|
||||
"scaleingMetric": "CPU",
|
||||
"serviceID": "default",
|
||||
"scales": false,
|
||||
"scale": 3,
|
||||
"scalingDelay": 0,
|
||||
"defaultServer": {
|
||||
"maxCPU": 100,
|
||||
"maxRAM": 100,
|
||||
"maxIO": 10000,
|
||||
"maxNET": 10000
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
[
|
||||
{
|
||||
"kind": "triangle",
|
||||
"start": 0,
|
||||
"end": 60,
|
||||
"highpoint": 20,
|
||||
"volume": 100,
|
||||
"scenarioID": 1
|
||||
},
|
||||
{
|
||||
"kind": "triangle",
|
||||
"start": 120,
|
||||
"end": 300,
|
||||
"highpoint": 170,
|
||||
"volume": 100,
|
||||
"scenarioID": 1
|
||||
}
|
||||
]
|
||||
|
|
@ -0,0 +1,10 @@
|
|||
[
|
||||
{
|
||||
"kind": "triangle",
|
||||
"start": 0,
|
||||
"end": 2,
|
||||
"highpoint": 1,
|
||||
"volume": 1,
|
||||
"scenarioID": 1
|
||||
}
|
||||
]
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
{
|
||||
"f2": "extern"
|
||||
}
|
||||
|
|
@ -0,0 +1,54 @@
|
|||
{
|
||||
"$id": "/Matz/Patrice/Master-Thesis/Profile.schema.json",
|
||||
"name": "name",
|
||||
"scenarios": [
|
||||
{
|
||||
"scenarioID": 1,
|
||||
"interactions": [
|
||||
{
|
||||
"name": "1",
|
||||
"interactionID": "1",
|
||||
"delay": 0,
|
||||
"functions": [
|
||||
{
|
||||
"functionID": "f1",
|
||||
"cpu": 10,
|
||||
"cpu_time": 500000000,
|
||||
"ram": 100,
|
||||
"io": 0.0,
|
||||
"net": 10,
|
||||
"delay": 0,
|
||||
"callbacks": [
|
||||
"f2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"functionID": "f2",
|
||||
"cpu": 100,
|
||||
"cpu_time": 500000000,
|
||||
"ram": 100,
|
||||
"io": 0.0,
|
||||
"net": 10,
|
||||
"delay": 0,
|
||||
"callbacks": [
|
||||
"f3"
|
||||
]
|
||||
},
|
||||
{
|
||||
"functionID": "f3",
|
||||
"cpu": 10,
|
||||
"cpu_time": 500000000,
|
||||
"ram": 10,
|
||||
"io": 0.0,
|
||||
"net": 10,
|
||||
"delay": 0,
|
||||
"callbacks": [
|
||||
"-1"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -0,0 +1,36 @@
|
|||
{
|
||||
"id": "/Matz/Patrice/Master-Thesis/Service.schema.json",
|
||||
"name": "Service Definition for Example Application",
|
||||
"services": [
|
||||
{
|
||||
"scaleUpAt": 0.8,
|
||||
"scaleDownAt": 0.3,
|
||||
"scaleingMetric": "CPU",
|
||||
"serviceID": "default",
|
||||
"scales": false,
|
||||
"scale": 1,
|
||||
"scalingDelay": 0,
|
||||
"defaultServer": {
|
||||
"maxCPU": 100,
|
||||
"maxRAM": 10000,
|
||||
"maxIO": 10000,
|
||||
"maxNET": 1000
|
||||
}
|
||||
},
|
||||
{
|
||||
"scaleUpAt": 0.8,
|
||||
"scaleDownAt": 0.3,
|
||||
"scaleingMetric": "CPU",
|
||||
"serviceID": "extern",
|
||||
"scales": false,
|
||||
"scale": 1,
|
||||
"scalingDelay": 0,
|
||||
"defaultServer": {
|
||||
"maxCPU": 100,
|
||||
"maxRAM": 10000,
|
||||
"maxIO": 10000,
|
||||
"maxNET": 1000
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
# https://stackoverflow.com/questions/2632520/what-is-the-fastest-way-to-send-100-000-http-requests-in-python
|
||||
import time
|
||||
import asyncio
|
||||
from aiohttp import ClientSession, ClientConnectorError
|
||||
|
||||
async def fetch_html(url: str, session: ClientSession, delay: float, **kwargs) -> tuple:
    """GET *url* after a staggered delay and time the request.

    Returns a 4-tuple ``(url, status, finish_timestamp, elapsed_seconds)``.
    On a connection error the status is 404 and the timing fields cover the
    failed attempt, so every result tuple has the same shape.
    """
    # Stagger the requests slightly (delay is the request's index) so they
    # don't all fire at exactly the same moment.
    await asyncio.sleep(delay * 0.0001)
    t1 = time.time()
    try:
        resp = await session.request(method="GET", url=url, **kwargs)
    except ClientConnectorError:
        # Bug fix: the original returned a 2-tuple (url, 404) here, which
        # broke the caller's sort on element 3 — keep the 4-tuple shape.
        return (url, 404, time.time(), time.time() - t1)
    return (url, resp.status, time.time(), time.time() - t1)
|
||||
|
||||
async def make_requests(urls: set, **kwargs) -> None:
    """Fire one staggered GET per URL and print the timings as CSV.

    Each printed line is ``finish_timestamp,elapsed_seconds``, sorted by
    elapsed time (element 3 of each result tuple from fetch_html).
    """
    # Bug fix: the header used to read 'time,ans', which does not describe
    # the columns actually printed below (cf. the recorded results CSV,
    # whose header is time,response_time). Also dropped the pointless
    # f-string prefix.
    print('time,response_time')
    async with ClientSession() as session:
        # One task per URL; the index doubles as the stagger delay.
        tasks = [
            fetch_html(url=url, session=session, delay=i, **kwargs)
            for i, url in enumerate(urls)
        ]
        results = await asyncio.gather(*tasks)

    for result in sorted(results, key=lambda x: x[3]):
        print(f'{result[2]},{result[3]}')
|
||||
|
||||
if __name__ == "__main__":
|
||||
import sys
|
||||
|
||||
assert sys.version_info >= (3, 7), "Script requires Python 3.7+."
|
||||
urls = ["http://server:8080"]*100
|
||||
|
||||
asyncio.run(make_requests(urls=urls))
|
||||
|
|
@ -0,0 +1,101 @@
|
|||
time,response_time
|
||||
1628061042.5649183,0.23157548904418945
|
||||
1628061042.667307,0.32996296882629395
|
||||
1628061042.7667441,0.4294002056121826
|
||||
1628061042.8669772,0.5276312828063965
|
||||
1628061042.9677513,0.627406120300293
|
||||
1628061043.067698,0.7293546199798584
|
||||
1628061043.1729245,0.8345811367034912
|
||||
1628061043.2685366,0.9311926364898682
|
||||
1628061043.3688073,1.031463384628296
|
||||
1628061043.469585,1.129239797592163
|
||||
1628061043.5698938,1.229548692703247
|
||||
1628061043.6706767,1.3333327770233154
|
||||
1628061043.7710128,1.433668851852417
|
||||
1628061043.871862,1.5335185527801514
|
||||
1628061043.9724119,1.633065938949585
|
||||
1628061044.0726924,1.734349012374878
|
||||
1628061044.173468,1.8351247310638428
|
||||
1628061044.2736285,1.9342825412750244
|
||||
1628061044.3748221,2.03747820854187
|
||||
1628061044.4749622,2.1366188526153564
|
||||
1628061044.5749788,2.234633684158325
|
||||
1628061044.6758363,2.3384923934936523
|
||||
1628061044.7768857,2.43753981590271
|
||||
1628061044.8770125,2.5376665592193604
|
||||
1628061044.977586,2.639242649078369
|
||||
1628061045.0780318,2.7376866340637207
|
||||
1628061045.1780467,2.8377015590667725
|
||||
1628061045.2786665,2.9393205642700195
|
||||
1628061045.3790157,3.0406723022460938
|
||||
1628061045.4798765,3.1425325870513916
|
||||
1628061045.5800197,3.240673780441284
|
||||
1628061045.6810322,3.3406870365142822
|
||||
1628061045.7819874,3.442641496658325
|
||||
1628061045.8823185,3.541973352432251
|
||||
1628061045.9821696,3.6448256969451904
|
||||
1628061046.0830283,3.742683172225952
|
||||
1628061046.1836507,3.8453073501586914
|
||||
1628061046.2846985,3.9443533420562744
|
||||
1628061046.3847606,4.046417236328125
|
||||
1628061046.4852402,4.148894548416138
|
||||
1628061046.586028,4.245682954788208
|
||||
1628061046.686651,4.346305847167969
|
||||
1628061046.7867246,4.445379257202148
|
||||
1628061046.886841,4.5454957485198975
|
||||
1628061046.987857,4.646511793136597
|
||||
1628061047.0886152,4.747269868850708
|
||||
1628061047.1888301,4.847484827041626
|
||||
1628061047.2889,4.947554588317871
|
||||
1628061047.3898757,5.048530340194702
|
||||
1628061047.4902446,5.14889931678772
|
||||
1628061047.590829,5.249483585357666
|
||||
1628061047.6916041,5.350258827209473
|
||||
1628061047.7919614,5.449618101119995
|
||||
1628061047.8926713,5.551326036453247
|
||||
1628061047.9928167,5.650473356246948
|
||||
1628061048.0932875,5.750944137573242
|
||||
1628061048.1941674,5.851824045181274
|
||||
1628061048.2941709,5.952825546264648
|
||||
1628061048.3950322,6.052688837051392
|
||||
1628061048.4951012,6.152757883071899
|
||||
1628061048.5956073,6.2532639503479
|
||||
1628061048.696817,6.35447359085083
|
||||
1628061048.7966778,6.454334497451782
|
||||
1628061048.8975618,6.555218458175659
|
||||
1628061048.9979446,6.654600143432617
|
||||
1628061049.0988848,6.755540370941162
|
||||
1628061049.1989539,6.85560941696167
|
||||
1628061049.2993329,6.956989526748657
|
||||
1628061049.4000535,7.056709051132202
|
||||
1628061049.5007453,7.157400846481323
|
||||
1628061049.6011474,7.257802963256836
|
||||
1628061049.7019966,7.358652114868164
|
||||
1628061049.8024044,7.459059953689575
|
||||
1628061049.9026802,7.559335708618164
|
||||
1628061050.0030172,7.659672737121582
|
||||
1628061050.1037266,7.760382175445557
|
||||
1628061050.2036915,7.859348773956299
|
||||
1628061050.3047783,7.960435628890991
|
||||
1628061050.405053,8.060710191726685
|
||||
1628061050.5059469,8.161604166030884
|
||||
1628061050.6060805,8.261737823486328
|
||||
1628061050.7063167,8.361974000930786
|
||||
1628061050.8070118,8.462669134140015
|
||||
1628061050.9078999,8.563557147979736
|
||||
1628061051.0086617,8.664319038391113
|
||||
1628061051.108565,8.764222383499146
|
||||
1628061051.2102685,8.86492657661438
|
||||
1628061051.3100119,8.966667413711548
|
||||
1628061051.4108407,9.066498041152954
|
||||
1628061051.5109522,9.166609525680542
|
||||
1628061051.6123142,9.266972303390503
|
||||
1628061051.7120194,9.366677522659302
|
||||
1628061051.812532,9.467190027236938
|
||||
1628061051.913675,9.568333148956299
|
||||
1628061052.01374,9.668398141860962
|
||||
1628061052.1145074,9.769165515899658
|
||||
1628061052.214403,9.86906099319458
|
||||
1628061052.3157895,9.970447540283203
|
||||
1628061052.4158397,10.070497751235962
|
||||
1628061052.516158,10.17081618309021
|
||||
|
|
|
@ -0,0 +1,14 @@
|
|||
from flask import Flask
import requests
import time
app = Flask(__name__)


@app.route("/")
def main():
    # Forward to the backend service and log how long the round trip took.
    started = time.time()
    response = requests.get("http://127.0.0.1:8081")
    print(time.time() - started)
    return "yes"


if __name__ == '__main__':
    app.run(host="localhost", port=8080, threaded=True, debug=False)
|
||||
|
|
@ -0,0 +1,17 @@
|
|||
from flask import Flask
import time
app = Flask(__name__)


@app.route("/")
def main():
    # Busy-spin for roughly 0.1 s to emulate a CPU-bound request handler.
    started = time.time()
    while True:
        _ = 213456789 ** 12
        if time.time() - started > .0999:
            break

    print(time.time() - started)
    return "<p>Hello, World!</p>"


if __name__ == '__main__':
    app.run(host="localhost", port=8081, threaded=False, debug=False)
|
||||
|
|
@ -0,0 +1,209 @@
|
|||
time,cpu,mem
|
||||
1627321488.06,0.0,1054646272
|
||||
1627321488.16,50.0,1054650368
|
||||
1627321488.26,0.0,1054650368
|
||||
1627321488.36,0.0,1054650368
|
||||
1627321488.46,0.0,1054650368
|
||||
1627321488.56,0.0,1054650368
|
||||
1627321488.66,0.0,1054650368
|
||||
1627321488.76,0.0,1054650368
|
||||
1627321488.86,0.0,1054650368
|
||||
1627321488.96,0.0,1054650368
|
||||
1627321489.06,0.0,1054650368
|
||||
1627321489.16,0.0,1054650368
|
||||
1627321489.26,20.0,1054650368
|
||||
1627321489.36,0.0,1054650368
|
||||
1627321489.46,0.0,1054650368
|
||||
1627321489.56,0.0,1054650368
|
||||
1627321489.66,0.0,1054650368
|
||||
1627321489.77,0.0,1054650368
|
||||
1627321489.87,0.0,1054650368
|
||||
1627321489.97,0.0,1054650368
|
||||
1627321490.07,0.0,1054650368
|
||||
1627321490.17,0.0,1054650368
|
||||
1627321490.27,0.0,1054650368
|
||||
1627321490.37,0.0,1054650368
|
||||
1627321490.47,9.6,1054650368
|
||||
1627321490.57,0.0,1054650368
|
||||
1627321490.67,0.0,1054650368
|
||||
1627321490.77,10.4,1054650368
|
||||
1627321490.87,0.0,1054650368
|
||||
1627321490.97,0.0,1054650368
|
||||
1627321491.07,0.0,1054650368
|
||||
1627321491.17,0.0,1054650368
|
||||
1627321491.27,29.2,1054650368
|
||||
1627321491.37,9.6,1054650368
|
||||
1627321491.47,0.0,1054650368
|
||||
1627321491.57,0.0,1054650368
|
||||
1627321491.67,0.0,1054650368
|
||||
1627321491.77,0.0,1054650368
|
||||
1627321491.87,0.0,1054650368
|
||||
1627321491.98,0.0,1054650368
|
||||
1627321492.08,0.0,1054650368
|
||||
1627321492.18,0.0,1054650368
|
||||
1627321492.28,10.0,1054642176
|
||||
1627321492.38,0.0,1054642176
|
||||
1627321492.48,0.0,1054642176
|
||||
1627321492.58,0.0,1054642176
|
||||
1627321492.68,0.0,1054642176
|
||||
1627321492.78,0.0,1054642176
|
||||
1627321492.88,110.0,1056215040
|
||||
1627321492.98,129.6,1058017280
|
||||
1627321493.08,129.6,1060290560
|
||||
1627321493.18,123.2,1061138432
|
||||
1627321493.28,100.0,1061138432
|
||||
1627321493.38,107.2,1061138432
|
||||
1627321493.48,100.0,1061138432
|
||||
1627321493.58,100.0,1061138432
|
||||
1627321493.68,100.0,1061138432
|
||||
1627321493.78,110.0,1060909056
|
||||
1627321493.88,100.0,1061433344
|
||||
1627321493.98,107.2,1061433344
|
||||
1627321494.08,102.4,1061433344
|
||||
1627321494.18,100.0,1061433344
|
||||
1627321494.28,100.0,1061433344
|
||||
1627321494.38,100.0,1061433344
|
||||
1627321494.48,100.0,1061203968
|
||||
1627321494.58,107.2,1061171200
|
||||
1627321494.68,100.0,1061171200
|
||||
1627321494.78,100.0,1061171200
|
||||
1627321494.89,100.0,1061105664
|
||||
1627321494.99,100.0,1061105664
|
||||
1627321495.09,100.0,1061105664
|
||||
1627321495.19,100.0,1061105664
|
||||
1627321495.29,100.0,1061105664
|
||||
1627321495.39,112.8,1061113856
|
||||
1627321495.49,100.0,1060884480
|
||||
1627321495.59,100.0,1060884480
|
||||
1627321495.69,100.0,1060884480
|
||||
1627321495.79,100.0,1060851712
|
||||
1627321495.89,100.0,1060753408
|
||||
1627321495.99,100.0,1060757504
|
||||
1627321496.09,114.4,1060757504
|
||||
1627321496.19,100.0,1060757504
|
||||
1627321496.29,100.0,1060757504
|
||||
1627321496.39,100.0,1060757504
|
||||
1627321496.49,100.0,1060528128
|
||||
1627321496.59,102.4,1060528128
|
||||
1627321496.69,100.0,1060528128
|
||||
1627321496.79,100.0,1060532224
|
||||
1627321496.89,110.0,1060433920
|
||||
1627321496.99,100.0,1060405248
|
||||
1627321497.09,100.0,1060409344
|
||||
1627321497.19,100.0,1060159488
|
||||
1627321497.29,100.0,1060159488
|
||||
1627321497.39,100.0,1060159488
|
||||
1627321497.49,100.0,1059930112
|
||||
1627321497.59,107.2,1059930112
|
||||
1627321497.69,100.0,1059930112
|
||||
1627321497.79,102.4,1059930112
|
||||
1627321497.89,100.0,1059831808
|
||||
1627321497.99,100.0,1059831808
|
||||
1627321498.09,100.0,1059831808
|
||||
1627321498.22,125.6,1059573760
|
||||
1627321498.32,102.4,1059573760
|
||||
1627321498.42,100.0,1059590144
|
||||
1627321498.52,100.0,1059360768
|
||||
1627321498.62,100.0,1059364864
|
||||
1627321498.72,100.0,1059364864
|
||||
1627321498.82,102.4,1059266560
|
||||
1627321498.92,107.2,1059008512
|
||||
1627321499.02,107.2,1059008512
|
||||
1627321499.12,100.0,1059008512
|
||||
1627321499.22,100.0,1058979840
|
||||
1627321499.32,100.0,1058979840
|
||||
1627321499.42,100.0,1058779136
|
||||
1627321499.52,100.0,1058549760
|
||||
1627321499.62,100.0,1058549760
|
||||
1627321499.72,100.0,1058553856
|
||||
1627321499.82,100.0,1058455552
|
||||
1627321499.92,110.0,1058394112
|
||||
1627321500.02,110.0,1058394112
|
||||
1627321500.12,100.0,1058394112
|
||||
1627321500.22,100.0,1058394112
|
||||
1627321500.32,100.0,1058394112
|
||||
1627321500.42,100.0,1058394112
|
||||
1627321500.52,107.2,1058164736
|
||||
1627321500.62,100.0,1058164736
|
||||
1627321500.72,100.0,1057964032
|
||||
1627321500.83,100.0,1057865728
|
||||
1627321500.93,100.0,1057824768
|
||||
1627321501.03,100.0,1057824768
|
||||
1627321501.13,100.0,1057824768
|
||||
1627321501.23,100.0,1057820672
|
||||
1627321501.33,102.4,1057824768
|
||||
1627321501.43,110.0,1057824768
|
||||
1627321501.53,100.0,1057595392
|
||||
1627321501.63,100.0,1057595392
|
||||
1627321501.73,100.0,1057337344
|
||||
1627321501.83,100.0,1057243136
|
||||
1627321501.93,107.2,1057239040
|
||||
1627321502.03,100.0,1056980992
|
||||
1627321502.13,100.0,1056980992
|
||||
1627321502.23,100.0,1056980992
|
||||
1627321502.33,100.0,1056985088
|
||||
1627321502.43,100.0,1056985088
|
||||
1627321502.53,100.0,1056497664
|
||||
1627321502.63,100.0,1056497664
|
||||
1627321502.73,102.4,1056239616
|
||||
1627321502.83,107.2,1056145408
|
||||
1627321502.93,110.0,1056145408
|
||||
1627321503.03,39.2,1056079872
|
||||
1627321503.13,0.0,1056079872
|
||||
1627321503.23,0.0,1056079872
|
||||
1627321503.33,0.0,1056079872
|
||||
1627321503.43,0.0,1056079872
|
||||
1627321503.53,0.0,1056079872
|
||||
1627321503.63,0.0,1056079872
|
||||
1627321503.73,0.0,1056079872
|
||||
1627321503.83,0.0,1056079872
|
||||
1627321503.94,0.0,1056079872
|
||||
1627321504.04,0.0,1056079872
|
||||
1627321504.14,0.0,1056079872
|
||||
1627321504.24,0.0,1056079872
|
||||
1627321504.34,0.0,1056079872
|
||||
1627321504.44,19.6,1056079872
|
||||
1627321504.54,0.0,1056079872
|
||||
1627321504.64,0.0,1056079872
|
||||
1627321504.74,0.0,1056079872
|
||||
1627321504.84,0.0,1056079872
|
||||
1627321504.94,0.0,1056079872
|
||||
1627321505.04,0.0,1056079872
|
||||
1627321505.14,0.0,1056079872
|
||||
1627321505.24,0.0,1056079872
|
||||
1627321505.34,0.0,1056079872
|
||||
1627321505.44,0.0,1056079872
|
||||
1627321505.54,0.0,1056079872
|
||||
1627321505.64,0.0,1056079872
|
||||
1627321505.74,0.0,1056079872
|
||||
1627321505.84,0.0,1056079872
|
||||
1627321505.94,19.6,1056079872
|
||||
1627321506.05,0.0,1056079872
|
||||
1627321506.15,0.0,1056079872
|
||||
1627321506.25,0.0,1056079872
|
||||
1627321506.35,0.0,1056079872
|
||||
1627321506.45,0.0,1056079872
|
||||
1627321506.55,0.0,1056079872
|
||||
1627321506.65,0.0,1056079872
|
||||
1627321506.75,0.0,1056079872
|
||||
1627321506.85,0.0,1056079872
|
||||
1627321506.95,0.0,1056079872
|
||||
1627321507.05,0.0,1056079872
|
||||
1627321507.15,0.0,1056079872
|
||||
1627321507.25,0.0,1056079872
|
||||
1627321507.35,0.0,1056079872
|
||||
1627321507.45,10.0,1056079872
|
||||
1627321507.55,9.6,1056079872
|
||||
1627321507.65,0.0,1056079872
|
||||
1627321507.75,0.0,1056079872
|
||||
1627321507.85,0.0,1056079872
|
||||
1627321507.95,0.0,1056079872
|
||||
1627321508.05,0.0,1056079872
|
||||
1627321508.15,0.0,1056079872
|
||||
1627321508.26,0.0,1056079872
|
||||
1627321508.39,22.8,1056071680
|
||||
1627321508.49,0.0,1056071680
|
||||
1627321508.59,0.0,1056071680
|
||||
1627321508.69,0.0,1056071680
|
||||
1627321508.79,0.0,1056071680
|
||||
1627321508.89,0.0,1056071680
|
||||
|
|
After Width: | Height: | Size: 40 KiB |
|
After Width: | Height: | Size: 26 KiB |
|
After Width: | Height: | Size: 455 KiB |
|
After Width: | Height: | Size: 139 KiB |
|
After Width: | Height: | Size: 131 KiB |
|
After Width: | Height: | Size: 125 KiB |
|
After Width: | Height: | Size: 79 KiB |
|
After Width: | Height: | Size: 16 KiB |
|
After Width: | Height: | Size: 99 KiB |
|
After Width: | Height: | Size: 57 KiB |
|
After Width: | Height: | Size: 11 KiB |
|
After Width: | Height: | Size: 11 KiB |
|
After Width: | Height: | Size: 25 KiB |
|
After Width: | Height: | Size: 15 KiB |
|
After Width: | Height: | Size: 141 KiB |
|
After Width: | Height: | Size: 84 KiB |
|
After Width: | Height: | Size: 32 KiB |
|
After Width: | Height: | Size: 32 KiB |
|
After Width: | Height: | Size: 32 KiB |
|
After Width: | Height: | Size: 18 KiB |
|
After Width: | Height: | Size: 17 KiB |
|
After Width: | Height: | Size: 28 KiB |
|
After Width: | Height: | Size: 26 KiB |
|
After Width: | Height: | Size: 77 KiB |
|
After Width: | Height: | Size: 25 KiB |
|
After Width: | Height: | Size: 22 KiB |
|
After Width: | Height: | Size: 20 KiB |
|
After Width: | Height: | Size: 9.6 KiB |
|
After Width: | Height: | Size: 55 KiB |
|
After Width: | Height: | Size: 46 KiB |
|
After Width: | Height: | Size: 15 KiB |
|
After Width: | Height: | Size: 208 KiB |
|
After Width: | Height: | Size: 157 KiB |
|
After Width: | Height: | Size: 34 KiB |
|
After Width: | Height: | Size: 147 KiB |
|
After Width: | Height: | Size: 65 KiB |
|
After Width: | Height: | Size: 56 KiB |
|
|
@ -0,0 +1,235 @@
|
|||
import json
|
||||
from jsonschema import validate
|
||||
from Application.Function import *
|
||||
from Application.Event import *
|
||||
import copy
|
||||
|
||||
|
||||
class DistributionFactory:
    """Builds the time-indexed event distribution for a simulation run.

    Loads and validates profile/service definitions, resolves each function's
    callback chain into nested Function objects, and produces a dict that maps
    timestamps to lists of Events for the simulator to consume.
    """

    def __init__(self):
        # All state is filled in later by the caller.
        self.profilePath = None
        self.servicePath = None
        self.mapping = None
        self.profile = None

    @staticmethod
    def getContentFromFile(path):
        """Read and parse the JSON document at *path*."""
        with open(path) as profileF:
            content = json.load(profileF)
        return content

    @staticmethod
    def validateContentvsSchema(profile, schema):
        """Validate *profile* against *schema*.

        Returns (True, None) on success or (False, exception) on failure.
        """
        try:
            validate(instance=profile, schema=schema)
            return True, None
        except Exception as e:
            return False, e

    def validateByPath(self, contentPath, schemaPath):
        """Load both JSON files and validate the content against the schema."""
        content = self.getContentFromFile(contentPath)
        schema = self.getContentFromFile(schemaPath)
        return self.validateContentvsSchema(content, schema)

    @staticmethod
    def getFuncDist(times, functionDict):
        '''deprecated
        times according to a distribution, Dictionary from which Function Objects can be generated'''
        # NOTE(review): Event is constructed with 3 arguments here, but
        # Event.__init__ takes (t, type, serviceId, function) — calling this
        # deprecated helper would raise a TypeError. Kept only for reference.
        funcDist = dict()
        for t in times:
            functionDict["start"] = t
            funcDist[t] = [Event(None, 1, Function(**functionDict))]
        return funcDist

    def resolvCallback(self, functionDict, functions):
        '''Recursively resolve the callback IDs of *functionDict* into Function
        objects and return the resulting (deep-copied) Function.

        "-1" marks the end of a callback chain; an entry that is already a
        Function instance is treated as resolved.
        '''
        # (removed an unused local "callbacks = []" from the original)
        for i in range(len(functionDict["callbacks"])):
            if isinstance(functionDict["callbacks"][i], Function):
                return copy.deepcopy(Function(**functionDict))
            if functionDict["callbacks"][i] == "-1":
                return copy.deepcopy(Function(**functionDict))
            else:
                # Replace the callback ID with the recursively resolved Function.
                for callbackFunction in [x for x in functions if x["functionID"] == functionDict["callbacks"][i]]:
                    callbackFunction["scenarioID"] = functionDict["scenarioID"]
                    callbackFunction["interactionID"] = functionDict["interactionID"]
                    functionDict["callbacks"][i] = self.resolvCallback(callbackFunction, functions)
        function = copy.deepcopy(Function(**functionDict))
        return function

    def getFirstFunction(self, functions):
        """Return the entry function of an interaction.

        The first function in the list might be a callback, so we have to
        find the first function which is never called as a callback.
        """
        if len(functions) == 1:
            return functions[0]
        firstF = None
        for f in functions:
            # NOTE(review): this picks the first function that HAS callbacks,
            # which is not literally "never referenced as a callback" —
            # confirm this assumption against the input profiles.
            if f["callbacks"] != ["-1"]:
                firstF = f
                break
        return firstF

    def getScenarioDist(self, inputs, profile, mapping):
        '''returns dictionary with timestamp as key and array of events as value
        {
            0:[event1, event2],
            1:[event4],
            ...
        }
        '''
        try:
            if not self.validateMapping(profile, mapping):
                raise Exception("Function not mapped to Service")

            funcDist = dict()

            # Cache of already-resolved functions, keyed by functionID.
            functions = {}

            # For every distribution and every moment in the distribution all
            # interactions of the scenario are created; each interaction has a
            # delay added, so the actual start point is
            # t_start = t_current + delay for every interaction.
            # This way parallel interactions are possible.

            # inputs is an array of tuples [([moment1, moment2, moment3...], scenarioID), ...]
            for i in inputs:
                times, scenarioID = i
                scenario = [x for x in profile["scenarios"] if x["scenarioID"] == scenarioID][0]

                for t in times:
                    for interaction in scenario["interactions"]:

                        ts = t + interaction["delay"]
                        functionDict = copy.deepcopy(self.getFirstFunction(interaction["functions"]))
                        functionDict["start"] = ts
                        functionDict["scenarioID"] = scenarioID
                        functionDict["interactionID"] = interaction["interactionID"]

                        # Resolve the callback chain only once per functionID.
                        if functionDict["functionID"] in functions:
                            function = functions[functionDict["functionID"]]
                        else:
                            function = self.resolvCallback(functionDict, interaction["functions"])
                            functions[function.functionID] = function

                        function.start = ts
                        function.scheduled = ts

                        serviceID = mapping[function.functionID]
                        self.addEvent(funcDist, Event(ts, None, serviceID, copy.deepcopy(function)))

            # Assign a unique runtime ID to every function, including every
            # resolved callback reachable from it (breadth-first walk).
            runtimeID = 0
            for val in funcDist.values():
                for v in val:
                    v.function.runtimeID = runtimeID
                    runtimeID += 1
                    callbacks = [cb for cb in v.function.callbacks]

                    i = 0
                    while i < len(callbacks):
                        callback = callbacks[i]
                        i += 1
                        if callback != "-1":
                            callback.runtimeID = runtimeID
                            runtimeID += 1
                            for cb in callback.callbacks:
                                if not isinstance(cb, Function) and cb != "-1":
                                    raise Exception("Function was not resolved during callback resolving")
                                callbacks.append(cb)
                        if callback == "-1" and not callbacks:
                            break

            return funcDist

        except Exception as e:
            # NOTE(review): errors are only printed and the method then
            # implicitly returns None — callers must handle a None result.
            print(e)

    @staticmethod
    def addEvent(eventDict, event):
        """Append *event* to eventDict[event.t], creating the list if needed."""
        ts = event.t
        if ts in eventDict:
            eventDict[ts].append(event)
        else:
            eventDict[ts] = [event]

    @staticmethod
    def validateMapping(profile, mapping):
        """Raise if any function in *profile* has no service mapping.

        Returns True when every functionID appears in *mapping*.
        """
        for scenario in profile["scenarios"]:
            for interaction in scenario["interactions"]:
                for function in interaction["functions"]:
                    if function["functionID"] not in mapping:
                        # Bug fix: a dead "return False" used to follow this
                        # raise; the exception is the actual failure signal.
                        raise Exception(function["functionID"] + " not mapped to Service")
        return True

    @staticmethod
    def getProfileAsDict(profileIn):
        '''Converts input JSON to a usable nested Dict()'''

        # This converts the input's nested arrays-of-objects into
        # dictionaries of objects keyed by each object's ID field. This was
        # done to avoid redundancy in the input file: the user doesn't have
        # to keep a separate dictionary key in sync with the object's ID and
        # can work with a simpler input format.

        profile = dict()
        profile["scenarios"] = {}

        for si, scenario in enumerate(profileIn["scenarios"]):
            profile["scenarios"][scenario["scenarioID"]] = copy.deepcopy(scenario)
            profile["scenarios"][scenario["scenarioID"]]["interactions"] = {}
            for ii, interaction in enumerate(profileIn["scenarios"][si]["interactions"]):
                profile["scenarios"][scenario["scenarioID"]]["interactions"][
                    interaction["interactionID"]] = copy.deepcopy(interaction)
                profile["scenarios"][scenario["scenarioID"]]["interactions"][interaction["interactionID"]][
                    "functions"] = {}
                for function in profileIn["scenarios"][si]["interactions"][ii]["functions"]:
                    profile["scenarios"][scenario["scenarioID"]]["interactions"][interaction["interactionID"]][
                        "functions"][function["functionID"]] = function

        return profile

    @staticmethod
    def createNetworkGraph(profile, mapping):
        """Create a JS (Cytoscape-style) node/edge list for the application graph."""
        inclNodes = []
        nodes = []
        edges = []

        for scenario in profile["scenarios"]:
            for interaction in scenario["interactions"]:
                for function in interaction["functions"]:
                    serviceID = mapping[function["functionID"]]
                    if serviceID not in inclNodes:
                        inclNodes.append(serviceID)
                        nodes.append({"data": {"id": serviceID, "label": serviceID}})

                    if function["callbacks"][0] != "-1":
                        callbackServiceID = mapping[function["callbacks"][0].functionID]
                        # Bug fix: the original compared mapping[callbackServiceID]
                        # — indexing the functionID-keyed mapping with a service
                        # ID, which raises KeyError — instead of comparing the
                        # callback's service ID itself.
                        if callbackServiceID != serviceID:
                            if callbackServiceID not in inclNodes:
                                inclNodes.append(callbackServiceID)
                                nodes.append({"data": {"id": callbackServiceID, "label": callbackServiceID}})
                            edges.append({"data": {"source": serviceID, "target": callbackServiceID}})

        graph = nodes + edges
        return graph

    @staticmethod
    def genMapping(profile, mapping):
        """Expand *mapping* so that every functionID maps to a service.

        A function can be mapped directly by its functionID or by its class
        name (the part of the ID before the first "."); the direct mapping
        wins. Anything unmapped falls back to the server with the ID
        "default" — the default server must use exactly that ID.
        """
        # This function could be made redundant: as of right now only
        # functions that are NOT on the default server need a mapping entry.
        tmpMapping = {}

        for scenario in profile["scenarios"]:
            for interaction in scenario["interactions"]:
                for function in interaction["functions"]:
                    className = function["functionID"].split(".")[0]
                    service = mapping[className] if className in mapping else "default"
                    service = mapping[function["functionID"]] if function["functionID"] in mapping else service

                    tmpMapping[function["functionID"]] = service

        return tmpMapping
|
||||
|
|
@ -0,0 +1,29 @@
|
|||
class Event:
    """A single entry in the simulation's event series.

    An event either carries a function to push onto a service
    (type is None) or triggers a recalculation (type "recalc",
    function is None). Timestamp *t* is in nanoseconds.
    """

    def __init__(self, t, type, serviceId, function):
        self.t = t
        # type can be either recalculation or None
        # if type is None, then Event contains function
        self.type = type
        # service IDs are normalized to strings for dict lookups
        self.serviceId = str(serviceId)
        self.function = function

    # functions for sorting events to keep FIFO even for deferred functions
    # NOTE: the comparison is deliberately inverted (later scheduled sorts
    # as "smaller") because the event loop pops from the END of the sorted
    # list, so the earliest-scheduled function is processed first.
    def __lt__(self, other):
        if self.function is None:
            return False
        if other.function is None:
            return False

        return self.function.scheduled > other.function.scheduled

    def __eq__(self, other):
        # two function-less events (e.g. recalculations) compare equal
        if self.function is None and other.function is None:
            return True
        # Fix: a None/non-None mix previously fell through to the scheduled
        # comparison and raised AttributeError on the None side.
        if self.function is None or other.function is None:
            return False
        return self.function.scheduled == other.function.scheduled

    def print(self):
        """Return a human-readable description (despite the name, nothing is printed)."""
        return f"t: {self.t}, type: {self.type}, serviceID: {self.serviceId}, function:{self.function}"
|
||||
|
|
@ -0,0 +1,70 @@
|
|||
class Function:
    """One profiled function invocation inside the simulation.

    Timestamps (start, scheduled, end, delay) are nanoseconds; ``cput``
    is the ideal cpu time the function needs without contention.
    """

    def __init__(self, cpu, ram, cpu_time, io, net, start=None, callbacks=None, functionID=None,
                 interactionID=None, scenarioID=None, runtimeID=None, delay=0):
        self.cpu = cpu
        # ideal cpu time required
        self.cput = cpu_time
        # cpu time still outstanding, rewritten on every recalculation
        self.remainT = cpu_time
        self.ram = ram
        # completion timestamp; None until calcEndTime runs the first time
        self.end = None
        self.start = start
        # original schedule time, kept for FIFO ordering of deferred functions
        self.scheduled = start
        self.io = io
        self.net = net
        # io/net contention slow-down factor, refreshed by effCpu
        self.perfMod = 1
        self.callbacks = callbacks
        self.functionID = functionID
        self.interactionID = interactionID
        self.scenarioID = scenarioID
        # extra wait applied when this function runs as a callback
        self.delay = delay
        self.runtimeID = runtimeID

    def calcEndTime(self, sCpu, time, io, net):
        """(Re)compute this function's end timestamp.

        sCpu: cpu share the server grants this function.
        time: current simulation time; io/net: the server's available rates.
        Raises when the server grants no cpu or the duration diverges.
        """
        # refresh self.perfMod from the current io/net contention
        self.effCpu(io, net)

        if self.start is None:
            self.start = time

        if sCpu <= 0:
            raise Exception("Server seems to have no resources for this function or in general")

        # slow-down = demanded cpu over granted cpu, at least the io/net
        # penalty, and never an artificial speed-up (< 1)
        perfMod = self.cpu / sCpu
        perfMod = max(self.perfMod, perfMod)
        if perfMod < 1:
            perfMod = 1

        # first calculation: end is start plus the scaled duration
        if self.end is None:
            self.end = self.start + perfMod * self.cput
            return self.end

        # recalculation: rescale the remaining work from *time* onwards
        # NOTE(review): this mixes self.cput with nanosecond timestamps —
        # assumes both share a unit; confirm against the profile inputs.
        self.remainT = self.cput * perfMod - (time - self.start)
        self.end = time + self.remainT
        if self.end == float("inf"):
            raise Exception("Function duration is infinite")
        return self.end

    def effCpu(self, io, net):
        # to simulate waiting for data, IO and NET usage of the server are
        # taken into consideration
        """Return the effective cpu demand under the given io/net rates and
        store the resulting slow-down factor in self.perfMod."""
        perfModIo = 1
        perfModNet = 1

        # the demanded rate is total volume over ideal duration; a ratio < 1
        # means the server cannot feed the function fast enough
        # NOTE(review): divides by self.cput — a zero cpu_time would raise
        if io < self.io / self.cput:
            perfModIo = io / (self.io / self.cput)

        if net < self.net / self.cput:
            perfModNet = net / (self.net / self.cput)

        # the slower of the two rates dominates the stall
        perfMod = 1 / min(perfModIo, perfModNet)
        self.perfMod = perfMod
        return self.cpu * perfMod

    def setCallbackStart(self, t):
        """Schedule this callback: it starts *delay* after trigger time *t*."""
        self.start = self.delay + t
        self.scheduled = self.start

    # just like events, functions can be ordered to keep FIFO for deferred
    # functions.
    # NOTE: comparisons are deliberately inverted (earlier start counts as
    # "greater"), matching how the event loop pops from the end of a sorted
    # list so the earliest function is handled first.
    def __gt__(self, f2):
        return self.start < f2.start

    def __lt__(self, f2):
        return self.start > f2.start
|
||||
|
|
@ -0,0 +1,123 @@
|
|||
class Server:
    """A single server replica: holds its active functions and recomputes
    their end times whenever the load changes."""

    def __init__(self, maxCPU, maxRAM, maxIO, maxNET):
        # functions currently executing on this server
        self.functions = []
        self.maxcpu = maxCPU # 100 per core
        self.maxram = maxRAM # mb
        self.maxio = maxIO
        self.maxnet = maxNET
        self.io = self.maxio # mb/s
        self.net = self.maxnet # mb/s
        # current capacity; nothing visible here ever reduces these — the
        # accounting in calcEffPerf is commented out
        self.cpu = self.maxcpu
        self.ram = self.maxram
        self.perf = self.cpu

    def fits(self, function):
        """Return True if *function* fits next to the current load.

        Each ratio is capacity over projected demand; the function fits when
        every ratio stays >= 1. max(..., 1) guards against division by zero.
        """
        if len(self.functions) == 0:
            return True

        net = self.maxnet / (max(sum(f.net / f.cput for f in self.functions) + function.net, 1) / function.cput)
        io = self.maxio / (max(sum(f.io / f.cput for f in self.functions) + function.io, 1) / function.cput)
        ram = self.maxram / max(sum(f.ram for f in self.functions) + function.ram, 1)
        cpu = self.cpu / max(sum(f.cpu for f in self.functions) + function.cpu, 1)

        if min(net, io, ram, cpu) >= 1:
            return True
        else:
            return False

    def push(self, function, t):
        # assign function to the server's array of active functions
        """Add *function* at time *t* and recompute all end times.

        Returns (list of distinct end times, None); the None slot mirrors
        Service.push's return shape. Raises when the function can never fit
        on this server, regardless of load.
        """
        if function.cpu > self.maxcpu or function.ram > self.maxram:
            raise Exception("Function has higher requirements than server can satisfy")

        ets = set()

        self.functions.append(function)
        # cpu share per function after adding the new load
        tpf = self.calcEffPerf()
        # NOTE(review): the loop variable shadows the *function* parameter —
        # harmless because the parameter is no longer needed, but fragile.
        for function in self.functions:
            et = function.calcEndTime(tpf, t, self.io, self.net)
            ets.add(et)

        return list(ets), None

    def pop(self, time):
        # remove old functions from the array of active ones and return
        # their callbacks
        """Remove every function whose end <= *time*.

        Returns (finished functions, distinct refreshed end times of the
        remaining functions, callbacks collected from the finished ones).
        """
        ets = []
        delete = []
        callbacks = []
        returns = []
        done = 0
        for i, function in enumerate(self.functions):
            if function.end <= time:

                # "-1" is the sentinel for "no callback"
                for callback in function.callbacks:
                    if callback != "-1":
                        callbacks.append(callback)

                delete.append(i)
                returns.append(function)
                done += 1

            # function is dropped after 30sec timeout
            # no callbacks will be called
            # elif function.end - function.scheduled > 30:
            #     print("dropped ", function.functionID)
            #     delete.append(i)
            #     done += 1

        # delete by index; "j - i" compensates for elements already removed
        for i, j in enumerate(delete):
            try:
                x = self.functions[j - i]
                del self.functions[j - i]
            except:
                # NOTE(review): bare except silently swallows any failure here
                print("")
        tpf = self.calcEffPerf()
        # remaining functions get fresh end times under the reduced load
        for function in self.functions:
            et = function.calcEndTime(tpf, time, self.io, self.net)
            ets.append(et)

        return returns, list(set(ets)), callbacks

    def getResUtil(self):
        '''Resource utilization between 0 and 1: cpu, ram, net, io'''
        if len(self.functions) > 0:
            net = sum(f.net for f in self.functions) / self.maxnet
            io = sum(f.io for f in self.functions) / self.maxio
            ram = sum(f.ram for f in self.functions) / self.maxram
            # NOTE(review): divides by self.cpu while the others use max* —
            # equivalent only while self.cpu is never reduced from maxcpu
            cpu = sum(f.cpu for f in self.functions) / self.cpu

            return cpu, ram, net, io
        else:
            return 0, 0, 0, 0

    def calcEffPerf(self):
        """Return the cpu share granted to each active function (equal split)."""
        # simulate slow down caused by swap
        # can make model unstable and increase function cpu time to inf
        # ram = sum(f.ram for f in self.functions)
        # self.swap = 0
        # if ram > self.ram:
        #     swap = ram - self.ram
        #     swap = (swap / ram)
        #     self.perf = ((1-swap) + swap*self.swapSlowdown)*self.cpu
        #     self.swap = swap*10

        # self.net = self.maxnet - sum(f.net for f in self.functions)
        # self.io = self.maxio - sum(f.io for f in self.functions)
        # self.ram = self.maxram - sum(f.ram for f in self.functions)
        # self.cpu = self.maxcpu - sum(f.cpu for f in self.functions)

        self.perf = self.cpu
        # max(1, ...) keeps the division defined on an idle server
        tpf = ((self.perf) / (max(1, len(self.functions))))
        return tpf

    def getFreeBy(self):
        """Earliest end time among active functions.

        NOTE(review): min() raises ValueError on an idle server — callers
        appear to rely on at least one active function; confirm.
        """
        return min(f.end for f in self.functions)

    # servers can be sorted and a minimum can be computed; used to determine
    # the earliest time by which a server is available again

    def __lt__(self, other):
        return self.getFreeBy() < other.getFreeBy()

    def __eq__(self, other):
        return self.getFreeBy() == other.getFreeBy()
|
||||
|
|
@ -0,0 +1,86 @@
|
|||
import copy
|
||||
import numpy as np
|
||||
|
||||
class Service:
    """A scalable group of identical Server replicas serving one serviceID."""

    def __init__(self, defaultServer, serviceID, scaleUpAt, scaleDownAt, scaleingMetric, scales, scale, scalingDelay):
        # TODO scaling, also get load average over x seconds for scaling needed
        # NOTE(review): this parameter is spelled "scaleingMetric" while the
        # service schema declares "scalingMetric"; Service(**definition) would
        # fail on schema-conformant input — confirm which spelling the input
        # files actually use. The value is also never stored.
        self.scaleUpAt = scaleUpAt
        self.scaleDownAt = scaleDownAt
        self.scaleable = scales
        self.scaleingDown = False
        # pristine template used whenever the service scales up
        self.defaultServer = copy.deepcopy(defaultServer)
        self.scale = scale
        # start with *scale* independent replicas of the default server
        self.servers = [copy.deepcopy(defaultServer) for i in range(scale)]
        self.serviceId = str(serviceID)
        # [cpu, ram, net, io] utilization averaged over all replicas
        self.avgUtil = None
        self.scalingDelay = scalingDelay

    def calcAverageUtil(self):
        """Recompute self.avgUtil as the per-metric mean over all replicas."""
        avgcpu, avgram, avgnet, avgio = 0, 0, 0, 0
        for server in self.servers:
            tcpu, tram, tnet, tio = server.getResUtil()
            avgcpu += tcpu
            avgram += tram
            avgnet += tnet
            avgio += tio

        self.avgUtil = np.divide([avgcpu, avgram, avgnet, avgio], len(self.servers))

    def getAvgUtil(self):
        """Return the freshly recomputed average utilization vector."""
        self.calcAverageUtil()
        return self.avgUtil

    def push(self, function, t):
        # assign the function to the first replica with capacity
        """Try to place *function* on a replica at time *t*.

        Returns (new end times, deferred function). The second slot is the
        function itself when no replica had capacity; its start is then moved
        to the earliest time any replica frees up.
        """
        ets = set()
        returns = None
        # while scaling down, exclude the last replica from placement so it
        # can drain ([:None] slices the full list otherwise)
        scaleingDownBy = None
        if self.scaleingDown and len(self.servers) > 1:
            scaleingDownBy = -1

        serverAssigned = False

        for sc, server in enumerate(self.servers[:scaleingDownBy]):
            if server.fits(function):
                et, x = server.push(function, t)
                if x is not None:
                    raise Exception("Service wanted to push function to Server without capacity")
                ets.update(et)
                serverAssigned = True
                break

        if not serverAssigned:
            # defer: retry when the least-busy replica frees up
            function.start = sorted(self.servers)[0].getFreeBy()
            returns = function

        self.calcAverageUtil()

        return list(ets), returns

    def pop(self, ts):
        """Collect finished functions from every replica at time *ts*.

        Returns (finished functions, refreshed end times, callbacks).
        """
        done = []
        ets = []
        callbacks = []
        for server in self.servers:
            d, et, callbackstTemp = server.pop(ts)
            done += d
            ets += et
            callbacks += callbackstTemp
        # self.scaleDown()

        self.calcAverageUtil()
        return done, ets, callbacks

    def scaleServiceUp(self):
        """Add one fresh replica cloned from the default server template."""
        self.servers.append(copy.deepcopy(self.defaultServer))

    def scaleDown(self):
        """Mark the service as scaling down and drop the last replica once
        it is idle. Returns True when a replica was removed."""
        self.scaleingDown = True
        try:
            if len(self.servers) > 1 and len(self.servers[-1].functions) == 0:
                self.servers.pop(-1)
                self.scaleingDown = False
                return True
        except Exception as e:
            print(e)
        return False
|
||||
|
|
@ -0,0 +1,297 @@
|
|||
import os
|
||||
import time
|
||||
from pprint import pprint
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
from Application.Server import *
|
||||
from Application.Service import *
|
||||
from Application.DistributionFactory import *
|
||||
from queue import Queue
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
|
||||
|
||||
class Simulation:
    """Discrete-event simulation engine: validates inputs, builds services
    and runs the event loop, streaming observations through a queue."""

    def __init__(self, schema, serviceSchema):
        # JSON schemas used to validate the profile / service definitions
        self.schema = schema
        self.serviceSchema = serviceSchema
        # all monitoring samples are pushed here; "done" is the end sentinel
        self.observationQueue = Queue()
        # per-category column dicts built from the queue (see transfromQueue)
        self.dfDicts = {}
||||
|
||||
    def main(self, profile, mapping, serviceDict, distRequest):
        """Validate inputs, generate the input event series and run the loop.

        Raises with the validator's message when either the profile or the
        service definition does not match its schema.
        """
        distributionFactory = DistributionFactory()

        # https://www.jsonschemavalidator.net/
        suc, e = distributionFactory.validateContentvsSchema(profile, self.schema)
        if not suc:
            raise Exception(e)
        suc, e = distributionFactory.validateContentvsSchema(serviceDict, self.serviceSchema)
        if not suc:
            raise Exception(e)

        # keyed-by-ID view of the profile, used by trackPush during the run
        self.profile = distributionFactory.getProfileAsDict(profile)
        print("Generating input distribution")
        eventSeries = distributionFactory.getScenarioDist(distRequest, profile, mapping)

        services = self.getServices(serviceDict)
        print("starting simulation")
        self.simLoop(eventSeries, services, mapping)
|
||||
|
||||
    def saveOberservations(self, dfs):
        """Drain the observation queue, keep the result on self and persist
        it as JSON.

        NOTE(review): the *dfs* parameter is unused, and the method name
        carries a typo — both are part of the public interface and kept.
        """
        observationDict = self.observationQueueToDict()
        self.observations = observationDict
        self.saveSimResult(observationDict, savePath="../SimResults.json")
        return observationDict
|
||||
|
||||
|
||||
def addEvent(self, eventDict, event, ts):
|
||||
if ts in eventDict:
|
||||
eventDict[ts].append(event)
|
||||
else:
|
||||
eventDict[ts] = [event]
|
||||
|
||||
    def simLoop(self, eventSeries, services, mapping):
        """Primary simulation loop.

        eventSeries: dict mapping timestamp -> list of Event objects; it is
        consumed and extended (callbacks, deferrals, recalculations) as the
        loop runs. services: serviceID -> Service. mapping: functionID ->
        serviceID.
        """
        done = 0
        t = 0
        t3 = time.time_ns()
        times = set()
        lastT = 0
        # per-interaction completion counters for the final "dones" report
        doneInteractions = dict()
        # runtimeID of each interaction's last function -> [start, scheduled]
        awaitedFunctions = dict()

        # runtimeIDs seen so far; NOTE(review): grows unbounded over the run
        ids = []

        # primary loop see thesis chapter "simulation engine"
        while len(eventSeries.keys()) > 0:

            # get next timestamp and events
            t = min(eventSeries.keys())
            events = eventSeries.pop(t)
            # time must be monotonic; a step backwards is an internal error
            if t < lastT:
                lastT = t
                #print(len(events), events[0])
                self.observationQueue.put(("sim_events", t, "error", len(events)))
                continue
            lastT = t

            # print progression
            if done % 1000 == 0:
                print("simulation time: " + str(t /1E9), end="\r")
            self.observationQueue.put(("sim_events", t, "total", len(events)))

            # secondary loop see thesis chapter "simulation engine"
            # iterate over all events; sorted + pop() handles the earliest
            # scheduled function first (Event.__lt__ is inverted for this)
            while events:
                events = sorted(events)
                event = events.pop()

                # monitor for simulation internal error: a runtimeID may only
                # reappear when the function was deferred (start moved past
                # its original schedule)
                if event.type is None:
                    if event.function.runtimeID in ids and event.function.start == event.function.scheduled :
                        raise Exception(str(event.function.runtimeID) + " Runtime ID was not unique")
                    else:
                        ids.append(event.function.runtimeID)

                # monitor for simulation internal error: event payload must
                # agree with the loop's current timestamp
                if t != event.t or (event.function is not None and t != event.function.start):
                    self.observationQueue.put(("sim_events", t, "t_not_start", 1))
                    print(event.function.functionID, "\n", t, "\n", event.t, "\n", event.function.start, "\n")
                    # force-align the function to the loop time
                    event.function.start = t

                # retrieve completed functions and callbacks
                d, ets, callbacks = services[event.serviceId].pop(t)
                done += len(d)

                # track completed functions for visualization
                for doneFunction in d:
                    self.trackPop(doneInteractions, awaitedFunctions, self.observationQueue, doneFunction, t)

                # create events for callbacks
                for callback in callbacks:
                    callback.setCallbackStart(t)

                    serviceID = mapping[callback.functionID]
                    self.addEvent(eventSeries, Event(callback.start, None, serviceID, callback), callback.start)

                if event.type == "recalc":
                    # track number of recalc events
                    self.observationQueue.put(("sim_events", t, "recalculations", 1))
                else:
                    # push the function carried by the event (type is None);
                    # a busy service hands the function back for deferral
                    ets, function = services[event.serviceId].push(event.function, t)

                    # function is not None if it was deferred because the
                    # service didn't have resources
                    if function is not None:
                        function = copy.deepcopy(function)
                        serviceID = mapping[function.functionID]
                        self.addEvent(eventSeries, Event(function.start, None, serviceID, function), function.start)
                        self.observationQueue.put(("sim_events", t, "rescheduled", 1))
                    else:
                        self.trackPush(awaitedFunctions, event.function)

                for et in ets:
                    if et not in eventSeries.keys():
                        # recalculation events occur when a function should be
                        # finished and a server should have free resources;
                        # the function may already be done and several recalc
                        # events can exist for a single function
                        self.addEvent(eventSeries, Event(et, "recalc", services[event.serviceId].serviceId, None), et)
                        times.add(et)
                # monitor services for visualization
                self.monitorServices(self.observationQueue, services, t)

        # track completed interactions
        for key, value in doneInteractions.items():
            self.observationQueue.put(("dones", t, key, value))

        #self.observationQueue.put(("dones", t, "total", 0))
        # sentinel consumed by observationQueueToDict
        self.observationQueue.put("done")
        print("time: ", t/ 1E9, "completed functions: ", done, " in ", (time.time_ns() - t3) / 1E9, "s")
|
||||
|
||||
def saveSimResult(self, observationDict, savePath="SimResults.json"):
|
||||
savePath = os.path.join(os.path.dirname(__file__), savePath)
|
||||
print(savePath)
|
||||
with open(savePath, 'w') as fp:
|
||||
json.dump(observationDict, fp)
|
||||
|
||||
def getServices(self, serviceDict):
|
||||
'''create service objects from service definition'''
|
||||
services = dict()
|
||||
|
||||
for service in serviceDict["services"]:
|
||||
tmpService = copy.deepcopy(service)
|
||||
tmpService["defaultServer"] = copy.deepcopy(Server(**service["defaultServer"]))
|
||||
services[service["serviceID"]] = Service(**tmpService)
|
||||
|
||||
return services
|
||||
|
||||
    def observationQueueToDict(self, chunks=None):
        """Drain the observation queue into self.dfDicts and return it.

        chunks: optional cap on the number of queue items to consume; without
        it the call blocks until the "done" sentinel arrives.
        """
        # column labels per observation category; values beyond the listed
        # columns get generic "value_<i>" names (see transfromQueue)
        columnNames = {
            "sim_events": ["active"],
            "dones": ["completed"],
            "response_time": ["delay", "response time"],
            "service_util": ["CPU", "RAM", "NET", "IO"],
        }
        i = 0
        while True:
            if chunks is not None and i >= chunks:
                break

            # blocks until an item is available
            content = self.observationQueue.get()
            if content == "done":
                break

            Simulation.transfromQueue(columnNames, content, self.dfDicts)

            i += 1

        return self.dfDicts
|
||||
|
||||
@staticmethod
|
||||
def transfromQueue(columnNames, content, dfDicts):
|
||||
'''transform Queue into Dicts'''
|
||||
key, t, identifier, value = content
|
||||
# this is bad
|
||||
# I am not sure why it happens
|
||||
if isinstance(value, np.ndarray):
|
||||
value = value
|
||||
else:
|
||||
if not isinstance(value, list):
|
||||
value = [value]
|
||||
else:
|
||||
value = value
|
||||
if key not in dfDicts:
|
||||
dfDicts[key] = {"t": [], "identifier": []}
|
||||
|
||||
dfDicts[key]["t"].append(t)
|
||||
dfDicts[key]["identifier"].append(identifier)
|
||||
|
||||
for i, val in enumerate(value):
|
||||
if i >= len(columnNames[key]):
|
||||
newKey = "value_" + str(i)
|
||||
else:
|
||||
newKey = columnNames[key][i]
|
||||
if newKey not in dfDicts[key]:
|
||||
dfDicts[key][newKey] = []
|
||||
dfDicts[key][newKey].append(val)
|
||||
|
||||
|
||||
    def plotResults(self, dfDicts):
        """Plot every observation category, one line chart per identifier.

        Counter-style categories (sim_events) are summed per 1s bucket;
        everything else is averaged. plt.show() blocks until closed.
        """
        dfs = dict()
        for value, df in dfDicts.items():
            dfs[value] = pd.DataFrame.from_dict(df)
            # timestamps are nanoseconds since simulation start
            dfs[value]["t"] = pd.to_datetime(dfs[value]["t"], unit='ns')
            dfs[value].set_index("t", inplace=True)

        for key, df in dfs.items():
            for i in df.identifier.unique():
                if key == "sim_events":
                    df.loc[df["identifier"] == i].resample(rule="1s").sum().interpolate().plot(kind='line', title=f"{key} {i}")
                else:
                    df.loc[df["identifier"] == i].resample(rule="1s").mean().interpolate().plot(kind='line', title=f"{key} {i}")

        plt.show()
|
||||
|
||||
|
||||
|
||||
    def monitorServices(self, observationQueue, services, t):
        """Record per-service and fleet-average [cpu, ram, net, io] utilization
        at time *t*. Note: getAvgUtil() is called twice per service, so each
        service's utilization is recomputed twice here."""
        avgUtil = [0, 0, 0, 0]
        for service in services.values():
            observationQueue.put(("service_util", t, "service " + service.serviceId, service.getAvgUtil()))
            avgUtil = np.add(avgUtil, service.getAvgUtil())

        # np.add returned an ndarray, so this is element-wise division
        avgUtil /= len(services)
        observationQueue.put(("service_util", t, "average", avgUtil))
|
||||
|
||||
    def trackPush(self, awaitedFunctions, function):
        '''if function was first in interaction put the last functionID in the awaited functions, used to track interaction delay and time'''
        interactions = self.profile["scenarios"][function.scenarioID]["interactions"]

        # get first function in interaction: the one that has callbacks
        # (i.e. is not itself a terminal "-1" function)
        firstF = None
        interactionFunctions = list(interactions[function.interactionID]["functions"].values())
        if len(interactionFunctions) == 1:
            firstF = interactionFunctions[0]["functionID"]

        for f in interactionFunctions:
            if f["callbacks"] != ["-1"]:
                firstF = f["functionID"]
                break

        if function.functionID == firstF:
            callbacks = function.callbacks
            # single-function interaction: await the function itself
            if callbacks == ["-1"]:
                awaitedFunctions[function.runtimeID] = [function.start, function.scheduled]
                return
            # walk the callback chain to its last element; here callbacks are
            # Function objects (runtime), not the profile's ID strings
            cb2 = None
            while True:
                if callbacks == ["-1"]:
                    break
                # NOTE(review): if a callbacks list ever contains only "-1"
                # entries without being exactly ["-1"], this loop never
                # terminates — confirm the input invariant.
                for callback in callbacks:
                    if callback != "-1":
                        cb2 = callback
                        callbacks = callback.callbacks
                        continue

            # the interaction completes when its last callback finishes
            awaitedRunTimeID = cb2.runtimeID
            awaitedFunctions[awaitedRunTimeID] = [function.start, function.scheduled]
|
||||
|
||||
def trackPop(self, doneInteractions, awaitedFunctions, observationQueue, function, t):
|
||||
'''remove function from awaited functions if interaction is completed'''
|
||||
key = f"scenario {str(function.scenarioID)} interaction {str(function.interactionID)}"
|
||||
|
||||
if function.runtimeID in awaitedFunctions:
|
||||
if key not in doneInteractions:
|
||||
doneInteractions[key] = 0
|
||||
doneInteractions[key] += 1
|
||||
|
||||
fStart, fScheduled = awaitedFunctions.pop(function.runtimeID)
|
||||
tmp = [(fStart - fScheduled) / 1E9, (function.end - fScheduled)/ 1E9]
|
||||
observationQueue.put(("response_time", t, key, tmp))
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
|
||||
|
||||
# NOTE(review): bucketSize, average and defaultDistVolume are not referenced
# in the code visible here — presumably resampling windows and a default
# request volume for the input distribution; confirm against their callers.
bucketSize = "10s"
average = "1s"
defaultDistVolume = 10
# polling period of the Dash Interval components (seconds)
refreshTime = 5 #seconds
|
||||
|
|
@ -0,0 +1,115 @@
|
|||
{
|
||||
"$schema": "https://json-schema.org/draft/2020-12/schema",
|
||||
"$id": "/Matz/Patrice/Master-Thesis/Profile.schema.json",
|
||||
"title": "Profile",
|
||||
"description": "An application's measured profile in different scenarios",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {
|
||||
"description": "user defined name, optional",
|
||||
"type": "string"
|
||||
},
|
||||
"scenarios": {
|
||||
"description": "",
|
||||
"type": "array",
|
||||
"minItems": 1,
|
||||
"uniqueItems": true,
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {
|
||||
"description": "user defined name, optional",
|
||||
"type": "string"
|
||||
},
|
||||
"scenarioID": {
|
||||
"description": "The unique identifier for a scenario",
|
||||
"type": "integer"
|
||||
},
|
||||
"interactions": {
|
||||
"description": "",
|
||||
"type": "array",
|
||||
"minItems": 1,
|
||||
"uniqueItems": true,
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {
|
||||
"description": "user defined name, optional",
|
||||
"type": "string"
|
||||
},
|
||||
"interactionID": {
|
||||
"description": "The unique identifier for a interaction",
|
||||
"type": "string"
|
||||
},
|
||||
"delay": {
|
||||
"description": "Time between two interactions in seconds, caused by human interaction",
|
||||
"type": "integer"
|
||||
},
|
||||
"functions": {
|
||||
"description": "array of functions with measured ressource utilization",
|
||||
"type": "array",
|
||||
"minItems": 1,
|
||||
"uniqueItems": true,
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"functionID": {
|
||||
"description": "The unique identifier for a function",
|
||||
"type": "string"
|
||||
},
|
||||
"cpu": {
|
||||
"description": "cpu utilization in percent",
|
||||
"type": "number"
|
||||
},
|
||||
"cpu_time": {
|
||||
"description": "amount of time function takes to execute in seconds",
|
||||
"type": "number"
|
||||
},
|
||||
"ram": {
|
||||
"description": "amount of RAM used while executing the function",
|
||||
"type": "number"
|
||||
},
|
||||
"io": {
|
||||
"description": "amount of data read from Disk",
|
||||
"type": "number"
|
||||
},
|
||||
"net": {
|
||||
"description": "amount of data received over the network",
|
||||
"type": "number"
|
||||
},
|
||||
"delay": {
|
||||
"description": "delay in nano seconds",
|
||||
"type": "integer"
|
||||
},
|
||||
"callbacks": {
|
||||
"description": "The ID of the Callback Function. -1 means no callback",
|
||||
"type": "array",
|
||||
"minItems": 1,
|
||||
"uniqueItems": true,
|
||||
"items":{
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"functionID"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"interactionID",
|
||||
"functions",
|
||||
"delay"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"scenarioID",
|
||||
"interactions"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,100 @@
|
|||
{
|
||||
"$schema": "https://json-schema.org/draft/2020-12/schema",
|
||||
"$id": "/Matz/Patrice/Master-Thesis/Service.schema.json",
|
||||
"title": "Profile",
|
||||
"description": "Service definitions for an application's measured profile scenarios",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"id":{
|
||||
"description": "name of the schema used to validate, optional",
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "user defined name, optional",
|
||||
"type": "string"
|
||||
},
|
||||
"services": {
|
||||
"description": "",
|
||||
"type": "array",
|
||||
"minItems": 1,
|
||||
"uniqueItems": true,
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"serviceID": {
|
||||
"description": "The unique identifier for a service",
|
||||
"type": "string"
|
||||
},
|
||||
"scales": {
|
||||
"description": "Does this service scale",
|
||||
"type": "boolean"
|
||||
},
|
||||
"scaleUpAt": {
|
||||
"description": "average load to send the scale up command",
|
||||
"type": "number"
|
||||
},
|
||||
"scaleTime": {
|
||||
"description": "amount of time in seconds needed for the deployment of another replica",
|
||||
"type": "number"
|
||||
},
|
||||
"scaleDownAt": {
|
||||
"description": "average load to scale down at",
|
||||
"type": "number"
|
||||
},
|
||||
"scale": {
|
||||
"description": "initial scale of a service",
|
||||
"type": "number"
|
||||
},
|
||||
"scalingDelay":{
|
||||
"description": "how long a service need to scale up in seconds",
|
||||
"type": "number"
|
||||
},
|
||||
"scalingMetric": {
|
||||
"description": "which metric usage to go by CPU, RAM, IO, NET",
|
||||
"type" : "object",
|
||||
"required" : ["metric"],
|
||||
"properties" : {
|
||||
"metric" : {
|
||||
"type" : "string",
|
||||
"enum" : ["CPU", "RAM", "IO", "NET"]
|
||||
}
|
||||
}
|
||||
},
|
||||
"defaultServer": {
|
||||
"description": "The unique identifier for a scenario",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"maxCPU": {
|
||||
"description": "which metric usage to go by CPU, RAM, IO, NET",
|
||||
"type": "integer"
|
||||
},
|
||||
"maxRAM": {
|
||||
"description": "which metric usage to go by CPU, RAM, IO, NET",
|
||||
"type": "integer"
|
||||
},
|
||||
"maxIO": {
|
||||
"description": "which metric usage to go by CPU, RAM, IO, NET",
|
||||
"type": "integer"
|
||||
},
|
||||
"maxNET": {
|
||||
"description": "which metric usage to go by CPU, RAM, IO, NET",
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"maxCPU",
|
||||
"maxRAM",
|
||||
"maxIO",
|
||||
"maxNET"
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"defaultServer",
|
||||
"serviceID",
|
||||
"scales"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,597 @@
|
|||
|
||||
import pandas as pd
|
||||
|
||||
import dash
|
||||
import dash_core_components as dcc
|
||||
import dash_html_components as html
|
||||
import dash_bootstrap_components as dbc
|
||||
|
||||
from Application.Simulation import Simulation
|
||||
import os
|
||||
|
||||
import numpy as np
|
||||
from dash.dependencies import Input, Output, State
|
||||
from Application.DistributionFactory import *
|
||||
import base64
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
import threading
|
||||
|
||||
import plotly.graph_objects as go
|
||||
from plotly.subplots import make_subplots
|
||||
import Application.config as config
|
||||
|
||||
# plotly backend so DataFrame.plot returns plotly figures usable in Dash
pd.options.plotting.backend = "plotly"

# Dash app with the SLATE bootstrap theme plus Font Awesome icons
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.SLATE, {
    'href': 'https://use.fontawesome.com/releases/v5.8.1/css/all.css',
    'rel': 'stylesheet',
    'integrity': 'sha384-50oBUHEmvpQ+1lW4y57PTFmhCaXp0ML5d60M1M7uH2+nqUivzIebhndOJK28anvf',
    'crossorigin': 'anonymous'
}])
# deeply nested layout/figure structures can exceed the default limit
sys.setrecursionlimit(10 ** 6)
# per-category DataFrames rebuilt from the last simulation run
dfs = dict()
# presumably the figure components currently shown — TODO confirm usage
graphs = []

# JSON schemas used to validate uploaded profile / service definitions
schemaPath = os.path.join(os.path.dirname(__file__), "./Application/files/profile_schema.json")
serviceSchemaPath = os.path.join(os.path.dirname(__file__), "./Application/files/service_schema.json")

schema = DistributionFactory.getContentFromFile(schemaPath)
serviceSchema = DistributionFactory.getContentFromFile(serviceSchemaPath)

# globals filled by the upload callbacks before a run can start
profile = None
services = None
mapping = {}

seed = None

# presumably caches the last dropdown selection per graph — TODO confirm
lastDropDown = dict()

expectedFig = None
distRequest = None # [(np.random.triangular(0, 10E9, 60E9, 1), 1)]
|
||||
@app.callback(
    Output("errors", "children"),
    Input('input_go', 'n_clicks'))
def startSimulation(value):
    """Dash callback: run the simulation once the play button is clicked.

    Returns the children for the "errors" div: a success message, the
    exception text on failure, or an empty div before the first click.
    """
    global profile
    global services
    global mapping
    global seed
    global distRequest

    # fixed seed so repeated runs with identical inputs are reproducible
    np.random.seed(1)

    # Fix: n_clicks may be None before the first click; "None > 0" raises
    if value and value > 0:
        if profile is not None and services is not None and mapping is not None:
            try:
                sim = Simulation(schema, serviceSchema)

                mapping2 = DistributionFactory.genMapping(profile, mapping)
                sim.main(profile, mapping2, services, distRequest)
                tmp = readFromQueue(sim)
                saveSimResult(tmp)

                createLayout()

                return html.Div(["Simulation complete"])
            except Exception as e:
                traceback.print_exc()
                print(e)
                # Fix: a trailing comma here used to return a 1-tuple, which
                # does not match the single Output declared above.
                return html.Div(str(e))
        else:
            return html.Div("One or more inputs missing!")
    return html.Div([""])
|
||||
|
||||
|
||||
def saveSimResult(observationDict, savePath="SimResults.json"):
    """Write the collected observations as JSON.

    savePath is resolved relative to this module's directory.
    """
    # Fix: json is used here but absent from this module's visible import
    # block; a local import makes the function self-contained.
    import json

    savePath = os.path.join(os.path.dirname(__file__), savePath)
    print(savePath)
    with open(savePath, 'w') as fp:
        json.dump(observationDict, fp)
|
||||
|
||||
|
||||
def readFromQueue(sim):
    """Drain *sim*'s observation queue and rebuild the global ``dfs`` dict of
    time-indexed DataFrames (one per observation category); returns the raw
    per-category column dicts."""
    global dfs
    dfs = dict()

    # TODO: auto update the dfs dict properly

    tmp = sim.observationQueueToDict()

    for value, df in tmp.items():
        x = pd.DataFrame.from_dict(df)
        # timestamps are nanoseconds since simulation start
        x["t"] = pd.to_datetime(x["t"], unit='ns')
        x.set_index("t", inplace=True)

        if value in dfs:
            # NOTE(review): dfs was reset above and tmp's keys are unique, so
            # this branch looks unreachable; DataFrame.append is also
            # deprecated in modern pandas — confirm before relying on it.
            dfs[value].append(x)
        else:
            dfs[value] = x

    return tmp
|
||||
|
||||
def _uploadCell(componentId, label, style):
    """One bootstrap "col-2" column wrapping a single-file dcc.Upload widget."""
    return html.Div([
        dcc.Upload(
            id=componentId,
            children=html.Div([label]),
            style=style,
            multiple=False
        )
    ], className="col-2")


def createLayout():
    """(Re)build the global Dash app layout.

    Top row: two refresh intervals, four upload widgets, the seed input,
    the "go" button and the error output; below it the plot grid.
    """
    uploadStyle = {
        'width': '100%',
        'height': '60px',
        'lineHeight': '60px',
        'borderWidth': '1px',
        'borderStyle': 'dashed',
        'borderRadius': '5px',
        'textAlign': 'center',
        'margin': '10px'
    }
    global app
    app.layout = html.Div(
        [
            html.Div([
                dcc.Interval(
                    id='interval-component',
                    interval=int(config.refreshTime) * 1000,  # in milliseconds
                    n_intervals=0
                ),
                dcc.Interval(
                    id='interval2',
                    interval=int(config.refreshTime/2) * 1000,  # in milliseconds
                    n_intervals=0
                ),
                html.Div([
                    # The four upload widgets differ only in id and label.
                    _uploadCell('input_profile', 'Select Application Profile', uploadStyle),
                    _uploadCell('input_services', 'Select Service Definition', uploadStyle),
                    _uploadCell('input_mapping', 'Select Service Mapping', uploadStyle),
                    _uploadCell('input_dist', 'Select Distribution Request', uploadStyle),
                    html.Div([
                        dcc.Input(id="input_seed", placeholder='seed', style=uploadStyle)
                    ], className="col-1"),
                    html.Div([
                        html.Button(children=html.I(className="far fa-play-circle fa-3x",
                                                    style={"display": "inline-block",
                                                           "margin": "-8px auto auto -8px", "padding": "0"}),
                                    id='input_go',
                                    className="btn btn-outline-success",
                                    style={'lineHeight': '60px', 'margin': '10px',
                                           'width': '60px', 'height': '60px', "display": "inline-block",
                                           },
                                    n_clicks=0
                                    )
                    ], className="col-1"),
                    html.Div([

                    ], className="col-2", id="errors")
                ], className="row g-2 pt-3")
            ]),
            html.Div(getPlots(), className="container-fluid px-2 overflow-hidden", id="plots"),
        ]
        , style={"overflow": "hidden"})
|
||||
|
||||
|
||||
|
||||
def _plotColumn(keyword, divId=None):
    """Half-width column holding a dropdown container and a plot container.

    ``divId`` overrides the outer div id; kept only for the historical
    "done"/"dones" mismatch — callbacks target just the inner
    "<keyword>-dropdown" / "<keyword>-plot" ids.
    """
    return html.Div([
        html.Div([
            getDropdown(keyword)
        ],
            id=keyword + "-dropdown"
        ),
        html.Div([
        ],
            id=keyword + "-plot"
        )
    ], id=divId if divId is not None else keyword, className="col-6")


def getPlots():
    """Build the 2x2 grid of plot columns shown below the input row."""
    plots = [
        html.Div([
            html.Div([
                # NOTE: outer id "done" (not "dones") is preserved from the
                # original layout.
                _plotColumn("dones", divId="done"),
                _plotColumn("service_util")
            ], className="row g-2 pt-3"),
            html.Div([
                _plotColumn("response_time"),
                _plotColumn("sim_events")
            ], className="row g-2 pt-3")
        ])
    ]

    return plots
|
||||
|
||||
|
||||
@app.callback(Output('input_profile', 'children'),
              Input('input_profile', 'contents'))
def setProfileInput(content):
    """Validate an uploaded application profile against the schema and
    store it in the global ``profile``; returns the widget label."""
    global profile
    try:
        # No fresh upload but a profile from earlier: keep the check mark.
        if content is None and profile is not None:
            return html.Div(['Application Profile ✔'])

        if content is None:
            return html.Div(['Select Application Profile'])

        _, encoded = content.split(',')
        candidate = json.loads(base64.b64decode(encoded))
        ok, err = DistributionFactory.validateContentvsSchema(candidate, schema)
        if not ok:
            return html.Div([str(err)])

        profile = candidate
        return html.Div(['Application Profile ✔'])
    except Exception as e:
        print(e)
        return html.Div(['Application Profile ❌'])
|
||||
|
||||
|
||||
@app.callback(Output('input_services', 'children'),
              Input('input_services', 'contents'))
def setServicesInput(content):
    """Validate an uploaded service definition against the service schema
    and store a deep copy in the global ``services``."""
    try:
        global services
        # No fresh upload but services from earlier: keep the check mark.
        if content is None and services is not None:
            return html.Div(['Service Definition ✔'])

        if content is None:
            return html.Div(['Select Service Definition'])

        _, encoded = content.split(',')
        candidate = json.loads(base64.b64decode(encoded))
        ok, err = DistributionFactory.validateContentvsSchema(candidate, serviceSchema)
        if not ok:
            return html.Div([str(err)])

        services = copy.deepcopy(candidate)
        return html.Div(['Service Definition ✔'])
    except Exception as e:
        print(e)
        return html.Div(['Service Definition ❌'])
|
||||
|
||||
|
||||
@app.callback(Output('input_mapping', 'children'),
              Input('input_mapping', 'contents'))
def setMappingInput(content):
    """Store an uploaded service mapping in the global ``mapping``.

    Requires an application profile to be uploaded first; no schema
    validation is performed here.
    """
    try:
        global mapping
        global profile
        # No fresh upload but a mapping from earlier: keep the check mark.
        if content is None and mapping is not None:
            return html.Div(['Service Mapping ✔'])

        if content is None:
            return html.Div(['Select Service Mapping'])

        _, encoded = content.split(',')
        candidate = json.loads(base64.b64decode(encoded))

        # The mapping only makes sense relative to a loaded profile.
        if profile is None:
            return html.Div(['Select Application Profile first'])

        mapping = copy.deepcopy(candidate)
        return html.Div(['Service Mapping ✔'])

    except Exception as e:
        print(e)
        return html.Div(['Service Mapping ❌'])
|
||||
|
||||
|
||||
@app.callback(Output('input_dist', 'children'),
              Input('input_dist', 'contents'))
def setDistInput(content):
    """Parse an uploaded distribution request into sampled arrival times.

    Fills the global ``distRequest`` with (samples, scenarioID) tuples.
    """
    try:
        global distRequest

        # No fresh upload but a request from earlier: keep the check mark.
        if content is None and distRequest is not None:
            return html.Div(['Distributions ✔'])

        if content is None:
            return html.Div(['Select Distribution Request'])

        distRequest = []
        _, encoded = content.split(',')
        requests = json.loads(base64.b64decode(encoded))

        # TODO: validate that requested scenarioids are present in profile
        for req in requests:
            if req["kind"] == "triangle":
                # Bounds scaled by 1E9 (matching the nanosecond timestamps
                # used elsewhere); draws `volume` samples from a triangular
                # distribution over [start, end] peaking at `highpoint`.
                samples = np.random.triangular(req["start"] * 1E9,
                                               req["highpoint"] * 1E9,
                                               req["end"] * 1E9,
                                               req["volume"])
                distRequest.append((samples, req["scenarioID"]))

        return html.Div(['Distributions ✔'])

    except Exception as e:
        print(e)
        return html.Div(['Distributions ❌'])
|
||||
|
||||
@app.callback(Output('input_seed', 'value'),
              Input('input_seed', 'value'))
def setSeed(value):
    """Remember the RNG seed typed by the user and echo the value back."""
    global seed
    if value is not None:
        seed = int(value)
    return value
|
||||
|
||||
def getDropdown(keyword):
    """Build the identifier-selection dropdown for one plot keyword.

    Falls back to an empty dropdown while no data for ``keyword`` exists
    in the global ``dfs``.
    """
    if keyword not in dfs:
        return dcc.Dropdown(
            id=keyword + '-dropdown-dd',
            options=[],
            value=[]
        )

    identifiers = sorted(dfs[keyword].identifier.unique())
    options = [{"label": ident, "value": ident} for ident in identifiers]

    return dcc.Dropdown(
        id=keyword + '-dropdown-dd',
        options=options,
        value=dfs[keyword].identifier.unique()[0]
    )
|
||||
|
||||
@app.callback(
    Output(component_id='dones-dropdown', component_property='children'),
    Output(component_id='response_time-dropdown', component_property='children'),
    Output(component_id='service_util-dropdown', component_property='children'),
    Output(component_id='sim_events-dropdown', component_property='children'),
    Input('interval2', 'n_intervals'))
def getDropdown2(n_intervals):
    """Periodically refresh all four plot dropdowns from the current dfs.

    Note the populated branch iterates sorted(dfs.keys()), which happens to
    match the alphabetical Output order above (dones, response_time,
    service_util, sim_events).
    """
    returns = []
    global lastDropDown
    titles = ["sim_events", "dones", "response_time", "service_util"]

    if not dfs:
        # No data yet: emit empty dropdowns so the component ids stay valid.
        for i in titles:
            returns.append(dcc.Dropdown(
                id=str(i) + '-dropdown-dd',
                options=[],
                value=[])
            )
        return returns

    else:
        for keyword in sorted(list(dfs.keys())):
            options = []
            for i in sorted(dfs[keyword].identifier.unique()):
                options.append({"label": i, "value": i})

            # BUG FIX: lastDropDown starts empty, so the original
            # lastDropDown[keyword] raised KeyError on the first refresh
            # after data arrived; fall back to the first identifier.
            stored = lastDropDown.get(keyword, [])
            value = stored if stored != [] else dfs[keyword].identifier.unique()[0]

            returns.append(dcc.Dropdown(
                id=keyword + '-dropdown-dd',
                options=options,
                value=value)
            )

        return returns
|
||||
|
||||
@app.callback(
    Output(component_id='dones-plot', component_property='children'),
    Input(component_id='dones-dropdown-dd', component_property='value'),
    Input('interval-component', 'n_intervals')
)
def update_dones_plot(input_value, n_intervals):
    """Refresh the "dones" plot for the selected identifier.

    Renamed from ``update_output_div``: four callbacks redefining the same
    module-level name shadow each other (flake8 F811); Dash registers via
    the decorator, so the rename is safe.
    """
    global lastDropDown
    if input_value is not None:
        lastDropDown["dones"] = input_value
    else:
        # BUG FIX: .get avoids a KeyError before any selection was stored;
        # getPlot falls back to the first identifier when given None.
        input_value = lastDropDown.get("dones")

    return getPlot("dones", input_value)
|
||||
|
||||
|
||||
@app.callback(
    Output(component_id='response_time-plot', component_property='children'),
    Input(component_id='response_time-dropdown-dd', component_property='value'),
    Input('interval-component', 'n_intervals')
)
def update_response_time_plot(input_value, n_intervals):
    """Refresh the "response_time" plot for the selected identifier.

    Renamed from ``update_output_div`` to stop the four plot callbacks
    shadowing each other (flake8 F811).
    """
    global lastDropDown
    if input_value is not None:
        lastDropDown["response_time"] = input_value
    else:
        # BUG FIX: .get avoids a KeyError before any selection was stored.
        input_value = lastDropDown.get("response_time")
    return getPlot("response_time", input_value)
|
||||
|
||||
|
||||
@app.callback(
    Output(component_id='service_util-plot', component_property='children'),
    Input(component_id='service_util-dropdown-dd', component_property='value'),
    Input('interval-component', 'n_intervals')
)
def update_service_util_plot(input_value, n_intervals):
    """Refresh the "service_util" plot for the selected identifier.

    Renamed from ``update_output_div`` to stop the four plot callbacks
    shadowing each other (flake8 F811).
    """
    global lastDropDown
    if input_value is not None:
        lastDropDown["service_util"] = input_value
    else:
        # BUG FIX: .get avoids a KeyError before any selection was stored.
        input_value = lastDropDown.get("service_util")
    return getPlot("service_util", input_value)
|
||||
|
||||
|
||||
@app.callback(
    Output(component_id='sim_events-plot', component_property='children'),
    Input(component_id='sim_events-dropdown-dd', component_property='value'),
    Input('interval-component', 'n_intervals')
)
def update_sim_events_plot(input_value, n_intervals):
    """Refresh the "sim_events" plot for the selected identifier.

    Renamed from ``update_output_div`` to stop the four plot callbacks
    shadowing each other (flake8 F811).
    """
    global lastDropDown
    if input_value is not None:
        lastDropDown["sim_events"] = input_value
    else:
        # BUG FIX: .get avoids a KeyError before any selection was stored.
        input_value = lastDropDown.get("sim_events")
    return getPlot("sim_events", input_value)
|
||||
|
||||
|
||||
def loadSimResult(savePath="SimResults.json"):
    """Load a previously saved simulation result into the global ``dfs``.

    Returns ``dfs`` (keyword -> DataFrame indexed by nanosecond "t"
    timestamps); returns it unchanged when the file does not exist.
    """
    global dfs
    # BUG FIX: `dfs` is only ever created by readFromQueue(); when this
    # function runs first (as it does under __main__), referencing the
    # uninitialized global raised NameError. Initialize it defensively.
    try:
        dfs
    except NameError:
        dfs = dict()

    savePath = os.path.join(os.path.dirname(__file__), savePath)
    if not os.path.isfile(savePath):
        print(savePath)
        return dfs

    with open(savePath, "r") as fp:
        dfDicts = json.load(fp)

    for value, df in dfDicts.items():
        dfs[value] = pd.DataFrame.from_dict(df)
        dfs[value]["t"] = pd.to_datetime(dfs[value]["t"], unit='ns')
        dfs[value].set_index("t", inplace=True)

    return dfs
|
||||
|
||||
def getHistPlot(fig, title, changes):
    """Combine the completed-interactions trace with the requested load.

    fig     -- a plotly trace of the measured/completed curve
    title   -- plot title
    changes -- time-indexed DataFrame with a "completed" column
    Returns a plotly figure with up to four traces: the cumulative
    "expected" curve, the bucketed "input" bars, the given trace, and
    the bucketed "output" bars.
    """
    global distRequest
    fig2 = make_subplots()

    hist = pd.DataFrame()

    if distRequest is not None:
        # Flatten all sampled arrival times (ns values) into sorted timestamps.
        dists = sorted([pd.to_datetime(y/1E9, unit="s") for x,_ in distRequest for y in x])
        # i-th arrival => i requests seen so far: cumulative expected curve.
        integratedCurve = [i for i in range(len(dists))]

        expectedFig =go.Scatter(
            x=dists,
            y=integratedCurve,
            mode="lines",
            line=go.scatter.Line(color="black"),
            name="expected"
        )
        fig2.add_trace(expectedFig)
        # Resample the cumulative curve into fixed buckets, then diff to get
        # per-bucket arrival counts ("input" bars).
        hist["t"] = dists
        hist["t2"] = integratedCurve
        hist.set_index("t", inplace=True)
        hist = hist.resample(rule=config.bucketSize).last().interpolate()
        fig2.add_trace(go.Bar(x=hist.index, y=hist["t2"].diff(), name="input"))

    fig2.add_trace(fig)

    #hist = go.Histogram(x=dists, name="input")

    # Same bucket/diff treatment for the completed counts ("output" bars).
    changes = changes.resample(rule=config.bucketSize).last().interpolate()

    fig2.add_trace(go.Bar(x=changes.index, y=changes["completed"].diff(), name="output"))

    fig2.update_layout({"title": title})

    return fig2
|
||||
|
||||
|
||||
def getPlot(keyword, identifier=None):
    """Build the dcc.Graph for one plot keyword and identifier.

    keyword    -- one of "sim_events", "dones", "response_time", "service_util"
    identifier -- which identifier to plot; defaults to the first one present
    Returns a plain html.P placeholder while no data for keyword exists.

    NOTE(review): df.plot(...) results are handed straight to dcc.Graph,
    which presumably relies on the pandas plotly plotting backend being
    configured elsewhere in the application — confirm.
    """
    global dfs
    if keyword not in dfs:
        return html.P(keyword + ' graph')

    # Human-readable plot titles per keyword.
    titles = {
        "sim_events": "events in queue",
        "dones": "completed interactions",
        "response_time": "response time",
        "service_util": "service utilization",
    }

    df = dfs[keyword]

    if identifier is None:
        identifier = df.identifier.unique()[0]

    if keyword == "sim_events":
        # Event counts: sum per resample bucket, rendered as bars.
        fig = df.loc[df["identifier"] == identifier].resample(rule=config.average).sum().interpolate().plot(kind='bar', title=f"{titles[keyword]}: {identifier}")

        return dcc.Graph(id=f"{keyword} {identifier}",
                         figure=fig,
                         style={"height": "24rem"}, animate=True)
    else:

        if keyword == "dones":
            # Cumulative completions: last value per bucket, pad-interpolated,
            # then overlaid with the expected/input/output traces.
            data = df.loc[df["identifier"] == identifier].resample(rule = config.average).last().interpolate("pad")
            fig = go.Scatter(
                x=data.index,
                y=data["completed"],
                mode="lines",
                line=go.scatter.Line(color="blue"),
                name="completed"
            )

            fig = getHistPlot(fig, f"{titles[keyword]}: {identifier}", data)

        else:
            # response_time / service_util: mean per bucket as a line plot.
            data = df.loc[df["identifier"] == identifier].resample(rule=config.average).mean().interpolate("pad")
            fig = data.plot(kind='line', title=f"{titles[keyword]}: {identifier}")

        return dcc.Graph(id=f"{keyword} {identifier}",
                         figure=fig,
                         style={"height": "24rem"}, animate=True)
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Load any previously saved results so plots are populated on startup.
    dfs = loadSimResult()

    createLayout()
    # NOTE(review): debug=True enables Dash's reloader/debugger — disable
    # before deploying.
    app.run_server(debug=True)
|
||||
|
|
@ -0,0 +1 @@
|
|||
python main.py -p ../demo_files/profile.json -s ../demo_files/services.json -m ../demo_files/mapping.json -d ../demo_files/distribution.json
|
||||
|
After Width: | Height: | Size: 20 KiB |
|
After Width: | Height: | Size: 59 KiB |
|
After Width: | Height: | Size: 188 KiB |
|
After Width: | Height: | Size: 70 KiB |
|
After Width: | Height: | Size: 7.5 KiB |