Daily CheckIn

marcodn 2024-08-16 22:13:36 +02:00
parent 1bcc6e1c15
commit a1af60e945
9 changed files with 315 additions and 13 deletions

View file

@@ -77,6 +77,10 @@ print_process() {
{hr=$4/1024; hp=$5/1024; printf("%6i %5.1f %4.1f %13.2f %13.2f %s\n", $1, $2, $3,hr, hp, $6) }'
}
print_docker_process() {
sudo docker stats --no-stream
}
# the main domain
#hostname="https://briefedition.wedekind.h-da.de"
hostname="http://localhost:8080/WedekindJSF-1.0.0"
@@ -84,17 +88,19 @@ hostname="http://localhost:8080/WedekindJSF-1.0.0"
# the array of URLs
url_arr=(
"$hostname/index.xhtml"
#"$hostname/view/document/list.xhtml"
"$hostname/view/document/listsearch.xhtml"
"$hostname/view/document/list.xhtml"
#"$hostname/view/document/listsearch.xhtml"
#"$hostname/view/correspondent/list.xhtml"
#"$hostname/view/person/list.xhtml"
)
print_process
#print_process
print_docker_process
echo ""
# Execute all the URLs for 10 rounds
main 10 ${url_arr[@]}
echo ""
print_process
##print_process
print_docker_process
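The main function invoked above is not shown in this diff. Purely as an illustration, a loop over the URL array could look like the following sketch; the function body, the curl flags, and the output format are assumptions, not the project's actual implementation.

# hypothetical sketch of a "main" loop as invoked above -- not the original function
main() {
    local rounds=$1; shift
    local urls=("$@")
    for ((i = 1; i <= rounds; i++)); do
        for url in "${urls[@]}"; do
            # fetch the page, discard the body, and print the total request time
            curl -s -o /dev/null -w "round $i $url %{time_total}s\n" "$url"
        done
    done
}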

View file

@@ -0,0 +1,14 @@
% !TeX root = ../../thesis.tex
%********************************************************************
% Appendix
%*******************************************************
% If problems with the headers: get headings in appendix etc. right
%\markboth{\spacedlowsmallcaps{Appendix}}{\spacedlowsmallcaps{Appendix}}
\chapter{Docker Configuration}
\label{ap:docker_config}
Since the performance of the computer is too high to obtain comparable numbers for the database queries,
the Postgres server and the Payara server were packed into Docker containers and their resources were limited.
\includecode[yaml]{chapters/thesis/appendix03_compose.yaml}{lst:docker_compose}{Docker-Compose}
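As a sketch of how this limited setup might be brought up and the caps verified, the following commands could be used; the compose file name and the docker stats format string are assumptions, not part of the thesis setup.

# start both resource-limited services in the background (assumes the compose file is saved as docker-compose.yaml)
sudo docker compose -f docker-compose.yaml up -d
# one-shot snapshot: the memory column should report limits of roughly 500MiB for postgredb and 2GiB for payara
sudo docker stats --no-stream --format "table {{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}"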

View file

@@ -0,0 +1,39 @@
services:
  db:
    image: postgres
    container_name: postgredb
    restart: always
    hostname: postgres
    ports:
      - "5432:5432"
    environment:
      POSTGRES_USER: <username>
      POSTGRES_PASSWORD: <password>
      POSTGRES_DB: <dbname>
    volumes:
      - ./data/:/var/lib/postgresql/data
    deploy:
      resources:
        limits:
          cpus: '0.30'
          memory: 500m
  ws:
    image: payara/server-full
    container_name: payara
    ports:
      - "4848:4848"
      - "8080:8080"
      - "8181:8181"
      - "9009:9009"
    volumes:
      - ./payara/postgresql-42.7.3.jar:/opt/payara/appserver/glassfish/domains/domain1/lib/postgresql-42.7.3.jar
      - ./payara/logs/:/opt/payara/appserver/glassfish/domains/domain1/logs/
      - ./payara/config/:/opt/payara/appserver/glassfish/domains/domain1/config/
      - ./payara/applications/:/opt/payara/appserver/glassfish/domains/domain1/applications/
    deploy:
      resources:
        limits:
          cpus: '2.00'
          memory: 2g
    links:
      - db

View file

@@ -0,0 +1,23 @@
% !TeX root = ../../thesis.tex
%********************************************************************
% Appendix
%*******************************************************
% If problems with the headers: get headings in appendix etc. right
%\markboth{\spacedlowsmallcaps{Appendix}}{\spacedlowsmallcaps{Appendix}}
\chapter{Calling Script}
\label{ap:calling_script}
To simplify the measurements somewhat, a script was created that performs the calls in one batch. To run the
measurements, the commands are invoked as shown in \ref{lst:calling_script_exec}.
\includecode[bash]{chapters/thesis/appendix04_calling_script.sh}{lst:calling_script}{Calling Script}
\begin{lstlisting}[language=Bash,caption={Invocations of the helper script},label=lst:calling_script_exec]
callscript.sh measinit
callscript.sh measres
callscript.sh meascall
callscript.sh meascall
callscript.sh meascall
callscript.sh meascall
\end{lstlisting}

View file

@@ -0,0 +1,160 @@
#!/bin/bash
payara_path="/opt/docker/payara"
postgres_path="/var/lib/postgres"
postgres_path="/opt/docker" # overrides the line above; all paths below use /opt/docker
postgres_data_path="$postgres_path/data"
postgres_log_path=$postgres_data_path/log
payara_config="$payara_path/config/domain.xml"
domain_log="$payara_path/logs/server.log"
script_path="/opt/docker/timing.sh"
pgbadger_out="/opt/docker/pgreport"
report_postfix=""
docker_name=dcpgbatch
COMPOSE_FILE=/opt/docker/docker-compose.yaml
gflog() {
    echo "follow the log: $domain_log"
    tail -f $domain_log
}
gfconf() {
    nvim "$payara_config"
}
gfscript() {
    outPath=$pgbadger_out$report_postfix/bash.out
    touch "$outPath"
    echo "============================================================" >>"$outPath"
    bash $script_path | tee -a "$outPath"
}
pginit() {
    sudo chmod g+x $postgres_path
    sudo chmod g+x $postgres_data_path
    sudo chmod g+rx $postgres_log_path
}
pglogls() {
    echo "show postgresql logfiles"
    ls $postgres_log_path/ -lhtc
}
pglogrm() {
    cnt=${1:-10}
    cntTail=$(($cnt + 1))
    echo "remove old postgresql logfiles, keeping the newest $cnt (~${cnt}00 MB) of $(ls $postgres_log_path/ -tc | wc -l)"
    ls $postgres_log_path/ -tc | tail -n +$cntTail | xargs -r -I{} sudo rm "$postgres_log_path/{}"
}
pglog() {
    pg_log=$(ls $postgres_log_path/ -tc --ignore '*log' | head -n 1)
    echo "follow the log: $postgres_log_path/$pg_log"
    tail -n 3 -f $postgres_log_path/$pg_log
}
pgconf() {
    sudo nvim "${postgres_path}/postgresql.conf"
}
pgrp() {
    mkdir -p $pgbadger_out$report_postfix
    outPath=$pgbadger_out$report_postfix/bash.out
    touch "$outPath"
    echo "" >>"$outPath"
    pgbadger -X -I -f jsonlog -j 10 -O $pgbadger_out$report_postfix $postgres_log_path/postgresql-*.json 2>&1 | tee -a "$outPath"
}
pgrpres() {
    rm -R $pgbadger_out$report_postfix
}
dccreate() {
    sudo docker compose -f $COMPOSE_FILE create --force-recreate
}
dcstart() {
    sudo docker compose -f $COMPOSE_FILE start
    sleep 2
    pginit
}
dcstop() {
    sudo docker compose -f $COMPOSE_FILE stop
}
dcstats() {
    sudo docker stats
}
for name in "$@"; do
    case $name in
    --rppf=*) report_postfix="${name#*=}" ;; # use the value after '=' of this argument
    gflog) gflog ;;
    gfconf) gfconf ;;
    gfscript) gfscript ;;
    gfrestart)
        pgrpinit # note: pgrpinit is not defined in this script
        ;;
    pginit) pginit ;;
    pglogls) pglogls ;;
    pglogrm) pglogrm ;;
    pglog) pglog ;;
    pgconf) pgconf ;;
    pgrestart) pgrestart ;; # note: pgrestart is not defined in this script
    pgrp) pgrp ;;
    pgrpres) pgrpres ;;
    dcinit) dccreate ;;
    dcstart) dcstart ;;
    dcstop) dcstop ;;
    dcstats) dcstats ;;
    measinit)
        pgrpres
        pglogrm 0
        dccreate
        dcstart
        ;;
    measres)
        dcstop
        pgrpres
        pglogrm 0
        dcstart
        pgrp
        pglogrm
        ;;
    meascall)
        gfscript
        pgrp
        pglogrm
        ;;
    help)
        echo "CALLING: $0 <function> [ <function> ...]"
        echo "Overview of the functions of this script."
        echo "Multiple functions may be passed in one call;"
        echo "they are executed one after another."
        echo "ATTENTION: parameters must be given before the commands!"
        echo ""
        echo "*** parameters ***"
        echo " --rppf=<val>  Postfix name for the report folder (used by gfscript, pgrp, pgrpres, measres, meascall)"
        echo ""
        echo "*** glassfish ***"
        echo " gflog         Show and follow the log of the glassfish server ($domain_log)"
        echo " gfconf        Open the configuration file of the glassfish server"
        echo " gfscript      Run the test script for the website"
        echo ""
        echo "*** postgresql ***"
        echo " pginit        Initialize the permissions of the postgresql data and log folders"
        echo " pglogls       Show the current content of the postgresql log folder"
        echo " pglogrm       Clean the postgresql log folder; the newest 10 files are kept"
        echo " pglog         Show and follow the latest postgresql log"
        echo " pgconf        Open the postgresql configuration file"
        echo " pgrestart     Restart postgresql"
        echo " pgrp          Generate the pgbadger report from the postgresql log files"
        echo " pgrpres       Reset the pgbadger output"
        echo ""
        echo "*** docker ***"
        echo " dcinit        Create the Docker containers"
        echo " dcstart       Start the Docker containers"
        echo " dcstop        Stop the Docker containers"
        echo " dcstats       Show live Docker statistics"
        echo ""
        echo "*** combined commands ***"
        echo " measinit      Initialize everything after startup"
        echo " measres       Reset the data for a new measurement run"
        echo " meascall      Execute one measurement run"
        ;;
    *)
        echo >&2 "Invalid option $name"
        exit 1
        ;;
    esac
done
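As a usage note, the help text above requires parameters to be given before the commands, so a combined call with a report postfix might look like this; the postfix value _run1 is only an example.

# write the pgbadger report into a suffixed folder, then reset the data and run two measurement rounds
callscript.sh --rppf=_run1 measres meascall meascall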

View file

@@ -58,11 +58,13 @@ Ebenfalls sieht man eindeutig, dass die Anzahl der Anfragen nach dem ersten Aufr
The memory consumption also grows relatively steadily, which does not quite fit the picture, since no objects
should be held in the cache here.
\mytodos{add text here that explains how to read the columns, i.e. a shorter call time is better}
\begin{table}[h!]
\centering
\begin{tabular}{|r|r|r|r|r|r|r|r|}
\hline
& \multicolumn{3}{|c|}{Call time} & & \multicolumn{3}{|c|}{RSS} \\
& \multicolumn{3}{|c|}{Call time (ms)} & & \multicolumn{3}{|c|}{RSS (MB)} \\
\hline
\# & min & avg & max & Queries & before & after & diff \\
\hline
@@ -80,6 +82,13 @@ Vor jedem weiteren Test-Lauf wurde die Domain beendet und komplett neugestartet,
start fresh. It was noticed here that almost always 62 queries belonged to the startup phase, regardless of the
configured cache settings.
Since the query times on the database were too low to detect any improvement, a Docker container was created for
PostgreSQL and for the Payara server, and their resources were limited. The configuration is described in appendix
\ref{ap:docker_config}.
The new setup yields new measurements. For the memory consumption, it is no longer the memory used by the application
that is observed, but the memory usage of the Docker container running the Payara server.
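A minimal sketch of how the container's memory usage could be sampled around a test run is shown below; the container name payara comes from the compose file, while the helper function and the format string are assumptions for illustration only.

# sample the memory usage of the Payara container before and after a measurement round
mem_of() {
    sudo docker stats --no-stream --format "{{.MemUsage}}" "$1"
}
before=$(mem_of payara)
# ... run the measurement script from this commit here ...
after=$(mem_of payara)
echo "RSS before: $before"
echo "RSS after:  $after"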
\subsection{Caching in OpenJPA}
\label{sec:performance-checking:investigation-application:caching-openjpa}
@@ -99,7 +108,7 @@ werden.
\centering
\begin{tabular}{|r|r|r|r|r|r|r|r|}
\hline
& \multicolumn{3}{|c|}{Call time} & & \multicolumn{3}{|c|}{RSS} \\
& \multicolumn{3}{|c|}{Call time (ms)} & & \multicolumn{3}{|c|}{RSS (MB)} \\
\hline
\# & min & avg & max & Queries & before & after & diff \\
\hline
@@ -123,7 +132,7 @@ beschleunigt werden konnte.
\centering
\begin{tabular}{|r|r|r|r|r|r|r|r|}
\hline
& \multicolumn{3}{|c|}{Call time} & & \multicolumn{3}{|c|}{RSS} \\
& \multicolumn{3}{|c|}{Call time (ms)} & & \multicolumn{3}{|c|}{RSS (MB)} \\
\hline
\# & min & avg & max & Queries & before & after & diff \\
\hline
@@ -191,7 +200,7 @@ Die Cache-Einstellungen des \ac{EJB} sind in der Admin-Oberfläche des Payara-Se
\centering
\begin{tabular}{|r|r|r|r|r|r|r|r|}
\hline
& \multicolumn{3}{|c|}{Call time} & & \multicolumn{3}{|c|}{RSS} \\
& \multicolumn{3}{|c|}{Call time (ms)} & & \multicolumn{3}{|c|}{RSS (MB)} \\
\hline
\# & min & avg & max & Queries & before & after & diff \\
\hline
@@ -286,7 +295,7 @@ in den Java-Objekten fast identisch sind. Und in der Datenbank sind die Anfragen
\centering
\begin{tabular}{|r|r|r|r|r|r|r|r|}
\hline
& \multicolumn{3}{|c|}{Call time} & & \multicolumn{3}{|c|}{RSS} \\
& \multicolumn{3}{|c|}{Call time (ms)} & & \multicolumn{3}{|c|}{RSS (MB)} \\
\hline
\# & min & avg & max & Queries & before & after & diff \\
\hline
@@ -401,7 +410,7 @@ LEFT JOIN sitecity sc ON sc.id = d.city_id;
\centering
\begin{tabular}{|r|r|r|r|r|r|r|r|}
\hline
& \multicolumn{3}{|c|}{Call time} & & \multicolumn{3}{|c|}{RSS} \\
& \multicolumn{3}{|c|}{Call time (ms)} & & \multicolumn{3}{|c|}{RSS (MB)} \\
\hline
\# & min & avg & max & Queries & before & after & diff \\
\hline
@@ -437,7 +446,7 @@ Nach dem Anpassungen haben sich dann die Werte aus \ref{tbl:measure-materialized
\centering
\begin{tabular}{|r|r|r|r|r|r|r|r|}
\hline
& \multicolumn{3}{|c|}{Call time} & & \multicolumn{3}{|c|}{RSS} \\
& \multicolumn{3}{|c|}{Call time (ms)} & & \multicolumn{3}{|c|}{RSS (MB)} \\
\hline
\# & min & avg & max & Queries & before & after & diff \\
\hline
@@ -466,7 +475,7 @@ abfragt.
\centering
\begin{tabular}{|r|r|r|r|r|r|r|r|}
\hline
& \multicolumn{3}{|c|}{Call time} & & \multicolumn{3}{|c|}{RSS} \\
& \multicolumn{3}{|c|}{Call time (ms)} & & \multicolumn{3}{|c|}{RSS (MB)} \\
\hline
\# & min & avg & max & Queries & before & after & diff \\
\hline
@@ -493,4 +502,6 @@ Wenn man die Dokumentenliste als statische Webseiten ablegt, werden die Zugriffs
however, search and sorting do not work in static web pages. Otherwise, a set of the document list would have to be
provided as a static web page for every possible sort order and search query. For the sort orders this would still be
feasible, but for the search queries it is not. Switching to static web pages is therefore not sensible.
\mytodos{put the docker-file and bsopt into the appendix}

Binary file not shown.

View file

@@ -35,6 +35,8 @@
% Example for inline:
%lorem ipsum \lstinline|code| lorem ipsum
\input{tools/yaml_syntax_highlighting.tex}
%*************************************************************************
% Bibliographies
@@ -96,6 +98,8 @@
\part{Appendix}
%\include{chapters/thesis/appendix01}
\include{chapters/thesis/appendix02}
\include{chapters/thesis/appendix03}
\include{chapters/thesis/appendix04}
%\include{chapters/examples/appendix02}
%*************************************************************************
% Other Stuff in the Back

View file

@@ -0,0 +1,45 @@
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%% YAML syntax highlighting %%%%%%%%%%%%%%%%%
% http://tex.stackexchange.com/questions/152829/how-can-i-highlight-yaml-code-in-a-pretty-way-with-listings
% here is a macro expanding to the name of the language
% (handy if you decide to change it further down the road)
\newcommand\YAMLcolonstyle{\color{red}\mdseries}
\newcommand\YAMLkeystyle{\color{black}\bfseries}
\newcommand\YAMLvaluestyle{\color{blue}\mdseries}
\makeatletter
\newcommand\language@yaml{yaml}
\expandafter\expandafter\expandafter\lstdefinelanguage
\expandafter{\language@yaml}
{
  keywords={true,false,null,y,n},
  keywordstyle=\color{darkgray}\bfseries,
  basicstyle=\YAMLkeystyle,                     % assuming a key comes first
  sensitive=false,
  comment=[l]{\#},
  morecomment=[s]{/*}{*/},
  commentstyle=\color{purple}\ttfamily,
  stringstyle=\YAMLvaluestyle\ttfamily,
  moredelim=[l][\color{orange}]{\&},
  moredelim=[l][\color{magenta}]{*},
  moredelim=**[il][\YAMLcolonstyle{:}\YAMLvaluestyle]{:},   % switch to value style at :
  morestring=[b]',
  morestring=[b]",
  literate = {---}{{\ProcessThreeDashes}}3
             {>}{{\textcolor{red}\textgreater}}1
             {|}{{\textcolor{red}\textbar}}1
             {\ -\ }{{\mdseries\ -\ }}3,
}
% switch to key style at EOL
\lst@AddToHook{EveryLine}{\ifx\lst@language\language@yaml\YAMLkeystyle\fi}
\makeatother
\newcommand\ProcessThreeDashes{\llap{\color{cyan}\mdseries-{-}-}}
%%%%%%%%%%% YAML syntax highlighting %%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%