firethorn
changeset 4243:0421694773a1 2.1.32-zrq-thread-pools
Added notes on testing
author   | Dave Morris <dmr@roe.ac.uk>
date     | Mon Jun 03 19:01:47 2019 +0100 (2019-06-03)
parents  | 0c760661dfde
children | 8dbad0ec9b89
files    | doc/notes/zrq/20190603-01-testing.txt
         | doc/notes/zrq/20190603-02-updates.txt
line diff
--- /dev/null                                   Thu Jan 01 00:00:00 1970 +0000
+++ b/doc/notes/zrq/20190603-01-testing.txt     Mon Jun 03 19:01:47 2019 +0100
@@ -0,0 +1,412 @@
+#
+# <meta:header>
+# <meta:licence>
+#   Copyright (c) 2019, ROE (http://www.roe.ac.uk/)
+#
+#   This information is free software: you can redistribute it and/or modify
+#   it under the terms of the GNU General Public License as published by
+#   the Free Software Foundation, either version 3 of the License, or
+#   (at your option) any later version.
+#
+#   This information is distributed in the hope that it will be useful,
+#   but WITHOUT ANY WARRANTY; without even the implied warranty of
+#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#   GNU General Public License for more details.
+#
+#   You should have received a copy of the GNU General Public License
+#   along with this program. If not, see <http://www.gnu.org/licenses/>.
+# </meta:licence>
+# </meta:header>
+#
+#
+
+
+# -----------------------------------------------------
+# Create a new VM.
+#[user@trop01]
+
+    createvm
+
+    >   INFO : Node name [Astoalith]
+    >   INFO : Base name [fedora-28-docker-base-20180708.qcow]
+    >   INFO : Base path [/var/lib/libvirt/images/base/fedora-28-docker-base-20180708.qcow]
+    >   INFO : Disc name [Astoalith.qcow]
+    >   INFO : Disc size [16GiB]
+
+    vmname=Astoalith
+
+
+# -----------------------------------------------------
+# Define a hosts lookup function.
+# https://askubuntu.com/questions/627906/why-is-my-etc-hosts-file-not-queried-when-nslookup-tries-to-resolve-an-address#comment1536517_627909
+#[user@trop01]
+
+    getipv4()
+    {
+        getent hosts "${1:?}" | cut -d ' ' -f 1
+    }
+
+
+#---------------------------------------------------------------------
+# Update the ssh keys for the VM.
+#[user@trop01]
+
+    # Remove the current key.
+    ssh-keygen -q -R "${vmname:?}"
+
+    # Add the host key(s) to known_hosts
+    ssh-keyscan "${vmname:?}" >> "${HOME}/.ssh/known_hosts"
+
+    # Add the IP address key(s) to known_hosts
+    ssh-keyscan -t ecdsa $(getipv4 "${vmname:?}") >> "${HOME}/.ssh/known_hosts"
+
+
+# -----------------------------------------------------
+# Login to the virtual machine.
+#[user@trop]
+
+    ssh "${vmname:?}"
+
+
+# -----------------------------------------------------
+# Install our secret function.
+#[user@virtual]
+
+    mkdir "${HOME:?}/bin"
+    cat > "${HOME:?}/bin/secret" << 'EOF'
+#!/bin/sh
+ssh -n \
+    'dave@shepseskaf.roe.ac.uk' \
+    "bin/secret '${1}'"
+EOF
+
+    chmod u+x "${HOME:?}/bin/secret"
+    secret 'frog'
+
+
+# -----------------------------------------------------
+# Create our chain properties.
+#[user@virtual]
+
+    cat > "${HOME:?}/chain.properties" << EOF
+
+    buildtag=latest
+
+    metadata=$(pwgen 20 1)
+    metauser=$(pwgen 20 1)
+    metapass=$(pwgen 20 1)
+
+    usertype=mssql
+    userhost=$(secret 'firethorn.user.host')
+    userdata=$(secret 'firethorn.user.data')
+    useruser=$(secret 'firethorn.user.user')
+    userpass=$(secret 'firethorn.user.pass')
+
+    datatype=mssql
+    datahost=$(secret 'firethorn.data.host')
+    datadata=$(secret 'firethorn.data.data')
+    datauser=$(secret 'firethorn.data.user')
+    datapass=$(secret 'firethorn.data.pass')
+
+    tunneluser=$(secret 'ssh.tunnel.user')
+    tunnelhost=$(secret 'ssh.tunnel.host')
+
+    admingroup=Hyaenidae
+    adminuser=Aardwolf
+    adminpass=$(pwgen 20 1)
+
+    guestgroup=Afrotheria
+    guestuser=Hyrax
+    guestpass=$(pwgen 20 1)
+
+    tapresource=Wilhelmina
+    tapschemadata=data-$(pwgen 10 1)
+    tapschemauser=user-$(pwgen 10 1)
+    tapschemapass=pass-$(pwgen 10 1)
+
+EOF
+
+# -----------------------------------------------------
+# Link our compose config.
+#[user@virtual]
+
+    ln -sf "${HOME:?}/chain.properties" "${HOME:?}/.env"
+
+# -----------------------------------------------------
+# Identify our location.
+#[user@virtual]
+
+# Choose one.
+# 'local'  if the VM is inside UoE
+# 'remote' if the VM is outside UoE
+# TODO Change these to 'internal' and 'external'
+
+    external=$(curl -4 --silent 'http://icanhazip.com/')
+
+    EDINBURGH='129.215.*'
+
+    if [[ ${external:?} == ${EDINBURGH} ]]
+    then
+        echo "Address [${external:?}] is in Edinburgh"
+        #location=internal
+        location=local
+    else
+        echo "Address [${external:?}] is not in Edinburgh"
+        #location=external
+        location=remote
+    fi
+
+# -----------------------------------------------------
+# Download our compose file.
+#[user@virtual]
+
+    wget \
+        --output-document "baryptera-${location:?}.yml" \
+        "http://wfau.metagrid.co.uk/code/firethorn/raw-file/tip/docker/compose/tests/baryptera/baryptera-${location:?}.yml"
+
+
+# -----------------------------------------------------
+# Start our tests ...
+#[user@virtual]
+
+    docker-compose \
+        --file "baryptera-${location:?}.yml" \
+        run \
+            angela
+
+# -----------------------------------------------------
+# -----------------------------------------------------
+# Separate shell on the host VM, locate the logs volume and tail the firethorn log.
+#[user@virtual]
+
+    sudo -s
+
+    container=baryptera_gillian_1
+    container=stevedore_gillian_1
+
+    pushd $(
+        docker inspect \
+            "${container:?}" \
+        | jq -r '
+            .[].Mounts | .[] | select(.Destination == "/var/local/tomcat/logs") | .Source
+            '
+        )
+
+    tail -f firethorn-debug.log
+
+# -----------------------------------------------------
+# -----------------------------------------------------
+# Separate shell on the host VM, locate the logs volume and tail the ogsadai log.
+#[user@virtual]
+
+    sudo -s
+
+    container=baryptera_jarmila_1
+    container=stevedore_jarmila_1
+
+    pushd $(
+        docker inspect \
+            "${container:?}" \
+        | jq -r '
+            .[].Mounts | .[] | select(.Destination == "/var/local/tomcat/logs") | .Source
+            '
+        )
+
+    tail -f ogsadai.log
+
+# -----------------------------------------------------
+# -----------------------------------------------------
+# Run our Python tests ...
+#[user@python]
+
+import os
+import uuid
+import time
+import firethorn as ftpy
+
+#
+# Create our firethorn client (using named param).
+firethorn = ftpy.Firethorn(
+    endpoint = os.environ.get(
+        'endpoint'
+        )
+    )
+
+#
+# Login as the admin account.
+firethorn.login(
+    os.environ.get('adminuser'),
+    os.environ.get('adminpass'),
+    os.environ.get('admingroup')
+    )
+
+#
+# Create a JdbcResource to connect to the ATLAS database.
+atlas_jdbc = firethorn.firethorn_engine.create_jdbc_resource(
+    "ATLAS JDBC resource",
+    os.environ.get('datadata'),
+    '*',
+    os.environ.get('datatype'),
+    os.environ.get('datahost'),
+    os.environ.get('datauser'),
+    os.environ.get('datapass')
+    )
+print(
+    atlas_jdbc
+    )
+
+#
+# Create an AdqlResource to represent the JdbcResource.
+atlas_adql = firethorn.firethorn_engine.create_adql_resource(
+    "ATLAS ADQL resource"
+    )
+print(
+    atlas_adql
+    )
+
+#
+# Import the target JdbcSchema into AdqlSchema.
+schema_names = [
+    "ATLASDR1"
+    ]
+
+for schema_name in schema_names:
+    print(schema_name)
+    jdbc_schema = atlas_jdbc.select_schema_by_name(
+        schema_name,
+        "dbo"
+        )
+    if (None != jdbc_schema):
+        metadoc = "https://raw.githubusercontent.com/wfau/metadata/master/metadocs/" + schema_name + "_TablesSchema.xml"
+        adql_schema = atlas_adql.import_jdbc_schema(
+            jdbc_schema,
+            schema_name,
+            metadoc=metadoc
+            )
+
+#
+# Admin user
+# -------- -------- -------- --------
+# Normal user
+#
+
+#
+# Login using a guest account.
+firethorn.login(
+    str(uuid.uuid4()),
+    str(uuid.uuid4()),
+    None
+    )
+
+#
+# Create a new workspace.
+workspace = firethorn.firethorn_engine.create_adql_resource(
+    "Query resource"
+    )
+
+#
+# Import the ATLAS schemas into our workspace.
+for schema in atlas_adql.select_schemas():
+    workspace.import_adql_schema(
+        schema
+        )
+
+#
+# Create and run a query.
+query_str = "SELECT TOP 1000 ra, dec FROM ATLASDR1.atlasSource"
+query_obj = workspace.create_query(
+    query_str,
+    "COMPLETED",
+    None,
+    3000000
+    )
+print(
+    query_obj
+    )
+print(
+    query_obj.table()
+    )
+print(
+    query_obj.table().count()
+    )
+
+#
+# Iterate the metadata tree.
+for schema in atlas_adql.select_schemas():
+    for table in schema.select_tables():
+        print(
+            "table [{}][{}]".format(
+                schema.name(),
+                table.name()
+                )
+            )
+        query_str = "SELECT TOP 10 * FROM {}.{}".format(
+            schema.name(),
+            table.name()
+            )
+        query_obj = workspace.create_query(
+            query_str,
+            "COMPLETED",
+            None,
+            3000000
+            )
+        py_table = query_obj.table().as_astropy()
+        py_table.pprint()
+
+#
+# Run multiple queries in parallel.
+from concurrent.futures import ThreadPoolExecutor
+import concurrent.futures
+from datetime import datetime
+
+query_str = "SELECT TOP 10000 ra, dec FROM ATLASDR1.atlasSource"
+
+def do_query(workspace, query_str, limit, delay):
+    before = datetime.now()
+    query_obj = workspace.create_query(
+        query_str,
+        "COMPLETED",
+        None,
+        200000,
+        {
+        "adql.query.limit.rows" : limit,
+        "adql.query.delay.every" : delay
+        }
+        )
+    after = datetime.now()
+    return (
+        (after - before),
+        query_obj.json_object.get("results").get("count")
+        )
+
+def do_queries(workspace, query_str, threads, delay):
+    with concurrent.futures.ThreadPoolExecutor(threads) as executor:
+        futures = {
+            executor.submit(
+                do_query,
+                workspace,
+                query_str,
+                limit,
+                delay
+                ): limit for limit in range(threads, 0, -1)
+            }
+        for future in concurrent.futures.as_completed(futures):
+            print(
+                future.result()[0],
+                ':',
+                future.result()[1]
+                )
+
+for loop in range(1, 10):
+    for threads in range(1, 50):
+        for delay in range(1000, -100, -100):
+            print("---- ", loop, threads, delay)
+            do_queries(
+                workspace,
+                query_str,
+                threads,
+                delay
+                )
+
+
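If the service locks up during the parallel test above, the as_completed() loop inside do_queries() will block indefinitely. The sketch below is a hypothetical guarded variant (do_queries_guarded is not part of the notes above), using only the standard library and the do_query() helper defined in the test; the 600 second limit is an arbitrary illustration, not a tuned value.

    import concurrent.futures

    def do_queries_guarded(workspace, query_str, threads, delay, timeout=600):
        # Plain executor rather than a 'with' block, so we can return without
        # waiting for queries that never complete.
        executor = concurrent.futures.ThreadPoolExecutor(threads)
        futures = {
            executor.submit(do_query, workspace, query_str, limit, delay): limit
            for limit in range(threads, 0, -1)
            }
        # Wait up to 'timeout' seconds for the whole batch to finish.
        done, pending = concurrent.futures.wait(futures, timeout=timeout)
        for future in done:
            duration, count = future.result()
            print(duration, ':', count)
        if pending:
            print(len(pending), "queries still pending after", timeout, "seconds - possible lock up")
        # Leave any stuck worker threads running; do not block on them.
        executor.shutdown(wait=False)
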
--- /dev/null                                   Thu Jan 01 00:00:00 1970 +0000
+++ b/doc/notes/zrq/20190603-02-updates.txt     Mon Jun 03 19:01:47 2019 +0100
@@ -0,0 +1,39 @@
+#
+# <meta:header>
+# <meta:licence>
+#   Copyright (c) 2019, ROE (http://www.roe.ac.uk/)
+#
+#   This information is free software: you can redistribute it and/or modify
+#   it under the terms of the GNU General Public License as published by
+#   the Free Software Foundation, either version 3 of the License, or
+#   (at your option) any later version.
+#
+#   This information is distributed in the hope that it will be useful,
+#   but WITHOUT ANY WARRANTY; without even the implied warranty of
+#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#   GNU General Public License for more details.
+#
+#   You should have received a copy of the GNU General Public License
+#   along with this program. If not, see <http://www.gnu.org/licenses/>.
+# </meta:licence>
+# </meta:header>
+#
+#
+
+    #
+    # Dev branch to experiment with thread pool allocations.
+    # It is possible for the system to lock up under concurrent tests.
+    # Incomplete callbacks from OGSA-DAI get stuck waiting in the queue, which means everything stalls.
+    #
+
+# -----------------------------------------------------
+# Create a new development branch.
+#[user@desktop]
+
+    devname=zrq-thread-pools
+
+    source "${HOME:?}/firethorn.settings"
+    gedit "${FIRETHORN_CODE:?}/doc/notes/zrq/20180302-02-hg-branch.txt" &
+
+
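The lock-up described in the note above has the shape of a classic thread pool deadlock: tasks occupying all the pool threads block on callbacks that can only run on the same pool, so the queue never drains. A minimal Python sketch of that pattern follows; it is purely an illustration, not firethorn or OGSA-DAI code, and it hangs by design if run.

    import concurrent.futures

    # A deliberately small pool, standing in for a bounded worker pool.
    pool = concurrent.futures.ThreadPoolExecutor(max_workers=2)

    def callback():
        return "done"

    def worker():
        # The worker holds a pool thread while it waits for its callback,
        # but the callback is queued on the same, already exhausted, pool.
        return pool.submit(callback).result()

    # Two workers fill both threads; their callbacks sit in the queue behind
    # them and never get scheduled, so every result() call waits forever.
    futures = [pool.submit(worker) for _ in range(2)]
    print([f.result() for f in futures])   # stalls: nothing ever completes
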