当前位置:网站首页>测试dble split功能执行+导入耗时shell脚本参考
测试dble split功能执行+导入耗时shell脚本参考
2022-06-29 17:39:00 【ActionTech】
测试 split 计时 shell 脚本参考
#!/bin/bash
# set -euxo
# For a brand-new environment, set TEST_ENV="new" so the expect helper
# scripts get (re)generated before first use.
TEST_ENV=
# Set LOADIN="locally" to import the split mysqldump files on the dble host
# itself, or LOADIN="remote" to copy each split file to its backend MySQL
# host and import it there.
LOADIN="locally"
test_dir='/tmp/splittest/'          # working dir for split output on every host
dble_conf_dir='/opt/dble/conf/'     # where sharding.xml variants live
ex_dump_file_name='benchmarksql_with_data'   # previous round's dump name (cleanup)
dump_file_name='benchmarksql_with_data'      # dump file fed to the split command
shardingNum=10     # sharding node count for this test round
ex_shardingNum=10  # sharding node count of the previous round (to be dropped)
MYSQL='/usr/bin/mysql -uxxxx -pxxxx -P3306'  # backend MySQL client (fill in credentials)
# The four backend MySQL hosts; sharding nodes are round-robined across them.
MYSQL_HOST[0]='xx.xx.xx.101'
MYSQL_HOST[1]='xx.xx.xx.102'
MYSQL_HOST[2]='xx.xx.xx.105'
MYSQL_HOST[3]='xx.xx.xx.108'
DBLE_ROOT='/usr/bin/mysql -uroot -p111111 -P9066 -h127.0.0.1'  # dble admin (manager) port
# Recreate the backend MySQL schemas for the new sharding count via the dble
# admin port: drop the old shardingnode databases, swap sharding.xml for the
# variant matching the new count, reload config, then create the new
# shardingnode databases and sanity-check they exist.
# Fixes vs. original: heredocs restored to valid multi-line form (the source
# had them collapsed onto single lines), expansions quoted, explicit
# `if !` checks instead of testing $?.
function create_backend_mysqls_from_dble_cmd () {
  # Admin-SQL files regenerated for every run.
  local drop_ex_shardingnodes="${test_dir}drop_ex_shardingnodes_dnx.sql"
  local create_shardingnodes="${test_dir}create_shardingnodes_dnx.sql"

  rm -f "${drop_ex_shardingnodes}"
  # "an$1-N" is dble's node-range syntax (an1..anN); \$ keeps the $ literal.
  cat > "${drop_ex_shardingnodes}" <<EOFDROP
drop database @@shardingnode="an\$1-${ex_shardingNum}";
EOFDROP

  rm -f "${create_shardingnodes}"
  cat > "${create_shardingnodes}" <<EOFCREATE
reload @@config_all;
create database @@shardingnode="an\$1-${shardingNum}";
EOFCREATE

  if ! ${DBLE_ROOT} < "${drop_ex_shardingnodes}"; then
    echo "fail to drop backend schema via dble admin cmd line"
    exit 1
  fi

  # Swap in the sharding.xml variant for the new sharding count; keep the
  # old one under a suffixed name so the next round can swap back.
  mv "${dble_conf_dir}sharding.xml" "${dble_conf_dir}sharding_${ex_shardingNum}.xml"
  mv "${dble_conf_dir}sharding_${shardingNum}.xml" "${dble_conf_dir}sharding.xml"

  if ! ${DBLE_ROOT} < "${create_shardingnodes}"; then
    echo "fail to create backend schema via dble admin cmd line"
    exit 1
  fi

  # Check the backend MySQL now has the dh_dn_* schemas (non-fatal, as before).
  if ! ${MYSQL} -h"${MYSQL_HOST[1]}" -e "show databases;" | grep "dh_dn_"; then
    echo "fail to create backend schema via dble admin cmd line"
  fi
}
# Generate the expect helper that ssh-es to a backend host and removes that
# host's previous split dump files (*.sql-an*) from /tmp/splittest/.
# Fixes vs. original: restored ${filename} (the source had it garbled to
# "$(unknown)") and the heredoc's multi-line form; quoted expansions.
function generate_rm_old_split_dump_files () {
  local filename="${test_dir}rm_old_split_dump_files.sh"
  rm -f "${filename}"
  # Unquoted EOF so nothing needs shell expansion here; \$ keeps expect's
  # own $argv references literal.
  # NOTE(review): the ssh user was redacted in the source ("[email protected]");
  # "testuser" is assumed from the password below — confirm before use.
  cat > "${filename}" <<EOF
#!/usr/bin/expect -f
set timeout -1
set host [lindex \$argv 0]
set file_name [lindex \$argv 1]
spawn ssh testuser@\$host "rm -f \$file_name.sql-an*;ls -l /tmp/splittest/"
expect "*continue connecting*"
send "yes\r"
expect "*password:"
send "mypasswordfortestuser\r"
expect eof
EOF
}
# Delete stale split dump files both locally and on every backend MySQL host
# (via the rm_old_split_dump_files.sh expect helper).
function rm_old_dump_files_on_each_shardingnodes () {
  local backend_host

  # Local leftovers from the previous round; the glob part stays unquoted.
  rm -f "${test_dir}${ex_dump_file_name}".sql-an*

  # A fresh environment must generate the expect helper before first use.
  if [[ "${TEST_ENV}" == "new" ]]; then
    generate_rm_old_split_dump_files
  fi

  for backend_host in "${MYSQL_HOST[@]}"; do
    echo "${backend_host}"
    expect "${test_dir}rm_old_split_dump_files.sh" "${backend_host}" "${test_dir}${ex_dump_file_name}"
  done
}
# split_cmd.sql embeds dump_file_name; the -l option value (and others)
# could also be parameterized here.
# Run dble's "split" admin command (manager port 9066) and append its
# wall-clock duration to time_record.log.
# Fixes vs. original: restored ${filename} (garbled to "$(unknown)" in the
# source) and the heredoc's multi-line form; duration computed with shell
# arithmetic instead of echo|awk; explicit `if !` check instead of $?.
function get_split_exec_cost_time () {
  local filename="${test_dir}split_cmd.sql"
  local timer_start timer_end duration

  rm -f "${filename}"
  cat > "${filename}" <<EOF
split /opt/splitTest/${dump_file_name}.sql /tmp/splittest -sbenchmarksql;
EOF

  {
    echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> start exec split cmd on dble manager 9066"
    echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> startTime"
    # echo "${DBLE_ROOT} < ${filename}"
    echo "split /opt/splitTest/${dump_file_name}.sql /tmp/splittest -sbenchmarksql; "
  } >> time_record.log

  timer_start=$(date +%s)
  if ! ${DBLE_ROOT} < "${filename}"; then
    echo "$(date +["%Y-%m-%d %H:%M:%S"]) !!!!!!!!!!! fail to exec !!!!!!!!!!" >> time_record.log
  fi
  timer_end=$(date +%s)
  duration=$(( timer_end - timer_start ))s

  {
    echo "dble管理端执行split的耗时: ${duration} "
    echo "total time is ${duration} "
    echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> endTime"
    echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> finish exec split cmd on dble manager 9066"
    echo " "
  } >> time_record.log
}
# From the dble host, remotely connect to each backend MySQL and import every
# split dump file in parallel (one background job per sharding node); the
# logged total is the wall time of the slowest import.
# Fixes vs. original: quoted expansions, function-local variables, duration
# via shell arithmetic instead of echo|awk.
function loadAll_shardingDumpFiles_on_dble_server () {
  local timer_s timer_e tduration i node host

  {
    echo " "
    echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> loadAll_shardingDumpFiles_on_dble_server startTime"
    echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> startTime"
  } >> time_record.log

  timer_s=$(date +%s)
  for ((i=1; i<=shardingNum; i++)); do
    {
      # split names its output "<dump>.sql-anN-<ts>.dump"; normalize once to
      # the ".sql-anN.sql" name used below.
      if [ ! -f "${test_dir}${dump_file_name}.sql-an${i}.sql" ]; then
        mv "${test_dir}${dump_file_name}.sql-an${i}"-*dump "${test_dir}${dump_file_name}.sql-an${i}.sql"
      fi
      # Round-robin sharding node i over the four backend hosts.
      node=$(( i % 4 ))
      case ${node} in
        1) host=${MYSQL_HOST[0]};;
        2) host=${MYSQL_HOST[1]};;
        3) host=${MYSQL_HOST[2]};;
        0) host=${MYSQL_HOST[3]};;
      esac
      echo "${MYSQL} -h${host} dh_dn_${i} < ${test_dir}${dump_file_name}.sql-an${i}.sql" >> time_record.log
      ${MYSQL} -h"${host}" dh_dn_"${i}" < "${test_dir}${dump_file_name}.sql-an${i}.sql"
    } &
  done
  wait
  timer_e=$(date +%s)
  tduration=$(( timer_e - timer_s ))s

  {
    echo "split后的dumpfile文件从dble本机并发导入,总时长(即最长耗时)为: ${tduration} "
    echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> endTime"
    echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> loadAll_shardingDumpFiles_on_dble_server finishTime"
    echo " "
  } >> time_record.log
}
# Not needed for the local-import timing path.
# Generate the expect helper that scp-s one split dump file
# ("<name>.sql-anN.sql") to a backend MySQL host's /tmp/splittest/.
# Fixes vs. original: restored ${filename} (garbled to "$(unknown)" in the
# source) and the heredoc's multi-line form; quoted expansions.
function generate_send_split_dump_files () {
  local filename="${test_dir}send_split_dump_files.sh"
  rm -f "${filename}"
  # \$ keeps expect's own $argv references literal in the unquoted heredoc.
  # NOTE(review): the scp user was redacted in the source ("[email protected]");
  # "testuser" is assumed from the password below — confirm before use.
  cat > "${filename}" <<EOF
#!/usr/bin/expect -f
set timeout -1
set file_num [lindex \$argv 0]
set host [lindex \$argv 1]
set file_name [lindex \$argv 2]
spawn scp \$file_name.sql-an\$file_num.sql testuser@\$host:/tmp/splittest/.
expect "*continue connecting*"
send "yes\r"
expect "*password:"
send "mypasswordfortestuser\r"
expect eof
EOF
}
# Not needed for the local-import timing path.
# Rename each split dump file to its final ".sql-anN.sql" name and scp it
# (serially) to the backend MySQL host that will import it; the total copy
# time is appended to time_record.log.
# Fixes vs. original: quoted expansions, function-local variables, duration
# via shell arithmetic instead of echo|awk.
function change_split_dumpfile_name_and_cp_them_to_their_local () {
  local timer_st timer_ed cpduration i node host

  # A fresh environment must generate the scp expect helper before first use.
  if [[ ("${TEST_ENV}" == "new") && ("${LOADIN}" == "remote") ]]; then
    generate_send_split_dump_files
  fi

  {
    echo " "
    echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> start change split filename and cp them to their local mysql server hosts"
    echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> startTime"
  } >> time_record.log

  timer_st=$(date +%s)
  for ((i=1; i<=shardingNum; i++)); do
    {
      # Normalize split's "<dump>.sql-anN-<ts>.dump" output name once.
      if [ ! -f "${test_dir}${dump_file_name}.sql-an${i}.sql" ]; then
        mv "${test_dir}${dump_file_name}.sql-an${i}"-*dump "${test_dir}${dump_file_name}.sql-an${i}.sql"
      fi
      # Round-robin sharding node i over the four backend hosts.
      node=$(( i % 4 ))
      case ${node} in
        1) host=${MYSQL_HOST[0]};;
        2) host=${MYSQL_HOST[1]};;
        3) host=${MYSQL_HOST[2]};;
        0) host=${MYSQL_HOST[3]};;
      esac
      # plain-scp equivalent: scp ${test_dir}${dump_file_name}.sql-an${i}.sql <user>@${host}:/tmp/splittest/.
      expect "${test_dir}send_split_dump_files.sh" "${i}" "${host}" "${test_dir}${dump_file_name}"
    }
  done
  timer_ed=$(date +%s)
  cpduration=$(( timer_ed - timer_st ))s

  {
    echo "split后的dumpfile文件cp到对应后端mysql本机,总时长(即最长耗时)为: ${cpduration} "
    echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> endTime"
    echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> finish change split filename and cp them to their local mysql server hosts"
    echo " "
  } >> time_record.log
}
# Run the exec_shell_on_remote_server.sh expect helper against every backend
# host in parallel (each host imports its local split files); the logged
# total is the wall time of the slowest host.
# NOTE: the "romote" typo in the name is kept — callers reference it as-is.
# NOTE(review): exec_shell_on_remote_server.sh is not generated anywhere in
# this script; it is assumed to be pre-staged in ${test_dir} — confirm.
# Fixes vs. original: quoted expansions, function-local variables, duration
# via shell arithmetic instead of echo|awk.
function exec_romote_test_time_on_each_hosts () {
  local timer_s timer_e tduration host

  {
    echo " "
    echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> exec_remote_loadin_on_each_host startTime"
    echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> startTime"
  } >> time_record.log

  timer_s=$(date +%s)
  for host in "${MYSQL_HOST[@]}"; do
    {
      echo "${host}"
      expect "${test_dir}exec_shell_on_remote_server.sh" "${host}"
    } &
  done
  wait
  timer_e=$(date +%s)
  tduration=$(( timer_e - timer_s ))s

  {
    echo "split后的dumpfile文件从后端mysql本机并发导入,总时长(即最长耗时)为: ${tduration} "
    echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> endTime"
    echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> exec_remote_loadin_on_each_host finishTime"
    echo " "
  } >> time_record.log
}
# Reset the test environment before a timing round: purge stale split dump
# files (local and on every backend host), then rebuild the backend schemas
# for the configured sharding count via the dble admin port.
function clean_backend_schemas_and_old_dump_files () {
  # Order matters: files are removed before schemas are dropped/recreated.
  rm_old_dump_files_on_each_shardingnodes
  create_backend_mysqls_from_dble_cmd
}
# Remote-import scenario: clean up, run split on dble, scp each piece to its
# backend host, then import there in parallel. All timings go to
# time_record.log between the two banner lines.
function test_of_split_dumpfile_on_mysql_respectively () {
  local banner_start="NOW we are testing scp split_dumpfiles to mysql and loadin respectively,and shardingnode num is ${shardingNum}--------------------------------------------------------------"
  local banner_end="NOW we have finished the testing of scp split_dumpfiles to mysql and loadin respectively,and shardingnode num is ${shardingNum}---------------------------------------------"

  echo "${banner_start}" >> time_record.log
  clean_backend_schemas_and_old_dump_files
  get_split_exec_cost_time
  change_split_dumpfile_name_and_cp_them_to_their_local
  exec_romote_test_time_on_each_hosts
  echo "${banner_end}" >> time_record.log
}
# Local-import scenario: clean up, run split on dble, then import every piece
# from the dble host itself in parallel. All timings go to time_record.log
# between the two banner lines.
function test_of_split_dumpfile_on_dbleServer () {
  local banner_start="NOW we are testing loadin on dble server locally,and shardingnode num is ${shardingNum}--------------------------------------------------------------"
  local banner_end="NOW we have finished the testing of loadin on dble server locally,and shardingnode num is ${shardingNum}---------------------------------------------"

  echo "${banner_start}" >> time_record.log
  clean_backend_schemas_and_old_dump_files
  get_split_exec_cost_time
  loadAll_shardingDumpFiles_on_dble_server
  echo "${banner_end}" >> time_record.log
}
# Entry point: pick the scenario from LOADIN (assigned near the top of the
# script).
# Fixes vs. original: `[` with unquoted $LOADIN and non-POSIX `==` replaced
# by `[[ ]]`; the error message wrongly pointed at "line 7" (LOADIN is not
# set there) and now goes to stderr with a correct hint.
if [[ "${LOADIN}" == "locally" ]]; then
  test_of_split_dumpfile_on_dbleServer
elif [[ "${LOADIN}" == "remote" ]]; then
  test_of_split_dumpfile_on_mysql_respectively
else
  echo "The value of variable LOADIN must be 'locally' or 'remote'; check its assignment near the top of this script" >&2
  exit 1
fi
边栏推荐
- Tencent cloud released orbit, an automated delivery and operation and maintenance product, to promote enterprise applications to be fully cloud native
- SRM系统是什么系统?如何应用SRM系统?
- Leetcode daily question - 535 Encryption and decryption of tinyurl
- Self taught structure (small turtle C language)
- 在线SQL转CSV工具
- LeetCode 每日一题——535. TinyURL 的加密与解密
- 自定义HandlerInterceptor拦截器实现用户鉴权
- Uploading files using AutoIT
- 关于日期相加减问题
- KUKA robot external axis configuration what you must know
猜你喜欢

How to create a virtual image

Inherit Chinese virtues, pay attention to the health of the middle-aged and the elderly, and Yurun milk powder has strong respect for the elderly

育润多维发力慈善领域,勇抗企业公益大旗

mysql. What is the concept of sock

位图的详细介绍及模拟实现

Openfeign use step polling strategy and weight log4j configuration of openfeign interceptor

mysql.sock的概念是什么

剖析下零拷贝机制的实现原理,适用场景和代码实现

Basic operations such as MySQL startup under Windows platform

Face recognition 4- research on Baidu commercial solutions
随机推荐
The soft youth under the blessing of devcloud makes education "smart" in the cloud
2022 software evaluator examination outline
如何使用B/S开发工具DevExtreme的图表控件 - 自定义轴位置?
Bottom level internal skill cultivation
Visual Studio插件CodeRush正式发布v22.1——优化调试可视化工具
R语言ggplot2可视化:使用patchwork包(直接使用加号+)将一个ggplot2可视化结果和一个plot函数可视化结果横向组合起来形成最终结果图
R language uses user-defined functions to write deep learning leaky relu activation functions and visualize leaky relu activation functions
腾讯云发布自动化交付和运维产品Orbit,推动企业应用全面云原生化
SRM系统是什么系统?如何应用SRM系统?
R language ggplot2 visualization: use the patchwork package (directly use the plus sign +) to horizontally combine the two ggplot2 visualization results, and then horizontally combine them with the th
Maidong Internet won the bid of Dajia Insurance Group
mysql在linux中2003错误如何解决
Redis 原理 - Sorted Set (ZSet)
KUKA robot external axis configuration what you must know
Kubernetes deployment dashboard (Web UI management interface)
使用autoIt 上传文件
位图的详细介绍及模拟实现
mysql.sock的概念是什么
Online text digit recognition list summation tool
What is the MySQL query view command