2、Multi-concurrent interface stress test
2022-07-29 06:08:00 【My hair is messy】
1. Flask interface stress test
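The script below only exercises the client side; the Flask service behind /recog is not part of this post. As a point of reference, here is a minimal, hypothetical stand-in for such an endpoint, assuming it accepts a multipart upload under the field name 'file' and replies with JSON. The route name and port are taken from press_url in the script further down; everything else is an assumption.

# Hypothetical stand-in for the service under test (not from the original post).
from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route('/recog', methods=['POST'])
def recog():
    img_data = request.files['file'].read()  # raw bytes of the uploaded image
    # ... the real recognition model would run here ...
    return jsonify({'size': len(img_data), 'result': 'ok'})

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8095)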
import os
import glob
import time
import json
import threading
import traceback
import urllib.request

import cv2
import numpy as np
import requests


def cv_imread(file_path):
    # Read an image from a path that may contain Chinese characters
    cv_img = cv2.imdecode(np.fromfile(file_path, dtype=np.uint8), -1)
    return cv_img
class LogTxt:
    """Minimal file logger that also echoes every record to stdout."""

    def __init__(self, txt_path):
        self.current_path = txt_path
        create_time = time.strftime("%Y-%m-%d %H:%M:%S")
        self.txt_handle = open(self.current_path, 'a', encoding="utf-8")
        self.info(f" ----------- start test in {create_time} --------\n")

    def info(self, content):
        try:
            now_time = time.strftime("%Y-%m-%d %H:%M:%S")
            write_content = now_time + ' - keyPoint - INFO - :' + content
            self.txt_handle.write(write_content + ' \n')
            self.txt_handle.flush()
            print(write_content)
        except Exception:
            print("=============== > LogTxt.info failed < =========== ", traceback.format_exc())

    def close(self):
        self.txt_handle.close()
class Presstest(object):
    """Concurrent stress test against a single HTTP interface."""

    def __init__(self, press_url, file, logger):
        self.press_url = press_url
        self.file = file
        self.logger = logger

    def test_interface(self):
        '''Send one request to the interface under test.'''
        # Note: these module-level counters are shared by all threads without a lock,
        # so the totals may drift slightly under heavy concurrency.
        global INDEX
        INDEX += 1
        global ERROR_NUM
        global TIME_LENS
        try:
            start = time.time()
            self.logger.info(f"Start time for recognizing this HD image: {start}")
            r = requests.post(self.press_url, files=self.file)
            self.logger.info(f"Recognition result for this HD image: {r.text}")
            end = time.time()
            total_time = end - start
            self.logger.info(f"End time for recognizing this HD image: {end}")
            self.logger.info(f"Total time to recognize this HD image: {total_time}")
            TIME_LENS.append(end - start)
            self.logger.info("Recognition of this HD image is finished, moving on to the next one.\n\n")
            print('end')
        except Exception as e:
            ERROR_NUM += 1
            self.logger.info(f"Exception while recognizing this HD image: {e}")
            print(e)

    # headers = {
    #     'Content-Type': 'application/json; charset=UTF-8',
    #     'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}

    def test_onework(self):
        '''Work loop of a single thread: fire ONE_WORKER_NUM requests in sequence.'''
        i = 0
        while i < ONE_WORKER_NUM:
            i += 1
            self.test_interface()
            self.logger.info('one worker num {}'.format(i))
            time.sleep(LOOP_SLEEP)

    ## ---------------------------------------------
    # todo: generic http helper for fetching a webapi result
    ## ---------------------------------------------
    # def do_request(self, press_url, register_data):
    #     '''Generic http helper for fetching a webapi result.'''
    #     headers = {
    #         'Content-Type': 'application/json; charset=UTF-8',
    #     }
    #     request = urllib.request.Request(press_url, json.dumps(register_data).encode("utf-8"), headers=headers)
    #     retry_num = 0
    #     while retry_num < 3:
    #         response = urllib.request.urlopen(request, timeout=300)
    #         if not response or response.status == 421:
    #             time.sleep(1)
    #             retry_num = retry_num + 1
    #             continue
    #         else:
    #             break
    #     response_content = response.read()
    #     if hasattr(response_content, 'decode'):
    #         response_content = response_content.decode('utf-8')
    #
    #     return response_content

    def run(self):
        '''Run the concurrent test with THREAD_NUM worker threads.'''
        t1 = time.time()
        threads = []
        for i in range(THREAD_NUM):
            self.logger.info('thread Num {}'.format(i))
            t = threading.Thread(target=self.test_onework, name="T" + str(i))
            t.daemon = True
            threads.append(t)
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        t2 = time.time()
        self.logger.info("=============== Stress test results ===================")
        self.logger.info("URL: {}".format(self.press_url))
        self.logger.info("Number of requests: {} * {} = {}".format(THREAD_NUM, ONE_WORKER_NUM, THREAD_NUM * ONE_WORKER_NUM))
        self.logger.info("Total time (s): {}".format(t2 - t1))
        self.logger.info("Average wall-clock time per request (s): {}".format((t2 - t1) / (THREAD_NUM * ONE_WORKER_NUM)))
        self.logger.info("Requests per second: {}".format(1 / ((t2 - t1) / (THREAD_NUM * ONE_WORKER_NUM))))
        self.logger.info("Number of errors: {}".format(ERROR_NUM))
        self.logger.info("Requests issued: {}".format(INDEX))
        self.logger.info("---------------- This test is over; the above are the results -----------------")
if __name__ == '__main__':
    press_url = 'http://192.168.2.93:8095/recog'
    TIME_LENS = []
    INDEX = 0
    THREAD_NUM = 25      # total number of concurrent threads
    ONE_WORKER_NUM = 50  # number of requests per thread
    LOOP_SLEEP = 0       # pause between requests (seconds)
    ERROR_NUM = 0        # error counter

    os.makedirs("./log", exist_ok=True)  # make sure the log directory exists
    logger = LogTxt(rf"./log/thread-{THREAD_NUM}.log")

    path1 = r"C:\val"
    # glob matches one pattern at a time, so collect .jpg and .png files separately
    image_paths = glob.glob(os.path.join(path1, "*.jpg")) + glob.glob(os.path.join(path1, "*.png"))
    for path in image_paths:
        with open(path, "rb") as f:
            img_data = f.read()  # <class 'bytes'>
        files = {'file': img_data}
        obj = Presstest(press_url, files, logger)
        obj.run()
        print('onetime')
    logger.close()
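The script appends every successful request's latency to TIME_LENS but only logs wall-clock averages in run(). As an optional add-on (not part of the original post), a small sketch like the following could turn TIME_LENS into per-request statistics after obj.run() finishes; summarize_latencies is a hypothetical helper name.

import statistics

def summarize_latencies(latencies):
    """Return basic latency statistics (seconds) for a list of request times."""
    if not latencies:
        return {}
    ordered = sorted(latencies)
    p95_index = max(int(len(ordered) * 0.95) - 1, 0)
    return {
        'count': len(ordered),
        'min': ordered[0],
        'max': ordered[-1],
        'mean': statistics.mean(ordered),
        'p95': ordered[p95_index],
    }

# e.g. logger.info(f"latency summary: {summarize_latencies(TIME_LENS)}")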