# -*- coding: utf-8 -*-
|
|
"""
|
|
@Time : 2019/9/16 11:00
|
|
@Author : 杰森·家乐森
|
|
@File : ANN_test.py
|
|
@Software: PyCharm
|
|
"""
|
|
import ast
import json
import os
import time

import keras
import numpy as np
import requests
import tensorflow as tf
from keras import backend
from keras.models import model_from_json
from sklearn.preprocessing import MinMaxScaler

import config
|
|
|
|
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
|
|
|
|
|
|
def get_history_value(points, time1, interval, typedata):
    """Fetch sampled history values for a set of points over one or more time spans.

    Parameters
    ----------
    points : str
        Comma-separated point (tag) names.
    time1 : str
        Semicolon-separated time spans, each formatted "start,end".
    interval : int
        Sampling period (milliseconds), forwarded to the web API.
    typedata : str
        Comma-separated per-point flags; "0" marks an input (x) column.

    Returns
    -------
    tuple
        (x_data, valuetrs): x_data is an ndarray of shape (samples, n_inputs);
        valuetrs holds one row of raw sampled values per point.
    """
    url = f"http://{config._EXA_IP}:9000/exawebapi/exatime/GetSamplingValueArrayFloat"
    headers = {"Content-Type": "application/json;charset=utf-8"}  # ,"token":get_token()
    point_array = points.split(",")
    time_span = time1.split(";")
    value_array = []
    for item in point_array:
        value_group = []
        for time_piece in time_span:
            st = time_piece.split(",")[0]
            et = time_piece.split(",")[1]
            para = {"ItemName": item, "StartingTime": st, "TerminalTime": et, "SamplingPeriod": interval}
            response = requests.get(url, headers=headers, params=para)
            # SECURITY FIX: the original ran eval() on the (quote-stripped,
            # null-patched) HTTP response body.  ast.literal_eval parses the
            # same literal structure but cannot execute arbitrary code.
            value = ast.literal_eval(str(response.text).replace("\"", "").replace("null", "0"))
            for row in value:
                # row is (timestamp, sampled_value); keep the value only.
                value_group.append(row[1])
        value_array.append(value_group)
    valuetrs = np.array(value_array)
    typeArr = list(enumerate(typedata.split(",")))
    # Keep only the columns flagged "0" (model inputs); "1" columns are outputs.
    data_x = [(valuetrs.T)[:, item[0]].tolist() for item in typeArr if item[1] == "0"]
    x_data = np.array(data_x).T
    return x_data, valuetrs
|
|
|
|
|
|
def rmse(y_true, y_pred):
    """Root-mean-squared error over the last axis, returned as a Keras tensor."""
    per_sample_mse = keras.losses.mean_squared_error(y_true, y_pred)
    return backend.sqrt(backend.mean(per_sample_mse, axis=-1))
|
|
|
|
|
|
def main(mms1, mms2, x_data, origndata, filepath, weight):
    """Run a serialized Keras model on new data and report reconstruction errors.

    Parameters
    ----------
    mms1, mms2 : dict
        Serialized MinMaxScaler state (keys: data_max_, data_min_, data_range_,
        min_, scale_) for the model inputs (mms1) and outputs (mms2).
    x_data : np.ndarray
        Raw input samples, shape (samples, n_inputs).
    origndata : np.ndarray
        Raw measured outputs, shape (samples, n_outputs).
    filepath : dict or str
        Keras model architecture — either the parsed JSON structure or an
        already-encoded JSON string.
    weight : list
        Model weights as nested lists; converted to ndarrays here without
        mutating the caller's list.

    Returns
    -------
    dict
        Keys: reconData, errorData, SPE, mse, x, time1 (model-build seconds),
        time2 (prediction seconds).
    """
    def _restore_scaler(state):
        # Rebuild a "fitted" MinMaxScaler from its serialized attributes so
        # transform()/inverse_transform() use the training-time scaling.
        scaler = MinMaxScaler()
        scaler.data_max_ = np.array(state["data_max_"])
        scaler.data_min_ = np.array(state["data_min_"])
        scaler.data_range_ = np.array(state["data_range_"])
        scaler.min_ = np.array(state["min_"])
        scaler.scale_ = np.array(state["scale_"])
        return scaler

    mms_x = _restore_scaler(mms1)
    mms_y = _restore_scaler(mms2)

    # BUGFIX: build the weight arrays in a new list; the original converted the
    # caller's list entries in place.
    weights = [np.array(w) for w in weight]

    # Normalize the inputs with the scaler the model was trained with.
    normal_data = mms_x.transform(x_data)

    t1 = time.time()
    # BUGFIX: model_from_json() expects a JSON *string*.  The original tried
    # json.dumps(filepath) and fell back to json.loads(filepath) on TypeError —
    # but json.dumps on a str never raises TypeError, and the fallback handed
    # model_from_json a dict.  Branch explicitly on the payload type instead.
    if isinstance(filepath, str):
        model = model_from_json(filepath)
    else:
        model = model_from_json(json.dumps(filepath))
    model.set_weights(weights)
    t2 = time.time()
    predict_data = model.predict(normal_data, batch_size=100)
    t3 = time.time()

    # Compare predictions against the normalized measured outputs.
    y_normal = mms_y.transform(origndata)
    with tf.compat.v1.Session():
        spe = rmse(predict_data, y_normal).eval()
        mse = tf.sqrt(keras.losses.mean_squared_error(predict_data, y_normal)).eval()

    # Undo the output normalization to report errors in engineering units.
    y_data = mms_y.inverse_transform(predict_data)
    errorData = y_data - origndata
    result = {
        "reconData": (y_data.T).tolist(),
        "errorData": (errorData.T).tolist(),
        "SPE": spe,
        "mse": mse.tolist(),
        "x": x_data.tolist(),
        "time1": t2 - t1,
        "time2": t3 - t2,
    }
    return result
|
|
|
|
|
|
def test_offline_main(mms1, mms2, x_data, output_data, filepath, weight):
    """Offline variant of main(): predict and compare against measured outputs.

    Parameters
    ----------
    mms1, mms2 : dict
        Serialized MinMaxScaler state for the inputs (mms1) and outputs (mms2).
    x_data : np.ndarray
        Raw input samples, shape (samples, n_inputs).
    output_data : np.ndarray
        Raw measured outputs, shape (samples, n_outputs).
    filepath : dict or str
        Keras model architecture as a parsed JSON structure or JSON string.
    weight : list
        Model weights as nested lists.

    Returns
    -------
    dict
        Keys: reconData, errorData, paraState (all-zero placeholder flags,
        one per output column), FAI (single-element list holding the RMSE).
    """
    def _restore_scaler(state):
        # Rebuild a "fitted" MinMaxScaler from its serialized attributes.
        scaler = MinMaxScaler()
        scaler.data_max_ = np.array(state["data_max_"])
        scaler.data_min_ = np.array(state["data_min_"])
        scaler.data_range_ = np.array(state["data_range_"])
        scaler.min_ = np.array(state["min_"])
        scaler.scale_ = np.array(state["scale_"])
        return scaler

    mms_x = _restore_scaler(mms1)
    mms_y = _restore_scaler(mms2)

    # Convert weights without mutating the caller's list.
    weights = [np.array(w) for w in weight]

    # Normalize inputs with the training-time scaler.
    normal_data = mms_x.transform(x_data)

    # BUGFIX: model_from_json() expects a JSON string; the original always
    # json.dumps()'d the payload, which double-encodes an already-serialized
    # architecture string.  Branch on the payload type instead.
    if isinstance(filepath, str):
        model = model_from_json(filepath)
    else:
        model = model_from_json(json.dumps(filepath))
    model.set_weights(weights)
    predict_data = model.predict(normal_data, batch_size=100)
    y_data = mms_y.inverse_transform(predict_data)

    result = {}
    # NOTE(review): the measured outputs are normalized with a *fresh* scaler
    # fit on this batch (not the training scaler mms_y) — preserved as-is,
    # but worth confirming it is intentional.
    y_normal = MinMaxScaler().fit_transform(output_data)
    with tf.compat.v1.Session():
        spe = rmse(predict_data, y_normal).eval()
    errorData = output_data - np.array(y_data)
    # One zero flag per output column (placeholder parameter-state vector).
    paraState = [[0 for _ in output_data[0]]]
    result["reconData"] = (y_data).tolist()
    result["errorData"] = errorData.tolist()
    result["paraState"] = paraState
    result["FAI"] = [spe]
    return result
|
|
|
|
|
|
def isnumber(limits):
    """Return True when every entry in *limits* is a (possibly negative) integer string.

    An empty string, or any string with non-digit characters beyond a single
    leading minus sign, fails the check.
    """
    for item in limits:
        # BUGFIX: the original stripped *every* '-' before isdigit(), wrongly
        # accepting strings such as "1-2" or "--5".  Allow one leading minus only.
        unsigned = item[1:] if item.startswith("-") else item
        if not unsigned.isdigit():
            return False
    return True
|
|
|
|
|
|
def clean_main(info):
    """Entry point: fetch (optionally cleaned) history data and run the model.

    First tries the data-cleaning web service (dead-zone, upper/lower-limit and
    condition cleaning).  If anything in that path raises, falls back to the
    raw sampled history via get_history_value().

    Parameters
    ----------
    info : dict
        Expected keys: 'type', 'condition', 'time', 'point', 'dead', 'limit',
        'uplow', 'interval' and 'model' (a JSON string carrying the model
        architecture under 'filename', its 'weight', and the scaler states
        'mms1'/'mms2').

    Returns
    -------
    dict
        The result dict from main(), extended with 'sampleData' (raw samples)
        and 'CleanOrNot' (True when the cleaning service supplied the data).
    """
    try:
        datatype = info['type']
        # Rewrite comparison operators for the cleaning service's syntax.
        # NOTE(review): the chained replaces interact — '>=' first becomes
        # '>==' and is then turned back into '>=' by the second replace (same
        # for '<='), while a bare '=' becomes '=='.  Presumably intentional;
        # confirm against the service's expression grammar.
        condition = info["condition"].replace("=", "==").replace(">=", ">").replace("<=", "<")
        times = info["time"].split(';')
        points = info["point"].split(',')
        interval = 300000  # fixed 5-minute sampling period (ms) for the cleaning path
        dead = info["dead"].split(',')
        limit = info["limit"].split(',')
        uplower = info["uplow"].split(';')
        res = json.loads(info["model"])
        weight = res["weight"]
        filename = res["filename"]
        mms1 = res["mms1"]
        mms2 = res["mms2"]
        count = 0  # number of points that contributed a limit constraint
        ItemsInfo, SamplingTimePeriods = [], []
        Constraint = ""
        for i in range(len(points)):
            iteminfo = {}
            iteminfo["ItemName"] = points[i]  # add the point/tag name
            if (dead[i] == "1"):  # whether this point takes part in dead-zone cleaning
                iteminfo["ClearDeadZone"] = "true"
            else:
                iteminfo["ClearDeadZone"] = "false"
            if (limit[i] == "1"):  # point takes part in upper/lower-limit cleaning
                limits = uplower[i].split(',')
                if (isnumber(limits) == True):  # limits were entered correctly
                    count += 1
                    Constraint += "[" + points[i] + "]>" + limits[0] + " and " + "[" + points[i] + "]<" + limits[
                        1] + " and "
            ItemsInfo.append(iteminfo)
        if (count != 0):
            # Trim the trailing "and " of the last clause (leaves one space).
            Constraint = Constraint[:len(Constraint) - 4:]
        else:
            Constraint = "1==1"  # no upper/lower-limit cleaning requested
        Constraint += " and (" + condition + ")"
        for i in range(len(times)):
            Eachsampletime = {}
            timess = times[i].split(',')
            Eachsampletime["StartingTime"] = timess[0]
            Eachsampletime["TerminalTime"] = timess[1]
            SamplingTimePeriods.append(Eachsampletime)
        Constraint = Constraint.replace("\n", " ")
        # NOTE(review): f-string and %-formatting are mixed on purpose here —
        # the f-string fills in the host, then % fills the query parameters.
        url = f"http://{config._CLEAN_IP}/exawebapi/exatime/GetCleaningData?ItemsInfo=%s&SamplingTimePeriods=%s&Constraint=%s&SamplingPeriod=%s&DCount=6" % (
            ItemsInfo, SamplingTimePeriods, Constraint, interval)
        response = requests.get(url)
        content = json.loads(response.text)
        origndata = np.array([item for item in content["ClearData"]])
        d_type = list(enumerate(datatype.split(",")))
        # "1" columns are model outputs, "0" columns are inputs.
        y_data = [origndata.tolist()[item[0]] for item in d_type if item[1] == "1"]
        x_data = [origndata.tolist()[item[0]] for item in d_type if item[1] == "0"]
        result = main(mms1, mms2, np.array(x_data).T, np.array(y_data).T, filename, weight)
        result["sampleData"] = origndata.tolist()
        result["CleanOrNot"] = True
    except Exception as e:
        # Deliberate broad fallback: any failure in the cleaning path (missing
        # keys, service error, bad payload) switches to raw sampled history.
        points = info['point']
        time1 = info["time"]
        datatype = info['type']
        interval = info['interval']
        data_x, origndata = get_history_value(points, time1, interval, datatype)
        res = json.loads(info["model"])
        weight = res["weight"]
        filename = res["filename"]
        mms1 = res["mms1"]
        mms2 = res["mms2"]
        d_type = list(enumerate(datatype.split(",")))
        origndata = origndata.tolist()
        output_data = [origndata[item[0]] for item in d_type if item[1] == "1"]
        result = main(mms1, mms2, data_x, np.array(output_data).T, filename, weight)
        result["sampleData"] = origndata
        result["CleanOrNot"] = False
    return result
|
|
|
|
|
|
if __name__ == "__main__":
    # Smoke-test entry point.  The original placeholder set `info = {}` and
    # then indexed it, guaranteeing a KeyError; load the payload from a JSON
    # file given on the command line instead.  (Large blocks of commented-out
    # experiment payloads were removed.)
    import sys

    if len(sys.argv) < 2:
        raise SystemExit("usage: ANN_test.py <info.json>")
    with open(sys.argv[1], encoding="utf-8") as fh:
        info = json.load(fh)
    result = main(info["mms1"], info["mms2"], np.array(info["testDataX"]),
                  np.array(info["testDataY"]), info["filename"], info["weight"])
    print(result)