Browse Source

Merge pull request 'cjl-dev' (#1) from cjl-dev into master

Reviewed-on: http://120.26.116.243:3000/root/model-lab/pulls/1
master
root 2 months ago
parent
commit
51727fd4f4
  1. 39
      .drone.yml
  2. 63
      AANN_Train.py
  3. 112
      ANN_Train_offline.py
  4. 2
      ASSESS.py
  5. 11
      Dockerfile
  6. 8
      PCA_Test.py
  7. 6
      PCA_Test_offline.py
  8. 63
      app.py
  9. 44
      app.spec
  10. BIN
      build/temp.win-amd64-3.6/Release/AANN_Derivative.cp36-win_amd64.exp
  11. BIN
      build/temp.win-amd64-3.6/Release/AANN_Derivative.cp36-win_amd64.lib
  12. BIN
      build/temp.win-amd64-3.6/Release/AANN_Derivative.obj
  13. BIN
      build/temp.win-amd64-3.6/Release/AANN_RB.cp36-win_amd64.exp
  14. BIN
      build/temp.win-amd64-3.6/Release/AANN_RB.cp36-win_amd64.lib
  15. BIN
      build/temp.win-amd64-3.6/Release/AANN_RB.obj
  16. BIN
      build/temp.win-amd64-3.6/Release/AANN_Train.cp36-win_amd64.exp
  17. BIN
      build/temp.win-amd64-3.6/Release/AANN_Train.cp36-win_amd64.lib
  18. BIN
      build/temp.win-amd64-3.6/Release/AANN_Train.obj
  19. BIN
      build/temp.win-amd64-3.6/Release/AANN_fit.cp36-win_amd64.exp
  20. BIN
      build/temp.win-amd64-3.6/Release/AANN_fit.cp36-win_amd64.lib
  21. BIN
      build/temp.win-amd64-3.6/Release/AANN_fit.obj
  22. BIN
      build/temp.win-amd64-3.6/Release/ANN_Test_offline.cp36-win_amd64.exp
  23. BIN
      build/temp.win-amd64-3.6/Release/ANN_Test_offline.cp36-win_amd64.lib
  24. BIN
      build/temp.win-amd64-3.6/Release/ANN_Test_offline.obj
  25. BIN
      build/temp.win-amd64-3.6/Release/ASSESS.cp36-win_amd64.exp
  26. BIN
      build/temp.win-amd64-3.6/Release/ASSESS.cp36-win_amd64.lib
  27. BIN
      build/temp.win-amd64-3.6/Release/ASSESS.obj
  28. BIN
      build/temp.win-amd64-3.6/Release/aannmtcltest_recon.cp36-win_amd64.exp
  29. BIN
      build/temp.win-amd64-3.6/Release/aannmtcltest_recon.cp36-win_amd64.lib
  30. BIN
      build/temp.win-amd64-3.6/Release/aannmtcltest_recon.obj
  31. BIN
      build/temp.win-amd64-3.6/Release/pca_test_by_rb.cp36-win_amd64.exp
  32. BIN
      build/temp.win-amd64-3.6/Release/pca_test_by_rb.cp36-win_amd64.lib
  33. BIN
      build/temp.win-amd64-3.6/Release/pca_test_by_rb.obj
  34. BIN
      build/temp.win-amd64-3.6/Release/pca_test_by_rb_plot.cp36-win_amd64.exp
  35. BIN
      build/temp.win-amd64-3.6/Release/pca_test_by_rb_plot.cp36-win_amd64.lib
  36. BIN
      build/temp.win-amd64-3.6/Release/pca_test_by_rb_plot.obj
  37. BIN
      build/temp.win-amd64-3.6/Release/pca_train_off.cp36-win_amd64.exp
  38. BIN
      build/temp.win-amd64-3.6/Release/pca_train_off.cp36-win_amd64.lib
  39. BIN
      build/temp.win-amd64-3.6/Release/pca_train_off.obj
  40. BIN
      build/temp.win-amd64-3.6/Release/pcamtcltest_recon.cp36-win_amd64.exp
  41. BIN
      build/temp.win-amd64-3.6/Release/pcamtcltest_recon.cp36-win_amd64.lib
  42. BIN
      build/temp.win-amd64-3.6/Release/pcamtcltest_recon.obj
  43. BIN
      build/temp.win-amd64-3.6/Release/web.cp36-win_amd64.exp
  44. BIN
      build/temp.win-amd64-3.6/Release/web.cp36-win_amd64.lib
  45. BIN
      build/temp.win-amd64-3.6/Release/web.obj
  46. 1
      cjl勿删/11cf0.json
  47. 1
      cjl勿删/11cf1.json
  48. 1
      cjl勿删/11cf10.json
  49. 1
      cjl勿删/11cf11.json
  50. 1
      cjl勿删/11cf12.json
  51. 1
      cjl勿删/11cf2.json
  52. 1
      cjl勿删/11cf3.json
  53. 1
      cjl勿删/11cf4.json
  54. 1
      cjl勿删/11cf5.json
  55. 1
      cjl勿删/11cf6.json
  56. 1
      cjl勿删/11cf7.json
  57. 1
      cjl勿删/11cf8.json
  58. 1
      cjl勿删/11cf9.json
  59. 1
      cjl勿删/1detection_fan0_1.json
  60. 1
      cjl勿删/1detection_fan0_5.json
  61. 1
      cjl勿删/22cf0.json
  62. 1
      cjl勿删/22cf1.json
  63. 1
      cjl勿删/22cf10.json
  64. 1
      cjl勿删/22cf11.json
  65. 1
      cjl勿删/22cf2.json
  66. 28
      cjl勿删/22cf3.json
  67. 1
      cjl勿删/22cf4.json
  68. 1
      cjl勿删/22cf5.json
  69. 1
      cjl勿删/22cf6.json
  70. 1
      cjl勿删/22cf7.json
  71. 1
      cjl勿删/22cf8.json
  72. 1
      cjl勿删/22cf9.json
  73. 1
      cjl勿删/2detection_fan0_1.json
  74. 1
      cjl勿删/2detection_fan0_5.json
  75. 1
      cjl勿删/3detection_fan0_1.json
  76. 1
      cjl勿删/3detection_fan0_5.json
  77. 1
      cjl勿删/4detection_fan0_1.json
  78. 1
      cjl勿删/4detection_fan0_5.json
  79. 1
      cjl勿删/5detection_fan0_1.json
  80. 1
      cjl勿删/5detection_fan0_5.json
  81. BIN
      cjl勿删/__pycache__/ae_train.cpython-36.pyc
  82. BIN
      cjl勿删/__pycache__/pca_diagnosis.cpython-36.pyc
  83. BIN
      cjl勿删/__pycache__/sae_diagnosis.cpython-36.pyc
  84. 95
      cjl勿删/ae_train.py
  85. 109
      cjl勿删/autoencoder1.json
  86. 1
      cjl勿删/autoencoder2.json
  87. 387
      cjl勿删/autoencoder3.json
  88. 1
      cjl勿删/cf.json
  89. 1
      cjl勿删/cf0.json
  90. 1
      cjl勿删/cf1.json
  91. 1
      cjl勿删/cf10.json
  92. 1
      cjl勿删/cf11.json
  93. 1
      cjl勿删/cf2.json
  94. 1
      cjl勿删/cf3.json
  95. 1
      cjl勿删/cf4.json
  96. 1
      cjl勿删/cf5.json
  97. 1
      cjl勿删/cf6.json
  98. 1
      cjl勿删/cf7.json
  99. 1
      cjl勿删/cf8.json
  100. 1
      cjl勿删/cf9.json

39
.drone.yml

@ -0,0 +1,39 @@
kind: pipeline
type: docker
name: build-and-run-python
trigger:
branch:
include:
- master
event:
include:
- push
- custom
- merge_request
steps:
- name: build python image
image: docker
volumes:
- name: dockersock
path: /var/run/docker.sock
commands:
# 构建镜像(你需要在项目中准备 scripts/Dockerfile.python)
- docker build -t alert-python:latest -f Dockerfile.python .
- name: run python container
image: docker
volumes:
- name: dockersock
path: /var/run/docker.sock
commands:
- docker stop alert-python || true
- docker rm alert-python || true
# 启动 Python 服务容器,假设服务监听 8082 端口
- docker run -d --restart always --name alert-python --network alert-net -p 8082:8082 alert-python:latest
volumes:
- name: dockersock
host:
path: /var/run/docker.sock

63
AANN_Train.py

@ -5,10 +5,6 @@ PCA source code
@author: rsj zjl
"""
import matplotlib.pyplot as plt
from numba import jit
import time
@ -24,6 +20,7 @@ import jenkspy
import xlrd
import AANN_Fit
class Model(object):
def __init__(self):
self.v1 = []
@ -34,6 +31,7 @@ class Model(object):
self.maxdata = 0
self.mindata = 0
def AANN(training_Sample, Nodes, num_epochs):
"""
AANN建模
@ -59,11 +57,12 @@ def AANN(training_Sample, Nodes, num_epochs):
print(type(Nodes[0]))
count, spe, o, v1, v2, w1, w2, sigma = cur_aann(mm, nn, num_epochs, Nodes, mdata)
reconData = AANN_Fit.AANN_Fit(mdata, v1, v2, w1, w2)
r2 = 1-np.sum(np.power((mdata-reconData),2),axis=0)/np.sum(np.power((np.tile(np.average(mdata,axis=0), (mm,1))-reconData),2),axis=0)
r2 = 1 - np.sum(np.power((mdata - reconData), 2), axis=0) / np.sum(
np.power((np.tile(np.average(mdata, axis=0), (mm, 1)) - reconData), 2), axis=0)
# 预处理 数据反归一化
a = maxdata - mindata + np.zeros((mm, nn))
b = maxdata + mindata + np.zeros((mm, nn))
reconData = np.matrix((np.multiply(mdata,a) + b)/2)
reconData = np.matrix((np.multiply(mdata, a) + b) / 2)
Train_X_min = np.min(training_Sample, axis=0) # 训练值最小值
Train_X_max = np.max(training_Sample, axis=0) # 训练值最大值
Train_X_mean = np.mean(training_Sample, axis=0) # 训练值平均值
@ -89,8 +88,8 @@ def AANN(training_Sample, Nodes, num_epochs):
items = [('Train_X_min', np.around(Train_X_min, decimals=3).tolist()),
('Train_X_max', np.around(Train_X_max, decimals=3).tolist()),
('Train_X_std', np.around(Train_X_std, decimals=3).tolist()),
('Train_X_mean',np.around(Train_X_mean, decimals=3).tolist()),
(('Train_X_bais_max',np.around(Train_X_bais_max, decimals=3).tolist())),
('Train_X_mean', np.around(Train_X_mean, decimals=3).tolist()),
(('Train_X_bais_max', np.around(Train_X_bais_max, decimals=3).tolist())),
(('Train_X_bais_min', np.around(Train_X_bais_min, decimals=3).tolist())),
(('Train_X_bais_mean', np.around(Train_X_bais_mean, decimals=3).tolist())),
('QCUL_95_line', np.around(QCUL_95_line, decimals=3).tolist()),
@ -111,6 +110,7 @@ def AANN(training_Sample, Nodes, num_epochs):
result = dict(res_items) # json.dumps(result)
return json.dumps(result)
@jit(nopython=True, cache=True)
def cur_aann(mm, nn, num_epochs, Nodes, mdata):
alpha0 = 0.001
@ -173,15 +173,15 @@ def cur_aann(mm, nn, num_epochs, Nodes, mdata):
sigma = 3 * np.sum((expectlist - o) * (expectlist - o)) / mm
return count, spe, o, v1, v2, w1, w2, sigma
def isnumber(limits):
flag=True
flag = True
for item in limits:
item=item.replace("-","")
if(item.isdigit()==False):
flag=False
item = item.replace("-", "")
if (item.isdigit() == False):
flag = False
break
return flag
@ -204,10 +204,10 @@ def clearmain(info):
limit = Train_Data["limit"].split(',')
uplower = Train_Data["uplow"].split(';')
condition=info["conditon"].replace("=","==").replace(">=",">").replace("<=","<")
#percent = info["Hyper_para"]["percent"]
condition = info["conditon"].replace("=", "==").replace(">=", ">").replace("<=", "<")
# percent = info["Hyper_para"]["percent"]
count=0
count = 0
ItemsInfo, SamplingTimePeriods = [], []
Constraint = ""
for i in range(len(points)):
@ -221,13 +221,14 @@ def clearmain(info):
limits = uplower[i].split(',')
if (isnumber(limits) == True): # 输入上下限正确
count += 1
Constraint += "[" + points[i] + "]>" + limits[0] + " and " + "[" + points[i] + "]<" + limits[1] + " and "
Constraint += "[" + points[i] + "]>" + limits[0] + " and " + "[" + points[i] + "]<" + limits[
1] + " and "
ItemsInfo.append(iteminfo)
if(count!=0):
if (count != 0):
Constraint = Constraint[:len(Constraint) - 4:]
else:
Constraint="1==1"#没有上下限清洗
Constraint+=" and ("+condition+")"
Constraint = "1==1" # 没有上下限清洗
Constraint += " and (" + condition + ")"
Constraint = Constraint.replace("\n", " ")
for i in range(len(times)):
Eachsampletime = {}
@ -236,7 +237,7 @@ def clearmain(info):
Eachsampletime["TerminalTime"] = timess[1]
SamplingTimePeriods.append(Eachsampletime)
url = f"http://{config._CLEAN_IP}/exawebapi/exatime/GetCleaningData?ItemsInfo=%s&SamplingTimePeriods=%s&Constraint=%s&SamplingPeriod=%s&DCount=%d" % (
ItemsInfo, SamplingTimePeriods, Constraint, interval, DCount)
ItemsInfo, SamplingTimePeriods, Constraint, interval, DCount)
response = requests.get(url)
content = json.loads(response.text)
data = np.array([item for item in content["ClearData"]]).T
@ -256,24 +257,22 @@ def clearmain(info):
else:
if len(smote_num) != 0:
data, *_ = smote(data, smote_index, smote_num, max_value, min_value)
Nodes=info["layer"]
num_epochs=Train_Data["interval"]
result =AANN(data, Nodes, num_epochs)#看看nodes和num_epochs怎么传进来
#result = pca(data, percent)
Nodes = info["layer"]
num_epochs = Train_Data["interval"]
result = AANN(data, Nodes, num_epochs) # 看看nodes和num_epochs怎么传进来
# result = pca(data, percent)
result = result.replace("NaN", "-1")
result=json.loads(result)
result["BeforeCleanSamNum"]=content["BeforeCleanSamNum"]
result["AfterCleanSamNum"]=content["AfterCleanSamNum"]
result = json.loads(result)
result["BeforeCleanSamNum"] = content["BeforeCleanSamNum"]
result["AfterCleanSamNum"] = content["AfterCleanSamNum"]
result["CleanOrNot"] = True
return json.dumps(result)
except Exception as e:
result = [{"CleanOrNot": False, "msg": traceback.format_exc()}]
return json.dumps(result, ensure_ascii=False)
if __name__ == "__main__":
info_str='{"layer":[5,2,5],"Train_Data":{"time":"2020-10-14 04:10:28,2020-10-17 14:53:00;2021-04-07 07:32:47,2021-04-16 08:39:01;2021-06-01 18:48:17,2021-06-03 14:29:40","points":"DH4_40MAG20CT362,DH4_40MAG20AN002GT,DH4_40MAG20CE102,DH4_40MAG20CT312,DH4_40MAG20CT322,DH4_40MAG20CT332","interval":300000,"dead":"1,1,1,1,1,1","limit":"0,0,0,0,0,0","uplow":"null,null;null,null;null,null;null,null;null,null;null,null"},"type":"AANN","conditon":"[DH4_40MAG20CE102]>20","epoch":"10000"}'
info_str = '{"layer":[5,2,5],"Train_Data":{"time":"2020-10-14 04:10:28,2020-10-17 14:53:00;2021-04-07 07:32:47,2021-04-16 08:39:01;2021-06-01 18:48:17,2021-06-03 14:29:40","points":"DH4_40MAG20CT362,DH4_40MAG20AN002GT,DH4_40MAG20CE102,DH4_40MAG20CT312,DH4_40MAG20CT322,DH4_40MAG20CT332","interval":300000,"dead":"1,1,1,1,1,1","limit":"0,0,0,0,0,0","uplow":"null,null;null,null;null,null;null,null;null,null;null,null"},"type":"AANN","conditon":"[DH4_40MAG20CE102]>20","epoch":"10000"}'
info = json.loads(info_str)
print(clearmain(info))

112
ANN_Train_offline.py

@ -5,25 +5,27 @@
@File : ANN_train.py
@Software: PyCharm
"""
import os
import datetime
import json
import os
import time
import datetime
import requests
import config
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
import tensorflow as tf
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras import backend
import matplotlib.pyplot as plt
from tensorflow.keras import layers
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import load_model
from tensorflow.keras.models import model_from_json
import config
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
def get_history_value(points, time, interval,typedata):
def get_history_value(points, time, interval, typedata):
# url="http://192.168.1.201:8080/openPlant/getMultiplePointHistorys"
url = f"http://{config._EXA_IP}:9000/exawebapi/exatime/GetSamplingValueArrayFloat"
headers = {"Content-Type": "application/json;charset=utf-8"} # ,"token":get_token()
@ -41,20 +43,21 @@ def get_history_value(points, time, interval,typedata):
for row in value:
value_group.append(row[1])
value_array.append(value_group)
valuetrs=np.array(value_array)
valuetrs = np.array(value_array)
typeArr = list(enumerate(typedata.split(",")))
data_x = [(valuetrs.T)[:, item[0]].tolist() for item in typeArr if item[1] == "0"]
data_y = [(valuetrs.T)[:, item[0]].tolist() for item in typeArr if item[1] == "1"]
x_data = np.array(data_x).T
y_data = np.array(data_y).T
return x_data,y_data,valuetrs
return x_data, y_data, valuetrs
def rmse(y_true, y_pred):
return backend.sqrt(backend.mean(tf.keras.losses.mean_squared_error(y_true, y_pred), axis=-1))
def TrainOffline(x_data,y_data,hidden_layers,epochsdata):
#计算原来每一列数据的最大值
def TrainOffline(x_data, y_data, hidden_layers, epochsdata):
# 计算原来每一列数据的最大值
valuetrs = np.hstack((x_data, y_data))
# valuetrs = valuetrs
train_max, train_min, train_ave = [], [], []
@ -128,11 +131,12 @@ def TrainOffline(x_data,y_data,hidden_layers,epochsdata):
result["weight"] = [model_weight[index].tolist() for index in range(len(model_weight))]
return json.dumps(result)
def Train(x_data,y_data,hidden_layers,valuetrs,epochsdata):
def Train(x_data, y_data, hidden_layers, valuetrs, epochsdata):
# 计算原来每一列数据的最大值
#x_data = np.array(x_data)
#y_data = np.array(y_data)
valuetrs=valuetrs
# x_data = np.array(x_data)
# y_data = np.array(y_data)
valuetrs = valuetrs
train_max, train_min, train_ave = [], [], []
for row in valuetrs.T:
train_max.append(np.max(row))
@ -140,7 +144,7 @@ def Train(x_data,y_data,hidden_layers,valuetrs,epochsdata):
train_ave.append(np.mean(row))
mms1 = MinMaxScaler()
mms2 = MinMaxScaler()
mms3=MinMaxScaler()
mms3 = MinMaxScaler()
x_normal = mms1.fit_transform(x_data)
y_normal = mms2.fit_transform(y_data)
x_train = x_normal
@ -156,17 +160,16 @@ def Train(x_data,y_data,hidden_layers,valuetrs,epochsdata):
model.compile(loss="mse", optimizer="adam", metrics=["accuracy"])
# 训练模型
history = model.fit(x_train, y_train, epochs=epochsdata, batch_size=80)
path=os.getcwd()+"\\ModelOline";
path = os.getcwd() + "\\ModelOline";
if not os.path.exists(path):
os.makedirs(path)
now=datetime.datetime.now()
#filepath=path + "\\M"+"_"+str(now).replace(" ","-").replace(".","-").replace(":","-")+".h5"
#model.save(filepath)
filepath=model.to_json()
model_weight=model.get_weights()
now = datetime.datetime.now()
# filepath=path + "\\M"+"_"+str(now).replace(" ","-").replace(".","-").replace(":","-")+".h5"
# model.save(filepath)
filepath = model.to_json()
model_weight = model.get_weights()
test_data = model.predict(x_normal, batch_size=400)
# 反归一化
predict_data = mms2.inverse_transform(test_data)
with tf.compat.v1.Session():
@ -181,20 +184,20 @@ def Train(x_data,y_data,hidden_layers,valuetrs,epochsdata):
pre_min.append(np.min(row))
pre_ave.append(np.mean(row))
pre_s.append(np.std(row) * 3)
result,mms1new,mms2new={},{},{}
mms1new["data_max_"]=mms1.data_max_.tolist()
mms1new["data_min_"]=mms1.data_min_.tolist()
mms1new["data_range_"]=mms1.data_range_.tolist()
mms1new["min_"]=mms1.min_.tolist()
mms1new["scale_"]=mms1.scale_.tolist()
result, mms1new, mms2new = {}, {}, {}
mms1new["data_max_"] = mms1.data_max_.tolist()
mms1new["data_min_"] = mms1.data_min_.tolist()
mms1new["data_range_"] = mms1.data_range_.tolist()
mms1new["min_"] = mms1.min_.tolist()
mms1new["scale_"] = mms1.scale_.tolist()
mms2new["data_max_"] = mms2.data_max_.tolist()
mms2new["data_min_"] = mms2.data_min_.tolist()
mms2new["data_range_"] = mms2.data_range_.tolist()
mms2new["min_"] = mms2.min_.tolist()
mms2new["scale_"] = mms2.scale_.tolist()
result["filename"]=filepath
result["mms1"]=mms1new
result["mms2"]=mms2new
result["filename"] = filepath
result["mms1"] = mms1new
result["mms2"] = mms2new
result["train_max"] = np.array(train_max).tolist()
result["train_min"] = np.array(train_min).tolist()
result["train_ave"] = np.array(train_ave).tolist()
@ -209,14 +212,15 @@ def Train(x_data,y_data,hidden_layers,valuetrs,epochsdata):
def isnumber(limits):
flag=True
flag = True
for item in limits:
item=item.replace("-","")
if(item.isdigit()==False):
flag=False
item = item.replace("-", "")
if (item.isdigit() == False):
flag = False
break
return flag
def clearmain(info):
try:
points = info["point"].split(',')
@ -224,12 +228,12 @@ def clearmain(info):
epochs = info["iter"]
layer = info["layer"]
typedata = info["type"]
condition=info["condition"].replace("=","==").replace(">=",">").replace("<=","<")
condition = info["condition"].replace("=", "==").replace(">=", ">").replace("<=", "<")
interval = 300000
dead = info["dead"].split(',')
limit = info["limit"].split(',')
uplower = info["uplow"].split(';')
count=0
count = 0
ItemsInfo, SamplingTimePeriods = [], []
Constraint = ""
for i in range(len(points)):
@ -243,13 +247,14 @@ def clearmain(info):
limits = uplower[i].split(',')
if (isnumber(limits) == True): # 输入上下限正确
count += 1
Constraint += "[" + points[i] + "]>" + limits[0] + " and " + "[" + points[i] + "]<" + limits[1] + " and "
Constraint += "[" + points[i] + "]>" + limits[0] + " and " + "[" + points[i] + "]<" + limits[
1] + " and "
ItemsInfo.append(iteminfo)
if count!=0:
if count != 0:
Constraint = Constraint[:len(Constraint) - 4:]
else:
Constraint="1==1"#没有上下限清洗
Constraint+=" and ("+condition+")"
Constraint = "1==1" # 没有上下限清洗
Constraint += " and (" + condition + ")"
Constraint = Constraint.replace("\n", " ")
for i in range(len(times)):
Eachsampletime = {}
@ -258,19 +263,19 @@ def clearmain(info):
Eachsampletime["TerminalTime"] = timess[1]
SamplingTimePeriods.append(Eachsampletime)
url = f"http://{config._CLEAN_IP}/exawebapi/exatime/GetCleaningData?ItemsInfo=%s&SamplingTimePeriods=%s&Constraint=%s&SamplingPeriod=%s&DCount=6" % (
ItemsInfo, SamplingTimePeriods, Constraint, interval)
ItemsInfo, SamplingTimePeriods, Constraint, interval)
response = requests.get(url)
content = json.loads(response.text)
# data =np.array([item for item in content["ClearData"] if item ]).T
valuetrs = np.array([item for item in content["ClearData"]]).T
typeArr = list(enumerate(typedata.split(",")))
data_x = [valuetrs[:,item[0]].tolist() for item in typeArr if item[1]=="0"]
data_y = [valuetrs[:,item[0]].tolist() for item in typeArr if item[1]=="1"]
data_x = [valuetrs[:, item[0]].tolist() for item in typeArr if item[1] == "0"]
data_y = [valuetrs[:, item[0]].tolist() for item in typeArr if item[1] == "1"]
data_x = np.array(data_x).T
data_y = np.array(data_y).T
result = Train(data_x, data_y, layer, valuetrs, eval(epochs))
result["BeforeCleanSamNum"]=content["BeforeCleanSamNum"]
result["AfterCleanSamNum"]=content["AfterCleanSamNum"]
result["BeforeCleanSamNum"] = content["BeforeCleanSamNum"]
result["AfterCleanSamNum"] = content["AfterCleanSamNum"]
result["CleanOrNot"] = True
return result
except Exception as e:
@ -278,7 +283,6 @@ def clearmain(info):
return result
def main(info):
points = info["point"]
time1 = info["time"]
@ -293,9 +297,9 @@ def main(info):
if __name__ == "__main__":
# info_str = '{"time":"2020-01-19 23:26:04,2020-01-25 20:42:16;2020-01-26 16:23:49,2020-02-03 11:36:42;2020-02-05 20:02:49,2020-02-06 05:51:40","condition":"1=1","interval":300000,"dead":"1,1,1","layer":["6"],"point":"JL_D1_10MILLA:SEP_TEMP.PNT,JL_D1_10FSSS20A:HFC10CT301.PNT,JL_D1_10FSSS20A:HFC10CT302.PNT","type":"1,0,0","limit":"0,0,0","uplow":",;,;,","iter":"100"}'
info_str = '{"iter":"800","dead":"1,1,1,1","point":"DH4_40HLF10CE101,DH4_40HLF10CT351,DH4_40HLF10CT352,DH4_40HLF10CY101","limit":"0,0,0,0","layer":["5"],"type":"0,0,0,1","time":"2020-10-06 19:17:46,2020-10-12 14:58:19","condition":"1=1","interval":300000,"uplow":"null,null;null,null;null,null;null,null"}'
info = json.loads(info_str)
result = clearmain(info)
print(result)
# info = {"iter":"800","Train_Data_X":[[7.0,7.0,7.0,7.0,7.0,7.0,7.0],[8.0,8.0,8.0,8.0,8.0,8.0,8.0],[1.0,1.0,1.0,1.0,1.0,1.0,1.0],[2.0,2.0,2.0,2.0,2.0,2.0,2.0],[3.0,3.0,3.0,3.0,3.0,3.0,3.0],[4.0,4.0,4.0,4.0,4.0,4.0,4.0],[5.0,5.0,5.0,5.0,5.0,5.0,5.0],[6.0,6.0,6.0,6.0,6.0,6.0,6.0],[7.0,7.0,7.0,7.0,7.0,7.0,7.0],[8.0,8.0,8.0,8.0,8.0,8.0,8.0],[1.0,1.0,1.0,1.0,1.0,1.0,1.0],[2.0,2.0,2.0,2.0,2.0,2.0,2.0]],"hide":["7","5","1"],"Train_Data_Y":[[7.0],[8.0],[1.0],[2.0],[3.0],[4.0],[5.0],[6.0],[7.0],[8.0],[1.0],[2.0]]}
# result = TrainOffline(np.array(info["Train_Data_X"]), np.array(info["Train_Data_Y"]),info["hide"], eval(info["iter"]))
# info_str = '{"iter":"800","dead":"1,1,1,1","point":"DH4_40HLF10CE101,DH4_40HLF10CT351,DH4_40HLF10CT352,DH4_40HLF10CY101","limit":"0,0,0,0","layer":["5"],"type":"0,0,0,1","time":"2020-10-06 19:17:46,2020-10-12 14:58:19","condition":"1=1","interval":300000,"uplow":"null,null;null,null;null,null;null,null"}'
# info = json.loads(info_str)
# result = clearmain(info)
# print(result)
info = {"iter":"800","Train_Data_X":[[7.0,7.0,7.0,7.0,7.0,7.0,7.0],[8.0,8.0,8.0,8.0,8.0,8.0,8.0],[1.0,1.0,1.0,1.0,1.0,1.0,1.0],[2.0,2.0,2.0,2.0,2.0,2.0,2.0],[3.0,3.0,3.0,3.0,3.0,3.0,3.0],[4.0,4.0,4.0,4.0,4.0,4.0,4.0],[5.0,5.0,5.0,5.0,5.0,5.0,5.0],[6.0,6.0,6.0,6.0,6.0,6.0,6.0],[7.0,7.0,7.0,7.0,7.0,7.0,7.0],[8.0,8.0,8.0,8.0,8.0,8.0,8.0],[1.0,1.0,1.0,1.0,1.0,1.0,1.0],[2.0,2.0,2.0,2.0,2.0,2.0,2.0]],"hide":["7","5","1"],"Train_Data_Y":[[7.0],[8.0],[1.0],[2.0],[3.0],[4.0],[5.0],[6.0],[7.0],[8.0],[1.0],[2.0]]}
result = TrainOffline(np.array(info["Train_Data_X"]), np.array(info["Train_Data_Y"]),info["hide"], eval(info["iter"]))

2
ASSESS.py

@ -12,7 +12,7 @@ import config
class HealthyScoringSystem:
def __init__(self):
# 读取配置
ms = MSSQL(host="172.28.137.230", user="sa", pwd="powerSIS#123", database="ASSESS")
ms = MSSQL(host="120.26.116.243", user="root", pwd="123456", database="alert")
conditionlist = ms.ExecQuery(f"SELECT * FROM [ASSESS].[dbo].[conditionlist]")
pointconfigs = ms.ExecQuery(f"SELECT * FROM [ASSESS].[dbo].[pointconfigs]")

11
Dockerfile

@ -0,0 +1,11 @@
FROM docker.io/python:3.9.20-slim
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
CMD ["python", "app.py"]

8
PCA_Test.py

@ -159,14 +159,14 @@ class MSSQL:
self.conn.close()
def get_model_by_ID(model_id, version="v-test"):
ms = MSSQL(host=config._SQL_IP, user="sa", pwd="powerSIS#123", database="alert")
resList = ms.ExecQuery(f"SELECT Model_info FROM [alert].[dbo].[Model_CFG] where model_id={model_id}")
ms = MSSQL(host=config._SQL_IP, user="root", pwd="123456", database="alert")
resList = ms.ExecQuery(f"SELECT Model_info FROM model_cfg where model_id={model_id}")
return json.loads(resList[0][0])
def get_model_by_id_and_version(model_id, version):
ms = MSSQL(host=config._SQL_IP, user="sa", pwd="powerSIS#123", database="alert")
resList = ms.ExecQuery(f"SELECT Model_info FROM [alert].[dbo].[model_version] where model_id={model_id} and version='{version}'")
ms = MSSQL(host=config._SQL_IP, user="root", pwd="123456", database="alert")
resList = ms.ExecQuery(f"SELECT Model_info FROM model_version where model_id={model_id} and version='{version}'")
return json.loads(resList[0][0])
def pca(model, Data_origin):

6
PCA_Test_offline.py

@ -97,8 +97,8 @@ class MSSQL:
self.conn.close()
def get_model_by_ID(model_id):
ms = MSSQL(host=config._SQL_IP, user="sa", pwd="powerSIS#123", database="alert")
resList = ms.ExecQuery("SELECT Model_info FROM [alert].[dbo].[Model_CFG] where \"model_id\"="+str(model_id))
ms = MSSQL(host="120.26.116.243", user="root", pwd="powerSIS#123", database="alert")
resList = ms.ExecQuery("SELECT Model_info FROM model_cfg where \"model_id\"="+str(model_id))
#return json.loads(resList[0][0])["para"]
return json.loads(resList[0][0])
@ -111,7 +111,7 @@ def get_model_by_id(model_id):
conn = pyodbc.connect(
r"DRIVER={SQL SERVER NATIVE CLIENT 10.0};SERVER=%s;DATABASE=alert;UID=sa;PWD=powerSIS#123" % config._SQL_IP) # 连接数据库
cursor = conn.cursor() # 获得操作的游标
cursor.execute(f"SELECT Model_info FROM [alert].[dbo].[Model_CFG] where model_id={model_id}")
cursor.execute(f"SELECT Model_info FROM model_cfg where model_id={model_id}")
res_list = cursor.fetchall() # 获取查询的结果
conn.commit() # 提交执行
cursor.close() # 关闭游标

63
web.py → app.py

@ -1,28 +1,31 @@
# -*- coding: utf-8 -*-
import traceback
from flask import Flask
from flask import request
import datetime
import json
import PCA_Test_offline
import numpy as np
import PCA_Test
import config
import json
import sys
import requests
import datetime
import traceback
import PCA_Test
import PCA_Test_offline
import config
import jenkspy
import numpy as np
import requests
import xlrd
from flask import Flask
from flask import request
from numba import jit
app = Flask(__name__)
# app.permanent_session_lifetime = timedelta(minutes=5)
@app.route('/', methods=["POST", "GET"])
def FirstPage():
return "This is a Flask WebAPi"
@app.route('/api/test/Test_offline', methods=["GET"])
def GetTest_offline():
try:
@ -67,6 +70,7 @@ def GetTest_offline():
result = json.dumps({"msg": traceback.format_exc()}, ensure_ascii=False)
return result
@app.route('/api/predict', methods=["POST"])
def get_predict():
try:
@ -103,6 +107,7 @@ def get_predict():
result = json.dumps({"msg": traceback.format_exc()}, ensure_ascii=False)
return result
@app.route('/api/test/ANN_Test_offline', methods=["GET", "POST"])
def GetANNTestOffline():
try:
@ -114,6 +119,7 @@ def GetANNTestOffline():
result = {"error_msg": str(e)}
return json.dumps(result)
@app.route('/api/test/Train', methods=["POST", "GET"])
def GetTrain():
result = ""
@ -127,6 +133,7 @@ def GetTrain():
result = GMM_train.main(jsonstr)
return result
@app.route('/api/test/ASSESS', methods=["POST", "GET"])
def GetAssess():
result = {}
@ -154,7 +161,7 @@ def GetClearTrain():
if filename == "PCA":
import PCA_Train
result = PCA_Train.clearmain(jsonstr)
elif filename == "AANN": #AANN added by rsj 2021-5-3
elif filename == "AANN": # AANN added by rsj 2021-5-3
import AANN_Train
result = AANN_Train.clearmain(jsonstr)
elif filename == "sPCA":
@ -200,10 +207,10 @@ def GetTest():
points = Test_Data["points"]
time1 = Test_Data["time"]
interval = Test_Data["interval"]
#print('1111111',res["para"])
# print('1111111',res["para"])
Data = AANN_Fit.get_history_value(points, time1, interval)
model = res["para"]["Model_info"]
result = AANN_Fit.AANN_Test(model,Data)
result = AANN_Fit.AANN_Test(model, Data)
index = time1.index(",")
result["time"] = time1[:index:]
@ -213,6 +220,7 @@ def GetTest():
except Exception as e:
return json.dumps({"error_msg": traceback.format_exc()})
@app.route("/api/aann_clean_test", methods=["POST"])
def get_aann_clean_test():
try:
@ -223,6 +231,7 @@ def get_aann_clean_test():
result = {"error_msg": str(e)}
return json.dumps(result)
@app.route('/api/test/ANN_Train_offline', methods=["GET", "POST"])
def GetANNTrainOffline():
try:
@ -235,6 +244,7 @@ def GetANNTrainOffline():
result = {"error_msg": str(e)}
return json.dumps(result)
@app.route('/api/test/ANN_Train', methods=["POST"])
def GetANNTrain():
try:
@ -246,6 +256,7 @@ def GetANNTrain():
result = {"error": str(e)}
return json.dumps(result)
@app.route('/api/test/ANN_Test', methods=["POST"])
def GetANNTest():
try:
@ -270,6 +281,7 @@ def GetANNTest():
result["error_msg"] = str(e)
return json.dumps(result)
@app.route('/api/test/PLS_Train_online', methods=["POST", "GET"])
def getPLSTrainOnline():
result = ""
@ -278,6 +290,7 @@ def getPLSTrainOnline():
result = PLS_Train.main(jsonstr)
return result
@app.route('/api/test/PLS_Test_online', methods=["POST", "GET"])
def GetPLSTestOnline():
result = ""
@ -286,6 +299,7 @@ def GetPLSTestOnline():
result = PLS_Test.main(info)
return result
@app.route('/api/test/getliner', methods=["POST", "GET"])
def get_liner():
data = json.loads(request.data)
@ -296,6 +310,7 @@ def get_liner():
y = inputdata[:, 1].reshape(-1, 1)
return Liner.main(degree, x, y)
@app.route('/HeatRate', methods=["POST", "GET"])
def get_heat_rate():
data = json.loads(request.args.get("json"))
@ -303,6 +318,7 @@ def get_heat_rate():
result = heatrate.main(data)
return result
@app.route('/api/Test_offlinerb', methods=["POST"])
def get_test_offlinerb():
info = json.loads(request.data)
@ -336,6 +352,7 @@ def get_test_offlinerb():
result["time"] = time1[:index:]
return json.dumps(result)
@app.route("/api/pca_off_train", methods=["POST"])
def get_pca_off_train():
try:
@ -346,6 +363,7 @@ def get_pca_off_train():
result = json.dumps({"error_msg": traceback.format_exc()})
return result
@app.route('/api/pcamtcltest', methods=["POST"])
def get_test_pcamtcltest():
try:
@ -357,6 +375,7 @@ def get_test_pcamtcltest():
result = {"error_msg": str(e)}
return json.dumps(result)
@app.route('/api/pcamtcltest_recon', methods=["POST"])
def get_test_pcamtcltest_recon():
try:
@ -368,6 +387,7 @@ def get_test_pcamtcltest_recon():
result = {"error_msg": str(e)}
return json.dumps(result)
@app.route("/api/mtcltest_recon_exa", methods=["POST"])
def get_mtcltest_recon_exa():
try:
@ -377,7 +397,7 @@ def get_mtcltest_recon_exa():
import pcamtcltest_recon
result = pcamtcltest_recon.main(info)
return json.dumps(result)
elif filename =="AANN":
elif filename == "AANN":
import aannmtcltest_recon
result = aannmtcltest_recon.main(info)
return json.dumps(result)
@ -385,6 +405,7 @@ def get_mtcltest_recon_exa():
result = {"error_msg": str(e)}
return json.dumps(result)
@app.route("/api/ann_clean_test", methods=["POST"])
def get_ann_clean_test():
try:
@ -395,6 +416,7 @@ def get_ann_clean_test():
result = {"error_msg": str(e)}
return json.dumps(result)
@app.route("/api/test/model_performance", methods=["POST"])
def get_model_performance():
info = json.loads(request.data)
@ -402,6 +424,7 @@ def get_model_performance():
result = model_performance.main(info)
return json.dumps(result).encode("utf-8").decode("unicode_escape")
@app.route("/api/model_performance_data", methods=["POST"])
def get_model_performance_data():
info = json.loads(request.data)
@ -409,6 +432,7 @@ def get_model_performance_data():
result = model_performance.data_main(info)
return json.dumps(result).encode("utf-8").decode("unicode_escape")
@app.route("/api/test/spca_train", methods=["POST"])
def get_spca_train():
try:
@ -419,6 +443,7 @@ def get_spca_train():
result = json.dumps({"error_msg": str(e)})
return result
@app.route("/api/test/spca_test", methods=["POST"])
def get_spca_test():
try:
@ -429,6 +454,7 @@ def get_spca_test():
result = json.dumps({"error_msg": traceback.format_exc()})
return result
@app.route("/api/test/clean_data", methods=["POST"])
def get_clean_data():
try:
@ -439,6 +465,7 @@ def get_clean_data():
result = json.dumps({"error_msg": traceback.format_exc()})
return result
@app.route("/api/model_coverage", methods=["POST"])
def model_coverage():
try:
@ -449,6 +476,7 @@ def model_coverage():
result = json.dumps({"error_msg": traceback.format_exc()})
return result
@app.route('/api/get_smote_data', methods=["POST"])
def get_smote_data():
try:
@ -459,6 +487,7 @@ def get_smote_data():
res = {"error_msg": traceback.format_exc()}
return json.dumps(res)
@app.route('/api/ae_train', methods=["POST"])
def ae_train():
info = json.loads(request.data)
@ -466,6 +495,7 @@ def ae_train():
res = train_main(info)
return res
@app.route('/api/ae_test', methods=["POST"])
def ae_test():
try:
@ -476,11 +506,12 @@ def ae_test():
res = json.dumps({"error_msg": traceback.format_exc()})
return res
@app.route("/api/get_pca_cp_mtcl_test", methods=["POST"])
def get_pca_cp_mtcl_test():
try:
info = json.loads(request.data)
from pca import cp_main ##
from pca import cp_main ##
res = cp_main(info)
except Exception as e:
res = json.dumps({"error_msg": traceback.format_exc()})
@ -489,5 +520,5 @@ def get_pca_cp_mtcl_test():
if __name__ == '__main__':
a = 1
#GetAssess()
app.run(host="172.28.137.230", port="8082", threaded=True)
# GetAssess()
app.run(host="0.0.0.0", port="8082", threaded=True)

44
app.spec

@ -0,0 +1,44 @@
# -*- mode: python ; coding: utf-8 -*-
# PyInstaller build specification for the model-lab Flask service (app.py).
# Build with:  pyinstaller app.spec
block_cipher = None
# Analysis: scan app.py and collect its imports, binaries and data files.
# NOTE(review): hiddenimports is empty — if PyInstaller misses any dynamically
# imported model modules (e.g. ones loaded via `import` inside route handlers),
# they must be listed here; confirm against a test build.
a = Analysis(['app.py'],
pathex=[],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
hooksconfig={},
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
# PYZ: the compressed archive of pure-Python modules found by Analysis.
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
# EXE: the bootloader executable. exclude_binaries=True means this is a
# one-folder build — shared libraries are collected separately by COLLECT
# below rather than embedded in the executable.
exe = EXE(pyz,
a.scripts,
[],
exclude_binaries=True,
name='app',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
console=True,
disable_windowed_traceback=False,
target_arch=None,
codesign_identity=None,
entitlements_file=None )
# COLLECT: assemble the executable plus all binaries/zipfiles/data into the
# dist/app output folder.
coll = COLLECT(exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=True,
upx_exclude=[],
name='app')

BIN
build/temp.win-amd64-3.6/Release/AANN_Derivative.cp36-win_amd64.exp

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/AANN_Derivative.cp36-win_amd64.lib

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/AANN_Derivative.obj

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/AANN_RB.cp36-win_amd64.exp

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/AANN_RB.cp36-win_amd64.lib

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/AANN_RB.obj

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/AANN_Train.cp36-win_amd64.exp

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/AANN_Train.cp36-win_amd64.lib

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/AANN_Train.obj

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/AANN_fit.cp36-win_amd64.exp

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/AANN_fit.cp36-win_amd64.lib

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/AANN_fit.obj

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/ANN_Test_offline.cp36-win_amd64.exp

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/ANN_Test_offline.cp36-win_amd64.lib

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/ANN_Test_offline.obj

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/ASSESS.cp36-win_amd64.exp

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/ASSESS.cp36-win_amd64.lib

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/ASSESS.obj

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/aannmtcltest_recon.cp36-win_amd64.exp

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/aannmtcltest_recon.cp36-win_amd64.lib

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/aannmtcltest_recon.obj

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/pca_test_by_rb.cp36-win_amd64.exp

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/pca_test_by_rb.cp36-win_amd64.lib

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/pca_test_by_rb.obj

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/pca_test_by_rb_plot.cp36-win_amd64.exp

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/pca_test_by_rb_plot.cp36-win_amd64.lib

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/pca_test_by_rb_plot.obj

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/pca_train_off.cp36-win_amd64.exp

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/pca_train_off.cp36-win_amd64.lib

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/pca_train_off.obj

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/pcamtcltest_recon.cp36-win_amd64.exp

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/pcamtcltest_recon.cp36-win_amd64.lib

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/pcamtcltest_recon.obj

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/web.cp36-win_amd64.exp

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/web.cp36-win_amd64.lib

Binary file not shown.

BIN
build/temp.win-amd64-3.6/Release/web.obj

Binary file not shown.

1
cjl勿删/11cf0.json

@ -1 +0,0 @@
{"FDR": 0.0020000000000000018, "FAR": 0.0018076923076923077}

1
cjl勿删/11cf1.json

@ -1 +0,0 @@
{"FDR": 0.09199999999999997, "FAR": 0.0028461538461538463}

1
cjl勿删/11cf10.json

@ -1 +0,0 @@
{"FDR": 1.0, "FAR": 0.0016153846153846153}

1
cjl勿删/11cf11.json

@ -1 +0,0 @@
{"FDR": 1.0, "FAR": 0.0013846153846153845}

1
cjl勿删/11cf12.json

@ -1 +0,0 @@
{"FDR": 1.0, "FAR": 0.0015769230769230769}

1
cjl勿删/11cf2.json

@ -1 +0,0 @@
{"FDR": 0.7130000000000001, "FAR": 0.003269230769230769}

1
cjl勿删/11cf3.json

@ -1 +0,0 @@
{"FDR": 0.994, "FAR": 0.0016538461538461537}

1
cjl勿删/11cf4.json

@ -1 +0,0 @@
{"FDR": 1.0, "FAR": 0.0015769230769230769}

1
cjl勿删/11cf5.json

@ -1 +0,0 @@
{"FDR": 1.0, "FAR": 0.0016153846153846153}

1
cjl勿删/11cf6.json

@ -1 +0,0 @@
{"FDR": 1.0, "FAR": 0.0015769230769230769}

1
cjl勿删/11cf7.json

@ -1 +0,0 @@
{"FDR": 1.0, "FAR": 0.0015384615384615385}

1
cjl勿删/11cf8.json

@ -1 +0,0 @@
{"FDR": 1.0, "FAR": 0.0016153846153846153}

1
cjl勿删/11cf9.json

@ -1 +0,0 @@
{"FDR": 1.0, "FAR": 0.0016153846153846153}

1
cjl勿删/1detection_fan0_1.json

@ -1 +0,0 @@
{"FDR": 0.46699999999999997, "FAR": 0.13777777777777778}

1
cjl勿删/1detection_fan0_5.json

@ -1 +0,0 @@
{"FDR": 0.868, "FAR": 0.15344444444444444}

1
cjl勿删/22cf0.json

@ -1 +0,0 @@
{"FDR": 0.007845188284518856, "FAR": 0.001953125}

1
cjl勿删/22cf1.json

@ -1 +0,0 @@
{"FDR": 0.1931106471816284, "FAR": 0.004943390208898103}

1
cjl勿删/22cf10.json

@ -1 +0,0 @@
{"FDR": 1.0, "FAR": 0.0016729735112527384}

1
cjl勿删/22cf11.json

@ -1 +0,0 @@
{"FDR": 1.0, "FAR": 0.0015537229592446516}

1
cjl勿删/22cf2.json

@ -1 +0,0 @@
{"FDR": 0.770440251572327, "FAR": 0.001912960306073649}

28
cjl勿删/22cf3.json

@ -1,28 +0,0 @@
try:
for i in range(1, 3):
k = i
model = pca_train_k.pca(train_data, k)
if al_type == "SPE":
limit = model["QCUL_95"]
elif al_type == "FAI":
limit = model["Kesi_95"]
else:
limit = model["T2CUL_95"]
_, test_data, f_m = get_test_data_1(train_data, samples, amplitudes, fault_index, 1)
data = (test_data - model["Train_X_mean"]) / model["Train_X_std"]
t_r = get_rb_pca(data, model, limit, al_type, f_m)
result.append(t_r)
except Exception as e:
with open('log.log', "a") as f:
f.write(f"{str(datetime.datetime.now())}{traceback.format_exc()}")
# for index in range(data.shape[0]):
# line = data[index] @ m @ data[index].T
# lines.append(line)
# x = list(range(data.shape[0]))
# limits_line = list(repeat(limit, data.shape[0]))
# plt.plot(x, lines)
# plt.plot(x, limits_line)
# plt.title(f'k={k},limit={limit}')

1
cjl勿删/22cf4.json

@ -1 +0,0 @@
{"FDR": 1.0, "FAR": 0.0015949599266318435}

1
cjl勿删/22cf5.json

@ -1 +0,0 @@
{"FDR": 1.0, "FAR": 0.0017141034840149885}

1
cjl勿删/22cf6.json

@ -1 +0,0 @@
{"FDR": 1.0, "FAR": 0.0016721076518831117}

1
cjl勿删/22cf7.json

@ -1 +0,0 @@
{"FDR": 1.0, "FAR": 0.0013954786491766676}

1
cjl勿删/22cf8.json

@ -1 +0,0 @@
{"FDR": 1.0, "FAR": 0.0015545280612244898}

1
cjl勿删/22cf9.json

@ -1 +0,0 @@
{"FDR": 1.0, "FAR": 0.001634117178158629}

1
cjl勿删/2detection_fan0_1.json

@ -1 +0,0 @@
{"FDR": 0.926, "FAR": 0.0006666666666666666}

1
cjl勿删/2detection_fan0_5.json

@ -1 +0,0 @@
{"FDR": 0.984, "FAR": 0.0071111111111111115}

1
cjl勿删/3detection_fan0_1.json

@ -1 +0,0 @@
{"FDR": 0.17000000000000004, "FAR": 0.025444444444444443}

1
cjl勿删/3detection_fan0_5.json

@ -1 +0,0 @@
{"FDR": 0.821, "FAR": 0.018}

1
cjl勿删/4detection_fan0_1.json

@ -1 +0,0 @@
{"FDR": 0.5720000000000001, "FAR": 0.008}

1
cjl勿删/4detection_fan0_5.json

@ -1 +0,0 @@
{"FDR": 0.921, "FAR": 0.008222222222222223}

1
cjl勿删/5detection_fan0_1.json

@ -1 +0,0 @@
{"FDR": 0.783, "FAR": 0.0035555555555555557}

1
cjl勿删/5detection_fan0_5.json

@ -1 +0,0 @@
{"FDR": 0.958, "FAR": 0.0012222222222222222}

BIN
cjl勿删/__pycache__/ae_train.cpython-36.pyc

Binary file not shown.

BIN
cjl勿删/__pycache__/pca_diagnosis.cpython-36.pyc

Binary file not shown.

BIN
cjl勿删/__pycache__/sae_diagnosis.cpython-36.pyc

Binary file not shown.

95
cjl勿删/ae_train.py

@ -1,95 +0,0 @@
# -*- coding: utf-8 -*-
"""
@Time : 2020/5/29 13:52
@Author : 杰森·家乐森
@File : ae_train.py
@Software: PyCharm
"""
import time
import json
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import r2_score
from tensorflow.keras import backend
from tensorflow.keras.models import Model
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import Dense, Input
def rmse(y_true, y_pred):
    """Root-mean-square error between targets and predictions, as a Keras backend tensor."""
    per_sample_mse = tf.keras.losses.mean_squared_error(y_true, y_pred)
    return backend.sqrt(backend.mean(per_sample_mse, axis=-1))
def train(layers, data, test, epoch):
    """Train a symmetric autoencoder and evaluate its reconstruction quality.

    :param layers: list of ints — layer widths from input to output
        (e.g. ``[10, 8, 1, 8, 10]``); the middle entry is the bottleneck.
    :param data: training matrix, one sample per row (unscaled).
    :param test: evaluation matrix; scaled with the scaler fitted on ``data``.
    :param epoch: number of training epochs.
    :return: tuple ``(model, limit, r2, mse)`` — ``model`` is a dict with
        ``"weights"``/``"bias"`` nested lists, ``limit`` is 3x the RMSE of the
        training-set reconstruction (used as an alarm threshold), ``r2`` and
        ``mse`` are the R^2 score and RMSE on ``test``.
    """
    # Min-max scale; the scaler is fitted on the training data only.
    scaler = MinMaxScaler()
    train_x = scaler.fit_transform(data)
    test_x = scaler.transform(test)

    # Index (within the Dense layers) of the linear bottleneck layer.
    bottleneck = len(layers) // 2 - 1

    # Build the network: sigmoid activations everywhere except the bottleneck,
    # which stays linear.
    tensors = [Input(shape=(layers[0],))]
    for idx, width in enumerate(layers[1:]):
        if idx == bottleneck:
            tensors.append(Dense(width)(tensors[idx]))
        else:
            tensors.append(Dense(width, activation="sigmoid")(tensors[idx]))
    autoencoder = Model(tensors[0], tensors[-1])
    autoencoder.compile(optimizer="adam", loss="mse",
                        metrics=['accuracy'])
    autoencoder.fit(train_x, train_x, epochs=epoch, batch_size=32, shuffle=True)

    # Reconstructions of the training and evaluation sets.
    recon_train = autoencoder.predict(train_x, batch_size=400)
    recon_test = autoencoder.predict(test_x, batch_size=400)

    # rmse() returns backend tensors; evaluate them inside a TF1 session.
    with tf.Session():
        spe = rmse(train_x, recon_train).eval()
        limit = 3 * spe
        mse = rmse(test_x, recon_test).eval()
    r2 = r2_score(test_x, recon_test)

    # get_weights() alternates kernel/bias arrays: [W1, b1, W2, b2, ...].
    raw = autoencoder.get_weights()
    weights = [w.tolist() for w in raw[0::2]]
    bias = [b.tolist() for b in raw[1::2]]
    model = {
        "weights": weights,
        "bias": bias
    }
    return model, float(limit), r2, float(mse)
if __name__ == '__main__':
    # Training data is stored as {"data": [[...], ...]} in data.json.
    with open('data.json', 'r') as f:
        data = np.array(json.load(f)['data'])
    # Candidate symmetric topologies (input width 10); the bottleneck width
    # in the middle is swept below.
    layer_arr = [
        [10, 1, 10],
        [10, 8, 1, 8, 10],
        [10, 8, 6, 1, 6, 8, 10]
    ]
    for layers in layer_arr:
        mid = len(layers) // 2
        # Sweep the bottleneck width from 1 to 5 units, retraining each time.
        for width in range(1, 6):
            layers[mid] = width
            model, limit, r2, mse = train(layers, data[:3000, :], data, 5000)
            model_config = {
                "layer": layers,
                "r2": r2,
                "mse": mse,
                "model": model,
                "spe": limit
            }
            # e.g. model5_3.json == 5-layer network with a 3-unit bottleneck.
            with open('model%d_%d.json' % (len(layers), width), 'w') as f:
                f.write(json.dumps(model_config))
    print('OK')

109
cjl勿删/autoencoder1.json

@ -1,109 +0,0 @@
{
"model": {
"weights": [
[
[
0.013267557136714458,
1.161040186882019
],
[
0.053286824375391006,
-1.0407843589782715
],
[
-0.7668898105621338,
1.2049490213394165
],
[
0.13135650753974915,
-1.8514107465744019
],
[
-0.05695141851902008,
0.8509989380836487
],
[
1.7956933975219727,
0.868218183517456
],
[
-0.6104443669319153,
-0.19653184711933136
],
[
1.109096884727478,
-0.017840193584561348
],
[
-2.00811767578125,
-0.4331284761428833
],
[
0.8440019488334656,
0.004479509778320789
]
],
[
[
-2.9800901412963867,
2.9839468002319336,
-1.9138803482055664,
0.763970136642456,
3.1833763122558594,
7.314971923828125,
-7.338461875915527,
5.048229217529297,
-2.2446556091308594,
-11.515352249145508
],
[
7.767373561859131,
-7.772740840911865,
5.254458904266357,
-2.1539909839630127,
-12.252762794494629,
1.64174222946167,
-1.6902512311935425,
1.0922235250473022,
-0.36473581194877625,
-1.7786568403244019
]
]
],
"bias": [
[
-0.12595483660697937,
-0.46088284254074097
],
[
-2.623779535293579,
1.970546007156372,
-1.4591680765151978,
-0.1846103072166443,
0.5310178995132446,
-5.116513729095459,
4.508279800415039,
-3.150614023208618,
0.5747373700141907,
3.220733165740967
]
]
},
"spe": 0.17004422843456268,
"cov": [
[
22.631289381543212,
-3.8047325457751078
],
[
-3.8047325457751078,
23.33727614059547
]
],
"h_mean": [
0.5599127411842346,
0.538157045841217
],
"limit_95": 6.011475027591657,
"limit_99": 9.251043652650376
}

1
cjl勿删/autoencoder2.json

@ -1 +0,0 @@
{"model": {"weights": [[[-0.8356160521507263, 0.3092801868915558, -1.2909915447235107, 1.6997143030166626, -0.027433719485998154], [1.1384114027023315, 0.9021164774894714, 0.39725518226623535, -1.7175440788269043, -1.1450926065444946], [-0.5036107897758484, 0.9385666251182556, -1.476340651512146, 0.5507526993751526, -0.1961209625005722], [0.6321204304695129, -0.1261664777994156, 0.3924834430217743, -1.504025936126709, -1.271727204322815], [-1.4868829250335693, -2.190732479095459, 2.3284878730773926, -0.6612717509269714, 1.4683117866516113], [0.3885435163974762, 1.6360031366348267, -0.27096644043922424, -0.6505817770957947, 0.9483583569526672], [0.6231023669242859, 0.44627267122268677, -1.5316928625106812, -1.2492015361785889, -3.0080792903900146], [0.2088644951581955, 2.1375482082366943, -0.5260413289070129, -1.106449842453003, 1.1216826438903809], [1.161962628364563, -1.5129520893096924, -0.9803615212440491, 0.8483885526657104, -1.1303361654281616], [-0.3191094696521759, -1.0919346809387207, 0.6873847842216492, 1.3502848148345947, 1.280632734298706]], [[2.3238298892974854, -0.4271831512451172], [-3.0681726932525635, 1.4455369710922241], [3.102893114089966, 2.151425361633301], [-1.994104266166687, -2.915308952331543], [-0.658724844455719, 3.064145565032959]], [[4.225986480712891, -3.9873640537261963, -3.688408136367798, -10.041897773742676, -4.217791557312012], [-3.526071310043335, 9.990665435791016, -3.4148740768432617, -1.254059076309204, 4.050563335418701]], [[-2.1572444438934326, 1.85689115524292, -1.498786449432373, 1.785080075263977, 1.9605295658111572, -3.683119535446167, 3.6307613849639893, -1.7958505153656006, 0.5846244692802429, 1.6343960762023926], [-2.4148974418640137, 2.371803045272827, -1.5240622758865356, 0.8642465472221375, 1.2768983840942383, 2.6133084297180176, -3.3966615200042725, 1.2262998819351196, -2.612510919570923, -4.64479923248291], [3.0364913940429688, -3.201200008392334, 2.247159719467163, -3.5875837802886963, -5.923583030700684, 
-3.643436908721924, 3.541846990585327, -2.394259452819824, 0.3634031414985657, 1.1130130290985107], [5.331246376037598, -5.80121374130249, 3.4133965969085693, 1.897201418876648, 1.3766930103302002, -0.1153433546423912, 0.14817313849925995, -0.06482663750648499, -0.26036545634269714, -0.28526535630226135], [0.09504624456167221, -0.30022457242012024, 0.09537942707538605, -0.5121979117393494, -1.3358240127563477, 4.144266605377197, -4.262178421020508, 3.3335225582122803, 0.04168545454740524, -1.972151756286621]]], "bias": [[0.506768524646759, 1.2093656063079834, -0.8774029612541199, -0.9476589560508728, -0.5230045318603516], [0.29825013875961304, -1.4348657131195068], [0.0223399605602026, -0.46129781007766724, 4.27229642868042, 1.873478889465332, -0.6493011713027954], [-1.0499862432479858, 0.8853863477706909, -0.5634945034980774, 0.2314375638961792, 0.9517301917076111, -0.668080747127533, 0.9133151769638062, -0.1797475665807724, 1.027596116065979, 1.3987751007080078]]}, "spe": 0.10670268908143044, "cov": [[31.721639732741355, -6.242508859273951], [-6.2425088592739515, 26.28154801896884]], "h_mean": [0.2731820046901703, 0.5072057843208313], "limit_95": 6.011475027591657, "limit_99": 9.251043652650376}

387
cjl勿删/autoencoder3.json

@ -1,387 +0,0 @@
{
"model": {
"weights": [
[
[
-1.2245333194732666,
0.9217612743377686,
0.872409462928772,
1.0764983892440796,
0.2999080717563629,
5.244951248168945,
-0.9299842119216919
],
[
-0.8295643329620361,
-1.1188461780548096,
-1.5337766408920288,
0.2272522896528244,
-0.09671537578105927,
-0.2725476622581482,
3.725551128387451
],
[
-1.4028007984161377,
0.49336981773376465,
-0.058944400399923325,
0.9814965724945068,
1.141343355178833,
5.971405982971191,
-0.7457082867622375
],
[
-0.8749677538871765,
-0.8031793236732483,
-0.27733367681503296,
0.28917160630226135,
1.0782771110534668,
0.14209581911563873,
3.119966745376587
],
[
1.004982829093933,
0.9370648264884949,
-0.6308929324150085,
-0.8727185726165771,
-1.6758214235305786,
-2.8413281440734863,
-4.569004535675049
],
[
0.3696422278881073,
-2.4712371826171875,
0.9488536715507507,
2.1806108951568604,
-1.2012957334518433,
0.36072051525115967,
0.23620405793190002
],
[
-3.7826366424560547,
0.9237260818481445,
-0.8343602418899536,
-0.6885524392127991,
1.7286131381988525,
1.9389992952346802,
1.3162440061569214
],
[
0.36113783717155457,
-1.4914426803588867,
0.37810999155044556,
1.2679622173309326,
-0.732909083366394,
1.933423399925232,
0.9037644267082214
],
[
-1.5808677673339844,
-0.031187832355499268,
-0.38661620020866394,
0.4751318693161011,
1.1403697729110718,
0.958604097366333,
1.1177728176116943
],
[
0.7001248002052307,
0.7343422770500183,
-0.5925262570381165,
-1.9185808897018433,
1.1383529901504517,
-2.0614662170410156,
-2.6704373359680176
]
],
[
[
-1.7921253442764282,
-1.7791352272033691,
1.9362361431121826,
-4.38215446472168,
-1.696469783782959
],
[
1.1622157096862793,
-1.5756466388702393,
-0.5181728005409241,
5.293956756591797,
3.5066170692443848
],
[
0.4845467507839203,
2.4686086177825928,
-1.7550359964370728,
-0.8793517351150513,
-1.783814787864685
],
[
1.4014157056808472,
1.2228449583053589,
0.32988429069519043,
-0.7148339152336121,
-5.3566131591796875
],
[
0.17789016664028168,
-1.710491418838501,
-0.4293838143348694,
2.0826103687286377,
0.926232099533081
],
[
2.32541823387146,
1.312388300895691,
-1.2358834743499756,
0.4478369951248169,
-2.3990867137908936
],
[
-2.395493507385254,
-0.7135123610496521,
2.6433749198913574,
1.1071109771728516,
-1.9118226766586304
]
],
[
[
2.059842109680176,
2.5099709033966064
],
[
-0.26115500926971436,
1.5193843841552734
],
[
-3.904820442199707,
-1.3211722373962402
],
[
2.479161024093628,
-2.436509847640991
],
[
1.2092896699905396,
-4.882288455963135
]
],
[
[
-4.140275955200195,
4.376102447509766,
5.007478713989258,
-5.753604412078857,
2.551893711090088
],
[
5.109598636627197,
4.395392417907715,
3.7867867946624756,
3.2506680488586426,
-5.936469078063965
]
],
[
[
6.964428424835205,
-1.9023141860961914,
1.2294992208480835,
-2.494931697845459,
5.139438629150391,
-1.2469078302383423,
3.8611180782318115
],
[
1.4523006677627563,
9.954370498657227,
-2.139326810836792,
-0.9459704160690308,
2.0168163776397705,
11.196776390075684,
2.2062644958496094
],
[
0.7467207908630371,
8.28785514831543,
-1.298266053199768,
0.07817942649126053,
1.3462064266204834,
11.280993461608887,
0.24093034863471985
],
[
5.210637092590332,
-4.294027328491211,
2.1438729763031006,
-3.282440185546875,
3.713907480239868,
-3.499796152114868,
5.917919635772705
],
[
-5.016149044036865,
-2.7130753993988037,
-0.2851487696170807,
8.532496452331543,
-3.5970492362976074,
-1.5019468069076538,
-7.698583126068115
]
],
[
[
-0.6968080997467041,
0.7484884262084961,
-0.4230259656906128,
0.46702805161476135,
0.9606726765632629,
2.2319283485412598,
-2.3976309299468994,
2.276494026184082,
-4.385148048400879,
-6.036461353302002
],
[
0.7987446188926697,
-1.1139551401138306,
-0.22006738185882568,
4.318490505218506,
0.9564504027366638,
0.015492977574467659,
-0.021582378074526787,
0.019811546429991722,
-0.036860693246126175,
0.6295841932296753
],
[
-5.090571403503418,
4.759396553039551,
-4.77251672744751,
4.969375133514404,
-2.5499253273010254,
0.08981809765100479,
-0.08619396388530731,
0.0694398358464241,
-0.1133684366941452,
1.5965533256530762
],
[
-0.662455677986145,
0.6227018237113953,
-0.6145215630531311,
0.7977278232574463,
1.030511736869812,
-3.6479456424713135,
4.154869556427002,
-3.5671372413635254,
4.193316459655762,
-1.9646614789962769
],
[
2.118936777114868,
-2.122020959854126,
1.6456965208053589,
-1.680084228515625,
-0.7294877171516418,
1.198642611503601,
-1.2397879362106323,
1.2121137380599976,
-2.562821388244629,
-2.1334125995635986
],
[
2.219999313354492,
-2.3641719818115234,
2.326659679412842,
-5.463375568389893,
-7.3427534103393555,
-0.009894054383039474,
0.01199839636683464,
-0.023310601711273193,
0.0612969771027565,
-0.013971450738608837
],
[
0.5158653259277344,
-0.4588440954685211,
0.5030941963195801,
-0.557999312877655,
0.5210561752319336,
1.4156707525253296,
-1.5811996459960938,
0.28104645013809204,
3.994385004043579,
0.8332198858261108
]
]
],
"bias": [
[
-1.069589376449585,
0.0032550811301916838,
-0.6666462421417236,
0.35170167684555054,
0.6081191301345825,
2.1643481254577637,
1.325771689414978
],
[
-0.6558616757392883,
-0.682751476764679,
1.1333270072937012,
0.24874892830848694,
-0.09851467609405518
],
[
-1.0663052797317505,
0.11489524692296982
],
[
0.264947772026062,
-3.955415964126587,
-3.465902328491211,
0.6347262263298035,
1.6293431520462036
],
[
-0.783745288848877,
-1.700945496559143,
0.30000773072242737,
1.5292822122573853,
-0.9142544269561768,
-1.20986008644104,
-1.3012850284576416
],
[
-0.2500978410243988,
0.04470078647136688,
0.5079190135002136,
-0.9122554063796997,
3.7497880458831787,
-0.6030765175819397,
-0.29624900221824646,
0.2175414264202118,
-0.4752647280693054,
4.907028675079346
]
]
},
"spe": 0.046509415842592716,
"cov": [
[
59.319103246994395,
-14.53475875845949
],
[
-14.53475875845949,
71.20353814746151
]
],
"h_mean": [
0.3830653131008148,
0.3723643720149994
],
"limit_95": 6.011475027591657,
"limit_99": 9.251043652650376
}

1
cjl勿删/cf.json

@ -1 +0,0 @@
{"FDR": 0.0, "FAR": 0.00034615384615384613}

1
cjl勿删/cf0.json

@ -1 +0,0 @@
{"FDR": 0.0, "FAR": 0.0003846153846153846}

1
cjl勿删/cf1.json

@ -1 +0,0 @@
{"FDR": 0.0020000000000000018, "FAR": 0.0004230769230769231}

1
cjl勿删/cf10.json

@ -1 +0,0 @@
{"FDR": 1.0, "FAR": 0.00034615384615384613}

1
cjl勿删/cf11.json

@ -1 +0,0 @@
{"FDR": 1.0, "FAR": 0.00034615384615384613}

1
cjl勿删/cf2.json

@ -1 +0,0 @@
{"FDR": 0.013000000000000012, "FAR": 0.00046153846153846153}

1
cjl勿删/cf3.json

@ -1 +0,0 @@
{"FDR": 0.14700000000000002, "FAR": 0.0017307692307692308}

1
cjl勿删/cf4.json

@ -1 +0,0 @@
{"FDR": 0.862, "FAR": 0.001346153846153846}

1
cjl勿删/cf5.json

@ -1 +0,0 @@
{"FDR": 0.998, "FAR": 0.0003846153846153846}

1
cjl勿删/cf6.json

@ -1 +0,0 @@
{"FDR": 1.0, "FAR": 0.00034615384615384613}

1
cjl勿删/cf7.json

@ -1 +0,0 @@
{"FDR": 1.0, "FAR": 0.00034615384615384613}

1
cjl勿删/cf8.json

@ -1 +0,0 @@
{"FDR": 1.0, "FAR": 0.00034615384615384613}

1
cjl勿删/cf9.json

@ -1 +0,0 @@
{"FDR": 1.0, "FAR": 0.00034615384615384613}

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save