# -*- coding: utf-8 -*-
"""
Created on Sun Feb 28 10:04:26 2016
PCA source code -- latest update
@author: liudiwei
"""
import numpy as np
import pandas as pd
import json
import sys
import datetime
import time
import gc

import pymssql
import pyodbc
import requests
import jenkspy
import xlrd
from scipy.stats import norm, f, chi2

from recon import Lars, recon_fault_diagnosis_r, recon_fault_diagnosis_r_l, recon_fault_diagnosis_r_c
import config
"""
参数:
- XMat:传入的是一个numpy的矩阵格式,行表示样本数,列表示特征
- k:表示取前k个特征值对应的特征向量
返回值:
- finalData:参数一指的是返回的低维矩阵,对应于输入参数二
- reconData:参数二对应的是移动坐标轴后的矩阵
"""
def min_pos(X):
    """Return the smallest positive entry of X together with its row/column indices.

    Non-positive entries are masked by overwriting them with the array maximum;
    if the resulting minimum is still negative it is clamped to 0.
    """
    X[X <= 0] = np.max(X)
    m = np.min(X)
    re = np.where(X == m)
    min_i = re[0]
    min_j = re[1]
    if m < 0:
        m = 0
    return m, min_i, min_j
class MSSQL:
    def __init__(self, host, user, pwd, database):
        self.host = host
        self.user = user
        self.pwd = pwd
        self.db = database
    def __GetConnect(self):
        """
        Open a connection to the configured database.
        Returns: conn.cursor()
        """
        if not self.db:
            raise NameError("Database connection information is not configured")
        self.conn = pymssql.connect(host=self.host, user=self.user, password=self.pwd, database=self.db, charset="utf8")
        cur = self.conn.cursor()
        if not cur:
            raise NameError("Failed to connect to the database")
        else:
            return cur
    def ExecQuery(self, sql):
        """
        Execute a query statement.
        Returns a list of tuples: each tuple is one record row and each element
        of the tuple is one field of that row.
        """
        cur = self.__GetConnect()
        cur.execute(sql)
        resList = cur.fetchall()
        # The connection must be closed once the query has finished
        self.conn.close()
        return resList
    def ExecNonQuery(self, sql):
        """
        Execute a non-query statement (INSERT/UPDATE/DELETE).
        Example call:
            cur = self.__GetConnect()
            cur.execute(sql)
            self.conn.commit()
            self.conn.close()
        """
        cur = self.__GetConnect()
        cur.execute(sql)
        self.conn.commit()
        self.conn.close()
def get_model_by_ID(model_id):
ms = MSSQL(host="120.26.116.243", user="root", pwd="powerSIS#123", database="alert")
resList = ms.ExecQuery("SELECT Model_info FROM model_cfg where \"model_id\"="+str(model_id))
#return json.loads(resList[0][0])["para"]
return json.loads(resList[0][0])
def get_model_by_id(model_id):
    try:
        conn = pyodbc.connect(
            r"DRIVER={ODBC Driver 17 for SQL Server};SERVER=%s;DATABASE=alert;UID=sa;PWD=powerSIS#123" % config._SQL_IP)  # connect to the database
    except pyodbc.Error:
        conn = pyodbc.connect(
            r"DRIVER={SQL SERVER NATIVE CLIENT 10.0};SERVER=%s;DATABASE=alert;UID=sa;PWD=powerSIS#123" % config._SQL_IP)  # fall back to the older native-client driver
    cursor = conn.cursor()  # obtain a cursor
    cursor.execute(f"SELECT Model_info FROM model_cfg where model_id={model_id}")
    res_list = cursor.fetchall()  # fetch the query results
    conn.commit()  # commit
    cursor.close()  # close the cursor
    conn.close()  # close the database connection
    return json.loads(res_list[0][0])
def get_model_by_id_and_version(model_id, version):
    try:
        conn = pyodbc.connect(
            r"DRIVER={ODBC Driver 17 for SQL Server};SERVER=%s;DATABASE=alert;UID=sa;PWD=powerSIS#123" % config._SQL_IP)  # connect to the database
    except pyodbc.Error:
        conn = pyodbc.connect(
            r"DRIVER={SQL SERVER NATIVE CLIENT 10.0};SERVER=%s;DATABASE=alert;UID=sa;PWD=powerSIS#123" % config._SQL_IP)  # fall back to the older native-client driver
    cursor = conn.cursor()  # obtain a cursor
    cursor.execute(f"SELECT Model_info FROM [alert].[dbo].[Model_Version] where model_id={model_id} and version='{version}'")
    res_list = cursor.fetchall()  # fetch the query results
    conn.commit()  # commit
    cursor.close()  # close the cursor
    conn.close()  # close the database connection
    return json.loads(res_list[0][0])
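# Fields of the model dictionary (Model_info) that pca() below reads; listed here from
# its usage in this file, since the schema itself is defined elsewhere:
#   Train_X_mean, Train_X_std   : per-variable mean/std used to standardize the data
#   featValue, featVec, K       : training eigenvalues/eigenvectors and the number of retained components
#   T2CUL_99, QCUL_99, Kesi_99  : 99% control limits for T2, SPE and the combined index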
def pca(model, LockVariable, Data_origin):
    Data = (Data_origin - model["Train_X_mean"]) / model["Train_X_std"]
    featValue = np.array(model["featValue"])  # eigenvalues from the training data
    featVec = np.array(model["featVec"])  # eigenvectors from the training data
    k = (model["K"])  # number of principal components
    # selectVec = np.array(model["selectVec"])
    selectVec = featVec[:, 0:k]  # keep the first k eigenvectors (user-selected)
    # index = np.argsort(-np.array(featValue))  # sort featValue in descending order
    featValue_sort = featValue  # [index]  # eigenvalues, assumed to be sorted already
    '''
    featValue, featVec = np.linalg.eig(model["COV"])  # eigenvalues/eigenvectors of the covariance matrix
    index = np.argsort(-featValue)  # sort featValue in descending order
    featValue_sort = featValue[index]  # sorted eigenvalues
    selectVec = np.matrix(featVec[:, index[:int(model["K"])]])  # loading matrix P, hence the transposes below
    '''
    ############----------*********- SPE -**************----------########################
    numbel_variable = featValue.shape[0]  # number of eigenvalues = number of variables
    # LockVariable="3,5"
    C_ = np.eye(numbel_variable) - np.dot(selectVec, selectVec.T)  # residual projection matrix I - P P^T
    X_SPE = C_.T
    D_SPE = C_
    DIAG_SPE = np.eye(numbel_variable)  # identity matrix of order numbel_variable
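    # For a standardized sample y the squared prediction error is
    #     SPE = y (I - P P^T) y^T = y C_ y^T,
    # which is exactly how SPE_line is computed inside the loop below.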
    '''
    # ************************ call LARS *******************************
    t = 50000
    lamuta = 1
    limit_line = model["QCUL_99"]
    beta_path = []
    for i in range(Data.shape[0]):
        Y = Data[i, :]
        beta, mse = Lars(X_SPE, Y, D_SPE, DIAG_SPE, t, limit_line, lamuta)
        beta_end = abs(beta[-1, :])
        jenk = jenkspy.jenks_breaks(beta_end, 5)
        limt = (jenk[1] + jenk[2]) / 2
        index = np.where(beta_end > limt)[0]
        beta_path.append(beta[-1, :])
    '''
    ############----------*********- T2 -**************----------########################
    DIAG_T2 = np.linalg.pinv(np.diag(featValue_sort[:int(model["K"])]))  # Lambda_k^{-1}
    D_T2 = selectVec.copy()
    X_T2 = np.dot(D_T2, np.linalg.cholesky(DIAG_T2)).T
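    # Hotelling's T2 for a standardized sample y is
    #     T2 = y^T P Lambda_k^{-1} P^T y = || X_T2 y ||^2,
    # since the Cholesky factor of the diagonal Lambda_k^{-1} is just its square root.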
    ############----------*********- combined index -**************----------########################
    II = featValue_sort.copy()
    II[:int(model["K"])] = II[:int(model["K"])] * model["T2CUL_99"]
    II[int(model["K"]):] = model["QCUL_99"]
    DIAG_Fai = np.linalg.pinv(np.diag(II))
    D_Fai = featVec.copy()
    X_Fai = np.dot(D_Fai, np.linalg.cholesky(DIAG_Fai)).T
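    # Combined index for a standardized sample y:
    #     phi = y^T D_Fai DIAG_Fai D_Fai^T y,
    # where DIAG_Fai scales the principal subspace by 1 / (lambda_i * T2CUL_99) and the
    # residual subspace by 1 / QCUL_99; values above Kesi_99 are treated as faults.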
    # ************************ call LARS *******************************
    t = 50000
    lamuta = 1
    # limit_line = model["Kesi_99"] / np.sqrt(numbel_variable)  # modified
    limit_line = model["Kesi_99"]
    beta_path = []
    SPE_list = []
    FAI_list = []
    paraState = np.zeros([np.array(Data_origin).shape[0], np.array(Data_origin).shape[1]])
    if Data.shape[1] >= 12:
        para_length = 3
    elif 12 > Data.shape[1] >= 7:
        para_length = 2
    else:
        para_length = 1
    Y = None
    plots_matrix = []  # reconstruction matrices produced by the contribution-plot method
    plots_index = []  # row indices handled by the contribution-plot method
    for i in range(Data.shape[0]):
        Y = Data[i, :]  # one row of the test data
        # ********************* compute SPE *******************************
        SPE_line = np.dot(Y, C_).dot(Y.T)
        SPE_list.append(SPE_line)
        # ********************* compute the combined index ****************
        FAI_list.append(np.dot(Y.T, D_Fai).dot(DIAG_Fai).dot(D_Fai.T).dot(Y))
        # ********************* run LARS **********************************
        beta, mse = Lars(X_Fai, Y, D_Fai, DIAG_Fai, t, limit_line, LockVariable)
        beta_end = abs(beta[-1, :])
        pi = len(beta_end)
        if pi > 7:
            jenk = jenkspy.jenks_breaks(beta_end, 5)
        else:
            jenk = jenkspy.jenks_breaks(beta_end, 2)
        limt = (jenk[1] + jenk[2]) / 2
        index = np.where(beta_end > 0)[0]
        if len(index) > para_length:
            # res = recon_fault_diagnosis_r_c(Y, D_Fai @ DIAG_Fai @ D_Fai.T, limit_line, list(zip(index, beta_end[index])), model,
            #                                 True, X_SPE @ X_SPE.T, rbc=None)
            res = recon_fault_diagnosis_r_c(Y, D_Fai @ DIAG_Fai @ D_Fai.T, limit_line,
                                            list(zip(index, beta_end[index])), model,
                                            True, X_SPE @ X_SPE.T, LockVariable, selectVec, rbc=None)
            if not isinstance(res[0], list):
                if res[1] == "plot":
                    # beta[-1, :] = res[0]
                    plots_matrix.append(res[0])
                    plots_index.append(i)
                else:
                    beta[-1, :], index = res[0].T, res[1]
                # beta[-1, :], index = res[0].T, res[1]
        elif len(index) <= para_length and len(index) != 0:
            res = recon_fault_diagnosis_r_l(Y, D_Fai @ DIAG_Fai @ D_Fai.T, index)
            beta[-1, :], index = res[0].T, res[1]
        paraState[i, index] = 1
        beta_new = beta[-1, :] * paraState[i, :]
        beta_path.append(beta_new)
        del Y
        gc.collect()
    beta_path = np.array(beta_path)
    ################-------------------------------------------------------------###############
    # finalData = np.dot(Data - beta_path, selectVec).dot(selectVec.T)
    finalData = Data - beta_path
    reconData = np.add(np.multiply(finalData, model["Train_X_std"]), model["Train_X_mean"])  # reconstructed values
    if len(plots_matrix) != 0:
        reconData[plots_index] = plots_matrix
    errorData = Data_origin - reconData  # residuals
    # cosine-similarity check between the original and reconstructed signals
    R = 0
    for index in range(0, reconData.shape[1]):
        vector1 = Data_origin[:, index]
        vector2 = np.array(reconData)[:, index]
        R += np.dot(vector1, vector2.T) / (np.sqrt(np.sum(vector1 ** 2)) * np.sqrt(np.sum(vector2 ** 2)))
    R /= reconData.shape[1]
    # cast numpy scalars to plain floats so json.dumps can serialize them
    items = [('reconData', reconData.tolist()),
             ('errorData', errorData.tolist()), ('R', float(R)),
             ('SPE', [float(v) for v in SPE_list]), ('FAI', [float(v) for v in FAI_list]),
             ('paraState', paraState.tolist())]
    result = json.dumps(dict(items))  # serialized as a JSON string
    return result
def get_history_value(points,time,interval):
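    """Fetch history values for a set of points from the EXA web API.

    Assumed argument formats, inferred from the string handling below:
    - points:   comma-separated point names, e.g. "TAG1,TAG2"
    - time:     semicolon-separated "start,end" segments, e.g. "2021-04-01 00:00:00,2021-04-02 00:00:00"
    - interval: sampling period passed through as SamplingPeriod
    Returns an array with one row per sample and one column per (point, segment) query.
    """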
#url="http://192.168.1.201:8080/openPlant/getMultiplePointHistorys"
url=f"http://{config._EXA_IP}:9000/exawebapi/exatime/GetSamplingValueArrayFloat"
headers = {"Content-Type": "application/json;charset=utf-8"}#,"token":get_token()
point_array = points.split(",")
time_span = time.split(";")
value_array = []
for item in point_array:
for time_piece in time_span:
st = time_piece.split(",")[0]
et = time_piece.split(",")[1]
para = {"ItemName": item, "StartingTime": st, "TerminalTime": et, "SamplingPeriod": interval}
response = requests.get(url, headers=headers, params=para)
value = eval(str(response.text).replace("\"","").replace("null","0"))
value_group = []
for row in value:
value_group.append(row[1])
value_array.append(value_group)
return np.transpose(np.array(value_array))
# Based on the data set data.txt
if __name__ == "__main__":
#lifan 调试模型计算引擎
jsonstr = '{"Model_id":764,"version":"v-2021-04-02 09:43:50","Test_Data":[[129.3936,0.8944824,152.4081,119.2822,0.4589844]],"Target_Data":[[]]}'
jsonstr = json.loads(jsonstr)
model_id = jsonstr["Model_id"]
version = jsonstr["version"]
res = get_model_by_id_and_version(model_id, version)
filename = res["algorithm"]
Data = jsonstr["Test_Data"]
if filename == "PCA":
model = res["para"]["Model_info"]
lock = []
point_info = res["pointInfo"]
for i in range(len(point_info)):
try:
if point_info[i]["lock"]:
lock.append(i)
except:
continue
result = pca(model, lock, np.array(Data))
print('aaa')