commit
bb49dfc100
415 changed files with 614685 additions and 0 deletions
@@ -0,0 +1,3 @@
{
"ANN_Test_offline.py::test_offline_main": true
}
@@ -0,0 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$">
      <excludeFolder url="file://$MODULE_DIR$/clawer" />
    </content>
    <orderEntry type="jdk" jdkName="Python 3.6" jdkType="Python SDK" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
  <component name="PackageRequirementsSettings">
    <option name="requirementsPath" value="" />
  </component>
  <component name="TestRunnerService">
    <option name="projectConfiguration" value="Nosetests" />
    <option name="PROJECT_TEST_RUNNER" value="Nosetests" />
  </component>
</module>
@@ -0,0 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.6" project-jdk-type="Python SDK" />
  <component name="PyCharmProfessionalAdvertiser">
    <option name="shown" value="true" />
  </component>
</project>
@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/FlaskWebApi.iml" filepath="$PROJECT_DIR$/.idea/FlaskWebApi.iml" />
    </modules>
  </component>
</project>
@@ -0,0 +1,998 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ChangeListManager">
    <list default="true" id="01f2c793-fd4e-48a4-8a46-398afce1d721" name="Default" comment="" />
    <option name="EXCLUDED_CONVERTED_TO_IGNORED" value="true" />
    <option name="SHOW_DIALOG" value="false" />
    <option name="HIGHLIGHT_CONFLICTS" value="true" />
    <option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
    <option name="LAST_RESOLUTION" value="IGNORE" />
  </component>
  <component name="FavoritesManager">
    <favorites_list name="FlaskWebApi" />
  </component>
  <component name="FileTemplateManagerImpl">
    <option name="RECENT_TEMPLATES">
      <list>
        <option value="Python Script" />
      </list>
    </option>
  </component>
  <component name="ProjectId" id="1b5BbYSWUA3yaU618uWBsmgh0DZ" />
  <component name="ProjectInspectionProfilesVisibleTreeState">
    <entry key="Project Default">
      <profile-state>
        <expanded-state>
          <State>
            <id />
          </State>
        </expanded-state>
        <selected-state>
          <State>
            <id>Buildout</id>
          </State>
        </selected-state>
      </profile-state>
    </entry>
  </component>
  <component name="PropertiesComponent">
    <property name="SearchEverywhereHistoryKey" value="&#9;FILE&#9;file://D:/FlaskWebApi/pcamtcltest.py" />
    <property name="last_opened_file_path" value="$PROJECT_DIR$" />
    <property name="restartRequiresConfirmation" value="false" />
    <property name="settings.editor.selected.configurable" value="com.jetbrains.python.configuration.PyActiveSdkModuleConfigurable" />
  </component>
  <component name="PyConsoleOptionsProvider">
    <option name="myPythonConsoleState">
      <console-settings module-name="FlaskWebApi" is-module-sdk="true">
        <option name="myUseModuleSdk" value="true" />
        <option name="myModuleName" value="FlaskWebApi" />
      </console-settings>
    </option>
  </component>
  <component name="RunDashboard">
    <option name="ruleStates">
      <list>
        <RuleState>
          <option name="name" value="ConfigurationTypeDashboardGroupingRule" />
        </RuleState>
        <RuleState>
          <option name="name" value="StatusDashboardGroupingRule" />
        </RuleState>
      </list>
    </option>
  </component>
  <component name="RunManager" selected="Python.web">
    <configuration name="AANN_Fit" type="PythonConfigurationType" factoryName="Python" temporary="true">
      <module name="FlaskWebApi" />
      <option name="INTERPRETER_OPTIONS" value="" />
      <option name="PARENT_ENVS" value="true" />
      <envs>
        <env name="PYTHONUNBUFFERED" value="1" />
      </envs>
      <option name="SDK_HOME" value="" />
      <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
      <option name="IS_MODULE_SDK" value="false" />
      <option name="ADD_CONTENT_ROOTS" value="true" />
      <option name="ADD_SOURCE_ROOTS" value="true" />
      <option name="SCRIPT_NAME" value="$PROJECT_DIR$/AANN_Fit.py" />
      <option name="PARAMETERS" value="" />
      <option name="SHOW_COMMAND_LINE" value="false" />
      <option name="EMULATE_TERMINAL" value="false" />
      <option name="MODULE_MODE" value="false" />
      <option name="REDIRECT_INPUT" value="false" />
      <option name="INPUT_FILE" value="" />
      <method v="2" />
    </configuration>
    <configuration name="AANN_Train" type="PythonConfigurationType" factoryName="Python" temporary="true">
      <module name="FlaskWebApi" />
      <option name="INTERPRETER_OPTIONS" value="" />
      <option name="PARENT_ENVS" value="true" />
      <envs>
        <env name="PYTHONUNBUFFERED" value="1" />
      </envs>
      <option name="SDK_HOME" value="" />
      <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
      <option name="IS_MODULE_SDK" value="false" />
      <option name="ADD_CONTENT_ROOTS" value="true" />
      <option name="ADD_SOURCE_ROOTS" value="true" />
      <option name="SCRIPT_NAME" value="$PROJECT_DIR$/AANN_Train.py" />
      <option name="PARAMETERS" value="" />
      <option name="SHOW_COMMAND_LINE" value="false" />
      <option name="EMULATE_TERMINAL" value="false" />
      <option name="MODULE_MODE" value="false" />
      <option name="REDIRECT_INPUT" value="false" />
      <option name="INPUT_FILE" value="" />
      <method v="2" />
    </configuration>
    <configuration name="ANN_Train_offline" type="PythonConfigurationType" factoryName="Python" temporary="true">
      <module name="FlaskWebApi" />
      <option name="INTERPRETER_OPTIONS" value="" />
      <option name="PARENT_ENVS" value="true" />
      <envs>
        <env name="PYTHONUNBUFFERED" value="1" />
      </envs>
      <option name="SDK_HOME" value="C:\ProgramData\Anaconda3\python.exe" />
      <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
      <option name="IS_MODULE_SDK" value="false" />
      <option name="ADD_CONTENT_ROOTS" value="true" />
      <option name="ADD_SOURCE_ROOTS" value="true" />
      <option name="SCRIPT_NAME" value="$PROJECT_DIR$/ANN_Train_offline.py" />
      <option name="PARAMETERS" value="" />
      <option name="SHOW_COMMAND_LINE" value="false" />
      <option name="EMULATE_TERMINAL" value="false" />
      <option name="MODULE_MODE" value="false" />
      <option name="REDIRECT_INPUT" value="false" />
      <option name="INPUT_FILE" value="" />
      <method v="2" />
    </configuration>
    <configuration name="Unnamed (1)" type="PythonConfigurationType" factoryName="Python">
      <module name="FlaskWebApi" />
      <option name="INTERPRETER_OPTIONS" value="" />
      <option name="PARENT_ENVS" value="true" />
      <envs>
        <env name="PYTHONUNBUFFERED" value="1" />
      </envs>
      <option name="SDK_HOME" value="C:\ProgramData\Anaconda3\python.exe" />
      <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
      <option name="IS_MODULE_SDK" value="false" />
      <option name="ADD_CONTENT_ROOTS" value="true" />
      <option name="ADD_SOURCE_ROOTS" value="true" />
      <option name="SCRIPT_NAME" value="$PROJECT_DIR$/ANN_Test_offline.py" />
      <option name="PARAMETERS" value="" />
      <option name="SHOW_COMMAND_LINE" value="false" />
      <option name="EMULATE_TERMINAL" value="false" />
      <option name="MODULE_MODE" value="false" />
      <option name="REDIRECT_INPUT" value="false" />
      <option name="INPUT_FILE" value="" />
      <method v="2" />
    </configuration>
    <configuration name="Unnamed" type="PythonConfigurationType" factoryName="Python">
      <module name="FlaskWebApi" />
      <option name="INTERPRETER_OPTIONS" value="" />
      <option name="PARENT_ENVS" value="true" />
      <envs>
        <env name="PYTHONUNBUFFERED" value="1" />
      </envs>
      <option name="SDK_HOME" value="" />
      <option name="WORKING_DIRECTORY" value="" />
      <option name="IS_MODULE_SDK" value="true" />
      <option name="ADD_CONTENT_ROOTS" value="true" />
      <option name="ADD_SOURCE_ROOTS" value="true" />
      <option name="SCRIPT_NAME" value="" />
      <option name="PARAMETERS" value="" />
      <option name="SHOW_COMMAND_LINE" value="false" />
      <option name="EMULATE_TERMINAL" value="false" />
      <option name="MODULE_MODE" value="false" />
      <option name="REDIRECT_INPUT" value="false" />
      <option name="INPUT_FILE" value="" />
      <method v="2" />
    </configuration>
    <configuration name="pca_test_by_rb_plot" type="PythonConfigurationType" factoryName="Python" temporary="true">
      <module name="FlaskWebApi" />
      <option name="INTERPRETER_OPTIONS" value="" />
      <option name="PARENT_ENVS" value="true" />
      <envs>
        <env name="PYTHONUNBUFFERED" value="1" />
      </envs>
      <option name="SDK_HOME" value="" />
      <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
      <option name="IS_MODULE_SDK" value="false" />
      <option name="ADD_CONTENT_ROOTS" value="true" />
      <option name="ADD_SOURCE_ROOTS" value="true" />
      <option name="SCRIPT_NAME" value="$PROJECT_DIR$/pca_test_by_rb_plot.py" />
      <option name="PARAMETERS" value="" />
      <option name="SHOW_COMMAND_LINE" value="false" />
      <option name="EMULATE_TERMINAL" value="false" />
      <option name="MODULE_MODE" value="false" />
      <option name="REDIRECT_INPUT" value="false" />
      <option name="INPUT_FILE" value="" />
      <method v="2" />
    </configuration>
    <configuration name="web" type="PythonConfigurationType" factoryName="Python" temporary="true">
      <module name="FlaskWebApi" />
      <option name="INTERPRETER_OPTIONS" value="" />
      <option name="PARENT_ENVS" value="true" />
      <envs>
        <env name="PYTHONUNBUFFERED" value="1" />
      </envs>
      <option name="SDK_HOME" value="" />
      <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
      <option name="IS_MODULE_SDK" value="false" />
      <option name="ADD_CONTENT_ROOTS" value="true" />
      <option name="ADD_SOURCE_ROOTS" value="true" />
      <option name="SCRIPT_NAME" value="$PROJECT_DIR$/web.py" />
      <option name="PARAMETERS" value="" />
      <option name="SHOW_COMMAND_LINE" value="false" />
      <option name="EMULATE_TERMINAL" value="false" />
      <option name="MODULE_MODE" value="false" />
      <option name="REDIRECT_INPUT" value="false" />
      <option name="INPUT_FILE" value="" />
      <method v="2" />
    </configuration>
    <list>
      <item itemvalue="Python.Unnamed (1)" />
      <item itemvalue="Python.Unnamed" />
      <item itemvalue="Python.ANN_Train_offline" />
      <item itemvalue="Python.AANN_Fit" />
      <item itemvalue="Python.pca_test_by_rb_plot" />
      <item itemvalue="Python.web" />
      <item itemvalue="Python.AANN_Train" />
    </list>
    <recent_temporary>
      <list>
        <item itemvalue="Python.web" />
        <item itemvalue="Python.ANN_Train_offline" />
        <item itemvalue="Python.AANN_Train" />
        <item itemvalue="Python.AANN_Fit" />
        <item itemvalue="Python.pca_test_by_rb_plot" />
      </list>
    </recent_temporary>
  </component>
  <component name="SvnConfiguration">
    <configuration>C:\Users\Administrator\AppData\Roaming\Subversion</configuration>
  </component>
  <component name="TaskManager">
    <task active="true" id="Default" summary="Default task">
      <changelist id="01f2c793-fd4e-48a4-8a46-398afce1d721" name="Default" comment="" />
      <created>1568983181226</created>
      <option name="number" value="Default" />
      <option name="presentableId" value="Default" />
      <updated>1568983181226</updated>
    </task>
    <servers />
  </component>
  <component name="TestHistory">
    <history-entry file="py_test_for_ANN_Test_offline_test_offline_main - 2020.02.05 at 23h 03m 51s.xml">
      <configuration name="py.test for ANN_Test_offline.test_offline_main" configurationId="tests" />
    </history-entry>
    <history-entry file="py_test_in_ANN_Test_offline_py - 2020.02.05 at 22h 48m 49s.xml">
      <configuration name="py.test in ANN_Test_offline.py" configurationId="tests" />
    </history-entry>
    <history-entry file="py_test_in_ANN_Test_offline_py - 2020.02.05 at 22h 49m 21s.xml">
      <configuration name="py.test in ANN_Test_offline.py" configurationId="tests" />
    </history-entry>
    <history-entry file="py_test_in_ANN_Test_offline_py - 2020.02.05 at 22h 53m 56s.xml">
      <configuration name="py.test in ANN_Test_offline.py" configurationId="tests" />
    </history-entry>
    <history-entry file="py_test_in_ANN_Test_offline_py - 2020.02.05 at 22h 57m 55s.xml">
      <configuration name="py.test in ANN_Test_offline.py" configurationId="tests" />
    </history-entry>
    <history-entry file="py_test_in_ANN_Test_offline_py - 2020.02.06 at 20h 26m 35s.xml">
      <configuration name="py.test in ANN_Test_offline.py" configurationId="tests" />
    </history-entry>
    <history-entry file="py_test_in_ANN_Test_offline_py - 2020.02.06 at 20h 26m 57s.xml">
      <configuration name="py.test in ANN_Test_offline.py" configurationId="tests" />
    </history-entry>
    <history-entry file="py_test_in_ANN_Test_offline_py - 2020.02.06 at 20h 27m 38s.xml">
      <configuration name="py.test in ANN_Test_offline.py" configurationId="tests" />
    </history-entry>
    <history-entry file="py_test_in_ANN_Test_offline_py - 2020.02.06 at 20h 28m 03s.xml">
      <configuration name="py.test in ANN_Test_offline.py" configurationId="tests" />
    </history-entry>
    <history-entry file="py_test_in_ANN_Test_offline_py - 2020.02.07 at 15h 53m 41s.xml">
      <configuration name="py.test in ANN_Test_offline.py" configurationId="tests" />
    </history-entry>
  </component>
  <component name="XDebuggerManager">
    <breakpoint-manager>
      <breakpoints>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/PLS_Train.py</url>
          <line>75</line>
          <option name="timeStamp" value="96" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/PLS_Train.py</url>
          <line>37</line>
          <option name="timeStamp" value="102" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/PLS_Train.py</url>
          <line>118</line>
          <option name="timeStamp" value="103" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/GMM_test.py</url>
          <line>197</line>
          <option name="timeStamp" value="185" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/GMM_train.py</url>
          <line>344</line>
          <option name="timeStamp" value="204" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pca_test_by_rb.py</url>
          <line>325</line>
          <option name="timeStamp" value="248" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/PCA_Test.py</url>
          <line>246</line>
          <option name="timeStamp" value="252" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>345</line>
          <option name="timeStamp" value="267" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/ANN_Train_offline.py</url>
          <line>157</line>
          <option name="timeStamp" value="332" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/ANN_Train_offline.py</url>
          <line>254</line>
          <option name="timeStamp" value="334" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/PCA_Test.py</url>
          <line>164</line>
          <option name="timeStamp" value="337" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/PCA_Test.py</url>
          <line>231</line>
          <option name="timeStamp" value="342" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/ANN_Train_offline.py</url>
          <line>258</line>
          <option name="timeStamp" value="346" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/ANN_Train_offline.py</url>
          <line>47</line>
          <option name="timeStamp" value="348" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/PCA_Train.py</url>
          <line>265</line>
          <option name="timeStamp" value="370" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/test.py</url>
          <line>20</line>
          <option name="timeStamp" value="377" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>402</line>
          <option name="timeStamp" value="383" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>413</line>
          <option name="timeStamp" value="384" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>453</line>
          <option name="timeStamp" value="385" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>463</line>
          <option name="timeStamp" value="388" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>186</line>
          <option name="timeStamp" value="389" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>249</line>
          <option name="timeStamp" value="390" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>321</line>
          <option name="timeStamp" value="392" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>470</line>
          <option name="timeStamp" value="395" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>462</line>
          <option name="timeStamp" value="396" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>507</line>
          <option name="timeStamp" value="400" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>190</line>
          <option name="timeStamp" value="406" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>364</line>
          <option name="timeStamp" value="411" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/test.py</url>
          <line>16</line>
          <option name="timeStamp" value="412" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>364</line>
          <option name="timeStamp" value="415" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/PCA_Train.py</url>
          <line>271</line>
          <option name="timeStamp" value="418" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>501</line>
          <option name="timeStamp" value="421" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>475</line>
          <option name="timeStamp" value="423" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/ANN_Test_offline.py</url>
          <line>104</line>
          <option name="timeStamp" value="430" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/ANN_Test_offline.py</url>
          <line>170</line>
          <option name="timeStamp" value="432" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/ANN_Train_offline.py</url>
          <line>70</line>
          <option name="timeStamp" value="440" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/ANN_Test_offline.py</url>
          <line>92</line>
          <option name="timeStamp" value="445" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/ANN_Train_offline.py</url>
          <line>99</line>
          <option name="timeStamp" value="446" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/ANN_Train_offline.py</url>
          <line>170</line>
          <option name="timeStamp" value="451" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/ANN_Train_offline.py</url>
          <line>175</line>
          <option name="timeStamp" value="456" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>425</line>
          <option name="timeStamp" value="459" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>493</line>
          <option name="timeStamp" value="461" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>511</line>
          <option name="timeStamp" value="463" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>460</line>
          <option name="timeStamp" value="466" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>453</line>
          <option name="timeStamp" value="467" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/PLS_Train.py</url>
          <line>119</line>
          <option name="timeStamp" value="473" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/PCA_Train.py</url>
          <line>192</line>
          <option name="timeStamp" value="485" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/PCA_Train.py</url>
          <line>43</line>
          <option name="timeStamp" value="486" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>380</line>
          <option name="timeStamp" value="487" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/model.py</url>
          <line>22</line>
          <option name="timeStamp" value="488" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/model.py</url>
          <line>31</line>
          <option name="timeStamp" value="490" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/ANN_Train_offline.py</url>
          <line>268</line>
          <option name="timeStamp" value="491" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/ANN_Test_offline.py</url>
          <line>73</line>
          <option name="timeStamp" value="498" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest_k.py</url>
          <line>469</line>
          <option name="timeStamp" value="508" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest_k.py</url>
          <line>481</line>
          <option name="timeStamp" value="510" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/model_performance.py</url>
          <line>149</line>
          <option name="timeStamp" value="532" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/model_performance.py</url>
          <line>149</line>
          <option name="timeStamp" value="539" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/model_performance.py</url>
          <line>170</line>
          <option name="timeStamp" value="547" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/model_performance.py</url>
          <line>156</line>
          <option name="timeStamp" value="548" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>461</line>
          <option name="timeStamp" value="570" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/PCA_Train.py</url>
          <line>251</line>
          <option name="timeStamp" value="585" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/update_points.py</url>
          <line>21</line>
          <option name="timeStamp" value="645" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/PCA_Train.py</url>
          <line>259</line>
          <option name="timeStamp" value="657" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>511</line>
          <option name="timeStamp" value="713" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>516</line>
          <option name="timeStamp" value="721" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>508</line>
          <option name="timeStamp" value="722" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/model_performance.py</url>
          <line>325</line>
          <option name="timeStamp" value="740" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/clean.py</url>
          <line>92</line>
          <option name="timeStamp" value="744" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>190</line>
          <option name="timeStamp" value="746" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>328</line>
          <option name="timeStamp" value="750" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/get_data.py</url>
          <line>242</line>
          <option name="timeStamp" value="768" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/pcamtcltest.py</url>
          <line>525</line>
          <option name="timeStamp" value="776" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/update_points.py</url>
          <line>49</line>
          <option name="timeStamp" value="793" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://D:/FlaskWebApi/get_data.py</url>
          <line>149</line>
          <option name="timeStamp" value="794" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$USER_HOME$/Desktop/test.py</url>
          <line>9</line>
          <option name="timeStamp" value="1092" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/cjl勿删/sae_diagnosis.py</url>
          <line>60</line>
          <option name="timeStamp" value="1099" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://C:/D盘/Flaskwebapi/recon.py</url>
          <line>35</line>
          <option name="timeStamp" value="1108" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/cjl勿删/pca_diagnosis.py</url>
          <line>124</line>
          <option name="timeStamp" value="1300" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/pca.py</url>
          <line>307</line>
          <option name="timeStamp" value="1304" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/pca.py</url>
          <line>168</line>
          <option name="timeStamp" value="1305" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/pca.py</url>
          <line>114</line>
          <option name="timeStamp" value="1306" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/base.py</url>
          <line>94</line>
          <option name="timeStamp" value="1307" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/test.py</url>
          <line>6</line>
          <option name="timeStamp" value="1316" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/pca.py</url>
          <line>260</line>
          <option name="timeStamp" value="1338" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/base.py</url>
          <line>144</line>
          <option name="timeStamp" value="1349" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/PCA_Train.py</url>
          <line>276</line>
          <option name="timeStamp" value="1361" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/AANN_Train.py</url>
          <line>172</line>
          <option name="timeStamp" value="1392" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/AANN_Train.py</url>
          <line>115</line>
          <option name="timeStamp" value="1394" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/AANN_Train.py</url>
          <line>59</line>
          <option name="timeStamp" value="1396" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/AANN_Train.py</url>
          <line>191</line>
          <option name="timeStamp" value="1397" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/AANN_Train.py</url>
          <line>260</line>
          <option name="timeStamp" value="1398" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/PCA_Train.py</url>
          <line>259</line>
          <option name="timeStamp" value="1405" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/AANN_Train.py</url>
          <line>88</line>
          <option name="timeStamp" value="1407" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/PCA_Train.py</url>
          <line>96</line>
          <option name="timeStamp" value="1411" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/PCA_Train.py</url>
          <line>195</line>
          <option name="timeStamp" value="1413" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/AANN_Fit.py</url>
          <line>20</line>
          <option name="timeStamp" value="1418" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/pcamtcltest_recon.py</url>
          <line>455</line>
          <option name="timeStamp" value="1432" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/recon.py</url>
          <line>75</line>
          <option name="timeStamp" value="1434" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/pcamtcltest_recon.py</url>
          <line>226</line>
          <option name="timeStamp" value="1436" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/pcamtcltest_recon.py</url>
          <line>239</line>
          <option name="timeStamp" value="1437" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/aannmtcltest_recon.py</url>
          <line>190</line>
          <option name="timeStamp" value="1440" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/aannmtcltest_recon.py</url>
          <line>81</line>
          <option name="timeStamp" value="1442" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/AANN_SF_SBSRB.py</url>
          <line>142</line>
          <option name="timeStamp" value="1444" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/AANN_Derivative.py</url>
          <line>63</line>
          <option name="timeStamp" value="1449" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/aannmtcltest_recon.py</url>
          <line>212</line>
          <option name="timeStamp" value="1451" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/pcamtcltest_recon.py</url>
          <line>461</line>
          <option name="timeStamp" value="1461" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/aannmtcltest_recon.py</url>
          <line>219</line>
          <option name="timeStamp" value="1463" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/AANN_SF_SBSRB.py</url>
          <line>101</line>
          <option name="timeStamp" value="1465" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/aannmtcltest_recon.py</url>
          <line>140</line>
          <option name="timeStamp" value="1467" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/AANN_SF_SBSRB.py</url>
          <line>97</line>
          <option name="timeStamp" value="1468" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/AANN_Fit.py</url>
          <line>137</line>
          <option name="timeStamp" value="1474" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/AANN_Fit.py</url>
          <line>68</line>
          <option name="timeStamp" value="1476" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/web.py</url>
          <line>205</line>
          <option name="timeStamp" value="1477" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/web.py</url>
          <line>218</line>
          <option name="timeStamp" value="1481" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/AANN_Train.py</url>
          <line>275</line>
          <option name="timeStamp" value="1485" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/AANN_Train.py</url>
          <line>258</line>
          <option name="timeStamp" value="1486" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/web.py</url>
          <line>31</line>
          <option name="timeStamp" value="1489" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/PCA_Test_offline.py</url>
          <line>294</line>
          <option name="timeStamp" value="1493" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/PCA_Test_offline.py</url>
          <line>311</line>
          <option name="timeStamp" value="1496" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/AANN_RB.py</url>
          <option name="timeStamp" value="1499" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/PCA_Train.py</url>
          <line>291</line>
          <option name="timeStamp" value="1503" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/PCA_Train.py</url>
          <line>255</line>
          <option name="timeStamp" value="1504" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/PCA_Test.py</url>
          <line>271</line>
          <option name="timeStamp" value="1507" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/web.py</url>
          <line>133</line>
          <option name="timeStamp" value="1516" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$USER_HOME$/Desktop/HealthyScoringSystem/HealthyScoringSystem/main.py</url>
          <line>46</line>
          <option name="timeStamp" value="1520" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$USER_HOME$/Desktop/HealthyScoringSystem/HealthyScoringSystem/main.py</url>
          <line>164</line>
          <option name="timeStamp" value="1521" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/PCA_Test.py</url>
          <line>259</line>
          <option name="timeStamp" value="1522" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/PCA_Test.py</url>
          <line>222</line>
          <option name="timeStamp" value="1523" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/ASSESS.py</url>
          <line>306</line>
          <option name="timeStamp" value="1536" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$USER_HOME$/Desktop/HealthyScoringSystem/HealthyScoringSystem/main.py</url>
          <line>144</line>
          <option name="timeStamp" value="1551" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/ASSESS.py</url>
          <line>187</line>
          <option name="timeStamp" value="1560" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/ASSESS.py</url>
          <line>208</line>
          <option name="timeStamp" value="1562" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/pcamtcltest_recon.py</url>
          <line>59</line>
          <option name="timeStamp" value="1564" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/AANN_Fit.py</url>
          <line>202</line>
          <option name="timeStamp" value="1566" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/AANN_Fit.py</url>
          <line>201</line>
          <option name="timeStamp" value="1568" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/pca_test_by_rb_plot.py</url>
          <line>281</line>
          <option name="timeStamp" value="1569" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/ANN_Train_offline.py</url>
          <line>80</line>
          <option name="timeStamp" value="1578" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/ANN_Train_offline.py</url>
          <line>270</line>
          <option name="timeStamp" value="1579" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/ANN_Train_offline.py</url>
          <line>140</line>
          <option name="timeStamp" value="1582" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/ANN_Train_offline.py</url>
          <line>166</line>
          <option name="timeStamp" value="1583" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/ANN_Train_offline.py</url>
          <line>157</line>
          <option name="timeStamp" value="1584" />
        </line-breakpoint>
        <line-breakpoint enabled="true" suspend="THREAD" type="python-line">
          <url>file://$PROJECT_DIR$/web.py</url>
          <line>151</line>
          <option name="timeStamp" value="1585" />
        </line-breakpoint>
      </breakpoints>
      <default-breakpoints>
        <breakpoint type="python-exception">
          <properties notifyOnTerminate="true" exception="BaseException">
            <option name="notifyOnTerminate" value="true" />
          </properties>
        </breakpoint>
      </default-breakpoints>
    </breakpoint-manager>
  </component>
</project>
File diff suppressed because it is too large
Binary file not shown.
@@ -0,0 +1,66 @@
import numpy as np
import torch
from numba import jit


@jit(nopython=True, fastmath=True, cache=True)
def sigmoid(x):
    return 1.0 / (1 + np.exp(-x))


@jit(nopython=True, fastmath=True, cache=True)
def AANN_Derivative(v1, v2, w1, w2, testsample, faulty_directions):
    """
    Reconstruct a faulty sample along the candidate fault directions by
    gradient descent on the AANN reconstruction error (SPE).

    :param v1: input-to-mapping-layer weights
    :param v2: bottleneck-to-demapping-layer weights
    :param w1: mapping-layer-to-bottleneck weights
    :param w2: demapping-layer-to-output weights
    :param testsample: sample under diagnosis (1-D, one value per variable)
    :param faulty_directions: indices of the candidate faulty variables
    :return: (rbc_spe, delt_f, iteration_number, spe) -- SPE after reconstruction,
        accumulated fault magnitude, iterations used, and the SPE trajectory
    """
    [mmv1, nnv1] = v1.shape
    [mmw1, nnw1] = w1.shape
    [mmv2, nnv2] = v2.shape
    [mmw2, nnw2] = w2.shape
    z = np.dot(w1, v2)
    faulty_number = faulty_directions.shape[0]
    max_count = 3000
    spe = np.zeros(max_count + 1)
    count = 0
    derivative, ff, delta = np.zeros((max_count + 1, mmv1)), np.zeros((max_count + 1, mmv1)), np.zeros((max_count + 1, mmv1))
    y_ = testsample
    out, e = np.zeros((max_count, mmv1)), np.zeros((max_count, mmv1))
    ahfa1 = 0.0018  # gradient step size
    ahfa2 = 0.9     # momentum factor

    while 1:
        count = count + 1
        testsample = np.copy(y_)
        delta[count, :] = (-ahfa1 * derivative[count - 1, :] + ahfa2 * delta[count - 1, :])
        ff[count, :] = (delta[count])
        y_ = y_ - ff[count]  # move the sample along the fault directions
        g = sigmoid(np.dot(y_, v1))  # mapping layer
        t = np.dot(g, w1)            # bottleneck
        h = sigmoid(np.dot(t, v2))   # demapping layer
        out[count - 1, :] = np.dot(h, w2).reshape((1, mmv1))
        e[count - 1, :] = y_ - out[count - 1, :]
        spe[count] = np.sum(e[count - 1, :] * e[count - 1, :])

        if count >= max_count or np.abs(spe[count - 1] - spe[count]) < 0.000001:
            iteration_number = count
            break
        for i in range(faulty_number):
            deltyf = np.zeros((mmv1, mmv1))
            yitao, yita = np.zeros(nnv2), np.zeros(nnv2)
            yitao = yitao + np.dot(g[0] * (1 - g[0]) * (-v1[int(faulty_directions[i])]), z)
            yita = np.dot(h[0] * (1 - h[0]) * yitao, w2)
            deltyf[:, int(faulty_directions[i])] = yita.T
            ee = testsample - out[count - 1, :]
            derivative[count, int(faulty_directions[i])] = (-2 * np.sum(ee * deltyf[:, int(faulty_directions[i])]) - 2 * (ee[int(faulty_directions[i])] - 2 * deltyf[int(faulty_directions[i]), int(faulty_directions[i])] * ff[count, int(faulty_directions[i])]) + 2 * ff[count, int(faulty_directions[i])])
    rbc_spe = spe[count]
    delt_f = np.sum(ff, axis=0)

    return rbc_spe, delt_f.reshape(1, mmv1), iteration_number, spe[:count]

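Read as a whole, AANN_Derivative is a reconstruction-based contribution (RBC) routine: it walks the sample along the candidate fault directions with a momentum gradient step (ahfa1, ahfa2) until the autoencoder's SPE stops improving. A smoke-test sketch of calling it follows; the random weights, the 6-5-2-5-6 layout, and the 1-D sample shape are assumptions inferred from how the function indexes its arguments, not part of this commit.

```python
import numpy as np
from AANN_Derivative import AANN_Derivative

rng = np.random.default_rng(0)
n_vars, h1, bottleneck, h2 = 6, 5, 2, 5      # hypothetical 6-5-2-5-6 AANN

# Random stand-ins for the trained weight matrices.
v1 = rng.standard_normal((n_vars, h1))       # input -> mapping layer
w1 = rng.standard_normal((h1, bottleneck))   # mapping layer -> bottleneck
v2 = rng.standard_normal((bottleneck, h2))   # bottleneck -> demapping layer
w2 = rng.standard_normal((h2, n_vars))       # demapping layer -> output

testsample = rng.standard_normal(n_vars)     # one already-scaled sample (1-D)
faulty_directions = np.array([2])            # reconstruct along variable 2 only

rbc_spe, delt_f, n_iter, spe_curve = AANN_Derivative(
    v1, v2, w1, w2, testsample, faulty_directions)
print(rbc_spe, n_iter, delt_f.shape)         # final SPE, iterations used, (1, 6)
```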
@@ -0,0 +1,203 @@
import numpy as np
from numba import jit
import config
import json
import sys
import requests
import datetime
import jenkspy
import xlrd
import AANN_Fit  # note: this module name is shadowed by the jitted AANN_Fit function below
import traceback


@jit(nopython=True, cache=True)
def AANN_Fit(testdata, v1, v2, w1, w2):
    """
    Forward pass of the trained AANN: reconstruct testdata from the four
    weight matrices.

    :param testdata: scaled samples, one row per sample
    :param v1: input-to-mapping-layer weights
    :param v2: bottleneck-to-demapping-layer weights
    :param w1: mapping-layer-to-bottleneck weights
    :param w2: demapping-layer-to-output weights
    :return: reconstructed samples
    """
    y1 = 1 / (1 + np.exp(-np.dot(testdata, v1)))
    out1 = np.dot(y1, w1)
    y2 = 1 / (1 + np.exp(-np.dot(out1, v2)))
    output = np.dot(y2, w2)
    return output


def get_history_value(points, time, interval):
    # url = "http://192.168.1.201:8080/openPlant/getMultiplePointHistorys"
    url = f"http://{config._EXA_IP}:9000/exawebapi/exatime/GetSamplingValueArrayFloat"
    headers = {"Content-Type": "application/json;charset=utf-8"}  # ,"token":get_token()
    point_array = points.split(",")
    time_span = time.split(";")
    value_array = []
    for item in point_array:
        value_group = []
        for time_piece in time_span:
            st = time_piece.split(",")[0]
            et = time_piece.split(",")[1]
            para = {"ItemName": item, "StartingTime": st, "TerminalTime": et, "SamplingPeriod": interval}
            response = requests.get(url, headers=headers, params=para)
            content = response.text.replace('"[', '[').replace(']"', ']')
            value = json.loads(content)
            if not isinstance(value, list):
                print("aaa")
            for row in value:
                value_group.append(row[1])
        value_array.append(value_group)
    return np.transpose(np.array(value_array))
    # return values

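The string surgery in get_history_value implies the service answers with a JSON array that is itself wrapped in double quotes, each row being a [timestamp, value] pair. A minimal sketch of that parsing step is below; the payload is invented for illustration, not a real ExaWebAPI response:

```python
import json

# Hypothetical body shaped the way the replace('"[', '[') / replace(']"', ']')
# calls expect: a quoted JSON array of [timestamp, value] rows.
raw = '"[[1580900000, 23.5], [1580900300, 24.1], [1580900600, 24.0]]"'

content = raw.replace('"[', '[').replace(']"', ']')
rows = json.loads(content)          # -> [[1580900000, 23.5], ...]
values = [row[1] for row in rows]   # keep only the sampled values
print(values)                       # [23.5, 24.1, 24.0]
```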
def isnumber(limits): |
|||
flag=True |
|||
for item in limits: |
|||
item=item.replace("-","") |
|||
if(item.isdigit()==False): |
|||
flag=False |
|||
break |
|||
return flag |
|||
|
|||
def AANN_Test(model,Data): |
|||
v1 = np.array(model["v1"]) |
|||
v2 = np.array(model["v2"]) |
|||
w1 = np.array(model["w1"]) |
|||
w2 = np.array(model["w2"]) |
|||
maxdata = np.array(model["maxdata"]) |
|||
mindata = np.array(model["mindata"]) |
|||
mm, nn = Data.shape |
|||
# 预处理 数据归一化 |
|||
mdata = (2 * Data - (maxdata + mindata + np.zeros((mm, nn)))) / (maxdata - mindata + np.zeros((mm, nn))) |
|||
reconData = AANN_Fit(mdata, v1, v2, w1, w2) |
|||
# 预处理 数据反归一化 |
|||
a = maxdata - mindata + np.zeros((mm, nn)) |
|||
b = maxdata + mindata + np.zeros((mm, nn)) |
|||
reconData = (np.multiply(reconData, a) + b) / 2 |
|||
|
|||
res = {} |
|||
paraState = np.zeros([np.array(Data).shape[0], np.array(Data).shape[1]]) |
|||
SPE_list = [] |
|||
R = 0 |
|||
errorData = Data - reconData # 偏差值 |
|||
|
|||
res["sampleData"]=np.transpose(np.array(Data)).tolist() |
|||
res["reconData"]=np.around(np.transpose(np.array(reconData)), decimals=3).tolist() |
|||
res["errorData"]=np.around(np.transpose(np.array(errorData)), decimals=3).tolist() |
|||
res["R"]=np.around(R, decimals=3).tolist() |
|||
res["SPE"]=np.around(np.transpose(np.array(SPE_list)), decimals=3).tolist() |
|||
res["paraState"]=np.transpose(np.array(paraState)).tolist() |
|||
|
|||
return res |
|||
|
|||
|
|||
def clean_main(info): |
|||
try: |
|||
# datatype = info['type'] |
|||
condition = info["condition"].replace("=", "==").replace(">=", ">").replace("<=", "<") |
|||
times = info["time"].split(';') |
|||
points = info["point"].split(',') |
|||
interval = 300000 |
|||
dead = info["dead"].split(',') |
|||
limit = info["limit"].split(',') |
|||
uplower = info["uplow"].split(';') |
|||
res = json.loads(info["model"]) |
|||
filename = res["Model_type"] |
|||
count = 0 |
|||
ItemsInfo, SamplingTimePeriods = [], [] |
|||
Constraint = "" |
|||
for i in range(len(points)): |
|||
iteminfo = {} |
|||
iteminfo["ItemName"] = points[i] # 加点 |
|||
if (dead[i] == "1"): # 判断是否参与死区清洗 |
|||
iteminfo["ClearDeadZone"] = "true" |
|||
else: |
|||
iteminfo["ClearDeadZone"] = "false" |
|||
if (limit[i] == "1"): # 参与上下限清洗 |
|||
limits = uplower[i].split(',') |
|||
if (isnumber(limits) == True): # 输入上下限正确 |
|||
count += 1 |
|||
Constraint += "[" + points[i] + "]>" + limits[0] + " and " + "[" + points[i] + "]<" + limits[ |
|||
1] + " and " |
|||
ItemsInfo.append(iteminfo) |
|||
if (count != 0): |
|||
Constraint = Constraint[:len(Constraint) - 4:] |
|||
else: |
|||
Constraint = "1==1" # 没有上下限清洗 |
|||
Constraint += " and (" + condition + ")" |
|||
for i in range(len(times)): |
|||
Eachsampletime = {} |
|||
timess = times[i].split(',') |
|||
Eachsampletime["StartingTime"] = timess[0] |
|||
Eachsampletime["TerminalTime"] = timess[1] |
|||
SamplingTimePeriods.append(Eachsampletime) |
|||
Constraint = Constraint.replace("\n", " ") |
|||
url = f"http://{config._CLEAN_IP}/exawebapi/exatime/GetCleaningData?ItemsInfo=%s&SamplingTimePeriods=%s&Constraint=%s&SamplingPeriod=%s&DCount=6" % ( |
|||
ItemsInfo, SamplingTimePeriods, Constraint, interval) |
|||
response = requests.get(url) |
|||
content = json.loads(response.text) |
|||
origndata = np.array([item for item in content["ClearData"]]) |
|||
result = AANN_Test(res["Model_info"],origndata.T) |
|||
result["sampleData"] = origndata.tolist() |
|||
result["CleanOrNot"] = True |
|||
except Exception as e: |
|||
points = info['point'] |
|||
time1 = info["time"] |
|||
interval = info['interval'] |
|||
data_x, origndata = get_history_value(points, time1, interval) |
|||
res = json.loads(info["model"]) |
|||
filename = res["Model_type"] |
|||
origndata = origndata.tolist() |
|||
result = AANN_Test(res["Model_info"],origndata.T) |
|||
result["sampleData"] = origndata |
|||
result["CleanOrNot"] = False |
|||
return result |
|||
|
|||
|
|||
if __name__ == '__main__': |
|||
# info = {"point":"DH4_40MAG20CT362,DH4_40MAG20AN002GT,DH4_40MAG20CE102,DH4_40MAG20CT312,DH4_40MAG20CT322,DH4_40MAG20CT332","dead":"1,1,1,1,1,1","condition":"[DH4_40MAG20CE102]>20","limit":"0,0,0,0,0,0","uplow":"null,null;null,null;null,null;null,null;null,null;null,null","time":"2021-06-02 17:37:17,2021-06-03 17:37:17","model":"{\"Model_info\":{\"Train_X_min\":[22.848,14.865,45.127,24.787,24.875,24.699],\"Train_X_max\":[49.215,54.841,185.917,80.635,81.084,80.097],\"Train_X_std\":[7.014,12.071,37.054,15.602,15.881,15.695],\"Train_X_mean\":[37.569,36.848,113.843,53.193,53.611,53.304],\"Train_X_bais_max\":[[0,0,0,0,0,0]],\"Train_X_bais_min\":[[0,0,0,0,0,0]],\"Train_X_bais_mean\":[[0,0,0,0,0,0]],\"QCUL_95_line\":[0,0,0,0,0,0],\"QCUL_99_line\":[0,0,0,0,0,0],\"count\":1957,\"maxdata\":[49.21523,54.841,185.917023,80.63535,81.08379,80.0973053],\"mindata\":[22.84761,14.864501,45.1272545,24.7868519,24.8750286,24.698679],\"sigma\":0.017395612757076084,\"v1\":[[0.05396457607128241,-1.1037970788364901,-0.9023458013688983,0.8166076266011788,-0.17679687427234053],[0.5200933810436045,0.007280858992126217,-0.5349408644184176,0.2845470690775108,-0.6855420074109138],[0.36869788692168753,0.022827023461346962,0.5064698648194299,-0.4703408850462729,-0.36541089496833906],[0.12892337610198143,0.8200230541433281,0.0447757887183383,0.1670333002651392,0.8993457054279524],[-0.2971657506583043,0.485522778322426,0.7944907999619857,-0.7713468714572788,-0.3803782724966992],[-0.24463129252472893,-0.8040155541706245,-0.4787131792272823,-0.6808617963702621,0.8823560072658708]],\"v2\":[[-0.624183704662755,-1.0451052181462246,-0.2814222311475065,-0.3354562640454462,2.000215994798745],[1.304013807858937,-1.2949290118426027,-1.274288771981608,1.3783276766624326,0.5918960339827718]],\"w1\":[[-1.804815936738639,-1.177820457498696],[-0.26858615380810363,0.942696914999828],[0.045393854822685514,1.0175755864672742],[0.9657639036369005,0.8851033322191303],[0.9882029320129792,-1.731780185362415]],\"w2\":[[-1.2783993770868123,0.12079935279430898,0.5404098532155389,-0.7614378857555226,-0.8806415399208807,-1.1425373462620356],[1.1519307867425934,1.7851341509876484,1.4232979025735768,1.9335508578138167,1.2388319720461638,1.2688802766586058],[1.0836842481718094,0.24511103285668004,0.19049263794782964,0.26731984115471025,1.0669397542018493,1.067090986764354],[-1.1907723532803773,0.19092261395624277,-0.17818889703150334,-1.2393336475691328,-0.9319272349811759,-0.7091295825768392],[0.2667338682884674,-2.3674951959521846,-2.19524658584935,-0.3717940912566747,-0.6570853336446724,-0.6287452290848574]]},\"Model_type\":\"AANN\",\"BeforeCleanSamNum\":2143,\"AfterCleanSamNum\":2067,\"CleanOrNot\":true}","interval":300000} |
|||
# result = clean_main(info) |
|||
# print('aaa') |
|||
model = {
|||
"Train_X_min": [ 20.821, 14.869, 43.163, 21.262, 21.35, 21.262 ], |
|||
"Train_X_max": [ 49.126, 49.925, 169.502, 77.588, 78.394, 77.856 ], |
|||
"Train_X_std": [ 7.125, 10.472, 31.807, 14.634, 14.847, 14.718 ], |
|||
"Train_X_mean": [ 36.205, 36.429, 111.547, 50.583, 50.955, 50.672 ], |
|||
"Train_X_bais_max": [ [ 0, 0, 0, 0, 0, 0 ] ], |
|||
"Train_X_bais_min": [ [ 0, 0, 0, 0, 0, 0 ] ], |
|||
"Train_X_bais_mean": [ [ 0, 0, 0, 0, 0, 0 ] ], |
|||
"QCUL_95_line": [ 0, 0, 0, 0, 0, 0 ], |
|||
"QCUL_99_line": [ 0, 0, 0, 0, 0, 0 ], |
|||
"r2": [ 0.9868677166895696, 0.9972168348807384, 0.994155641760367, 0.997452401678795, 0.9972480237895449, 0.9973549357761101 ], |
|||
"count": 1837, |
|||
"maxdata": [ 49.1263924, 49.92462, 169.501648, 77.58756, 78.39406, 77.85637 ], |
|||
"mindata": [ 20.8214855, 14.86908, 43.1634521, 21.261837, 21.3499146, 21.261837 ], |
|||
"sigma": 0.01806189580260563, |
|||
"v1": [ |
|||
[ -0.23316133948572393, -0.9217914025920456, -0.07554897165584737, 0.5760860199806274, -1.0764354048741178 ], |
|||
[ 0.6269288587688561, 0.2873108121988404, -0.4522541335965181, 0.1976888146807624, -0.06952149372511203 ], |
|||
[ 0.1787240198568913, 0.19306067329316998, -0.6544931571469833, -0.7615224968711883, 0.34011752017824165 ], |
|||
[ 0.018938620699273264, 0.730637377526106, 0.4933076792373918, 0.6451509410317868, 0.053819869986792335 ], |
|||
[ -0.19609562960367105, 0.11172696177830604, -0.1584273023561399, 0.331300527227796, 0.5888365193680527 ], |
|||
[ 0.06189724560442225, -0.8971329436091366, 0.5277390828634656, -0.5983529844040149, 0.7979948968923328 ] |
|||
], |
|||
"v2": [ |
|||
[ 1.2232098033189849, -1.6743334472355316, 0.801596559892827, 1.1379819603344574, 0.6490732883102263 ], |
|||
[ 1.0900875026570802, -0.9667042438318552, -0.6416000023658467, -0.506978913609041, 1.6559592831593986 ] |
|||
], |
|||
"w1": [ |
|||
[ -1.330531486686979, 0.2236374553075324 ], |
|||
[ 0.7317719867861042, 2.0240829271774823 ], |
|||
[ 1.9002236694443064, -1.1102667989668475 ], |
|||
[ -0.18577976434243573, -0.5562627341260915 ], |
|||
[ -1.0437074026078084, -0.7558997859191849 ] |
|||
], |
|||
"w2": [ |
|||
[ -1.3748396962059533, -0.4858035766258788, -0.8013045354908804, -0.6155000036590996, -0.48891412875006274, -1.0627377754183602 ], |
|||
[ 1.4595863114619005, 2.68853721134863, 2.238122625694176, 1.9127830289008108, 1.9551336244607311, 1.8698425061744566 ], |
|||
[ 0.5758092226574661, -0.4237580862942265, -1.4401060479517787, 0.28527357176477214, 0.1858972429240083, 0.054877840090252594 ], |
|||
[ 0.7197290283109676, -1.73044863841078, -0.4906062165572168, -0.19012584014090636, -0.18883049934775636, 0.22915594950041754 ], |
|||
[ -1.4023393826788018, 0.16500469061488093, 0.4718410186558856, -1.5843596374153093, -1.6622678974964407, -1.2898135647821567 ] |
|||
] |
|||
} |
|||
Data = np.array([[24.25785, 22.9852276, 71.4623947, 26.1979523, 26.2861671, 26.2861671]])
result = AANN_Test(model, Data)
print(result)  # was a placeholder print('aaa')
|||
File diff suppressed because it is too large
Binary file not shown.
@ -0,0 +1,77 @@ |
|||
import numpy as np |
|||
import time |
|||
from AANN_Fit import AANN_Fit |
|||
import itertools |
|||
from AANN_Derivative import AANN_Derivative |
|||
|
|||
|
|||
def AANN_RB(fault_sample, fault_magnitude, model):
"""
Reconstruction-based isolation that searches all variable combinations.
:param fault_sample: (m, n) array of faulty samples
:param fault_magnitude: (m, n) array of injected fault magnitudes
:param model: trained AANN model (v1, v2, w1, w2, sigma, maxdata, mindata)
:return: fdr, far, tc, dr, rod
"""
|||
[m, n] = fault_sample.shape |
|||
v1 = model.v1 |
|||
v2 = model.v2 |
|||
w1 = model.w1 |
|||
w2 = model.w2 |
|||
sigma = model.sigma |
|||
isolated = np.zeros((m, n)) |
|||
detected = 0 |
|||
rod_n = 0 |
|||
mdata = (2*fault_sample-np.tile(model.maxdata, (m, 1))-np.tile(model.mindata, (m, 1)))/(np.tile(model.maxdata, (m, 1))-np.tile(model.mindata, (m, 1))) |
|||
# Normalize the data to [-1, 1] with the training min/max.
|||
mdata1 = (2 * (fault_sample-fault_magnitude) - np.tile(model.maxdata, (m, 1)) - np.tile(model.mindata, (m, 1))) / ( |
|||
np.tile(model.maxdata, (m, 1)) - np.tile(model.mindata, (m, 1))) |
|||
# mdata = mdata/np.tile(pow(np.sum(pow(mdata1, 2), axis=1), 1/2), (n, 1)).T |
|||
time1 = time.time()*1000 |
|||
index = np.zeros(m) |
|||
index2 = np.zeros(m) |
|||
index_v = np.zeros((m, n)) |
|||
nodenum_record = np.zeros(m) |
|||
for sample_id in range(m): |
|||
output = AANN_Fit(mdata[sample_id, :], v1, v2, w1, w2) |
|||
output2 = AANN_Fit(mdata1[sample_id, :], v1, v2, w1, w2) |
|||
index[sample_id] = np.sum((mdata[sample_id, :]-output)*(mdata[sample_id, :]-output)) |
|||
index2[sample_id] = np.sum((mdata1[sample_id, :] - output2) * (mdata1[sample_id, :] - output2)) |
|||
index_v[sample_id, :] = (mdata[sample_id, :]-output)*(mdata[sample_id, :]-output) |
|||
if index[sample_id] > sigma: |
|||
vnum = 1 |
|||
detected += 1 |
|||
nodenum = 0 |
|||
while vnum <= n: |
|||
variablecombination = itertools.combinations([i for i in range(n)], vnum) |
|||
combi_list =np.array(list(variablecombination)) |
|||
cur_rbindex = np.zeros(combi_list.shape[0]) |
|||
vc_id = 0 |
|||
for v_combination in combi_list:
cur_fault_variables = np.array(v_combination).astype(np.float64)  # fixes the cur_fault_varibles/cur_fault_variables name mix-up
cur_rbindex[vc_id], B, C, spe = AANN_Derivative(v1, v2, w1, w2, mdata[sample_id, :], cur_fault_variables)
|||
nodenum = nodenum+1 |
|||
vc_id = vc_id+1 |
|||
min_rbindex, op_vc_id = np.min(cur_rbindex), np.argmin(cur_rbindex) |
|||
cur_vc = combi_list[op_vc_id, :] |
|||
if min_rbindex < sigma*2: |
|||
isolated[sample_id, combi_list[op_vc_id, :]] = 1 |
|||
break |
|||
else: |
|||
vnum = vnum+1 |
|||
nodenum_record[sample_id] = nodenum |
|||
maxindex1 = np.argmax(isolated[sample_id, :]) |
|||
maxindex2 = np.argmax(np.abs(fault_magnitude[sample_id, :])) |
|||
if maxindex1 == maxindex2: |
|||
rod_n = rod_n+1 |
|||
time2 = time.time() * 1000 |
|||
tc = round(time2 - time1) |
|||
real_fault = (fault_magnitude > 0).astype('int') |
|||
fdr = np.sum(((real_fault - (isolated == 0).astype('int')) == 1).astype('int')) / np.sum(real_fault) * 100 |
|||
far = np.sum(((real_fault - isolated) == -1).astype('int')) / np.sum((real_fault == 0).astype('int')) * 100 |
|||
dr = detected / np.sum((np.sum(real_fault, axis=1) > 0).astype('int')) * 100 |
|||
rod = rod_n / detected * 100 |
|||
nodes = np.mean(nodenum_record) |
|||
|
|||
return fdr, far, tc, dr, rod |
|||
|
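# A hedged usage sketch (assumption: not in the source); the toy weights below
# are random stand-ins, only meant to show the shapes AANN_RB expects.
import numpy as np

class ToyModel:  # mirrors the Model attribute container from the training file
    pass

rng = np.random.default_rng(0)
toy = ToyModel()
toy.v1, toy.w1 = rng.standard_normal((6, 5)), rng.standard_normal((5, 2))
toy.v2, toy.w2 = rng.standard_normal((2, 5)), rng.standard_normal((5, 6))
toy.sigma = 0.02
toy.maxdata = np.full(6, 80.0)  # per-variable training maxima
toy.mindata = np.full(6, 20.0)  # per-variable training minima

samples = rng.uniform(25.0, 75.0, size=(10, 6))
magnitudes = np.zeros((10, 6))
magnitudes[:, 2] = 5.0  # fault injected on variable 2
# fdr, far, tc, dr, rod = AANN_RB(samples + magnitudes, magnitudes, toy)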
|||
@ -0,0 +1 @@ |
|||
#error Do not use this file, it is the result of a failed Cython compilation. |
|||
@ -0,0 +1,143 @@ |
|||
import numpy as np
import time
import copy
import json  # needed for json.dumps(ReconRes) below

from AANN_Derivative import AANN_Derivative
from AANN_Fit import AANN_Fit
|||
|
|||
|
|||
|
|||
def AANN_SF_SBSRB(fault_sample, fault_magnitude, v1, v2, w1, w2, sigma, mindata, maxdata): |
|||
""" |
|||
前向后向序列 |
|||
:param fault_sample: |
|||
:param fault_magnitude: |
|||
:param model: |
|||
:return: |
|||
""" |
|||
# print(type(fault_sample)) |
|||
a = 0 |
|||
[m, n] = fault_sample.shape |
|||
isolated = np.zeros((m, n)) |
|||
delt_ff=np.zeros((m, n)) |
|||
detected = 0  # number of samples detected as faulty
|||
rod_n = 0 |
|||
mdata = (2*fault_sample-np.tile(maxdata, (m, 1))-np.tile(mindata, (m, 1)))/(np.tile(maxdata, (m, 1))-np.tile(mindata, (m, 1))) |
|||
time1 = int(round(time.time()*1000)) |
|||
confidence = 2*sigma |
|||
index = [] |
|||
nodenum_record = np.zeros(m) |
|||
spe = np.zeros(m) |
|||
for sample_id in range(m):
# output = model(mdata[sample_id,:])
|||
output = AANN_Fit(mdata[sample_id, :], v1, v2, w1, w2) |
|||
index.append(np.sum((mdata[sample_id, :]-output)*(mdata[sample_id, :]-output))) |
|||
spe[sample_id] = index[sample_id] |
|||
if index[sample_id] > sigma: |
|||
vnum = 1 |
|||
detected += 1 |
|||
node_num = 0 |
|||
# Forward selection pass
|||
variables = [i for i in range(n)] |
|||
selected = np.zeros((n, 1)) |
|||
selected_v_num = -1 |
|||
while selected_v_num <= n: |
|||
selected_variables = [variables[i] for i in (np.where(selected == 1)[0])] |
|||
unselected_variables = [variables[i] for i in (np.where(selected == 0)[0])] |
|||
cur_rbindex = np.zeros(len(unselected_variables)) |
|||
for v_id in range(len(unselected_variables)): |
|||
cur_fault_variables = copy.deepcopy(selected_variables) |
|||
cur_fault_variables.append(unselected_variables[v_id]) |
|||
cur_fault_variables = np.array(cur_fault_variables).astype(np.float64) |
|||
cur_rbindex[v_id], B, C, SPE = AANN_Derivative(v1.astype(np.float64), v2.astype(np.float64), w1.astype(np.float64), w2.astype(np.float64), mdata[sample_id, :], cur_fault_variables) |
|||
node_num += 1  # was "node_num += node_num", which never leaves zero
|||
min_rbindex, op_vc_id = min(cur_rbindex), np.argmin(cur_rbindex) |
|||
selected[unselected_variables[op_vc_id]] = 1 |
|||
if min_rbindex < confidence: |
|||
isolated_variable = [variables[i] for i in (np.where(selected == 1)[0])]  # FIXME: flagged as questionable in the source
|||
isolated[sample_id, isolated_variable] = 1 |
|||
break |
|||
else: |
|||
selected_v_num += 1 |
|||
# Backward elimination pass
|||
remained_v_num = len(np.where(selected == 1)[0]) |
|||
while remained_v_num > 1: |
|||
remained_variables = [variables[i] for i in (np.where(selected == 1)[0])] |
|||
cur_rbindex = np.zeros(len(remained_variables)) |
|||
for v_id in range(len(remained_variables)): |
|||
cur_fault_variables = copy.deepcopy(remained_variables) |
|||
cur_fault_variables = np.delete(cur_fault_variables, v_id) |
|||
cur_fault_variables = np.array(cur_fault_variables).astype(np.float64) |
|||
cur_rbindex[v_id], B, C, SPE = AANN_Derivative(v1, v2, w1, w2, mdata[sample_id, :], cur_fault_variables) |
|||
node_num += 1  # same fix as the forward pass
|||
min_rbindex, op_vc_id = min(cur_rbindex), np.argmin(cur_rbindex) |
|||
if min_rbindex > confidence: |
|||
isolated_variable = [variables[i] for i in (np.where(selected == 1)[0])]  # FIXME: flagged as questionable in the source
|||
isolated[sample_id, :] = 0 |
|||
isolated[sample_id, isolated_variable] = 1 |
|||
isolated_variable = np.array(isolated_variable).astype(np.float64) |
|||
A, delt_ff[sample_id,:], C, D = AANN_Derivative(v1, v2, w1, w2, mdata[sample_id, :], |
|||
isolated_variable) |
|||
# spe[sample_id] = min_rbindex |
|||
break |
|||
else: |
|||
selected[remained_variables[op_vc_id], :] = 0 |
|||
remained_v_num -= 1 |
|||
# End of the backward pass
|||
nodenum_record[sample_id] = node_num |
|||
false_alarm = np.where((((fault_magnitude[sample_id, :]>0).astype('int')-isolated[sample_id, :]) == -1) == True) |
|||
print('sample_id-->{};false alarm:{}'.format(sample_id, false_alarm)) |
|||
maxvalue1, maxindex1 = max(isolated[sample_id, :]), np.argmax(isolated[sample_id, :]) |
|||
maxvalue2, maxindex2 = max(abs(fault_magnitude[sample_id, :])), np.argmax(abs(fault_magnitude[sample_id, :])) |
|||
if maxindex1 == maxindex2: |
|||
rod_n +=1 |
|||
time2 = int(round(time.time()*1000)) |
|||
tc = time2-time1 |
|||
real_fault = (abs(fault_magnitude) > 0).astype('int') |
|||
fdr = np.sum(((real_fault-(isolated == 0).astype('int')) == 1).astype('int'))/np.sum(real_fault) |
|||
far = np.sum(((real_fault-isolated) == -1).astype('int'))/np.sum((real_fault == 0).astype('int')) |
|||
|
|||
#fdr_variable, far_variable |
|||
# fdr_variable = [] |
|||
# far_variable = [] |
|||
# for i in range(real_fault.shape[1]): |
|||
# if np.sum(real_fault[:,i]) != 0: |
|||
# fdr_variable.append(np.sum(((real_fault[:,i]-(isolated[:,i] == 0).astype('int')) == 1).astype('int'))/np.sum(real_fault[:,i]))*100 |
|||
# else: |
|||
# fdr_variable.append(0) |
|||
# if np.sum((real_fault[:,i] == 0).astype('int')) != 0: |
|||
# far_variable.append(np.sum(((real_fault[:,i]-isolated[:,i]) == -1).astype('int'))/np.sum((real_fault[:,i] == 0).astype('int')))*100 |
|||
# else: |
|||
# far_variable.append(0) |
|||
fdr_variable = fdr |
|||
far_variable = far |
|||
|
|||
delt_ff = delt_ff/2*(np.tile(maxdata, (m, 1))-np.tile(mindata, (m, 1)))  # de-normalize the bias values
|||
Reconstruction_precision=np.sum(np.average(abs(fault_magnitude-delt_ff), axis=0)) |
|||
|
|||
# Prepared for AANN_test_offline, 2021-6-16 rsj&lf
Data_origin = fault_sample
errorData = delt_ff  # measurement minus reconstruction
reconData = Data_origin - errorData
SPE_list = np.sum(errorData ** 2)
|||
R = 0 |
|||
for index in range(0, reconData.shape[1]): |
|||
vector1 = Data_origin[:, index] |
|||
vector2 = np.array(reconData)[:, index] |
|||
R += np.dot(vector1, vector2.T) / (np.sqrt(np.sum(vector1 ** 2)) * np.sqrt(np.sum(vector2 ** 2))) |
|||
R /= reconData.shape[1] |
|||
paraState = np.zeros(Data_origin.shape)  # was undefined here; placeholder mirroring the PCA variant
items = [('reconData', reconData.tolist()),
('errorData', errorData.tolist()), ('R', R.tolist()), ('SPE', SPE_list), ('FAI', SPE_list),
('paraState', paraState.tolist())]
ReconRes = json.dumps(dict(items))
# End of the AANN_test_offline block
|||
|
|||
|
|||
dr = detected/np.sum((np.sum(real_fault, axis=1) > 0).astype('int'))*100 |
|||
rod = rod_n/detected*100 |
|||
nodes = np.mean(nodenum_record) |
|||
|
|||
|
|||
|
|||
return delt_ff,fdr,far,fdr_variable,far_variable,Reconstruction_precision,spe,ReconRes |
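# A hedged numeric check (assumption: illustrative, not in the source) of the
# de-normalization used above: a bias d in raw units maps to 2*d/(max-min) in
# normalized units, so multiplying by (max-min)/2 recovers it.
import numpy as np
maxdata, mindata = np.array([80.0]), np.array([20.0])
d_raw = np.array([3.0])
d_norm = 2 * d_raw / (maxdata - mindata)                     # forward: 0.1
assert np.allclose(d_norm / 2 * (maxdata - mindata), d_raw)  # inverse recovers 3.0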
|||
File diff suppressed because it is too large
Binary file not shown.
@ -0,0 +1,279 @@ |
|||
# -*- coding: utf-8 -*- |
|||
""" |
|||
Created on 2021-5-3 |
|||
PCA source code |
|||
@author: rsj zjl |
|||
""" |
|||
|
|||
|
|||
|
|||
|
|||
|
|||
import matplotlib.pyplot as plt |
|||
from numba import jit |
|||
import time |
|||
import numpy as np |
|||
import traceback |
|||
from smote import smote |
|||
import config |
|||
import json |
|||
import sys |
|||
import requests |
|||
import datetime |
|||
import jenkspy |
|||
import xlrd |
|||
import AANN_Fit |
|||
|
|||
class Model(object): |
|||
def __init__(self): |
|||
self.v1 = [] |
|||
self.v2 = [] |
|||
self.w1 = [] |
|||
self.w2 = [] |
|||
self.sigma = 0 |
|||
self.maxdata = 0 |
|||
self.mindata = 0 |
|||
|
|||
def AANN(training_Sample, Nodes, num_epochs):
"""
Train an auto-associative neural network (AANN).
:param training_Sample: numpy ndarray of training samples
:param Nodes: hidden-layer sizes, e.g. [6, 3, 6] for three hidden layers
:param num_epochs: number of training iterations
:return: JSON model string (contains v1, w1, v2, w2, sigma, maxdata, mindata)
"""
|||
time1 = int(round(time.time() * 1000)) |
|||
mm, nn = training_Sample.shape |
|||
# Preprocessing: normalize the data to [-1, 1]
|||
mx = np.mean(training_Sample, 0) |
|||
maxdata = np.max(training_Sample, 0) |
|||
mindata = np.min(training_Sample, 0) |
|||
mdata = (2 * training_Sample - (maxdata + mindata + np.zeros((mm, nn)))) / (maxdata - mindata + np.zeros((mm, nn))) |
|||
Nodes = np.array(Nodes).astype(np.int64) |
|||
model = Model() |
|||
# np.save('mdata.npy', mdata)
|||
count, spe, o, v1, v2, w1, w2, sigma = cur_aann(mm, nn, num_epochs, Nodes, mdata) |
|||
reconData = AANN_Fit.AANN_Fit(mdata, v1, v2, w1, w2) |
|||
r2 = 1-np.sum(np.power((mdata-reconData),2),axis=0)/np.sum(np.power((np.tile(np.average(mdata,axis=0), (mm,1))-reconData),2),axis=0) |
|||
# Postprocessing: map the data back to engineering units
|||
a = maxdata - mindata + np.zeros((mm, nn)) |
|||
b = maxdata + mindata + np.zeros((mm, nn)) |
|||
reconData = np.matrix((np.multiply(mdata,a) + b)/2) |
|||
Train_X_min = np.min(training_Sample, axis=0)    # training minima
Train_X_max = np.max(training_Sample, axis=0)    # training maxima
Train_X_mean = np.mean(training_Sample, axis=0)  # training means
Train_X_std = np.std(training_Sample, axis=0)    # training standard deviations
Train_X_bais = training_Sample - reconData       # training residuals
Train_X_bais_max = np.max(np.abs(Train_X_bais), axis=0)    # max absolute residual, per column
Train_X_bais_min = np.min(np.abs(Train_X_bais), axis=0)    # min absolute residual, per column
Train_X_bais_mean = np.mean(np.abs(Train_X_bais), axis=0)  # mean absolute residual, per column
# 95%/99% residual bands from the normal quantiles 1.96 and 2.58.
Train_X_bais_std_upperB95 = np.array(np.abs(1.96 * np.std(Train_X_bais, axis=0) + Train_X_bais_mean))[0]
Train_X_bais_std_upperB99 = np.array(np.abs(2.58 * np.std(Train_X_bais, axis=0) + Train_X_bais_mean))[0]
Train_X_bais_std_lowerB95 = np.array(np.abs(1.96 * np.std(Train_X_bais, axis=0) - Train_X_bais_mean))[0]
Train_X_bais_std_lowerB99 = np.array(np.abs(2.58 * np.std(Train_X_bais, axis=0) - Train_X_bais_mean))[0]
QCUL_95_line = []  # control limits
QCUL_99_line = []
|||
for index1 in range(len(Train_X_bais_std_upperB95)): |
|||
QCUL_95_line.append(max(Train_X_bais_std_upperB95[index1], Train_X_bais_std_lowerB95[index1])) |
|||
QCUL_99_line.append(max(Train_X_bais_std_upperB99[index1], Train_X_bais_std_lowerB99[index1])) |
|||
QCUL_95_line = np.array(QCUL_95_line) |
|||
QCUL_99_line = np.array(QCUL_99_line) |
|||
##################################################################################################################### |
|||
items = [('Train_X_min', np.around(Train_X_min, decimals=3).tolist()), |
|||
('Train_X_max', np.around(Train_X_max, decimals=3).tolist()), |
|||
('Train_X_std', np.around(Train_X_std, decimals=3).tolist()), |
|||
('Train_X_mean',np.around(Train_X_mean, decimals=3).tolist()), |
|||
(('Train_X_bais_max',np.around(Train_X_bais_max, decimals=3).tolist())), |
|||
(('Train_X_bais_min', np.around(Train_X_bais_min, decimals=3).tolist())), |
|||
(('Train_X_bais_mean', np.around(Train_X_bais_mean, decimals=3).tolist())), |
|||
('QCUL_95_line', np.around(QCUL_95_line, decimals=3).tolist()), |
|||
('QCUL_99_line', np.around(QCUL_99_line, decimals=3).tolist()), |
|||
('r2', r2.tolist()), |
|||
('count', count), |
|||
('maxdata', maxdata.tolist()), |
|||
('mindata', mindata.tolist()), |
|||
('sigma', sigma), |
|||
('v1', v1.tolist()), |
|||
('v2', v2.tolist()), |
|||
('w1', w1.tolist()), |
|||
('w2', w2.tolist()) |
|||
] |
|||
time2 = int(round(time.time() * 1000)) |
|||
tc = time2 - time1 |
|||
res_items = [('Model_info', dict(items)), ('Model_type', 'AANN')] |
|||
result = dict(res_items) # json.dumps(result) |
|||
return json.dumps(result) |
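# A hedged usage sketch (assumption: toy data; run under __main__, not at import time):
#   payload = json.loads(AANN(np.random.rand(50, 6), [5, 2, 5], 10))
#   v1 = np.array(payload["Model_info"]["v1"])  # (6, 5) mapping-layer weights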
|||
|
|||
@jit(nopython=True, cache=True) |
|||
def cur_aann(mm, nn, num_epochs, Nodes, mdata): |
|||
alpha0 = 0.001 |
|||
alfa = 0.5 |
|||
samplenum = mm |
|||
spe = np.zeros(num_epochs + 1) |
|||
errorp = np.zeros(samplenum) |
|||
v1 = 2 * np.random.rand(nn, Nodes[0]) - 1 |
|||
dv1 = np.zeros((nn, Nodes[0])) |
|||
w1 = 2 * np.random.rand(Nodes[0], Nodes[1]) - 1 |
|||
dw1 = np.zeros((Nodes[0], Nodes[1])) |
|||
v2 = 2 * np.random.rand(Nodes[1], Nodes[2]) - 1 |
|||
dv2 = np.zeros((Nodes[1], Nodes[2])) |
|||
w2 = 2 * np.random.rand(Nodes[2], nn) - 1 |
|||
dw2 = np.zeros((Nodes[2], nn)) |
|||
|
|||
xlist = mdata |
|||
expectlist = mdata |
|||
count = 0 |
|||
aa = alfa |
|||
alpha = alpha0 |
|||
y1, y2, y3, o, yitao = (np.zeros((samplenum, Nodes[0])), np.zeros((samplenum, Nodes[1])),
np.zeros((samplenum, Nodes[2])), np.zeros((samplenum, nn)), np.zeros((samplenum, nn)))
yitay1, yitay2, yitay3 = (np.zeros((samplenum, Nodes[0])), np.zeros((samplenum, Nodes[1])),
np.zeros((samplenum, Nodes[2])))
|||
while count < num_epochs: |
|||
c = 0 |
|||
while c < samplenum: |
|||
d = expectlist[c, :] |
|||
x = xlist[c, :] |
|||
y1[c, :] = (1 / (1 + np.exp(-np.dot(x, v1)))) |
|||
y2[c, :] = np.dot(y1[c, :], w1) |
|||
y3[c, :] = (1 / (1 + np.exp(-np.dot(y2[c, :], v2)))) |
|||
o[c, :] = np.dot(y3[c, :], w2) |
|||
yitao[c, :] = (d - o[c, :]) |
|||
errorp[c] = 0.5 * np.sum(yitao[c, :] * yitao[c, :]) |
|||
yitay3[c, :] = (np.dot(yitao[c, :], w2.T) * y3[c, :] * (1 - y3[c, :])) |
|||
yitay2[c, :] = np.dot(yitay3[c, :], v2.T) |
|||
yitay1[c, :] = np.dot(yitay2[c, :], w1.T) * y1[c, :] * (1 - y1[c, :]) |
|||
# Update the weights of each layer
|||
deltw2 = np.dot(alpha * y3[c, :].reshape(Nodes[2], 1), yitao[c, :].reshape(1, nn)) |
|||
w2 = w2 + deltw2 + aa * dw2 |
|||
dw2 = deltw2 |
|||
deltv2 = np.dot(alpha * y2[c, :].reshape(Nodes[1], 1), yitay3[c, :].reshape(1, Nodes[2])) |
|||
v2 = v2 + deltv2 + aa * dv2 |
|||
dv2 = deltv2 |
|||
deltw1 = np.dot(alpha * y1[c, :].reshape(Nodes[0], 1), yitay2[c, :].reshape(1, Nodes[1])) |
|||
w1 = w1 + deltw1 + aa * dw1 |
|||
dw1 = deltw1 |
|||
deltv1 = np.dot(alpha * x.reshape(nn, 1), yitay1[c, :].reshape(1, Nodes[0])) |
|||
v1 = v1 + deltv1 + aa * dv1 |
|||
dv1 = deltv1 |
|||
c = c + 1 |
|||
spe[count] = np.sum(errorp) / samplenum |
|||
if count > 1 and abs(spe[count] - spe[count - 1]) < 0.0000001: |
|||
break |
|||
count += 1 |
|||
|
|||
sigma = 3 * np.sum((expectlist - o) * (expectlist - o)) / mm |
|||
|
|||
|
|||
return count, spe, o, v1, v2, w1, w2, sigma |
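# A hedged shape sketch (assumption: illustrative only) of the bottleneck mapping
# trained above: x -> sigmoid(x@v1) -> @w1 (bottleneck) -> sigmoid(@v2) -> @w2.
import numpy as np
nn, Nodes = 6, [5, 2, 5]
x = np.zeros((1, nn))
v1, w1 = np.zeros((nn, Nodes[0])), np.zeros((Nodes[0], Nodes[1]))
v2, w2 = np.zeros((Nodes[1], Nodes[2])), np.zeros((Nodes[2], nn))
y1 = 1 / (1 + np.exp(-x @ v1))   # (1, 5) mapping layer
y2 = y1 @ w1                     # (1, 2) bottleneck scores
y3 = 1 / (1 + np.exp(-y2 @ v2))  # (1, 5) de-mapping layer
o = y3 @ w2                      # (1, 6) reconstruction
assert o.shape == x.shape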
|||
|
|||
def isnumber(limits):
# NOTE: isdigit() rejects decimal points, so only integer limits pass this check.
flag = True
for item in limits:
item = item.replace("-", "")
if item.isdigit() == False:
flag = False
break
return flag
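# A hedged demo (assumption: toy input) of the integer-only limitation noted above:
#   isnumber(["-5", "100"])   -> True
#   isnumber(["45.2", "80"])  -> False, because isdigit() rejects the decimal point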
|||
|
|||
|
|||
def clearmain(info): |
|||
try: |
|||
Train_Data = info["Train_Data"] |
|||
times = Train_Data["time"].split(';') |
|||
points = Train_Data["points"].split(',') |
|||
interval = Train_Data["interval"] |
|||
if interval == 10000: |
|||
DCount = 60 |
|||
elif interval == 100000: |
|||
DCount = 6 |
|||
elif interval == 300000: |
|||
DCount = 5 |
|||
else: |
|||
DCount = 4 |
|||
dead = Train_Data["dead"].split(',') |
|||
limit = Train_Data["limit"].split(',') |
|||
uplower = Train_Data["uplow"].split(';') |
|||
|
|||
condition=info["conditon"].replace("=","==").replace(">=",">").replace("<=","<") |
|||
#percent = info["Hyper_para"]["percent"] |
|||
|
|||
count=0 |
|||
ItemsInfo, SamplingTimePeriods = [], [] |
|||
Constraint = "" |
|||
for i in range(len(points)): |
|||
iteminfo = {} |
|||
iteminfo["ItemName"] = points[i] # 加点 |
|||
if (dead[i] == "1"): # 判断是否参与死区清洗 |
|||
iteminfo["ClearDeadZone"] = "true" |
|||
else: |
|||
iteminfo["ClearDeadZone"] = "false" |
|||
if (limit[i] == "1"): # 参与上下限清洗 |
|||
limits = uplower[i].split(',') |
|||
if (isnumber(limits) == True): # 输入上下限正确 |
|||
count += 1 |
|||
Constraint += "[" + points[i] + "]>" + limits[0] + " and " + "[" + points[i] + "]<" + limits[1] + " and " |
|||
ItemsInfo.append(iteminfo) |
|||
if(count!=0): |
|||
Constraint = Constraint[:len(Constraint) - 4:] |
|||
else: |
|||
Constraint="1==1"#没有上下限清洗 |
|||
Constraint+=" and ("+condition+")" |
|||
Constraint = Constraint.replace("\n", " ") |
|||
for i in range(len(times)): |
|||
Eachsampletime = {} |
|||
timess = times[i].split(',') |
|||
Eachsampletime["StartingTime"] = timess[0] |
|||
Eachsampletime["TerminalTime"] = timess[1] |
|||
SamplingTimePeriods.append(Eachsampletime) |
|||
url = f"http://{config._CLEAN_IP}/exawebapi/exatime/GetCleaningData?ItemsInfo=%s&SamplingTimePeriods=%s&Constraint=%s&SamplingPeriod=%s&DCount=%d" % ( |
|||
ItemsInfo, SamplingTimePeriods, Constraint, interval, DCount) |
|||
response = requests.get(url) |
|||
content = json.loads(response.text) |
|||
data = np.array([item for item in content["ClearData"]]).T |
|||
try: |
|||
smote_data = info["smote"] |
|||
# smote_data = False |
|||
except KeyError: |
|||
smote_data = False |
|||
if smote_data: |
|||
try: |
|||
smote_index = [points.index(item["pointId"]) for item in info["smote_config"] if item["LAY_CHECKED"]] |
|||
smote_num = [int(item["number"]) for item in info["smote_config"] if item["LAY_CHECKED"]] |
|||
max_value = [float(item["max"]) for item in info["smote_config"] if item["LAY_CHECKED"]] |
|||
min_value = [float(item["min"]) for item in info["smote_config"] if item["LAY_CHECKED"]] |
|||
except KeyError: |
|||
pass |
|||
else: |
|||
if len(smote_num) != 0: |
|||
data, *_ = smote(data, smote_index, smote_num, max_value, min_value) |
|||
Nodes=info["layer"] |
|||
num_epochs=Train_Data["interval"] |
|||
result =AANN(data, Nodes, num_epochs)#看看nodes和num_epochs怎么传进来 |
|||
#result = pca(data, percent) |
|||
result = result.replace("NaN", "-1") |
|||
result=json.loads(result) |
|||
result["BeforeCleanSamNum"]=content["BeforeCleanSamNum"] |
|||
result["AfterCleanSamNum"]=content["AfterCleanSamNum"] |
|||
result["CleanOrNot"] = True |
|||
return json.dumps(result) |
|||
except Exception as e: |
|||
result = [{"CleanOrNot": False, "msg": traceback.format_exc()}] |
|||
return json.dumps(result, ensure_ascii=False) |
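# A hedged illustration (assumption: toy point names) of the constraint string
# clearmain builds: per-point limit clauses joined by "and", plus the run condition.
points, uplow = ["P1", "P2"], [("0", "100"), ("10", "50")]
constraint = " and ".join(f"[{p}]>{lo} and [{p}]<{hi}" for p, (lo, hi) in zip(points, uplow))
constraint += " and ([X]>20)"  # appended run condition
print(constraint)  # [P1]>0 and [P1]<100 and [P2]>10 and [P2]<50 and ([X]>20)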
|||
|
|||
if __name__ == "__main__": |
|||
info_str='{"layer":[5,2,5],"Train_Data":{"time":"2020-10-14 04:10:28,2020-10-17 14:53:00;2021-04-07 07:32:47,2021-04-16 08:39:01;2021-06-01 18:48:17,2021-06-03 14:29:40","points":"DH4_40MAG20CT362,DH4_40MAG20AN002GT,DH4_40MAG20CE102,DH4_40MAG20CT312,DH4_40MAG20CT322,DH4_40MAG20CT332","interval":300000,"dead":"1,1,1,1,1,1","limit":"0,0,0,0,0,0","uplow":"null,null;null,null;null,null;null,null;null,null;null,null"},"type":"AANN","conditon":"[DH4_40MAG20CE102]>20","epoch":"10000"}' |
|||
info = json.loads(info_str) |
|||
print(clearmain(info)) |
|||
|
|||
|
|||
|
|||
File diff suppressed because it is too large
Binary file not shown.
File diff suppressed because one or more lines are too long
Binary file not shown.
File diff suppressed because one or more lines are too long
Binary file not shown.
@ -0,0 +1,301 @@ |
|||
# -*- coding: utf-8 -*- |
|||
""" |
|||
@Time : 2019/9/12 15:30 |
|||
@Author : 杰森·家乐森 |
|||
@File : ANN_train.py |
|||
@Software: PyCharm |
|||
""" |
|||
import os |
|||
import json |
|||
import time |
|||
import datetime |
|||
import requests |
|||
import numpy as np |
|||
import pandas as pd |
|||
import tensorflow as tf |
|||
from tensorflow.keras import backend |
|||
import matplotlib.pyplot as plt |
|||
from tensorflow.keras import layers |
|||
from sklearn.preprocessing import MinMaxScaler |
|||
from tensorflow.keras.models import load_model |
|||
from tensorflow.keras.models import model_from_json |
|||
import config |
|||
|
|||
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" |
|||
|
|||
def get_history_value(points, time, interval,typedata): |
|||
# url="http://192.168.1.201:8080/openPlant/getMultiplePointHistorys" |
|||
url = f"http://{config._EXA_IP}:9000/exawebapi/exatime/GetSamplingValueArrayFloat" |
|||
headers = {"Content-Type": "application/json;charset=utf-8"} # ,"token":get_token() |
|||
point_array = points.split(",") |
|||
time_span = time.split(";") |
|||
value_array = [] |
|||
for item in point_array: |
|||
value_group = [] |
|||
for time_piece in time_span: |
|||
st = time_piece.split(",")[0] |
|||
et = time_piece.split(",")[1] |
|||
para = {"ItemName": item, "StartingTime": st, "TerminalTime": et, "SamplingPeriod": interval} |
|||
response = requests.get(url, headers=headers, params=para) |
|||
value = eval(str(response.text).replace("\"", "").replace("null", str(np.nan)))  # NOTE: eval() on a service response; see the safer json.loads sketch after this function
|||
for row in value: |
|||
value_group.append(row[1]) |
|||
value_array.append(value_group) |
|||
valuetrs=np.array(value_array) |
|||
typeArr = list(enumerate(typedata.split(","))) |
|||
data_x = [(valuetrs.T)[:, item[0]].tolist() for item in typeArr if item[1] == "0"] |
|||
data_y = [(valuetrs.T)[:, item[0]].tolist() for item in typeArr if item[1] == "1"] |
|||
x_data = np.array(data_x).T |
|||
y_data = np.array(data_y).T |
|||
return x_data,y_data,valuetrs |
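# A hedged sketch (assumption: the payload is the [timestamp, value] array that the
# PCA variant parses) of a safer parse than the eval() above.
import json
import numpy as np

def parse_samples(text):
    rows = json.loads(text.replace('"[', '[').replace(']"', ']'))
    return [np.nan if v is None else v for _, v in rows]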
|||
|
|||
def rmse(y_true, y_pred): |
|||
return backend.sqrt(backend.mean(tf.keras.losses.mean_squared_error(y_true, y_pred), axis=-1)) |
|||
|
|||
|
|||
def TrainOffline(x_data,y_data,hidden_layers,epochsdata): |
|||
# Column-wise max/min/mean of the original (x and y stacked) data.
valuetrs = np.hstack((x_data, y_data))
|||
train_max, train_min, train_ave = [], [], [] |
|||
for row in valuetrs.T: |
|||
train_max.append(np.max(row)) |
|||
train_min.append(np.min(row)) |
|||
train_ave.append(np.mean(row)) |
|||
mms1 = MinMaxScaler() |
|||
mms2 = MinMaxScaler() |
|||
mms3 = MinMaxScaler() |
|||
x_normal = mms1.fit_transform(x_data) |
|||
y_normal = mms2.fit_transform(y_data) |
|||
x_train = x_normal |
|||
y_train = y_normal |
|||
# Build the network
|||
model = tf.keras.Sequential() |
|||
model.add(layers.Dense(units=hidden_layers[0], input_dim=x_data.shape[1], activation="sigmoid")) |
|||
for i in range(len(hidden_layers) - 1): |
|||
model.add(layers.Dense(units=hidden_layers[i + 1], activation="sigmoid")) |
|||
model.add(layers.Dense(units=y_data.shape[1])) |
|||
# Choose the loss function and optimizer
|||
model.compile(loss="mse", optimizer="adam", metrics=["accuracy"]) |
|||
# Train the model
|||
history = model.fit(x_train, y_train, epochs=epochsdata, batch_size=80) |
|||
path = os.getcwd() + "\\ModelOline"  # sic: directory name kept as in the source
|||
if not os.path.exists(path): |
|||
os.makedirs(path) |
|||
now = datetime.datetime.now() |
|||
# filepath=path + "\\M"+"_"+str(now).replace(" ","-").replace(".","-").replace(":","-")+".h5" |
|||
# model.save(filepath) |
|||
filepath = model.to_json() |
|||
model_weight = model.get_weights() |
|||
test_data = model.predict(x_normal, batch_size=400) |
|||
|
|||
# Inverse-transform back to engineering units
|||
predict_data = mms2.inverse_transform(test_data) |
|||
with tf.compat.v1.Session(): |
|||
spe = rmse(test_data, y_normal).eval() |
|||
limit = 3 * spe |
|||
mimusdata = predict_data - y_data |
|||
mimusdatares = mimusdata.T |
|||
pre_min, pre_max, pre_ave, pre_s = [], [], [], [] |
|||
for row in mimusdatares: |
|||
pre_max.append(np.max(row)) |
|||
pre_min.append(np.min(row)) |
|||
pre_ave.append(np.mean(row)) |
|||
pre_s.append(np.std(row) * 3) |
|||
result, mms1new, mms2new = {}, {}, {} |
|||
mms1new["data_max_"] = mms1.data_max_.tolist() |
|||
mms1new["data_min_"] = mms1.data_min_.tolist() |
|||
mms1new["data_range_"] = mms1.data_range_.tolist() |
|||
mms1new["min_"] = mms1.min_.tolist() |
|||
mms1new["scale_"] = mms1.scale_.tolist() |
|||
mms2new["data_max_"] = mms2.data_max_.tolist() |
|||
mms2new["data_min_"] = mms2.data_min_.tolist() |
|||
mms2new["data_range_"] = mms2.data_range_.tolist() |
|||
mms2new["min_"] = mms2.min_.tolist() |
|||
mms2new["scale_"] = mms2.scale_.tolist() |
|||
result["filename"] = filepath |
|||
result["mms1"] = mms1new |
|||
result["mms2"] = mms2new |
|||
result["train_max"] = np.array(train_max).tolist() |
|||
result["train_min"] = np.array(train_min).tolist() |
|||
result["train_ave"] = np.array(train_ave).tolist() |
|||
result["pre_max"] = np.array(pre_max).tolist() |
|||
result["pre_min"] = np.array(pre_min).tolist() |
|||
result["pre_ave"] = np.array(pre_ave).tolist() |
|||
result["pre_s"] = np.array(pre_s).tolist() |
|||
result["pre_s_ave"] = np.mean(np.array(pre_s)).tolist() |
|||
result["limit"] = limit |
|||
result["weight"] = [model_weight[index].tolist() for index in range(len(model_weight))] |
|||
return json.dumps(result) |
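# A hedged sketch (assumption: helper not in the source) showing how the scaler
# parameters serialized above can rebuild a working MinMaxScaler elsewhere.
import numpy as np
from sklearn.preprocessing import MinMaxScaler

def scaler_from_dict(d):
    mms = MinMaxScaler()
    mms.data_max_ = np.array(d["data_max_"])
    mms.data_min_ = np.array(d["data_min_"])
    mms.data_range_ = np.array(d["data_range_"])
    mms.min_ = np.array(d["min_"])
    mms.scale_ = np.array(d["scale_"])
    return mms

# usage: mms2 = scaler_from_dict(result["mms2"]); y = mms2.inverse_transform(y_norm)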
|||
|
|||
def Train(x_data, y_data, hidden_layers, valuetrs, epochsdata):
# Column-wise max/min/mean of the original data.
#x_data = np.array(x_data)
#y_data = np.array(y_data)
|||
train_max, train_min, train_ave = [], [], [] |
|||
for row in valuetrs.T: |
|||
train_max.append(np.max(row)) |
|||
train_min.append(np.min(row)) |
|||
train_ave.append(np.mean(row)) |
|||
mms1 = MinMaxScaler() |
|||
mms2 = MinMaxScaler() |
|||
mms3=MinMaxScaler() |
|||
x_normal = mms1.fit_transform(x_data) |
|||
y_normal = mms2.fit_transform(y_data) |
|||
x_train = x_normal |
|||
y_train = y_normal |
|||
# Build the network
|||
model = tf.keras.Sequential() |
|||
model.add(layers.Dense(units=hidden_layers[0], input_dim=x_data.shape[1], activation="sigmoid")) |
|||
for i in range(len(hidden_layers) - 1): |
|||
model.add(layers.Dense(units=hidden_layers[i + 1], activation="sigmoid")) |
|||
model.add(layers.Dense(units=y_data.shape[1])) |
|||
model.summary() |
|||
# Choose the loss function and optimizer
|||
model.compile(loss="mse", optimizer="adam", metrics=["accuracy"]) |
|||
# Train the model
|||
history = model.fit(x_train, y_train, epochs=epochsdata, batch_size=80) |
|||
path = os.getcwd() + "\\ModelOline"  # sic: directory name kept as in the source
|||
if not os.path.exists(path): |
|||
os.makedirs(path) |
|||
now=datetime.datetime.now() |
|||
#filepath=path + "\\M"+"_"+str(now).replace(" ","-").replace(".","-").replace(":","-")+".h5" |
|||
#model.save(filepath) |
|||
filepath=model.to_json() |
|||
model_weight=model.get_weights() |
|||
test_data = model.predict(x_normal, batch_size=400) |
|||
|
|||
|
|||
# Inverse-transform back to engineering units
|||
predict_data = mms2.inverse_transform(test_data) |
|||
with tf.compat.v1.Session(): |
|||
spe = rmse(y_normal, test_data).eval() |
|||
limit = 3 * spe |
|||
|
|||
mimusdata = predict_data - y_data |
|||
mimusdatares = mimusdata.T |
|||
pre_min, pre_max, pre_ave, pre_s = [], [], [], [] |
|||
for row in mimusdatares: |
|||
pre_max.append(np.max(row)) |
|||
pre_min.append(np.min(row)) |
|||
pre_ave.append(np.mean(row)) |
|||
pre_s.append(np.std(row) * 3) |
|||
result,mms1new,mms2new={},{},{} |
|||
mms1new["data_max_"]=mms1.data_max_.tolist() |
|||
mms1new["data_min_"]=mms1.data_min_.tolist() |
|||
mms1new["data_range_"]=mms1.data_range_.tolist() |
|||
mms1new["min_"]=mms1.min_.tolist() |
|||
mms1new["scale_"]=mms1.scale_.tolist() |
|||
mms2new["data_max_"] = mms2.data_max_.tolist() |
|||
mms2new["data_min_"] = mms2.data_min_.tolist() |
|||
mms2new["data_range_"] = mms2.data_range_.tolist() |
|||
mms2new["min_"] = mms2.min_.tolist() |
|||
mms2new["scale_"] = mms2.scale_.tolist() |
|||
result["filename"]=filepath |
|||
result["mms1"]=mms1new |
|||
result["mms2"]=mms2new |
|||
result["train_max"] = np.array(train_max).tolist() |
|||
result["train_min"] = np.array(train_min).tolist() |
|||
result["train_ave"] = np.array(train_ave).tolist() |
|||
result["pre_max"] = np.array(pre_max).tolist() |
|||
result["pre_min"] = np.array(pre_min).tolist() |
|||
result["pre_ave"] = np.array(pre_ave).tolist() |
|||
result["pre_s"] = np.array(pre_s).tolist() |
|||
result["pre_s_ave"] = np.mean(np.array(pre_s)).tolist() |
|||
result["limit"] = limit |
|||
result["weight"] = [model_weight[index].tolist() for index in range(len(model_weight))] |
|||
return result |
|||
|
|||
|
|||
def isnumber(limits):
# NOTE: isdigit() rejects decimal points, so only integer limits pass this check.
flag = True
for item in limits:
item = item.replace("-", "")
if item.isdigit() == False:
flag = False
break
return flag
|||
|
|||
def clearmain(info): |
|||
try: |
|||
points = info["point"].split(',') |
|||
times = info["time"].split(';') |
|||
epochs = info["iter"] |
|||
layer = info["layer"] |
|||
typedata = info["type"] |
|||
condition=info["condition"].replace("=","==").replace(">=",">").replace("<=","<") |
|||
interval = 300000 |
|||
dead = info["dead"].split(',') |
|||
limit = info["limit"].split(',') |
|||
uplower = info["uplow"].split(';') |
|||
count=0 |
|||
ItemsInfo, SamplingTimePeriods = [], [] |
|||
Constraint = "" |
|||
for i in range(len(points)): |
|||
iteminfo = {} |
|||
iteminfo["ItemName"] = points[i] # 加点 |
|||
if dead[i] == "1": # 判断是否参与死区清洗 |
|||
iteminfo["ClearDeadZone"] = "true" |
|||
else: |
|||
iteminfo["ClearDeadZone"] = "false" |
|||
if limit[i] == "1": # 参与上下限清洗 |
|||
limits = uplower[i].split(',') |
|||
if (isnumber(limits) == True): # 输入上下限正确 |
|||
count += 1 |
|||
Constraint += "[" + points[i] + "]>" + limits[0] + " and " + "[" + points[i] + "]<" + limits[1] + " and " |
|||
ItemsInfo.append(iteminfo) |
|||
if count!=0: |
|||
Constraint = Constraint[:len(Constraint) - 4:] |
|||
else: |
|||
Constraint="1==1"#没有上下限清洗 |
|||
Constraint+=" and ("+condition+")" |
|||
Constraint = Constraint.replace("\n", " ") |
|||
for i in range(len(times)): |
|||
Eachsampletime = {} |
|||
timess = times[i].split(',') |
|||
Eachsampletime["StartingTime"] = timess[0] |
|||
Eachsampletime["TerminalTime"] = timess[1] |
|||
SamplingTimePeriods.append(Eachsampletime) |
|||
url = f"http://{config._CLEAN_IP}/exawebapi/exatime/GetCleaningData?ItemsInfo=%s&SamplingTimePeriods=%s&Constraint=%s&SamplingPeriod=%s&DCount=6" % ( |
|||
ItemsInfo, SamplingTimePeriods, Constraint, interval) |
|||
response = requests.get(url) |
|||
content = json.loads(response.text) |
|||
# data =np.array([item for item in content["ClearData"] if item ]).T |
|||
valuetrs = np.array([item for item in content["ClearData"]]).T |
|||
typeArr = list(enumerate(typedata.split(","))) |
|||
data_x = [valuetrs[:,item[0]].tolist() for item in typeArr if item[1]=="0"] |
|||
data_y = [valuetrs[:,item[0]].tolist() for item in typeArr if item[1]=="1"] |
|||
data_x = np.array(data_x).T |
|||
data_y = np.array(data_y).T |
|||
result = Train(data_x, data_y, layer, valuetrs, eval(epochs)) |
|||
result["BeforeCleanSamNum"]=content["BeforeCleanSamNum"] |
|||
result["AfterCleanSamNum"]=content["AfterCleanSamNum"] |
|||
result["CleanOrNot"] = True |
|||
return result |
|||
except Exception as e: |
|||
result = {"CleanOrNot": False} |
|||
return result |
|||
|
|||
|
|||
|
|||
def main(info): |
|||
points = info["point"] |
|||
time1 = info["time"] |
|||
interval = info["interval"] |
|||
epochs = info["iter"] |
|||
layer = info["layer"] |
|||
datatype = info["type"] |
|||
data_x, data_y, origndata = get_history_value(points, time1, interval, datatype) |
|||
result = Train(data_x, data_y, layer, origndata, eval(epochs)) |
|||
return result |
|||
|
|||
|
|||
if __name__ == "__main__": |
|||
# info_str = '{"time":"2020-01-19 23:26:04,2020-01-25 20:42:16;2020-01-26 16:23:49,2020-02-03 11:36:42;2020-02-05 20:02:49,2020-02-06 05:51:40","condition":"1=1","interval":300000,"dead":"1,1,1","layer":["6"],"point":"JL_D1_10MILLA:SEP_TEMP.PNT,JL_D1_10FSSS20A:HFC10CT301.PNT,JL_D1_10FSSS20A:HFC10CT302.PNT","type":"1,0,0","limit":"0,0,0","uplow":",;,;,","iter":"100"}' |
|||
info_str = '{"iter":"800","dead":"1,1,1,1","point":"DH4_40HLF10CE101,DH4_40HLF10CT351,DH4_40HLF10CT352,DH4_40HLF10CY101","limit":"0,0,0,0","layer":["5"],"type":"0,0,0,1","time":"2020-10-06 19:17:46,2020-10-12 14:58:19","condition":"1=1","interval":300000,"uplow":"null,null;null,null;null,null;null,null"}' |
|||
info = json.loads(info_str) |
|||
result = clearmain(info) |
|||
print(result) |
|||
# info = {"iter":"800","Train_Data_X":[[7.0,7.0,7.0,7.0,7.0,7.0,7.0],[8.0,8.0,8.0,8.0,8.0,8.0,8.0],[1.0,1.0,1.0,1.0,1.0,1.0,1.0],[2.0,2.0,2.0,2.0,2.0,2.0,2.0],[3.0,3.0,3.0,3.0,3.0,3.0,3.0],[4.0,4.0,4.0,4.0,4.0,4.0,4.0],[5.0,5.0,5.0,5.0,5.0,5.0,5.0],[6.0,6.0,6.0,6.0,6.0,6.0,6.0],[7.0,7.0,7.0,7.0,7.0,7.0,7.0],[8.0,8.0,8.0,8.0,8.0,8.0,8.0],[1.0,1.0,1.0,1.0,1.0,1.0,1.0],[2.0,2.0,2.0,2.0,2.0,2.0,2.0]],"hide":["7","5","1"],"Train_Data_Y":[[7.0],[8.0],[1.0],[2.0],[3.0],[4.0],[5.0],[6.0],[7.0],[8.0],[1.0],[2.0]]} |
|||
# result = TrainOffline(np.array(info["Train_Data_X"]), np.array(info["Train_Data_Y"]),info["hide"], eval(info["iter"])) |
|||
File diff suppressed because it is too large
Binary file not shown.
@ -0,0 +1,330 @@ |
|||
import execjs |
|||
import csv |
|||
import numpy as np |
|||
import pandas as pd |
|||
import time |
|||
import json |
|||
import jenkspy |
|||
import requests |
|||
import pymssql |
|||
import config |
|||
|
|||
class HealthyScoringSystem: |
|||
def __init__(self): |
|||
# Load the configuration tables from SQL Server.
|||
ms = MSSQL(host="172.28.137.230", user="sa", pwd="powerSIS#123", database="ASSESS") |
|||
|
|||
conditionlist = ms.ExecQuery(f"SELECT * FROM [ASSESS].[dbo].[conditionlist]") |
|||
pointconfigs = ms.ExecQuery(f"SELECT * FROM [ASSESS].[dbo].[pointconfigs]") |
|||
scorelist = ms.ExecQuery(f"SELECT * FROM [ASSESS].[dbo].[scorelist]") |
|||
subsystemweightlist = ms.ExecQuery(f"SELECT * FROM [ASSESS].[dbo].[subsystemweightlist]") |
|||
systemweightlist = ms.ExecQuery(f"SELECT * FROM [ASSESS].[dbo].[systemweightlist]") |
|||
unitweightlist = ms.ExecQuery(f"SELECT * FROM [ASSESS].[dbo].[unitweightlist]") |
|||
|
|||
# Key each table's rows by their stringified index ("0", "1", ...), preserving query order.
self.conditionlist = {str(i): row for i, row in enumerate(conditionlist)}
self.pointconfigs = {str(i): row for i, row in enumerate(pointconfigs)}
self.scorelist = {str(i): row for i, row in enumerate(scorelist)}
self.subsystemweightlist = {str(i): row for i, row in enumerate(subsystemweightlist)}
self.systemweightlist = {str(i): row for i, row in enumerate(systemweightlist)}
self.unitweightlist = {str(i): row for i, row in enumerate(unitweightlist)}
|||
|
|||
# self.treeconfigs = self.indexMap("treeconfigs.csv") |
|||
|
|||
self.inputData = {} |
|||
self.conditionMap = {} |
|||
self.outputData = {} |
|||
|
|||
self.time = time.time() |
|||
|
|||
def indexMap(self, csvFileName): |
|||
map = {} |
|||
csvFile = open(csvFileName, "r") |
|||
reader = csv.reader(csvFile) |
|||
colnames = [] |
|||
for item in reader: |
|||
|
|||
if reader.line_num == 1: |
|||
for colname in item: |
|||
colnames.append(colname) |
|||
else: |
|||
details = {} |
|||
for i in range(len(colnames) - 1): |
|||
details[colnames[i + 1]] = item[i + 1] |
|||
map[item[0]] = details |
|||
csvFile.close() |
|||
return map |
|||
|
|||
def getInputDate(self):
# Fetch the current value of every configured point.
for value in self.pointconfigs.values():
self.inputData[value['pointname']] = get_now_data(value['pointname'])  # swap in the live fetch after go-live
|||
self.time = time.time() |
|||
|
|||
def Scoring(self): |
|||
# Point-level scoring.
|||
for key, value in self.scorelist.items(): |
|||
x1 = self.inputData[self.pointconfigs[value['firstpoint']]['pointname']] |
|||
if value['formula'] == '0':
# score = np.clip(x1 - x2, 0, 1)
x2 = self.inputData[self.pointconfigs[value['secondpoint']]['pointname']]
y = np.clip(x1 - x2, 0, 1)
elif value['formula'] == '1':
# score = np.clip(abs(x1 - x2), 0, 1)
x2 = self.inputData[self.pointconfigs[value['secondpoint']]['pointname']]
y = np.clip(abs(x1 - x2), 0, 1)
elif value['formula'] == '2':
# score = np.tanh(x1)
y = np.tanh(x1)
else:
# Score evaluated from the stored JS formula string.
x2 = self.inputData[self.pointconfigs[value['secondpoint']]['pointname']]
y = execjs.eval(value['formula'].replace("x1", str(x1)).replace("x2", str(x2)))
|||
self.outputData[value['scoreoutput']] = y |
|||
|
|||
def Conditioning(self): |
|||
# Evaluate the gating conditions.
|||
for key, value in self.conditionlist.items(): |
|||
inputlist = value['inputlist'].split(',') |
|||
formula = value['code'] |
|||
# Substitute highest-numbered placeholders first so "x1" cannot clobber "x10".
for i in range(len(inputlist)):
formula = formula.replace("x" + str(len(inputlist) - i),
str(self.inputData[self.pointconfigs[inputlist[len(inputlist) - i - 1]]['pointname']]))
|||
y = execjs.eval(formula) |
|||
self.conditionMap[key] = y |
|||
|
|||
def weightedSumScoring(self, level): |
|||
# Aggregate-level scoring: weight-normalized dot product of next-level scores.
|||
switch = { |
|||
"subsystem": { |
|||
'currentlevellist': self.subsystemweightlist, |
|||
'nextlevellist': self.scorelist, |
|||
'nextlevelliststr': 'scorelist', |
|||
}, |
|||
"system": { |
|||
'currentlevellist': self.systemweightlist, |
|||
'nextlevellist': self.subsystemweightlist, |
|||
'nextlevelliststr': 'subsystemlist', |
|||
}, |
|||
"unit": { |
|||
'currentlevellist': self.unitweightlist, |
|||
'nextlevellist': self.systemweightlist, |
|||
'nextlevelliststr': 'systemlist', |
|||
}, |
|||
} |
|||
|
|||
currentlevellist = switch[level]['currentlevellist'] |
|||
nextlevellist = switch[level]['nextlevellist'] |
|||
nextlevelliststr = switch[level]['nextlevelliststr'] |
|||
|
|||
for key, value in currentlevellist.items(): |
|||
readyFlag = True |
|||
conditionlist = value['conditionlist'].split(',') |
|||
for condition in conditionlist: |
|||
readyFlag = readyFlag and self.conditionMap[condition] |
|||
readyFlag = True  # TODO: override to be removed later
|||
if readyFlag: |
|||
scoreindexlist = value[nextlevelliststr].split(',') |
|||
scorelist = [] |
|||
for scoreindex in scoreindexlist: |
|||
scorelist.append(self.outputData[nextlevellist[scoreindex]['scoreoutput']]) |
|||
weightlist = list(map(float, (value['weightlist'].split(',')))) |
|||
score = \ |
|||
np.dot(np.array(scorelist).reshape(1, len(scorelist)), |
|||
np.array(weightlist).reshape(len(scorelist), 1))[ |
|||
0, 0] / np.sum(weightlist) |
|||
self.outputData[value['scoreoutput']] = score |
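# A hedged numeric check (assumption: toy values) of the weighted sum above:
import numpy as np
scores, weights = [0.8, 0.6, 1.0], [2.0, 1.0, 1.0]
assert np.isclose(np.dot(scores, weights) / np.sum(weights), 0.8)  # (1.6+0.6+1.0)/4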
|||
|
|||
def getScore(self, unit=None, system=None, subsystem=None, type=None): |
|||
# Look up the stored score at the requested level.
|||
if subsystem != None: |
|||
for key, value in self.subsystemweightlist.items(): |
|||
if value['unit'] == unit and value['system'] == system and value['subsystem'] == subsystem and value[ |
|||
'type'] == type: |
|||
return self.outputData[value['scoreoutput']] |
|||
elif system != None and subsystem == None: |
|||
for key, value in self.systemweightlist.items(): |
|||
if value['unit'] == unit and value['system'] == system and value['type'] == type: |
|||
return self.outputData[value['scoreoutput']] |
|||
elif unit != None and system == None: |
|||
for key, value in self.unitweightlist.items(): |
|||
if value['unit'] == unit and value['type'] == type: |
|||
return self.outputData[value['scoreoutput']] |
|||
|
|||
def outputScore(self): |
|||
|
|||
nowtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.time)) |
|||
ms = MSSQL(host="172.28.137.230", user="sa", pwd="powerSIS#123", database="ASSESS") |
|||
|
|||
score = len(self.scorelist)
subsy = len(self.subsystemweightlist) + score
syste = len(self.systemweightlist) + subsy

# Write back condition results, then each level's scores, sliced out of outputData in order.
for i in range(len(self.conditionlist)):
setitem = f"result = '{self.conditionMap[f'{i}']}' , time = '{nowtime}'"
ms.ExecNonQuery(f"UPDATE [ASSESS].[dbo].[conditionlist] set {setitem} where indice = {i}")

outputs = list(self.outputData.values())
slices = {
'scorelist': outputs[:score],
'subsystemweightlist': outputs[score:subsy],
'systemweightlist': outputs[subsy:syste],
'unitweightlist': outputs[syste:],
}
for table, values in slices.items():
for j, value in enumerate(values):
setitem = f"result = '{value}' , time = '{nowtime}'"
ms.ExecNonQuery(f"UPDATE [ASSESS].[dbo].[{table}] set {setitem} where indice = {j}")
|||
|
|||
|
|||
|
|||
class MSSQL: |
|||
def __init__(self,host,user,pwd,database): |
|||
self.host = host |
|||
self.user = user |
|||
self.pwd = pwd |
|||
self.db = database |
|||
|
|||
def __GetConnect(self): |
|||
""" |
|||
得到连接信息 |
|||
返回: conn.cursor() |
|||
""" |
|||
if not self.db: |
|||
raise(NameError,"没有设置数据库信息") |
|||
self.conn = pymssql.connect(host=self.host,user=self.user,password=self.pwd,database=self.db,port=config._PORT, charset="utf8") |
|||
cur = self.conn.cursor() |
|||
if not cur: |
|||
raise(NameError,"连接数据库失败") |
|||
else: |
|||
return cur |
|||
|
|||
def ExecQuery(self,sql): |
|||
""" |
|||
执行查询语句 |
|||
返回的是一个包含tuple的list,list的元素是记录行,tuple的元素是每行记录的字段 |
|||
""" |
|||
cur = self.__GetConnect() |
|||
cur.execute(sql) |
|||
resList = cur.fetchall() |
|||
for i in range(len(resList)): |
|||
resList[i] = resList[i][1:]  # drop the first (index) column
# Build dict rows keyed by column name (by lifan); SELECT statements only.
desc = cur.description
desc = desc[:0] + desc[1:]  # drop the first column's descriptor to stay aligned
|||
object_dict = [ |
|||
dict(zip([col[0] for col in desc], row)) |
|||
for row in resList |
|||
] |
|||
|
|||
# Always close the connection after the query.
|||
self.conn.close() |
|||
return object_dict |
|||
|
|||
def ExecNonQuery(self,sql): |
|||
""" |
|||
执行非查询语句 |
|||
|
|||
调用示例: |
|||
cur = self.__GetConnect() |
|||
cur.execute(sql) |
|||
self.conn.commit() |
|||
self.conn.close() |
|||
""" |
|||
cur = self.__GetConnect() |
|||
cur.execute(sql) |
|||
self.conn.commit() |
|||
self.conn.close() |
|||
|
|||
|
|||
def get_now_data(items): |
|||
url = f"http://{config._EXA_IP}:9000/exawebapi/exanow/getfloatvaluebatch" |
|||
headers = {"Content-Type": "application/json;charset=utf-8"} # ,"token":get_token() |
|||
#point_array = items.split(',') |
|||
data = {} |
|||
data['ItemNames'] = items |
|||
res = requests.get(url, headers=headers, params=data) |
|||
response = res.text.replace("[","").replace("]","") |
|||
return float(response) |
|||
|
|||
if __name__ == '__main__': |
|||
a = "JL_D2_20DAS05A:LAV10CE101.PNT" |
|||
#get_now_data(a) |
|||
|
|||
HSS = HealthyScoringSystem() |
|||
|
|||
HSS.getInputDate() |
|||
HSS.Scoring() |
|||
HSS.Conditioning() |
|||
HSS.weightedSumScoring('subsystem') |
|||
HSS.weightedSumScoring('system') |
|||
HSS.weightedSumScoring('unit') |
|||
HSS.outputScore() |
|||
|
|||
print(HSS.getScore(unit='1', type='1')) |
|||
print(HSS.getScore(unit='1', system='1', type='1')) |
|||
print(HSS.getScore(unit='1', system='1', subsystem='1', type='1')) |
|||
|
|||
|
|||
|
|||
|
|||
# url = f"http://{config._CLEAN_IP}/exawebapi/exatime/GetCleaningData?ItemsInfo=%s&SamplingTimePeriods=%s&Constraint=%s&SamplingPeriod=%s&DCount=%d" % (ItemsInfo, SamplingTimePeriods, Constraint, interval, DCount) |
|||
# response = requests.get(url) |
|||
# content = json.loads(response.text) |
|||
|
|||
b = 1 |
|||
Binary file not shown.
@ -0,0 +1,294 @@ |
|||
# -*- coding: utf-8 -*- |
|||
""" |
|||
Created on Sun Feb 28 10:04:26 2016 |
|||
PCA source code |
|||
@author: liudiwei |
|||
""" |
|||
|
|||
import json
import sys
import time
import datetime

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import xlsxwriter as xw
import pymssql
import requests
import jenkspy
import xlrd
from scipy.stats import norm, f, chi2  # norm and chi2 were each imported twice in the source

# import PCA_Test_offline
import config
|||
|
|||
|
|||
""" |
|||
参数: |
|||
- XMat:传入的是一个numpy的矩阵格式,行表示样本数,列表示特征 |
|||
- k:表示取前k个特征值对应的特征向量 |
|||
返回值: |
|||
- finalData:参数一指的是返回的低维矩阵,对应于输入参数二 |
|||
- reconData:参数二对应的是移动坐标轴后的矩阵 |
|||
""" |
|||
|
|||
|
|||
def min_pos(X): |
|||
X[X <= 0] = np.max(X) |
|||
m = np.min(X) |
|||
re = np.where(X == m) |
|||
min_i = re[0] |
|||
min_j = re[1] |
|||
if m < 0: |
|||
m = 0 |
|||
return m, min_i, min_j |
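# A hedged demo (assumption: toy matrix) of min_pos: non-positive entries are
# masked with the matrix maximum first, so the result is the smallest positive
# element and its indices. Note that it mutates its argument in place.
import numpy as np
m, i, j = min_pos(np.array([[-1.0, 3.0], [2.0, -4.0]]))
print(m, i, j)  # 2.0 [1] [0]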
|||
|
|||
|
|||
def Lars(X, Y, D, DIAG, t, limit_line, lamuta): |
|||
n, m = X.shape |
|||
beta = np.zeros((1, m)) |
|||
A = [] |
|||
N_Co_added = 0 |
|||
i = 0 |
|||
mse = [] |
|||
for k in range(m): |
|||
i += 1 |
|||
ero = np.array(Y - beta[-1, :].T) |
|||
# c=np.dot(P,DIAG,P.T,ero) |
|||
c = np.dot(D, DIAG).dot(D.T).dot(ero)  # correlation of each predictor with the residual
|||
C = np.max(np.abs(c)) |
|||
mse.append(np.dot(ero.T, D).dot(DIAG).dot(D.T).dot(ero)) |
|||
if mse[k] < limit_line: |
|||
break |
|||
elif k == 0: |
|||
addIndex = np.where(abs(c) == C)[-1][0]
A.append(addIndex)  # active set
|||
# Update the count of covariates added so far.
N_Co_added = N_Co_added + 1
A_c = list(set(range(0, m)).difference(set(A)))  # inactive set
|||
s_A = np.diag(np.sign(c[A])) |
|||
|
|||
num_Zero_Coeff = len(A_c) |
|||
# Compute X_A, A_A, u_A and the inner product vector.
|||
X_A = np.dot(X[:, A], s_A).reshape(n, -1) |
|||
G_A = np.dot(X_A.T, X_A) |
|||
One_A = np.ones((len(A), 1)) |
|||
s = One_A.copy() |
|||
if G_A.shape == (): |
|||
inv_GA = 1 / G_A |
|||
else: |
|||
inv_GA = np.linalg.pinv(G_A) |
|||
# G_a_inv_red_cols = np.sum(inv_GA, 1) |
|||
A_A = 1 / np.sqrt(np.dot(s.T, inv_GA).dot(s)) |
|||
w_A = (A_A * inv_GA).dot(s)  # w_A: unit vector making equal angles with the active predictors
|||
u_A = np.dot(X_A, w_A) # .reshape(n) |
|||
a = X.T.dot(u_A) # inner product vector |
|||
gamma_Test = np.zeros((num_Zero_Coeff, 2)) |
|||
# gamma=[] |
|||
if N_Co_added == m - 1: |
|||
gamma = C / A_A |
|||
else: |
|||
for j in range(num_Zero_Coeff): |
|||
j_p = A_c[j] |
|||
first_term = (C - c[j_p]) / (A_A - a[j_p]) |
|||
second_term = (C + c[j_p]) / (A_A + a[j_p]) |
|||
gamma_Test[j, :] = np.array([first_term, second_term]).reshape(1, -1) |
|||
gamma, min_i, min_j = min_pos(gamma_Test) |
|||
# gamma.append(m_s) |
|||
addIndex = A_c[np.min(min_i)]
|||
beta_temp = np.zeros((m, 1)) |
|||
beta_temp[A] = beta[k, A].reshape(-1, 1) + np.dot(s_A, gamma * w_A) |
|||
beta = np.vstack((beta, beta_temp.transpose()))  # updated coefficients, i.e. the fault estimate f
|||
return beta, mse |
|||
|
|||
|
|||
# import sklearn |
|||
# q=sklearn.linear_model.Lars |
|||
|
|||
|
|||
class MSSQL: |
|||
def __init__(self,host,user,pwd,database): |
|||
self.host = host |
|||
self.user = user |
|||
self.pwd = pwd |
|||
self.db = database |
|||
|
|||
def __GetConnect(self): |
|||
""" |
|||
得到连接信息 |
|||
返回: conn.cursor() |
|||
""" |
|||
if not self.db: |
|||
raise(NameError,"没有设置数据库信息") |
|||
self.conn = pymssql.connect(host=self.host,user=self.user,password=self.pwd,database=self.db,port=config._PORT, charset="utf8") |
|||
cur = self.conn.cursor() |
|||
if not cur: |
|||
raise(NameError,"连接数据库失败") |
|||
else: |
|||
return cur |
|||
|
|||
def ExecQuery(self,sql): |
|||
""" |
|||
执行查询语句 |
|||
返回的是一个包含tuple的list,list的元素是记录行,tuple的元素是每行记录的字段 |
|||
""" |
|||
cur = self.__GetConnect() |
|||
cur.execute(sql) |
|||
resList = cur.fetchall() |
|||
|
|||
# Always close the connection after the query.
|||
self.conn.close() |
|||
return resList |
|||
|
|||
def ExecNonQuery(self,sql): |
|||
""" |
|||
执行非查询语句 |
|||
|
|||
调用示例: |
|||
cur = self.__GetConnect() |
|||
cur.execute(sql) |
|||
self.conn.commit() |
|||
self.conn.close() |
|||
""" |
|||
cur = self.__GetConnect() |
|||
cur.execute(sql) |
|||
self.conn.commit() |
|||
self.conn.close() |
|||
|
|||
def get_model_by_ID(model_id, version="v-test"): |
|||
ms = MSSQL(host=config._SQL_IP, user="sa", pwd="powerSIS#123", database="alert") |
|||
resList = ms.ExecQuery(f"SELECT Model_info FROM [alert].[dbo].[Model_CFG] where model_id={model_id}") |
|||
return json.loads(resList[0][0]) |
|||
|
|||
|
|||
def get_model_by_id_and_version(model_id, version): |
|||
ms = MSSQL(host=config._SQL_IP, user="sa", pwd="powerSIS#123", database="alert") |
|||
resList = ms.ExecQuery(f"SELECT Model_info FROM [alert].[dbo].[model_version] where model_id={model_id} and version='{version}'") |
|||
return json.loads(resList[0][0]) |
|||
|
|||
def pca(model, Data_origin): |
|||
Data = (Data_origin - model["Train_X_mean"]) / model["Train_X_std"]
featValue = np.array(model["featValue"])  # eigenvalues from training
k = model["K"]  # number of principal components
featVec = np.array(model["featVec"])  # eigenvectors from training
selectVec1 = np.array(model["selectVec"])
selectVec = featVec[:, 0:k]
index = np.argsort(-np.array(featValue))  # sort eigenvalues, descending
featValue_sort = featValue[index]  # sorted eigenvalues
|||
|
|||
# ---- SPE ----
|||
number_variable = featValue.shape[0]
C_ = np.eye(number_variable) - np.dot(selectVec, selectVec.T)  # residual-space projector
|||
|
|||
SPE_list = [] |
|||
for i in range(Data.shape[0]):
Y = Data[i, :]  # one test row at a time
SPE_line = np.dot(Y, C_).dot(Y.T)  # SPE: squared residual outside the PC subspace
SPE_list.append(SPE_line)
|||
|
|||
paraState = np.zeros([np.array(Data_origin).shape[0], np.array(Data_origin).shape[1]]) |
|||
finalData = np.dot(Data, selectVec).dot(selectVec.T) |
|||
reconData = np.add(np.multiply(finalData, model["Train_X_std"]), model["Train_X_mean"])  # reconstruction in engineering units
errorData = Data_origin - reconData  # residual
# R: average column-wise cosine similarity between original and reconstructed data
|||
R = 0 |
|||
res={} |
|||
for index in range(0, reconData.shape[1]): |
|||
vector1 = Data_origin[:, index] |
|||
vector2 = np.array(reconData)[:, index] |
|||
R += np.dot(vector1, vector2.T) / (np.sqrt(np.sum(vector1 ** 2)) * np.sqrt(np.sum(vector2 ** 2))) |
|||
R /= reconData.shape[1] |
|||
#items = [('reconData', np.around(reconData, decimals=3).tolist()) |
|||
# , ('errorData', np.around(errorData, decimals=3).tolist()), ('R', R.tolist()), ('SPE', np.array(SPE_list).tolist()), |
|||
# ('paraState', paraState.tolist())] |
|||
#res["sampleData"]=np.transpose(Data_origin.tolist()) |
|||
res["sampleData"]=np.transpose(np.array(Data_origin)).tolist() |
|||
res["reconData"]=np.around(np.transpose(np.array(reconData)), decimals=3).tolist() |
|||
res["errorData"]=np.around(np.transpose(np.array(errorData)), decimals=3).tolist() |
|||
res["R"]=np.around(R, decimals=3).tolist() |
|||
res["SPE"]=np.around(np.transpose(np.array(SPE_list)), decimals=3).tolist() |
|||
res["paraState"]=np.transpose(np.array(paraState)).tolist() |
|||
|
|||
#result = json.dumps(dict(items)) # json.dumps(result) |
|||
#return result |
|||
return res |
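# A hedged numeric check (assumption: toy data) of the SPE computed above: with
# C_ = I - P·Pᵀ for orthonormal loadings P, y·C_·yᵀ equals the squared norm of
# the residual left outside the principal subspace.
import numpy as np
rng = np.random.default_rng(1)
P, _ = np.linalg.qr(rng.standard_normal((5, 2)))  # orthonormal 5x2 loadings
y = rng.standard_normal(5)
C_ = np.eye(5) - P @ P.T
spe = y @ C_ @ y.T
resid = y - P @ (P.T @ y)
assert np.isclose(spe, resid @ resid)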
|||
|
|||
|
|||
def get_history_value(points,time,interval): |
|||
#url="http://192.168.1.201:8080/openPlant/getMultiplePointHistorys" |
|||
url=f"http://{config._EXA_IP}:9000/exawebapi/exatime/GetSamplingValueArrayFloat" |
|||
headers = {"Content-Type": "application/json;charset=utf-8"}#,"token":get_token() |
|||
point_array = points.split(",") |
|||
time_span = time.split(";") |
|||
value_array = [] |
|||
for item in point_array: |
|||
value_group = [] |
|||
for time_piece in time_span: |
|||
st = time_piece.split(",")[0] |
|||
et = time_piece.split(",")[1] |
|||
para = {"ItemName": item, "StartingTime": st, "TerminalTime": et, "SamplingPeriod": interval} |
|||
response = requests.get(url, headers=headers, params=para) |
|||
content = response.text.replace('"[','[').replace(']"',']') |
|||
value = json.loads(content) |
|||
if not isinstance(value, list): |
|||
print(f"unexpected non-list response for {item}: {content[:100]}") |
|||
for row in value: |
|||
value_group.append(row[1]) |
|||
value_array.append(value_group) |
|||
return np.transpose(np.array(value_array)) |
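|||
# --- Hedged note (illustrative): the exawebapi endpoint above is assumed to |
|||
# return a JSON array of [timestamp, value] pairs per request, e.g. |
|||
#   [["2021-01-13 12:52:40", 1.23], ["2021-01-13 12:57:40", 1.25], ...] |
|||
# get_history_value() keeps only the values (row[1]) for each point and then |
|||
# transposes, so the result is shaped (n_samples, n_points), the layout pca() expects. |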
|||
#return valres |
|||
|
|||
|
|||
def main(info): |
|||
model_id = info["Model_id"] |
|||
try: |
|||
version = info["version"] |
|||
except KeyError: |
|||
version = "v-test" |
|||
if version == "v-test": |
|||
res = get_model_by_ID(model_id) |
|||
else: |
|||
res = get_model_by_id_and_version(model_id, version) |
|||
Test_Data = info["Test_Data"] |
|||
points = Test_Data["points"] |
|||
time1 = Test_Data["time"] |
|||
interval = Test_Data["interval"] |
|||
model = res["para"]["Model_info"] |
|||
Data = get_history_value(points, time1, interval) |
|||
result = pca(model, Data) |
|||
index = time1.index(",") |
|||
result["time"] = time1[:index:] |
|||
return result |
|||
|
|||
# Based on the dataset data.txt |
|||
|
|||
|
|||
if __name__ == "__main__": |
|||
info_str='{"Test_Data":{"time":"2021-01-13 12:52:40,2021-01-14 12:52:40","points":"JL_D2_20SCS02A:MAG10CT311.PNT,JL_D2_20DAS01B:MAG10CE101.PNT,JL_D2_20MCS01A:MAG10AN001ZT.PNT,JL_D2_20SCS02A:MAG10CT312.PNT,JL_D2_20DAS01B:MAG10CE102.PNT,JL_D2_20MCS01A:MAG10AN002ZT.PNT,JL_D2_20SCS02A:MAG10CT313.PNT,JL_D2_20DAS01B:MAG10CE103.PNT,JL_D2_20MCS01A:MAG10AN003ZT.PNT,JL_D2_20SCS02A:MAG10CT314.PNT,JL_D2_20DAS01B:MAG10CE104.PNT,JL_D2_20MCS01A:MAG10AN004ZT.PNT,JL_D2_20SCS02A:MAG10CT315.PNT,JL_D2_20DAS01B:MAG10CE105.PNT,JL_D2_20MCS01A:MAG10AN005ZT.PNT,JL_D2_20SCS02A:MAG10CT316.PNT,JL_D2_20DAS01B:MAG10CE106.PNT,JL_D2_20MCS01A:MAG10AN006ZT.PNT,JL_D2_20SCS02A:MAG10CT317.PNT,JL_D2_20DAS01B:MAG10CE107.PNT,JL_D2_20MCS01A:MAG10AN007ZT.PNT,JL_D2_20DAS01B:MAJ10CT101.PNT,JL_D2_20DAS01B:MAJ10CT102.PNT,JL_D2_20SCS02A:MAG10CT101.PNT,JL_D2_20SCS02A:MAG10CT102.PNT,JL_D2_20DAS01B:MAG03CG101.PNT,JL_D2_20DAS01B:MAG03CS101.PNT","interval":300000},"Model_id":528,"version":"v-test"}' |
|||
info = json.loads(info_str) |
|||
print(main(info)) |
|||
# model_id=info["Model_id"] |
|||
# Test_Data = info["Test_Data"] |
|||
# points = Test_Data["points"] |
|||
# time = Test_Data["time"] |
|||
# interval = Test_Data["interval"] |
|||
# Data = get_history_value(points, time, interval) |
|||
# # workbook = xw.Workbook("pca_test.xlsx") |
|||
# # worksheet = workbook.add_worksheet() |
|||
# # for row, item in enumerate(Data.tolist()): |
|||
# # for col, cell in enumerate(item): |
|||
# # worksheet.write(row, col, cell) |
|||
# # workbook.close() |
|||
# model = PCA_Test_offline.get_model_by_ID(model_id)["para"]["Model_info"] |
|||
# result = pca(model,Data) # model parameters, training data |
|||
# aaa=json.dumps(result) |
|||
# print (result) |
|||
|
|||
|
|||
|
|||
|
|||
|
|||
@ -0,0 +1,317 @@ |
|||
# -*- coding: utf-8 -*- |
|||
""" |
|||
Created on Sun Feb 28 10:04:26 2016 |
|||
PCA source code - latest update |
|||
@author: liudiwei |
|||
""" |
|||
|
|||
|
|||
import numpy as np |
|||
import pandas as pd |
|||
from scipy.stats import norm |
|||
from scipy.stats.distributions import chi2 |
|||
import json |
|||
import sys |
|||
import pymssql |
|||
import requests |
|||
import datetime |
|||
from scipy.stats import f |
|||
import jenkspy |
|||
import xlrd |
|||
import gc |
|||
import time |
|||
import pyodbc |
|||
from recon import Lars, recon_fault_diagnosis_r, recon_fault_diagnosis_r_l, recon_fault_diagnosis_r_c |
|||
import config |
|||
|
|||
|
|||
""" |
|||
参数: |
|||
- XMat:传入的是一个numpy的矩阵格式,行表示样本数,列表示特征 |
|||
- k:表示取前k个特征值对应的特征向量 |
|||
返回值: |
|||
- finalData:参数一指的是返回的低维矩阵,对应于输入参数二 |
|||
- reconData:参数二对应的是移动坐标轴后的矩阵 |
|||
""" |
|||
|
|||
|
|||
def min_pos(X): |
|||
X[X <= 0] = np.max(X) |
|||
m = np.min(X) |
|||
re = np.where(X == m) |
|||
min_i = re[0] |
|||
min_j = re[1] |
|||
if m < 0: |
|||
m = 0 |
|||
return m, min_i, min_j |
|||
|
|||
class MSSQL: |
|||
def __init__(self,host,user,pwd,database): |
|||
self.host = host |
|||
self.user = user |
|||
self.pwd = pwd |
|||
self.db = database |
|||
|
|||
def __GetConnect(self): |
|||
""" |
|||
得到连接信息 |
|||
返回: conn.cursor() |
|||
""" |
|||
if not self.db: |
|||
raise(NameError,"没有设置数据库信息") |
|||
self.conn = pymssql.connect(host=self.host,user=self.user,password=self.pwd,database=self.db,charset="utf8") |
|||
cur = self.conn.cursor() |
|||
if not cur: |
|||
raise(NameError,"连接数据库失败") |
|||
else: |
|||
return cur |
|||
|
|||
def ExecQuery(self,sql): |
|||
""" |
|||
执行查询语句 |
|||
返回的是一个包含tuple的list,list的元素是记录行,tuple的元素是每行记录的字段 |
|||
""" |
|||
cur = self.__GetConnect() |
|||
cur.execute(sql) |
|||
resList = cur.fetchall() |
|||
|
|||
# the connection must be closed once the query completes |
|||
self.conn.close() |
|||
return resList |
|||
|
|||
def ExecNonQuery(self,sql): |
|||
""" |
|||
Execute a non-query statement (INSERT/UPDATE/DELETE). |
|||
|
|||
Usage example: |
|||
cur = self.__GetConnect() |
|||
cur.execute(sql) |
|||
self.conn.commit() |
|||
self.conn.close() |
|||
""" |
|||
cur = self.__GetConnect() |
|||
cur.execute(sql) |
|||
self.conn.commit() |
|||
self.conn.close() |
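|||
# --- Hedged usage sketch (not in the original): exercising the MSSQL helper |
|||
# above; host and credentials mirror the functions below, and the TOP 1 query |
|||
# is illustrative only. |
|||
def _demo_mssql(): |
|||
    ms = MSSQL(host=config._SQL_IP, user="sa", pwd="powerSIS#123", database="alert") |
|||
    rows = ms.ExecQuery("SELECT TOP 1 Model_info FROM [alert].[dbo].[Model_CFG]") |
|||
    print(len(rows), "row(s) fetched") |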
|||
|
|||
def get_model_by_ID(model_id): |
|||
ms = MSSQL(host=config._SQL_IP, user="sa", pwd="powerSIS#123", database="alert") |
|||
resList = ms.ExecQuery("SELECT Model_info FROM [alert].[dbo].[Model_CFG] where \"model_id\"="+str(model_id)) |
|||
#return json.loads(resList[0][0])["para"] |
|||
return json.loads(resList[0][0]) |
|||
|
|||
|
|||
def get_model_by_id(model_id): |
|||
try: |
|||
conn = pyodbc.connect( |
|||
r"DRIVER={ODBC Driver 17 for SQL Server};SERVER=%s;DATABASE=alert;UID=sa;PWD=powerSIS#123" % config._SQL_IP) # connect to the database |
|||
except pyodbc.Error: |
|||
conn = pyodbc.connect( |
|||
r"DRIVER={SQL SERVER NATIVE CLIENT 10.0};SERVER=%s;DATABASE=alert;UID=sa;PWD=powerSIS#123" % config._SQL_IP) # fall back to the native-client driver |
|||
cursor = conn.cursor() # obtain a cursor |
|||
cursor.execute(f"SELECT Model_info FROM [alert].[dbo].[Model_CFG] where model_id={model_id}") |
|||
res_list = cursor.fetchall() # fetch the query results |
|||
conn.commit() # commit |
|||
cursor.close() # close the cursor |
|||
conn.close() # close the database connection |
|||
return json.loads(res_list[0][0]) |
|||
|
|||
|
|||
def get_model_by_id_and_version(model_id, version): |
|||
try: |
|||
conn = pyodbc.connect( |
|||
r"DRIVER={ODBC Driver 17 for SQL Server};SERVER=%s;DATABASE=alert;UID=sa;PWD=powerSIS#123" % config._SQL_IP) # connect to the database |
|||
except pyodbc.Error: |
|||
conn = pyodbc.connect( |
|||
r"DRIVER={SQL SERVER NATIVE CLIENT 10.0};SERVER=%s;DATABASE=alert;UID=sa;PWD=powerSIS#123" % config._SQL_IP) # fall back to the native-client driver |
|||
cursor = conn.cursor() # obtain a cursor |
|||
cursor.execute(f"SELECT Model_info FROM [alert].[dbo].[Model_Version] where model_id={model_id} and version='{version}'") |
|||
res_list = cursor.fetchall() # fetch the query results |
|||
conn.commit() # commit |
|||
cursor.close() # close the cursor |
|||
conn.close() # close the database connection |
|||
return json.loads(res_list[0][0]) |
|||
|
|||
|
|||
def pca(model,LockVariable, Data_origin): |
|||
Data = (Data_origin - model["Train_X_mean"]) / model["Train_X_std"] |
|||
featValue = np.array(model["featValue"]) # 训练数据的特征值 |
|||
featVec = np.array(model["featVec"]) # 训练数据的特征向量 |
|||
k = (model["K"]) # 主元个数 |
|||
# selectVec = np.array(model["selectVec"]) |
|||
selectVec = featVec[:, 0:k]####自己选择的,取前k个特征向量 |
|||
# index = np.argsort(-np.array(featValue)) # 按照featValue进行从大到小排序 |
|||
featValue_sort = featValue#[index] # 排序后的特征值 |
|||
''' |
|||
featValue, featVec = np.linalg.eig(model["COV"]) # eigendecomposition of the covariance matrix |
|||
 |
|||
index = np.argsort(-featValue) # sort featValue in descending order |
|||
featValue_sort =featValue[index] # eigenvalues after sorting |
|||
selectVec = np.matrix(featVec[:,index[:int(model["K"])]]) # loading matrix P (note the column selection) |
|||
''' |
|||
############----------*********-SPE-**************----------######################## |
|||
numbel_variable = featValue.shape[0] # number of variables (= number of eigenvalues) |
|||
# LockVariable="3,5" |
|||
 |
|||
C_ = np.eye(numbel_variable) - np.dot(selectVec, selectVec.T) # residual-space projector: I minus the PC-subspace projector |
|||
X_SPE = C_.T |
|||
D_SPE = C_ |
|||
DIAG_SPE = np.eye(numbel_variable) # identity matrix of order numbel_variable |
|||
|
|||
''' |
|||
# ---- invoke LARS (kept for reference inside this disabled block) ---- |
|||
t = 50000 |
|||
lamuta = 1 |
|||
limit_line = model["QCUL_99"] |
|||
beta_path=[] |
|||
for i in range(Data.shape[0]): |
|||
Y=Data[i,:] |
|||
beta, mse=Lars(X_SPE, Y, D_SPE, DIAG_SPE, t, limit_line, lamuta) |
|||
beta_end=abs(beta[-1,:]) |
|||
jenk=jenkspy.jenks_breaks(beta_end,5) |
|||
limt=(jenk[1]+jenk[2])/2 |
|||
index=np.where(beta_end>limt)[0] |
|||
beta_path.append(beta[-1,:]) |
|||
''' |
|||
############----------*********-T2-**************----------######################## |
|||
DIAG_T2 = np.linalg.pinv(np.diag(featValue_sort[:int(model["K"])])) |
|||
D_T2 = selectVec.copy() |
|||
X_T2 = np.dot(D_T2, np.linalg.cholesky(DIAG_T2)).T |
|||
|
|||
############----------*********- combined index (FAI) -**************----------######################## |
|||
II = featValue_sort.copy() |
|||
II[:int(model["K"])] = II[:int(model["K"])] * model["T2CUL_99"] |
|||
II[int(model["K"]):] = model["QCUL_99"] |
|||
DIAG_Fai = np.linalg.pinv(np.diag(II)) |
|||
D_Fai = featVec.copy() |
|||
X_Fai = np.dot(D_Fai, np.linalg.cholesky(DIAG_Fai)).T |
|||
# ---- invoke LARS ---- |
|||
t = 50000 |
|||
lamuta = 1 |
|||
#limit_line = model["Kesi_99"]/np.sqrt(numbel_variable)#修改 |
|||
limit_line = model["Kesi_99"] |
|||
beta_path = [] |
|||
SPE_list = [] |
|||
FAI_list=[] |
|||
paraState = np.zeros([np.array(Data_origin).shape[0], np.array(Data_origin).shape[1]]) |
|||
if Data.shape[1] >= 12: |
|||
para_length = 3 |
|||
elif 12 > Data.shape[1] >= 7: |
|||
para_length = 2 |
|||
else: |
|||
para_length = 1 |
|||
Y = None |
|||
plots_matrix = [] # reconstructions produced by the contribution-plot branch |
|||
plots_index = [] # sample indices handled by the contribution-plot branch |
|||
for i in range(Data.shape[0]): |
|||
Y = Data[i, :] # one row (sample) of the test data |
|||
# ---- compute SPE ---- |
|||
SPE_line = np.dot(Y, C_).dot(Y.T) |
|||
SPE_list.append(SPE_line) |
|||
# ---- compute the combined index (FAI) ---- |
|||
FAI_list.append(np.dot(Y.T, D_Fai).dot(DIAG_Fai).dot(D_Fai.T).dot(Y)) |
|||
# ---- run LARS for fault reconstruction ---- |
|||
beta, mse = Lars(X_Fai, Y, D_Fai, DIAG_Fai, t, limit_line,LockVariable) |
|||
beta_end = abs(beta[-1, :]) |
|||
pi=len(beta_end) |
|||
if pi>7: |
|||
jenk = jenkspy.jenks_breaks(beta_end, 5) |
|||
else: |
|||
jenk = jenkspy.jenks_breaks(beta_end, 2) |
|||
limt = (jenk[1] + jenk[2]) / 2 |
|||
index = np.where(beta_end > 0)[0] |
|||
if len(index) > para_length: |
|||
# res = recon_fault_diagnosis_r_c(Y, D_Fai @ DIAG_Fai @ D_Fai.T, limit_line, list(zip(index, beta_end[index])), model, |
|||
# True, X_SPE @ X_SPE.T, rbc=None) |
|||
res = recon_fault_diagnosis_r_c(Y, D_Fai @ DIAG_Fai @ D_Fai.T, limit_line, |
|||
list(zip(index, beta_end[index])), model, |
|||
True, X_SPE @ X_SPE.T, LockVariable, selectVec, rbc=None) |
|||
if not isinstance(res[0], list): |
|||
if res[1] == "plot": |
|||
# beta[-1, :] = res[0] |
|||
plots_matrix.append(res[0]) |
|||
plots_index.append(i) |
|||
else: |
|||
beta[-1, :], index = res[0].T, res[1] |
|||
# beta[-1, :], index = res[0].T, res[1] |
|||
elif len(index) <= para_length and len(index) != 0: |
|||
res = recon_fault_diagnosis_r_l(Y, D_Fai @ DIAG_Fai @ D_Fai.T, index) |
|||
beta[-1, :], index = res[0].T, res[1] |
|||
paraState[i, index] = 1 |
|||
beta_new=beta[-1, :]*paraState[i,:] |
|||
beta_path.append(beta_new) |
|||
del Y |
|||
gc.collect() |
|||
beta_path = np.array(beta_path) |
|||
################-------------------------------------------------------------############### |
|||
#finalData = np.dot(Data - beta_path, selectVec).dot(selectVec.T) |
|||
finalData=Data - beta_path |
|||
reconData = np.add(np.multiply(finalData, model["Train_X_std"]), model["Train_X_mean"]) # reconstructed values (de-standardized) |
|||
if len(plots_matrix) != 0: |
|||
reconData[plots_index] = plots_matrix |
|||
errorData = Data_origin - reconData # residuals |
|||
# cosine-similarity check value |
|||
R = 0 |
|||
for index in range(0, reconData.shape[1]): |
|||
vector1 = Data_origin[:, index] |
|||
vector2 = np.array(reconData)[:, index] |
|||
R += np.dot(vector1, vector2.T) / (np.sqrt(np.sum(vector1 ** 2)) * np.sqrt(np.sum(vector2 ** 2))) |
|||
R /= reconData.shape[1] |
|||
items = [('reconData', reconData.tolist()) |
|||
, ('errorData', errorData.tolist()), ('R', R.tolist()), ('SPE', SPE_list),('FAI', FAI_list), |
|||
('paraState', paraState.tolist())] |
|||
result = json.dumps(dict(items)) # json.dumps(result) |
|||
return result |
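|||
# --- Hedged sketch (illustrative, not the original API): the per-sample |
|||
# combined index computed above is the quadratic form FAI = y.T @ Phi @ y with |
|||
# Phi = D_Fai @ DIAG_Fai @ D_Fai.T; a sample is flagged when FAI exceeds |
|||
# model["Kesi_99"]. A standalone version for one standardized sample y |
|||
# (featVec as an ndarray, featValue_sorted in descending order): |
|||
def _combined_index(y, featVec, featValue_sorted, k, T2CUL_99, QCUL_99): |
|||
    II = np.asarray(featValue_sorted, dtype=float).copy() |
|||
    II[:k] = II[:k] * T2CUL_99          # principal subspace, scaled by the T2 limit |
|||
    II[k:] = QCUL_99                    # residual subspace, scaled by the Q limit |
|||
    Phi = featVec @ np.linalg.pinv(np.diag(II)) @ featVec.T |
|||
    return float(y.T @ Phi @ y) |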
|||
|
|||
|
|||
def get_history_value(points,time,interval): |
|||
#url="http://192.168.1.201:8080/openPlant/getMultiplePointHistorys" |
|||
url=f"http://{config._EXA_IP}:9000/exawebapi/exatime/GetSamplingValueArrayFloat" |
|||
headers = {"Content-Type": "application/json;charset=utf-8"}#,"token":get_token() |
|||
point_array = points.split(",") |
|||
time_span = time.split(";") |
|||
value_array = [] |
|||
for item in point_array: |
|||
value_group = [] |
|||
for time_piece in time_span: |
|||
st = time_piece.split(",")[0] |
|||
et = time_piece.split(",")[1] |
|||
para = {"ItemName": item, "StartingTime": st, "TerminalTime": et, "SamplingPeriod": interval} |
|||
response = requests.get(url, headers=headers, params=para) |
|||
# parse the response as JSON instead of eval(): eval on raw service output is unsafe |
|||
content = response.text.replace('"[','[').replace(']"',']').replace("null","0") |
|||
value = json.loads(content) |
|||
for row in value: |
|||
value_group.append(row[1]) |
|||
value_array.append(value_group) # one group per point, matching the other copies of this function |
|||
return np.transpose(np.array(value_array)) |
|||
|
|||
# Based on the dataset data.txt |
|||
|
|||
|
|||
if __name__ == "__main__": |
|||
# lifan: debug entry for the model computation engine |
|||
jsonstr = '{"Model_id":764,"version":"v-2021-04-02 09:43:50","Test_Data":[[129.3936,0.8944824,152.4081,119.2822,0.4589844]],"Target_Data":[[]]}' |
|||
jsonstr = json.loads(jsonstr) |
|||
model_id = jsonstr["Model_id"] |
|||
version = jsonstr["version"] |
|||
res = get_model_by_id_and_version(model_id, version) |
|||
filename = res["algorithm"] |
|||
Data = jsonstr["Test_Data"] |
|||
if filename == "PCA": |
|||
model = res["para"]["Model_info"] |
|||
lock = [] |
|||
point_info = res["pointInfo"] |
|||
for i in range(len(point_info)): |
|||
try: |
|||
if point_info[i]["lock"]: |
|||
lock.append(i) |
|||
except KeyError: |
|||
continue |
|||
result = pca(model, lock, np.array(Data)) |
|||
print(result) |
|||
|
|||
|
|||
|
|||
|
|||
|
|||
@ -0,0 +1,304 @@ |
|||
# -*- coding: utf-8 -*- |
|||
""" |
|||
Created on Sun Feb 28 10:04:26 2016 |
|||
PCA source code |
|||
@author: liudiwei |
|||
""" |
|||
|
|||
import numpy as np |
|||
import traceback |
|||
import pandas as pd |
|||
from json import JSONDecodeError |
|||
#import matplotlib.pyplot as plt |
|||
from scipy.stats import norm |
|||
from scipy.stats import f |
|||
from scipy.stats.distributions import chi2 |
|||
import json |
|||
import sys |
|||
import requests |
|||
import datetime |
|||
import jenkspy |
|||
import xlrd |
|||
from smote import smote |
|||
import config |
|||
""" |
|||
参数: |
|||
- XMat:传入的是一个numpy的矩阵格式,行表示样本数,列表示特征 |
|||
- k:表示取前k个特征值对应的特征向量 |
|||
返回值: |
|||
""" |
|||
|
|||
|
|||
def get_history_value(points,time,interval): |
|||
#url="http://192.168.1.201:8080/openPlant/getMultiplePointHistorys" |
|||
url=f"http://{config._EXA_IP}:9000/exawebapi/exatime/GetSamplingValueArrayFloat" |
|||
headers = {"Content-Type": "application/json;charset=utf-8"}#,"token":get_token() |
|||
point_array = points.split(",") |
|||
time_span = time.split(";") |
|||
value_array = [] |
|||
for item in point_array: |
|||
value_group = [] |
|||
for time_piece in time_span: |
|||
st = time_piece.split(",")[0] |
|||
et = time_piece.split(",")[1] |
|||
para = {"ItemName": item, "StartingTime": st, "TerminalTime": et, "SamplingPeriod": interval} |
|||
response = requests.get(url, headers=headers, params=para) |
|||
content = response.text.replace('"[','[').replace(']"',']') |
|||
value = json.loads(content) |
|||
for row in value: |
|||
value_group.append(row[1]) |
|||
value_array.append(value_group) |
|||
return np.transpose(np.array(value_array)) |
|||
|
|||
|
|||
def pca(XMat, p): |
|||
m = np.array(XMat).shape[1] # number of features (matrix columns); overwritten by np.shape below |
|||
average = np.mean(XMat, axis=0) # column means |
|||
std = np.std(XMat, axis=0) # column standard deviations |
|||
m, n = np.shape(XMat) # m rows (samples), n columns (features) |
|||
avgs = np.tile(average, (m, 1)) # tile the means into an m-row matrix |
|||
stds = np.tile(std, (m, 1)) # tile the stds into an m-row matrix |
|||
data_adjust = np.divide(XMat - avgs, stds) # standardize: subtract column means, divide by column stds |
|||
covX = np.cov(data_adjust.T) # covariance matrix (symmetric) |
|||
# corr=np.corrcoef(data_adjust.T) |
|||
featValue, featVec = np.linalg.eig(covX) # eigenvalues and eigenvectors of the covariance matrix |
|||
# NOTE: a constant column has zero variance, so standardization divides by zero; avoid identical columns in the training data |
|||
featValue=np.real(featValue) # keep the real part |
|||
featVec=np.real(featVec) # keep the real part |
|||
index = np.argsort(-featValue) # indices that sort featValue in descending order |
|||
featValue=featValue[index] # reorder eigenvalues |
|||
featVec = featVec[:,index] |
|||
featValue_sum = np.divide(featValue, np.sum(featValue)) # explained-variance ratios |
|||
per = 0 # cumulative explained-variance percentage |
|||
k = 0 # number of principal components |
|||
for precent in featValue_sum: |
|||
per += precent |
|||
k = k + 1 |
|||
if per > p: |
|||
break |
|||
# k is the smallest component count whose cumulative ratio exceeds p |
|||
finalData = [] |
|||
if k > n: # k must not exceed the feature count |
|||
print("k must be lower than the feature number") |
|||
return |
|||
else: |
|||
# note: eigenvectors are column vectors; in a numpy 2-D array a[m][n], a[1] is row 1 |
|||
selectVec = np.matrix(featVec[:, :k]) # loading matrix P: the first k eigenvectors as columns |
|||
finalData = np.dot(data_adjust, selectVec).dot(selectVec.T) # project onto the PC subspace and back |
|||
reconData = np.add(np.multiply(finalData, stds), avgs) # reconstructed values: de-standardize |
|||
Train_X_min = np.min(XMat, axis=0) # training-data minimum |
|||
Train_X_max = np.max(XMat, axis=0) # training-data maximum |
|||
Train_X_mean = np.mean(XMat, axis=0) # training-data mean |
|||
Train_X_std = np.std(XMat, axis=0) # training-data standard deviation |
|||
Train_X_bais = XMat - reconData # training residuals |
|||
Train_X_bais_max = np.max(np.abs(Train_X_bais), axis=0) # max abs residual per column |
|||
Train_X_bais_min = np.min(np.abs(Train_X_bais), axis=0) # min abs residual per column |
|||
Train_X_bais_mean = np.mean(np.abs(Train_X_bais), axis=0) # mean abs residual per column |
|||
Train_X_bais_std_upperB95 = np.array(np.abs(1.96 * np.std(Train_X_bais, axis=0) + Train_X_bais_mean))[ |
|||
0] # 95% upper residual band (mean + 1.96*std) |
|||
Train_X_bais_std_upperB99 = np.array(np.abs(2.58* np.std(Train_X_bais, axis=0) + Train_X_bais_mean))[0] # 99% upper residual band |
|||
Train_X_bais_std_lowerB95 = np.array(np.abs(1.96 * np.std(Train_X_bais, axis=0) - Train_X_bais_mean))[ |
|||
0] # 95% lower residual band |
|||
Train_X_bais_std_lowerB99 = np.array(np.abs(2.58 * np.std(Train_X_bais, axis=0) - Train_X_bais_mean))[0] # 99% lower residual band |
|||
QCUL_95_line = [] # per-variable limit lines |
|||
QCUL_99_line = [] |
|||
for index1 in range(len(Train_X_bais_std_upperB95)): |
|||
QCUL_95_line.append(max(Train_X_bais_std_upperB95[index1], Train_X_bais_std_lowerB95[index1])) |
|||
QCUL_99_line.append(max(Train_X_bais_std_upperB99[index1], Train_X_bais_std_lowerB99[index1])) |
|||
QCUL_95_line = np.array(QCUL_95_line) |
|||
QCUL_99_line = np.array(QCUL_99_line) |
|||
################################################################################# |
|||
# ---- control limit: QUCL (Q / SPE statistic, Jackson-Mudholkar) ---- |
|||
theta1 = np.sum(featValue[k:]) |
|||
theta2 = np.sum(np.power(featValue[k:], 2)) |
|||
theta3 = np.sum(np.power(featValue[k:], 3)) |
|||
h0 = 1 - 2 * theta1 * theta3 / (3 * np.power(theta2, 2)) |
|||
ca_95 = norm.ppf(0.95, loc=0, scale=1) |
|||
QCUL_95 = theta1 * np.power( |
|||
h0 * ca_95 * np.sqrt(2 * theta2) / theta1 + 1 + theta2 * h0 * (h0 - 1) / np.power(theta1, 2), |
|||
1 / h0) # 95% confidence limit |
|||
# QCUL_95_line = Train_X_bais_std*2.58 # +Train_X_mean # de-normalized limit (disabled) |
|||
ca_99 = norm.ppf(0.99, loc=0, scale=1) |
|||
QCUL_99 = theta1 * np.power( |
|||
(h0 * ca_99 * np.sqrt(2 * theta2) / theta1 + 1 + theta2 * h0 * (h0 - 1) / np.power(theta1, 2)), |
|||
1 / h0) # 99% confidence limit |
|||
# QCUL_99_line = Train_X_bais_std*1.96 # + Train_X_mean # de-normalized limit (disabled) |
|||
|
|||
# ---- control limit: T2UCL (Hotelling T2) ---- |
|||
f_95 = f.ppf(0.95, k, m - k) |
|||
T2CUL_95 = k * (m - 1) * (m + 1) * f_95 / (m * (m - k)) # 95% confidence limit |
|||
T2CUL_95_line = np.sqrt(T2CUL_95) * Train_X_std / np.sqrt(m) # de-normalized limit line |
|||
f_99 = f.ppf(0.99, k, m - k) |
|||
T2CUL_99 = k * (m - 1) * (m + 1) * f_99 / (m * (m - k)) # 99% confidence limit |
|||
T2CUL_99_line = np.sqrt(T2CUL_99) * Train_X_std / np.sqrt(m) # de-normalized limit line |
|||
|
|||
# ---- control limit: combined index ---- |
|||
gfi_95 = (k / pow(T2CUL_95, 2) + theta2 / pow(QCUL_95, 2)) / (k / T2CUL_95 + theta1 / QCUL_95) |
|||
hfi_95 = pow((k / T2CUL_95 + theta1 / QCUL_95), 2) / (k / pow(T2CUL_95, 2) + theta2 / pow(QCUL_95, 2)) |
|||
Kesi_95 = gfi_95 * chi2.ppf(0.95, hfi_95) # chi-square quantile |
|||
Kesi_95_line = np.sqrt(Kesi_95) * Train_X_std / np.sqrt(m) # de-normalized limit line |
|||
 |
|||
gfi_99 = (k / pow(T2CUL_99, 2) + theta2 / pow(QCUL_99, 2)) / (k / T2CUL_99 + theta1 / QCUL_99) |
|||
hfi_99 = pow((k / T2CUL_99 + theta1 / QCUL_99), 2) / (k / pow(T2CUL_99, 2) + theta2 / pow(QCUL_99, 2)) |
|||
Kesi_99 = gfi_99 * chi2.ppf(0.99, hfi_99) # chi-square quantile |
|||
Kesi_99_line = np.sqrt(Kesi_99) * Train_X_std / np.sqrt(m) # de-normalized limit line |
|||
|
|||
# cosine check value |
|||
R = per # use the cumulative explained-variance ratio as the fit measure |
|||
#for index in range(0, reconData.shape[1]): |
|||
#vector1 = XMat[:, index] |
|||
#vector2 = np.array(reconData)[:, index] |
|||
#R += np.dot(vector1, vector2.T) / (np.sqrt(np.sum(vector1 ** 2)) * np.sqrt(np.sum(vector2 ** 2))) |
|||
#sR /= reconData.shape[1] |
|||
|
|||
##################################################################################################################### |
|||
items = [('Train_X_min', np.around(Train_X_min, decimals=3).tolist()), |
|||
('Train_X_max', np.around(Train_X_max, decimals=3).tolist()), |
|||
('Train_X_std', np.around(Train_X_std, decimals=3).tolist()), |
|||
('Train_X_mean',np.around(Train_X_mean, decimals=3).tolist()), |
|||
('Train_X_bais_max',np.around(Train_X_bais_max, decimals=3).tolist()), |
|||
('Train_X_bais_min', np.around(Train_X_bais_min, decimals=3).tolist()), |
|||
('Train_X_bais_mean',np.around(Train_X_bais_mean, decimals=3).tolist()), |
|||
('QCUL_95',np.around(QCUL_95, decimals=10).tolist()), |
|||
('QCUL_99', np.around(QCUL_99, decimals=10).tolist()), |
|||
('QCUL_95_line',np.around(QCUL_95_line, decimals=3).tolist()), |
|||
('QCUL_99_line',np.around(QCUL_99_line, decimals=3).tolist()), |
|||
('T2CUL_95', np.around(T2CUL_95, decimals=3).tolist()), |
|||
('T2CUL_99', np.around(T2CUL_99, decimals=3).tolist()), |
|||
('T2CUL_95_line', np.around(T2CUL_95_line, decimals=3).tolist()), |
|||
('T2CUL_99_line', np.around(T2CUL_99_line, decimals=3).tolist()), |
|||
('Kesi_95', np.around(Kesi_95, decimals=3).tolist()), |
|||
('Kesi_99', np.around(Kesi_99, decimals=3).tolist()), |
|||
('Kesi_95_line', np.around(Kesi_95_line, decimals=3).tolist()), |
|||
('Kesi_99_line', np.around(Kesi_99_line, decimals=3).tolist()), |
|||
('COV', np.around(covX, decimals=3).tolist()), |
|||
('K', k), |
|||
('R', np.around(R,decimals=3).tolist()), |
|||
("featValue",np.around(featValue, decimals=3).tolist()), |
|||
("featVec", np.around(featVec, decimals=3).tolist()), |
|||
("selectVec", np.around(selectVec, decimals=3).tolist())] |
|||
# model_info=json.dumps(dict(items)) |
|||
res_items = [('Model_info', dict(items)), ('Model_type', 'PCA')] |
|||
result = dict(res_items) # json.dumps(result) |
|||
return json.dumps(result) |
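|||
# --- Hedged sketch (illustrative): the Jackson-Mudholkar Q (SPE) control limit |
|||
# computed above, as a standalone function. `residual_eigvals` is a numpy array |
|||
# of the eigenvalues the model discards (featValue[k:]); alpha is e.g. 0.99. |
|||
def _q_control_limit(residual_eigvals, alpha=0.99): |
|||
    theta1 = np.sum(residual_eigvals) |
|||
    theta2 = np.sum(residual_eigvals ** 2) |
|||
    theta3 = np.sum(residual_eigvals ** 3) |
|||
    h0 = 1 - 2 * theta1 * theta3 / (3 * theta2 ** 2) |
|||
    ca = norm.ppf(alpha)   # standard-normal quantile |
|||
    return theta1 * (h0 * ca * np.sqrt(2 * theta2) / theta1 |
|||
                     + 1 + theta2 * h0 * (h0 - 1) / theta1 ** 2) ** (1 / h0) |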
|||
|
|||
|
|||
def main(info): |
|||
Train_Data = info["Train_Data"] |
|||
points = Train_Data["points"] |
|||
time = Train_Data["time"] |
|||
interval = Train_Data["interval"] |
|||
Hyper_para = info["Hyper_para"] |
|||
percent = Hyper_para["percent"] |
|||
XMat = get_history_value(points, time, interval) |
|||
result = pca(XMat, percent) # training data, cumulative explained-variance target |
|||
result = result.replace("NaN", "-1") # replace NaN so the JSON stays parseable |
|||
return result |
|||
|
|||
def isnumber(limits): |
|||
flag=True |
|||
for item in limits: |
|||
item=item.replace("-","") |
|||
if(item.isdigit()==False): |
|||
flag=False |
|||
break |
|||
return flag |
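|||
# --- Hedged usage note (not in the original): with the decimal-point handling |
|||
# above, isnumber(["-12.5", "30"]) is True while isnumber(["abc", "1"]) is False. |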
|||
|
|||
|
|||
def clearmain(info): |
|||
try: |
|||
Train_Data = info["Train_Data"] |
|||
condition=info["conditon"].replace("=","==").replace(">=",">").replace("<=","<") |
|||
times = Train_Data["time"].split(';') |
|||
points = Train_Data["points"].split(',') |
|||
interval = Train_Data["interval"] |
|||
if interval == 10000: |
|||
DCount = 60 |
|||
elif interval == 100000: |
|||
DCount = 6 |
|||
elif interval == 300000: |
|||
DCount = 5 |
|||
else: |
|||
DCount = 4 |
|||
dead = Train_Data["dead"].split(',') |
|||
limit = Train_Data["limit"].split(',') |
|||
uplower = Train_Data["uplow"].split(';') |
|||
percent = info["Hyper_para"]["percent"] |
|||
count=0 |
|||
ItemsInfo, SamplingTimePeriods = [], [] |
|||
Constraint = "" |
|||
for i in range(len(points)): |
|||
iteminfo = {} |
|||
iteminfo["ItemName"] = points[i] # add the point |
|||
if (dead[i] == "1"): # whether this point takes part in dead-zone cleaning |
|||
iteminfo["ClearDeadZone"] = "true" |
|||
else: |
|||
iteminfo["ClearDeadZone"] = "false" |
|||
if (limit[i] == "1"): # takes part in upper/lower-limit cleaning |
|||
limits = uplower[i].split(',') |
|||
if (isnumber(limits) == True): # the given limits are valid numbers |
|||
count += 1 |
|||
Constraint += "[" + points[i] + "]>" + limits[0] + " and " + "[" + points[i] + "]<" + limits[1] + " and " |
|||
ItemsInfo.append(iteminfo) |
|||
if(count!=0): |
|||
Constraint = Constraint[:len(Constraint) - 4] # strip the trailing "and " |
|||
else: |
|||
Constraint="1==1" # no upper/lower-limit cleaning configured |
|||
Constraint+=" and ("+condition+")" |
|||
for i in range(len(times)): |
|||
Eachsampletime = {} |
|||
timess = times[i].split(',') |
|||
Eachsampletime["StartingTime"] = timess[0] |
|||
Eachsampletime["TerminalTime"] = timess[1] |
|||
SamplingTimePeriods.append(Eachsampletime) |
|||
Constraint = Constraint.replace("\n", " ") |
|||
url = f"http://{config._CLEAN_IP}/exawebapi/exatime/GetCleaningData?ItemsInfo=%s&SamplingTimePeriods=%s&Constraint=%s&SamplingPeriod=%s&DCount=%d" % ( |
|||
ItemsInfo, SamplingTimePeriods, Constraint, interval, DCount) |
|||
response = requests.get(url) |
|||
content = json.loads(response.text) |
|||
data = np.array([item for item in content["ClearData"]]).T |
|||
try: |
|||
smote_data = info["smote"] |
|||
# smote_data = False |
|||
except KeyError: |
|||
smote_data = False |
|||
if smote_data: |
|||
try: |
|||
smote_index = [points.index(item["pointId"]) for item in info["smote_config"] if item["LAY_CHECKED"]] |
|||
smote_num = [int(item["number"]) for item in info["smote_config"] if item["LAY_CHECKED"]] |
|||
max_value = [float(item["max"]) for item in info["smote_config"] if item["LAY_CHECKED"]] |
|||
min_value = [float(item["min"]) for item in info["smote_config"] if item["LAY_CHECKED"]] |
|||
except KeyError: |
|||
pass |
|||
else: |
|||
if len(smote_num) != 0: |
|||
data, *_ = smote(data, smote_index, smote_num, max_value, min_value) |
|||
result = pca(data, percent) |
|||
result = result.replace("NaN", "-1") |
|||
result=json.loads(result) |
|||
result["BeforeCleanSamNum"]=content["BeforeCleanSamNum"] |
|||
result["AfterCleanSamNum"]=content["AfterCleanSamNum"] |
|||
result["CleanOrNot"] = True |
|||
return json.dumps(result) |
|||
except Exception as e: |
|||
result = [{"CleanOrNot": False, "msg": traceback.format_exc()}] |
|||
return json.dumps(result, ensure_ascii=False) |
|||
|
|||
|
|||
if __name__ == "__main__": |
|||
info_str = r'{"Train_Data":{"time":"2020-08-02 00:40:00,2020-08-02 07:36:03;2020-08-05 15:20:43,2020-08-05 18:45:46","points":"JL_D2_20DAS05A:LAV10CE101.PNT,JL_D2_20DAS05A:LAC10CE101.PNT,JL_D2_20DAS11A:HAG41CE101.PNT","interval":300000,"dead":"1,1,1","limit":"0,0,0","uplow":"null,null;null,null;null,null"},"Hyper_para":{"percent":0.94375},"type":"PCA","conditon":"1=1","smote_config":[],"smote":true,"target_point":null}' |
|||
info = json.loads(info_str) |
|||
res = json.loads(clearmain(info)) |
|||
print("aaa") |
|||
|
|||
|
|||
|
|||
|
|||
|
|||
|
|||
|
|||
|
|||
|
|||
|
|||
|
|||