JustDreamer7 committed 2 years ago
Parent
Commit b11471da61

+ 12 - 5
UI_report_prisma.py

@@ -5,8 +5,10 @@ from PyQt6 import QtCore, QtWidgets
 from interfaces.clientui import Ui_MainWindow
 from interfaces.drctryChoice import Ui_drctryChoice
 from interfaces.takeFiles import Ui_takeFiles
-from make_report_prisma import make_report_prisma
+from make_report_prisma import make_report_prisma, preparing_data
 from exceptions import DateError
+from file_reader.db_file_reader import DbFileReader
+
 
 class UIReportPrisma32(QtWidgets.QMainWindow, Ui_MainWindow):
     def __init__(self):
@@ -60,7 +62,6 @@ class UIReportPrisma32(QtWidgets.QMainWindow, Ui_MainWindow):
             ui_report_drctry.lineEdit.setText("")
         self.widget.show()
 
-
     def report_on_click(self):
         """Method that builds the report ('passport') from the data entered in the UI"""
         start_date = self.dateEdit.date().toPyDate()
@@ -82,15 +83,21 @@ class UIReportPrisma32(QtWidgets.QMainWindow, Ui_MainWindow):
                     print(f"Создать директорию {picture_path} не удалось")
                 else:
                     print(f"Успешно создана директория {picture_path}")
-            make_report_prisma(start_date=start_date, end_date=end_date, report_path=report_path, picture_path=picture_path,
-                               path_to_files_1=path_to_files_1, path_to_files_2 = path_to_files_2)
+            # concat_n_df_1, concat_n_df_2 = DbFileReader.db_preparing_data(start_date=start_date,
+            #                                                               end_date=end_date,
+            #                                                               path_to_db="mongodb://localhost:27017/")
+            concat_n_df_1, concat_n_df_2 = preparing_data(start_date=start_date,
+                                                          end_date=end_date,
+                                                          path_to_files_1=path_to_files_1,
+                                                          path_to_files_2=path_to_files_2)
+            make_report_prisma(start_date=start_date, end_date=end_date, report_path=report_path,
+                               picture_path=picture_path, concat_n_df_1=concat_n_df_1, concat_n_df_2=concat_n_df_2)
         except PermissionError:
             print("Закройте предыдущую версию файла!")
         except DateError:
             DateError(start_date, end_date).ui_output_error()
 
 
-
 # launch the main window
 app = QtWidgets.QApplication([])
 window = UIReportPrisma32()

+ 0 - 0
file_reader/__init__.py


+ 73 - 0
file_reader/db_file_reader.py

@@ -0,0 +1,73 @@
+import datetime
+from collections import defaultdict
+from file_reader.file_reader import FileReader
+
+import pandas as pd
+import pymongo
+
+
+class DbFileReader(FileReader):
+    # __DB_URL = "mongodb://localhost:27017/"
+    __amp_n_cols = []
+    for i in range(1, 17):
+        __amp_n_cols.append(f'amp{i}')
+        __amp_n_cols.append(f'n{i}')
+
+    def __init__(self, cluster, single_date, db_url):
+        self.cluster = cluster
+        self.single_date = single_date
+        self.__db_url = db_url
+
+    def reading_db(self) -> pd.DataFrame:
+        """Reads the PRISMA-32 NoSQL database for this cluster and day using the DB URL"""
+
+        data_cl = pd.DataFrame.from_records(
+            pymongo.MongoClient(self.__db_url)["prisma-32_db"][f'{str(self.single_date.date())}_12d'].find(
+                {'cluster': self.cluster}))
+        # print(self.single_date)
+        # print(data_cl)
+        if data_cl.empty:
+            raise FileNotFoundError
+        amp_dict = defaultdict(list)
+        n_dict = defaultdict(list)
+        for item in data_cl['detectors']:
+            for j in [f'det_{i:02}' for i in range(1, 17)]:
+                amp_dict[j].append(item[j]['amplitude'])
+                n_dict[j].append(item[j]['neutrons'])
+
+        for i in range(1, 17):
+            data_cl[f'amp{i}'] = amp_dict[f'det_{i:02}']
+            data_cl[f'n{i}'] = n_dict[f'det_{i:02}']
+        data_cl['time'] = [round(item / 1e9, 2) for item in data_cl['time_ns']]
+        data_cl['Date'] = [datetime.date(int(item[0:4]), int(item[5:7]), int(item[8:10])) for item in data_cl['_id']]
+
+        return data_cl
+
+    def concat_n_data(self, concat_n_df):
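+        """Appends this day's Date/time/trigger and amp/n columns to the running per-cluster DataFrame"""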
+        data_cl = self.reading_db()
+        # noinspection PyUnresolvedReferences
+        concat_n_df = pd.concat([concat_n_df, data_cl[['Date', 'time', 'trigger'] + DbFileReader.__amp_n_cols]],
+                                ignore_index=True)
+        return concat_n_df
+
+    @staticmethod
+    def db_preparing_data(start_date, end_date, path_to_db):
+        concat_n_df_1 = pd.DataFrame(columns=['Date', 'time', 'trigger'] + DbFileReader.__amp_n_cols)
+        concat_n_df_2 = pd.DataFrame(columns=['Date', 'time', 'trigger'] + DbFileReader.__amp_n_cols)
+        for single_date in pd.date_range(start_date, end_date):
+            try:
+                db_file_reader_1 = DbFileReader(cluster=1, single_date=single_date, db_url=path_to_db)
+                concat_n_df_1 = db_file_reader_1.concat_n_data(concat_n_df=concat_n_df_1)
+            except FileNotFoundError:
+                print(
+                    f"File n_{single_date.month:02}-" +
+                    f"{single_date.day:02}.{single_date.year - 2000:02} does not exist")
+            try:
+                db_file_reader_1 = DbFileReader(cluster=2, single_date=single_date, db_url=path_to_db)
+                concat_n_df_2 = db_file_reader_1.concat_n_data(concat_n_df=concat_n_df_2)
+            except FileNotFoundError:
+                print(
+                    f"File 2n_{single_date.month:02}-" +
+                    f"{single_date.day:02}.{single_date.year - 2000:02} does not exist")
+
+        return concat_n_df_1, concat_n_df_2
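
A hedged usage sketch of the new reader (not part of the commit): the Mongo URL and the dates below are placeholders, and the prisma-32_db collections (one per day, named like 2021-01-01_12d) are assumed to already hold the documents reading_db expects (_id, time_ns, trigger, and detectors['det_01'..'det_16'] with 'amplitude' and 'neutrons').

# Usage sketch; URL and date range are placeholder values.
import datetime

from file_reader.db_file_reader import DbFileReader

# Days whose collection has no documents for the cluster are skipped:
# reading_db raises FileNotFoundError, which db_preparing_data catches.
concat_n_df_1, concat_n_df_2 = DbFileReader.db_preparing_data(
    start_date=datetime.date(2021, 1, 1),
    end_date=datetime.date(2021, 1, 7),
    path_to_db="mongodb://localhost:27017/")
print(concat_n_df_1[['Date', 'time', 'trigger']].head())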

+ 156 - 0
file_reader/file_reader.py

@@ -0,0 +1,156 @@
+import datetime
+import pandas as pd
+
+
+class FileReader:
+    __amp_n_cols = []
+    for i in range(1, 17):
+        __amp_n_cols.append(f'amp{i}')
+        __amp_n_cols.append(f'n{i}')
+
+    def __init__(self, cluster, single_date, path_to_files=''):
+        self.cluster = cluster
+        if cluster == 1:
+            self.cluster_n = ''
+        else:
+            self.cluster_n = '2'
+        self.path_to_files = path_to_files
+        self.single_date = single_date
+        self.n_file_path = self.making_file_path(file_type='n')
+        self.n7_file_path = self.making_file_path(file_type='n7')
+        self.t_file_path = self.making_file_path(file_type='t')
+        self.p_file_path = self.making_file_path_eas_p(file_directory='nv', file_type='p')
+        self.eas_file_path = self.making_file_path_eas_p(file_directory='EAS', file_type='eas')
+
+    def __del__(self):
+        pass
+
+    def making_file_path(self, file_type):
+        file_path = f'{self.path_to_files}\\{file_type}\\{self.cluster_n}{file_type}_{self.single_date.month:02}-{self.single_date.day:02}.{self.single_date.year - 2000:02}'
+        return file_path
+
+    def making_file_path_eas_p(self, file_directory, file_type):
+        file_path = f'{self.path_to_files}\\{file_directory}\\{self.cluster}{file_type}_{self.single_date.month:02}-{self.single_date.day:02}.{self.single_date.year - 2000:02}'
+        return file_path
+
+    def reading_n_file(self):
+        """Method that reads an n-file and returns the day's DataFrame (plus any post-midnight tail),
+        or raises FileNotFoundError if the file does not exist"""
+        n_file = pd.read_csv(self.n_file_path,
+                             sep=r'\s[-]*\s*', header=None, skipinitialspace=True, index_col=False, engine='python')
+        n_file.dropna(axis=1, how='all', inplace=True)
+        n_file.columns = ['time', 'number', 'sum_n', 'trigger'] + FileReader.__amp_n_cols
+        time_difference = n_file['time'].diff()
+        bad_end_time_index = time_difference[time_difference < -10000].index
+        if any(bad_end_time_index):
+            n_file_today = n_file[n_file.index < bad_end_time_index[0]]
+            n_file_day_after = n_file[n_file.index >= bad_end_time_index[0]]
+            return n_file_today, n_file_day_after
+        return n_file, []
+
+    def reading_n7_file(self):
+        n7_file = pd.read_csv(self.n7_file_path,
+                              sep=r'\s[-]*\s*', header=None, skipinitialspace=True, index_col=False, engine='python')
+        n7_file.dropna(axis=1, how='all', inplace=True)
+        for i in range(len(n7_file[0])):
+            if type(n7_file[0][i]) is str:
+                n7_file.loc[i, 0] = float('.'.join(n7_file.loc[i, 0].split(',')))
+        time_difference = n7_file[0].diff()
+        bad_end_time_index = time_difference[time_difference < -10000].index
+        if any(bad_end_time_index):
+            n7_file_today = n7_file[n7_file.index < bad_end_time_index[0]]
+            n7_file_day_after = n7_file[n7_file.index >= bad_end_time_index[0]]
+            return n7_file_today, n7_file_day_after
+        return n7_file, []
+
+    @staticmethod
+    def concat_data(file_today, file_day_after, single_date, concat_n_df):
+        file_today['Date'] = [single_date.date()] * len(file_today.index)
+        concat_n_df = pd.concat([concat_n_df, file_today], ignore_index=True)
+        if any(file_day_after):
+            file_day_after['Date'] = [(single_date + datetime.timedelta(
+                days=1)).date()] * len(file_day_after.index)
+            concat_n_df = pd.concat([concat_n_df, file_day_after],
+                                    ignore_index=True)
+        return concat_n_df
+
+    def reading_t_file(self):
+        """Converter for PRISMA t-files"""
+        with open(self.t_file_path) as f:
+            raw_data = f.readlines()
+        raw_data = [line.rstrip() for line in raw_data]
+        # strip the trailing newlines
+        event_list = []
+        main_list = []
+        sep = 0
+        for i in range(len(raw_data)):
+            if raw_data[i] == '*#*':
+                main_list.append(raw_data[sep].split(' '))
+                event_list.append(raw_data[sep + 1:i])
+                sep = i + 1
+        unit_delay = []
+        for item in event_list:
+            delay_per_event = []
+            for line in item:
+                step = line.split(' ')
+                for i in range(1, 17):
+                    if int(step[i]) != 0:
+                        delay_per_event.append([round(int(step[0]) * (10 ** (-4)), 4), i, int(step[i])])
+            unit_delay.append(delay_per_event)
+        plural_data_list = []
+        for i in unit_delay:
+            time_list = []
+            detector_list = []
+            neut_quantity_list = []
+            for j in i:
+                time_list.append(j[0])
+                detector_list.append(j[1])
+                neut_quantity_list.append(j[2])
+            plural_data_list.append([time_list, detector_list, neut_quantity_list])
+        for i in range(len(main_list)):
+            main_list[i].extend(plural_data_list[i])
+        t_file_df = pd.DataFrame(main_list,
+                                 columns=['time', 'number', 'sum_n', 'trigger', 'time_delay', 'detectors',
+                                          'n_per_step'])
+        t_file_df = t_file_df.astype({"time": float, "number": int, "sum_n": int, "trigger": int})
+        return t_file_df
+
+    def reading_p_file(self):
+        """Method that reads a p-file and returns the day's DataFrame, or raises FileNotFoundError
+        if the file does not exist"""
+        p_file = pd.read_csv(self.p_file_path,
+                             sep=r'\s[-]*\s*', header=None, skipinitialspace=True, engine='python')
+        p_file.dropna(axis=1, how='all', inplace=True)
+        corr_p_file = self.correcting_p_file(p_file)
+        return corr_p_file
+
+    @staticmethod
+    def correcting_p_file(p_file):
+        """Method that corrects old PRISMA-32 p-files and returns the corrected DataFrame.
+        This workaround is needed for old p-files (before 2014-15), in which the row indices,
+        which effectively mark the 5 minutes of real time between runs, may repeat."""
+        p_file['time'] = p_file[0]
+        del p_file[0]
+        p_file = p_file.sort_values(by='time')
+        if len(p_file['time']) > len(p_file['time'].unique()):
+            p_file.drop_duplicates(keep=False, inplace=True)
+            """After dropping exact duplicates we look for repeated indices. First remove the rows that
+            consist entirely of zeros and dots (value = len(p_file.columns)), then build the set of
+            duplicated indices and the set of rows almost entirely (value > 30) made of zeros and dots.
+            The intersection of these two sets gives the rows to drop"""
+            null_row = dict(p_file.isin([0, '.']).sum(axis=1))  # count zeros and dots per row
+            all_null_index = list(
+                {key: value for key, value in null_row.items() if value == len(p_file.columns)}.keys())
+            p_file.drop(index=all_null_index, inplace=True)
+
+            null_index = list(
+                {key: value for key, value in null_row.items() if value > len(p_file.columns) - 5}.keys())
+            same_index = dict(p_file['time'].duplicated(keep=False))
+            same_index_row = list({key: value for key, value in same_index.items() if value is True}.keys())
+            bad_index = list(set(null_index) & set(same_index_row))
+            p_file.drop(index=bad_index, inplace=True)
+            """It can also happen that more rows than needed survive the filtering, since old p-files
+            may contain more indices than there are 5-minute runs in a day. In that case keep only
+            the first 288"""
+            if len(p_file.index) > 288:
+                p_file = p_file.head(288)
+        return p_file
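
A quick sketch of the path scheme the new reader derives (the base directory is a placeholder, not from the commit): n-, n7- and t-files are looked up in subdirectories named after the file type, p- and EAS-files under nv and EAS, and for cluster 2 the file prefix gains a '2'.

# Path-construction sketch; the base directory is a placeholder.
import datetime

from file_reader.file_reader import FileReader

reader = FileReader(cluster=2,
                    single_date=datetime.datetime(2021, 3, 5),
                    path_to_files=r'D:\PRISMA\data')
print(reader.n_file_path)  # D:\PRISMA\data\n\2n_03-05.21
print(reader.p_file_path)  # D:\PRISMA\data\nv\2p_03-05.21
# reading_n_file() / reading_p_file() raise FileNotFoundError when the
# day's file is missing; the calling loops catch and report it.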

+ 78 - 35
make_report_prisma.py

@@ -1,3 +1,4 @@
+import datetime
 import time
 import warnings
 import pandas as pd
@@ -5,42 +6,78 @@ from docx import Document
 from docx.enum.text import WD_BREAK
 
 from drawing_graphs import GraphsDrawing
-from processing_data_prisma import ProccessingPrismaCl
+from file_reader.file_reader import FileReader
+from processing_data_prisma_ver_2 import ProccessingPrismaCl
 from word_addition import *
 
-def make_report_prisma(start_date, end_date, path_to_files_1, path_to_files_2, report_path, picture_path):
+amp_n_cols = []
+for item in range(1, 17):
+    amp_n_cols.append(f'amp{item}')
+    amp_n_cols.append(f'n{item}')
+
+
+def preparing_data(start_date, end_date, path_to_files_1, path_to_files_2):
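+    """Reads the n-files of both clusters over the period into two concatenated DataFrames,
+    starting one day early so that events rolling past midnight are attributed to the right date."""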
+    concat_n_df_1 = pd.DataFrame(columns=['Date', 'time', 'trigger'] + amp_n_cols)
+    concat_n_df_2 = pd.DataFrame(columns=['Date', 'time', 'trigger'] + amp_n_cols)
+    for single_date in pd.date_range(start_date - datetime.timedelta(days=1), end_date):
+        try:
+            n_file_reader_1 = FileReader(cluster=1, single_date=single_date, path_to_files=path_to_files_1)
+            n_file_today, n_file_day_after = n_file_reader_1.reading_n_file()
+            concat_n_df_1 = FileReader.concat_data(file_today=n_file_today, file_day_after=n_file_day_after,
+                                                   single_date=single_date,
+                                                   concat_n_df=concat_n_df_1)
+        except FileNotFoundError:
+            print(
+                f"File {path_to_files_1}/n_{single_date.month:02}-" +
+                f"{single_date.day:02}.{single_date.year - 2000:02} does not exist")
+        try:
+            n_file_reader_2 = FileReader(cluster=2, single_date=single_date, path_to_files=path_to_files_2)
+            n_file_today_2, n_file_day_after_2 = n_file_reader_2.reading_n_file()
+            concat_n_df_2 = FileReader.concat_data(file_today=n_file_today_2, file_day_after=n_file_day_after_2,
+                                                   single_date=single_date, concat_n_df=concat_n_df_2)
+        except FileNotFoundError:
+            print(
+                f"File {path_to_files_2}/2n_{single_date.month:02}-" +
+                f"{single_date.day:02}.{single_date.year - 2000:02} does not exist")
+
+    return concat_n_df_1, concat_n_df_2
+
+
+def make_report_prisma(start_date, end_date, report_path, picture_path, concat_n_df_1, concat_n_df_2):
     t1 = time.time()
 
     warnings.filterwarnings(action='ignore')
 
     days_amount = len(pd.date_range(start_date, end_date))
 
-    process_1 = ProccessingPrismaCl(1, start_date=start_date, end_date=end_date,
-                                    path_to_files=path_to_files_1)
-    process_2 = ProccessingPrismaCl(2, start_date=start_date, end_date=end_date,
-                                    path_to_files=path_to_files_2)
+    process_1 = ProccessingPrismaCl(n_data=concat_n_df_1)
+    process_2 = ProccessingPrismaCl(n_data=concat_n_df_2)
+
     graphs = GraphsDrawing(start_date=start_date, end_date=end_date,
                            path_to_pic=f'{picture_path}')
 
-    worktime_frame_1, breaks_frame_1, n_vs_zero_tr_frame_1, event_counter_fr_4_1, amp_5_fr_2_frame_1, amp_10_fr_1_frame_1, count_rate_amp_5_fr_2_1, count_rate_amp_10_fr_1_1 = process_1.day_proccessing()
-
+    worktime_frame_1, breaks_frame_1, n_vs_zero_tr_frame_1, event_counter_fr_4_1, count_rate_amp_5_fr_2_1, count_rate_amp_10_fr_1_1, amp_5_fr_2_frame_1, amp_10_fr_1_frame_1 = process_1.period_processing_for_report(
+        start_date=start_date, end_date=end_date)
 
-    worktime_frame_2, breaks_frame_2, n_vs_zero_tr_frame_2, event_counter_fr_4_2, amp_5_fr_2_frame_2, amp_10_fr_1_frame_2, count_rate_amp_5_fr_2_2, count_rate_amp_10_fr_1_2 = process_2.day_proccessing()
+    worktime_frame_2, breaks_frame_2, n_vs_zero_tr_frame_2, event_counter_fr_4_2, count_rate_amp_5_fr_2_2, count_rate_amp_10_fr_1_2, amp_5_fr_2_frame_2, amp_10_fr_1_frame_2 = process_2.period_processing_for_report(
+        start_date=start_date, end_date=end_date)
 
     brake_both_cl_time = 0
     for i in range(len(breaks_frame_1.index)):
         for j in range(len(breaks_frame_2.index)):
             if breaks_frame_1['Date'][i] == breaks_frame_2['Date'][j]:
-                if breaks_frame_1['StartMinutes'][i] <= breaks_frame_2['StartMinutes'][j] < breaks_frame_1['EndMinutes'][i]:
-                    brake_both_cl_time += min(breaks_frame_2['EndMinutes'][j], breaks_frame_1['EndMinutes'][i]) - max(
-                        breaks_frame_2['StartMinutes'][j], breaks_frame_1['StartMinutes'][i])
-                elif breaks_frame_2['StartMinutes'][j] <= breaks_frame_1['StartMinutes'][i] < breaks_frame_2['EndMinutes'][
-                    j]:
-                    brake_both_cl_time += min(breaks_frame_2['EndMinutes'][j], breaks_frame_1['EndMinutes'][i]) - max(
-                        breaks_frame_2['StartMinutes'][j], breaks_frame_1['StartMinutes'][i])
+                if breaks_frame_1['StartSeconds'][i] <= breaks_frame_2['StartSeconds'][j] < \
+                        breaks_frame_1['EndSeconds'][i]:
+                    brake_both_cl_time += min(breaks_frame_2['EndSeconds'][j], breaks_frame_1['EndSeconds'][i]) - max(
+                        breaks_frame_2['StartSeconds'][j], breaks_frame_1['StartSeconds'][i])
+                elif breaks_frame_2['StartSeconds'][j] <= breaks_frame_1['StartSeconds'][i] < \
+                        breaks_frame_2['EndSeconds'][j]:
+                    brake_both_cl_time += min(breaks_frame_2['EndSeconds'][j], breaks_frame_1['EndSeconds'][i]) - max(
+                        breaks_frame_2['StartSeconds'][j], breaks_frame_1['StartSeconds'][i])
 
     real_worktime = worktime_frame_2['Worktime'].sum() - 24 * days_amount + worktime_frame_1[
-        'Worktime'].sum() + brake_both_cl_time / 60
+        'Worktime'].sum() + brake_both_cl_time / 3600
 
     print(f'{brake_both_cl_time=}')
 
@@ -63,18 +100,18 @@ def make_report_prisma(start_date, end_date, path_to_files_1, path_to_files_2, r
                                                                 a_crit=6, freq=2)
 
     count_rate_amp_5_fr_2_pic_path_1 = graphs.count_rate_graph(cluster=1, count_rate_frame=count_rate_amp_5_fr_2_1,
-                                                      working_frame=worktime_frame_1,
-                                                      a_crit=5, freq=2)
+                                                               working_frame=worktime_frame_1,
+                                                               a_crit=5, freq=2)
     count_rate_amp_5_fr_2_pic_path_2 = graphs.count_rate_graph(cluster=2, count_rate_frame=count_rate_amp_5_fr_2_2,
-                                                      working_frame=worktime_frame_2,
-                                                      a_crit=5, freq=2)
+                                                               working_frame=worktime_frame_2,
+                                                               a_crit=5, freq=2)
 
     count_rate_amp_10_fr_1_pic_path_1 = graphs.count_rate_graph(cluster=1, count_rate_frame=count_rate_amp_10_fr_1_1,
-                                                       working_frame=worktime_frame_1,
-                                                       a_crit=10, freq=1)
+                                                                working_frame=worktime_frame_1,
+                                                                a_crit=10, freq=1)
     count_rate_amp_10_fr_1_pic_path_2 = graphs.count_rate_graph(cluster=2, count_rate_frame=count_rate_amp_10_fr_1_2,
-                                                       working_frame=worktime_frame_2,
-                                                       a_crit=10, freq=1)
+                                                                working_frame=worktime_frame_2,
+                                                                a_crit=10, freq=1)
 
     del graphs
 
@@ -116,7 +153,8 @@ def make_report_prisma(start_date, end_date, path_to_files_1, path_to_files_2, r
 
     fail_str_begin_1, fail_str_end_1, lost_minutes_1, break_1 = time_breaks_counter(brake_frame=breaks_frame_1)
     fail_str_begin_2, fail_str_end_2, lost_minutes_2, break_2 = time_breaks_counter(brake_frame=breaks_frame_2)
-    brake_table_title = doc.add_paragraph('Таблица 2: Сводная таблица остановок и работ установки ПРИЗМА-32.', style='PItalic')
+    brake_table_title = doc.add_paragraph('Таблица 2: Сводная таблица остановок и работ установки ПРИЗМА-32.',
+                                          style='PItalic')
     brake_table_title.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
     brake_table = doc.add_table(len(fail_str_begin_1) + len(fail_str_begin_2) + 2, 5, doc.styles['Table Grid'])
     brake_table.alignment = WD_TABLE_ALIGNMENT.CENTER
@@ -145,7 +183,7 @@ def make_report_prisma(start_date, end_date, path_to_files_1, path_to_files_2, r
         brake_table.cell(i, 3).text = str(lost_minutes_2[i - 2 - len(fail_str_begin_1)])
         brake_table.cell(i, 4).text = ' '
 
-    make_table_bold(brake_table, cols=5, rows=len(fail_str_begin_1) + len(fail_str_begin_2)+2)
+    make_table_bold(brake_table, cols=5, rows=len(fail_str_begin_1) + len(fail_str_begin_2) + 2)
     doc.add_paragraph()
 
     table_title = doc.add_paragraph(
@@ -153,15 +191,18 @@ def make_report_prisma(start_date, end_date, path_to_files_1, path_to_files_2, r
         style='PItalic')
     table_title.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
 
-    neut_stat_info_1, neut_stat_info_2 = statistical_table(n_vs_zero_tr_frame_1, n_vs_zero_tr_frame_2, dimension='100/соб')
+    neut_stat_info_1, neut_stat_info_2 = statistical_table(n_vs_zero_tr_frame_1, n_vs_zero_tr_frame_2,
+                                                           dimension='100/соб')
 
     neutron_table = doc.add_table(3, 3, doc.styles['Table Grid'])
     neutron_table.cell(0, 0).text = 'Счет/кластер'
     neutron_table.cell(0, 1).text = 'Кл1'
     neutron_table.cell(0, 2).text = 'Кл2'
     neutron_table.cell(1, 0).text = 'События (Fr ≥ 4, A ≥ 5), N соб./ч.'
-    neutron_table.cell(1, 1).text = str(round((event_counter_fr_4_1['Events'] / worktime_frame_1['Worktime']).mean(), 2))
-    neutron_table.cell(1, 2).text = str(round((event_counter_fr_4_2['Events'] / worktime_frame_2['Worktime']).mean(), 2))
+    neutron_table.cell(1, 1).text = str(
+        round((event_counter_fr_4_1['Events'] / worktime_frame_1['Worktime']).mean(), 2))
+    neutron_table.cell(1, 2).text = str(
+        round((event_counter_fr_4_2['Events'] / worktime_frame_2['Worktime']).mean(), 2))
     neutron_table.cell(2, 0).text = 'Нейтроны, (Nn)/соб.'
     neutron_table.cell(2, 1).text = str(round(neut_stat_info_1.iloc[0].sum(), 2))
     neutron_table.cell(2, 2).text = str(round(neut_stat_info_2.iloc[0].sum(), 2))
@@ -221,7 +262,6 @@ def make_report_prisma(start_date, end_date, path_to_files_1, path_to_files_2, r
     table_title.add_run('(100/соб)').bold = True
     table_title.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
 
-
     neutron_stat_table = doc.add_table(neut_stat_info_1.shape[0] + neut_stat_info_2.shape[0] + 2,
                                        neut_stat_info_1.shape[1] + 2,
                                        doc.styles['Table Grid'])
@@ -265,7 +305,8 @@ def make_report_prisma(start_date, end_date, path_to_files_1, path_to_files_2, r
         style='PItalic')
     table_title.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
 
-    count_rate_stat_info_1, count_rate_stat_info_2 = statistical_table(count_rate_amp_10_fr_1_1, count_rate_amp_10_fr_1_2,
+    count_rate_stat_info_1, count_rate_stat_info_2 = statistical_table(count_rate_amp_10_fr_1_1,
+                                                                       count_rate_amp_10_fr_1_2,
                                                                        dimension='cоб./ч.')
     count_stat_table_2 = doc.add_table(count_rate_stat_info_1.shape[0] + count_rate_stat_info_2.shape[0] + 2,
                                        count_rate_stat_info_1.shape[1] + 2, doc.styles['Table Grid'])
@@ -276,8 +317,9 @@ def make_report_prisma(start_date, end_date, path_to_files_1, path_to_files_2, r
     run = doc.add_paragraph().add_run()
     run.add_break(WD_BREAK.PAGE)
 
-    graphic_header = doc.add_paragraph('На рисунке 8, 9 представлено число сигналов с А>5 кодов АЦП в час для 16 детекторов.',
-                             style='Head-graphic')
+    graphic_header = doc.add_paragraph(
+        'На рисунке 8, 9 представлено число сигналов с А>5 кодов АЦП в час для 16 детекторов.',
+        style='Head-graphic')
     graphic_header.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
 
     adding_graphic(doc, title='Рис. 10 - Амплитудное распределение сигналов от детекторов, кластер 1 (Fr ≥ 2 и А > 5)',
@@ -286,6 +328,7 @@ def make_report_prisma(start_date, end_date, path_to_files_1, path_to_files_2, r
                    width=6, picture_path=amp_distribution_pic_path_2)
     add_page_number(doc.sections[0].footer.paragraphs[0])
 
-    doc.save(f'{report_path}\\{start_date.day:02}.{start_date.month:02}.{start_date.year}-{end_date.day:02}.{end_date.month:02}.{end_date.year}.docx')
+    doc.save(
+        f'{report_path}\\{start_date.day:02}.{start_date.month:02}.{start_date.year}-{end_date.day:02}.{end_date.month:02}.{end_date.year}.docx')
 
     print(time.time() - t1)
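
Taken together, the report flow after this commit is a two-step call: read and concatenate the raw files once, then render the document. A sketch with placeholder dates and paths (not values from the commit):

# Two-step flow introduced by this commit; all values are placeholders.
import datetime

from make_report_prisma import make_report_prisma, preparing_data

start_date = datetime.date(2021, 1, 1)
end_date = datetime.date(2021, 1, 7)

# Step 1: read both clusters' n-files into two concatenated DataFrames.
concat_n_df_1, concat_n_df_2 = preparing_data(
    start_date=start_date, end_date=end_date,
    path_to_files_1=r'D:\PRISMA\cl1', path_to_files_2=r'D:\PRISMA\cl2')

# Step 2: render the .docx report from the prepared frames.
make_report_prisma(start_date=start_date, end_date=end_date,
                   report_path=r'D:\PRISMA\reports', picture_path=r'D:\PRISMA\pictures',
                   concat_n_df_1=concat_n_df_1, concat_n_df_2=concat_n_df_2)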

+ 10 - 86
processing_data_prisma.py

@@ -6,56 +6,11 @@ import pandas as pd
 
 
 class ProccessingPrismaCl:
-    def __init__(self, cluster, start_date, end_date, path_to_files):
-        self.cluster = cluster
-        if cluster == 1:
-            self.cluster_n = ''
-        else:
-            self.cluster_n = '2'
-        # self.a_crit = a_crit
-        # self.freq = freq
+    def __init__(self, start_date, end_date):
         self.start_date = start_date
         self.end_date = end_date
-        self.path_to_files = path_to_files
 
-        self.amp_n_cols = []
-        for i in range(1, 17):
-            self.amp_n_cols.append(f'amp{i}')
-            self.amp_n_cols.append(f'n{i}')
-
-    def reading_p_file(self, single_date):
-        """Метод, прочитывающий p-файлы, возвращающий датафрейм дня на выходе. Или возвращающий filenotfounderror, если
-        файла нет"""
-        try:
-            p_file = pd.read_csv(
-                f'{self.path_to_files}\\nv\\{self.cluster}p{single_date.date().month:02}' +
-                f'-{single_date.date().day:02}.{single_date.date().year - 2000:02}',
-                sep='\s[-]*\s*', header=None, skipinitialspace=True, engine='python')
-            p_file.dropna(axis=1, how='all', inplace=True)
-            return p_file
-        except FileNotFoundError as error:
-            print(f"File {self.path_to_files}\\nv\\{self.cluster}p{single_date.date().month:02}-" +
-                  f"{single_date.date().day:02}.{single_date.date().year - 2000:02} does not exist")
-            return error.strerror
-
-    def reading_n_file(self, single_date):
-        """Метод, прочитывающий n-файлы, возвращающий датафрейм дня на выходе. Или возвращающий filenotfounderror, если
-        файла нет"""
-        try:
-            n_file = pd.read_csv(
-                f'{self.path_to_files}\\{self.cluster_n}n_{single_date.date().month:02}' +
-                f'-{single_date.date().day:02}.{single_date.date().year - 2000:02}',
-                sep=' ', header=None, skipinitialspace=True, index_col=False,
-                names=['time', 'number', 'sum_n', 'tr'] + self.amp_n_cols)
-            n_file.dropna(axis=1, how='all', inplace=True)
-            return n_file
-        except FileNotFoundError as error:
-            print(
-                f"File {self.path_to_files}\\{self.cluster_n}n_{single_date.date().month:02}-" +
-                f"{single_date.date().day:02}.{single_date.date().year - 2000:02}', does not exist")
-            return error.strerror
-
-    def day_proccessing(self):
+    def day_proccessing(self, n_file, p_file):
         """Function wrapping the complete processing of one day"""
         worktime_dict = defaultdict(list)
         n_vs_zero_tr_dict = defaultdict(list)
@@ -66,19 +21,14 @@ class ProccessingPrismaCl:
         amp_5_fr_2_frame = pd.DataFrame(columns=[f'amp{i}' for i in range(1, 17)])
         amp_10_fr_1_frame = pd.DataFrame(columns=[f'amp{i}' for i in range(1, 17)])
         for single_date in pd.date_range(self.start_date, self.end_date):
-            n_file = self.reading_n_file(single_date)
-            p_file = self.reading_p_file(single_date)
-
             worktime_dict['Date'].append(single_date)
             n_vs_zero_tr_dict['Date'].append(single_date)
             count_rate_amp_5_fr_2['Date'].append(single_date)
             count_rate_amp_10_fr_1['Date'].append(single_date)
             event_counter_fr_4['Date'].append(single_date)
             if type(p_file) != str:
-                corr_p_file = self.correcting_p_file(p_file)
-
-                worktime_dict['Worktime'].append(round(len(corr_p_file.index) * 5 / 60, 2))
-                break_time_dict = self.counting_break_time(corr_p_file)
+                worktime_dict['Worktime'].append(round(len(p_file.index) * 5 / 60, 2))
+                break_time_dict = self.counting_break_time(p_file)
                 if break_time_dict:
                     breaks_dict['Date'].append(single_date)
                     breaks_dict['StartMinutes'].extend(break_time_dict['StartMinutes'])
@@ -87,7 +37,7 @@ class ProccessingPrismaCl:
             else:
                 worktime_dict['Worktime'].append(0.00)
             if type(n_file) != str:
-                neutron_to_zero_trigger = self.neutron_to_zero_trigger(n_file)
+                neutron_to_zero_trigger = self.neutron_to_zero_trigger(n_file['Date'])
                 for i in range(16):
                     n_vs_zero_tr_dict[f'n{i + 1}'].append(neutron_to_zero_trigger[i])
                     count_rate_amp_5_fr_2[f'amp{i + 1}'].append(
@@ -128,37 +78,6 @@
 
 
     @staticmethod
-    def correcting_p_file(p_file):
-        """Метод, корректирующий старые файлы ПРИЗМА-32, возвращающий скорректированный датафрейм"""
-        p_file['time'] = p_file[0]
-        del p_file[0]
-        p_file = p_file.sort_values(by='time')
-        if len(p_file['time']) > len(p_file['time'].unique()):
-            """Данный костыль нужен для старых p-файлов ПРИЗМА-32(до 14-15 гг.), в которых индексы строк, 
-            по сути обозначающие 5 минут реального времени между ранами, могут повторяться. """
-            p_file.drop_duplicates(keep=False, inplace=True)
-            """После удаления полных дубликатов ищем повторяющиеся индексы. Сначала удаляем строки, 
-            состоящие полностью из нулей и точек (value = len(p_file.columns)), потом ищем множество 
-            дубликатов индексов и множество строк, почти полностью (value > 30) состоящих из нулей и точек. 
-            Берем пересечение этих двух множеств и удаляем находящиеся в пересечении строки"""
-            null_row = dict(p_file.isin([0, '.']).sum(axis=1))  # Проверяем на нули и точки
-            all_null_index = list(
-                {key: value for key, value in null_row.items() if value == len(p_file.columns)}.keys())
-            p_file.drop(index=all_null_index, inplace=True)
-
-            null_index = list(
-                {key: value for key, value in null_row.items() if value > len(p_file.columns) - 5}.keys())
-            same_index = dict(p_file['time'].duplicated(keep=False))
-            same_index_row = list({key: value for key, value in same_index.items() if value is True}.keys())
-            bad_index = list(set(null_index) & set(same_index_row))
-            p_file.drop(index=bad_index, inplace=True)
-            """Также может быть, что после фильтрации осталось больше строк, чем нужно, так как в старых 
-            p-файлах может быть больше индексов, чем минут в дне. Тогда оставляем только 288"""
-            if len(p_file.index) == 289:
-                p_file = p_file.head(288)
-        return p_file
-
-    @staticmethod
     def counting_break_time(p_file):
         """Method that finds the 5-minute intervals in a p-file when the cluster was idle; returns the
         start and end time of each stoppage"""
@@ -213,3 +132,8 @@ class ProccessingPrismaCl:
                 'count_rate': cluster_count_rate}
 
 
+
+
+
+
+

+ 133 - 0
processing_data_prisma_ver_2.py

@@ -0,0 +1,133 @@
+# import datetime
+
+from collections import defaultdict
+
+import pandas as pd
+
+"""Open questions:
+   1) Are p-files required for the timing processing, or should we move to n-files and the DB?
+   2) Should the start and end dates be passed into this class, or taken from n_data?
+   3) The DataFrames for the amplitude distribution no longer need to be concatenated.
+   4) ..."""
+
+
+class ProccessingPrismaCl:
+    def __init__(self, n_data):
+        self.n_data = n_data
+
+    def period_processing_for_report(self, start_date, end_date):
+        worktime_dict = defaultdict(list)
+        n_vs_zero_tr_dict = defaultdict(list)
+        event_counter_fr_4 = defaultdict(list)
+        breaks_dict = defaultdict(list)
+        count_rate_amp_5_fr_2 = defaultdict(list)
+        count_rate_amp_10_fr_1 = defaultdict(list)
+
+        for single_date in pd.date_range(start_date, end_date):
+            worktime_dict['Date'].append(single_date)
+            n_vs_zero_tr_dict['Date'].append(single_date)
+            count_rate_amp_5_fr_2['Date'].append(single_date)
+            count_rate_amp_10_fr_1['Date'].append(single_date)
+            event_counter_fr_4['Date'].append(single_date)
+
+            single_n_data = self.n_data[self.n_data['Date'] == single_date].reset_index(drop=True)
+
+            if len(single_n_data) == 0:
+                worktime_dict['Worktime'].append(0.00)
+                for i in range(16):
+                    n_vs_zero_tr_dict[f'n{i + 1}'].append(0.00)
+                    count_rate_amp_5_fr_2[f'amp{i + 1}'].append(0.00)
+                    count_rate_amp_10_fr_1[f'amp{i + 1}'].append(0.00)
+                event_counter_fr_4['Events'].append(0.00)
+                continue
+
+            break_time_dict, worktime_item = self._counting_break_time(single_n_data, delta_time_crit=600)
+            worktime_dict['Worktime'].append(worktime_item)
+            # print(break_time_dict)
+            if break_time_dict:
+                breaks_dict['Date'].extend([single_date.date()] * len(break_time_dict['StartSeconds']))
+                breaks_dict['StartSeconds'].extend(break_time_dict['StartSeconds'])
+                breaks_dict['EndSeconds'].extend(break_time_dict['EndSeconds'])
+            neutron_to_zero_trigger = self._neutron_to_zero_trigger(single_n_data)
+            for i in range(16):
+                n_vs_zero_tr_dict[f'n{i + 1}'].append(neutron_to_zero_trigger[i])
+                count_rate_amp_5_fr_2[f'amp{i + 1}'].append(
+                    self._set_event_counter(single_n_data, a_crit=6, freq=2)['count_rate'][i + 1])
+                count_rate_amp_10_fr_1[f'amp{i + 1}'].append(
+                    self._set_event_counter(single_n_data, a_crit=11, freq=1)['count_rate'][i + 1])
+
+            event_counter_fr_4['Events'].append(
+                self._set_event_counter(single_n_data, a_crit=6, freq=4)['sum_events'])
+
+        amp_5_fr_2_frame = self.set_amp_df(a_crit=6, freq=2)
+        amp_10_fr_1_frame = self.set_amp_df(a_crit=11, freq=1)
+        worktime_frame = pd.DataFrame(worktime_dict)
+        n_vs_zero_tr_frame = pd.DataFrame(n_vs_zero_tr_dict)
+        breaks_frame = pd.DataFrame(breaks_dict)
+        event_counter_fr_4 = pd.DataFrame(event_counter_fr_4)
+        count_rate_amp_5_fr_2 = pd.DataFrame(count_rate_amp_5_fr_2)
+        count_rate_amp_10_fr_1 = pd.DataFrame(count_rate_amp_10_fr_1)
+
+        for column in [f'amp{i}' for i in range(1, 17)]:
+            count_rate_amp_5_fr_2[column] = count_rate_amp_5_fr_2[column] / worktime_frame['Worktime']
+            count_rate_amp_10_fr_1[column] = count_rate_amp_10_fr_1[column] / worktime_frame['Worktime']
+
+        return worktime_frame, breaks_frame, n_vs_zero_tr_frame, event_counter_fr_4, count_rate_amp_5_fr_2, count_rate_amp_10_fr_1, amp_5_fr_2_frame, amp_10_fr_1_frame
+
+    @staticmethod
+    def _counting_break_time(n_file, delta_time_crit):
+        """Method that finds the intervals in an n-file when the cluster was not running; returns the
+        start and end time of each stoppage"""
+        time_difference = n_file['time'].diff()
+        daily_breaks_dict = defaultdict(list)
+        worktime_item = 24.00
+        # print(f'{time_difference=}')
+        for i in range(1, len(time_difference)):
+            if time_difference[i] > delta_time_crit:
+                daily_breaks_dict['StartSeconds'].append(n_file['time'][i - 1])
+                daily_breaks_dict['EndSeconds'].append(n_file['time'][i])
+        if n_file['time'][0] > delta_time_crit:
+            daily_breaks_dict['StartSeconds'].append(0)
+            daily_breaks_dict['EndSeconds'].append(n_file['time'][0])
+        if n_file['time'][len(n_file['time']) - 1] < 86400 - delta_time_crit:
+            daily_breaks_dict['StartSeconds'].append(n_file['time'][len(n_file['time']) - 1])
+            daily_breaks_dict['EndSeconds'].append(86399)
+        if daily_breaks_dict:
+            worktime_item = worktime_item - round(sum(
+                [daily_breaks_dict['EndSeconds'][index] - daily_breaks_dict['StartSeconds'][index] for index in
+                 range(len(daily_breaks_dict['StartSeconds']))]) / 3600, 2)
+            return daily_breaks_dict, worktime_item
+        else:
+            return None, worktime_item
+
+    @staticmethod
+    def _neutron_to_zero_trigger(n_file):
+        """Method that processes PRISMA-32 n-file data and returns, per detector, the normalized number
+        of pulses selected as neutrons in self-trigger (zero-trigger) events"""
+        counter_zero_tr = len(n_file[n_file['trigger'] == 0].index)
+        zero_tr_frame = n_file[n_file['trigger'] == 0]
+        return [round(zero_tr_frame[f'n{i}'].sum() / counter_zero_tr, 3) for i in range(1, 17)]
+
+    # except ZeroDivisionError: TODO handle the case when there are no zero-trigger events
+
+    @staticmethod
+    def _set_event_counter(n_file, a_crit, freq):
+        """Method that processes PRISMA-32 n-file data: given an amplitude threshold and the number of
+        detectors that must exceed it, it returns a dict with:
+        1) the total number of events on the cluster that pass the selection;
+        2) per detector, the number of passing events in which its amplitude exceeded the threshold"""
+        cluster_count_rate = {}
+        amp_frame = n_file[[f'amp{i}' for i in range(1, 17)]]
+        amp_frame['fr_sum'] = amp_frame.isin(range(a_crit, 520)).sum(axis=1, skipna=True)  # noqa
+        amp_frame = amp_frame[amp_frame['fr_sum'] >= freq].reset_index(drop=True)
+        for i in range(1, 17):
+            cluster_count_rate[i] = len(amp_frame[amp_frame[f'amp{i}'] >= a_crit])
+        return {'sum_events': len(amp_frame.index),
+                'count_rate': cluster_count_rate}
+
+    def set_amp_df(self, a_crit, freq):
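+        """Returns the detector amplitudes of all events in the period that pass the (a_crit, freq) cut"""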
+        amp_frame = self.n_data[[f'amp{i}' for i in range(1, 17)]]
+        amp_frame['fr_sum'] = amp_frame.isin(range(a_crit, 520)).sum(axis=1, skipna=True)  # noqa
+        amp_frame = amp_frame[amp_frame['fr_sum'] >= freq].reset_index(drop=True)
+        return amp_frame[[f'amp{i}' for i in range(1, 17)]]
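
A minimal driving sketch for the reworked processor, with a two-row synthetic frame standing in for the output of preparing_data (all values are placeholders); note the return order, count-rate frames before amplitude frames, which the unpacking in make_report_prisma above follows.

# Synthetic single-day input; values are placeholders.
import pandas as pd

from processing_data_prisma_ver_2 import ProccessingPrismaCl

day = pd.Timestamp(2021, 1, 1)  # 'Date' must match the pd.date_range values
data = {'Date': [day] * 2, 'time': [100.0, 200.0], 'trigger': [0, 0]}
for i in range(1, 17):
    data[f'amp{i}'] = [0, 7]  # the second event passes the a_crit=6 cut
    data[f'n{i}'] = [0, 1]

process = ProccessingPrismaCl(n_data=pd.DataFrame(data))
(worktime_frame, breaks_frame, n_vs_zero_tr_frame, event_counter_fr_4,
 count_rate_amp_5_fr_2, count_rate_amp_10_fr_1,
 amp_5_fr_2_frame, amp_10_fr_1_frame) = process.period_processing_for_report(
    start_date=day, end_date=day)
print(worktime_frame)  # one row; most of this sparse day is counted as a break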

+ 12 - 6
word_addition.py

@@ -133,7 +133,7 @@ def draw_stat_table(stat_table, stat_info_1, stat_info_2):
     stat_table.cell(0, 0).merge(stat_table.cell(1, 0))
     stat_table.cell(0, 1).merge(stat_table.cell(1, 1))
     for j in range(16):
-        stat_table.cell(1, j + 2).text = str(j+1)
+        stat_table.cell(1, j + 2).text = str(j + 1)
     stat_table.cell(2, 0).text = '1'
     stat_table.cell(2, 0).merge(stat_table.cell(3, 0))
     stat_table.cell(4, 0).text = '2'
@@ -181,15 +181,21 @@ def time_breaks_counter(brake_frame):
     fail_str_end = []
     lost_minutes = []
     for i in range(len(brake_frame.index)):
+        start_hour = int(brake_frame['StartSeconds'][i] // 60 // 60)
+        start_minute = int((brake_frame['StartSeconds'][i] - start_hour * 3600) // 60)
+        start_second = int((brake_frame['StartSeconds'][i] - start_hour * 3600) % 60)
+        end_hour = int(brake_frame['EndSeconds'][i] // 60 // 60)
+        end_minute = int((brake_frame['EndSeconds'][i] - end_hour * 3600) // 60)
+        end_second = int((brake_frame['EndSeconds'][i] - end_hour * 3600) % 60)
         fail_str_begin.append(
-            f" {brake_frame['Date'][i].date()}  {brake_frame['StartMinutes'][i] // 60:02}:{brake_frame['StartMinutes'][i] % 60:02}")
+            f" {brake_frame['Date'][i]}  {start_hour:02}:{start_minute:02}:{start_second:02}")
         fail_str_end.append(
-            f" {brake_frame['Date'][i].date()}  {brake_frame['EndMinutes'][i] // 60:02}:{brake_frame['EndMinutes'][i] % 60:02}")
-        lost_minutes.append(brake_frame['EndMinutes'][i] - brake_frame['StartMinutes'][i])
+            f" {brake_frame['Date'][i]}  {end_hour:02}:{end_minute:02}:{end_second:02}")
+        lost_minutes.append(int(brake_frame['EndSeconds'][i] // 60 - brake_frame['StartSeconds'][i] // 60))
 
     for i in range(1, len(brake_frame.index)):
-        if brake_frame['StartMinutes'][i] == 0 and brake_frame['EndMinutes'][i - 1] == 1435 and \
-                (brake_frame['Date'][i] - brake_frame['Date'][i-1]).days == 1:
+        if brake_frame['StartSeconds'][i] == 0 and brake_frame['EndSeconds'][i - 1] == 86399 and \
+                (brake_frame['Date'][i] - brake_frame['Date'][i - 1]).days == 1:
             breaks -= 1
 
     return fail_str_begin, fail_str_end, lost_minutes, breaks
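
The seconds-to-clock conversion used above, restated standalone for a sample value:

# Standalone restatement of the HH:MM:SS arithmetic in time_breaks_counter.
def format_seconds(total_seconds: int) -> str:
    hour = int(total_seconds // 3600)
    minute = int((total_seconds - hour * 3600) // 60)
    second = int((total_seconds - hour * 3600) % 60)
    return f'{hour:02}:{minute:02}:{second:02}'

print(format_seconds(45296))  # -> 12:34:56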