Commit 6bd09d81 authored by doetschj

Addition of ASDF to GMUG conversion as well as ASDF generation of 20 ms waveforms

parent 4146cc1c
......@@ -57,17 +57,40 @@ def merge(param):
sta.merge(method=1, interpolation_samples=0)
start_time = sta.traces[0].stats["starttime"]
sta.write(merge_folder + '/' + gparam['project_name'] + str(start_time) + ".mseed")
name = merge_folder + '/' + gparam['project_name'] + str(start_time)
name2 = name.replace('.', '-')
name3 = name2.replace(':', '-')
# sta.write(merge_folder + '/' + gparam['project_name'] + str(start_time) + ".mseed")
new_file = pyasdf.ASDFDataSet(merge_folder + '/' + str(start_time) + '_'+ gparam['project_name'] + ".h5",
sta.write(name3 + ".mseed")
name = merge_folder + '/' + str(start_time) + '_' + gparam['project_name']
name2 = name.replace('.', '-')
name3 = name2.replace(':', '-')
#merge_folder + '/' + str(start_time) + '_' + gparam['project_name'] + ".h5"
new_file = pyasdf.ASDFDataSet(name3 + ".h5",
compression="gzip-3")
files = glob.glob(merge_folder + '/' + gparam['project_name'] + str(start_time) + ".mseed")
name = merge_folder + '/' + gparam['project_name'] + str(start_time)
name2 = name.replace('.', '-')
name3 = name2.replace(':', '-')
#merge_folder + '/' + gparam['project_name'] + str(start_time) + ".mseed"
files = glob.glob(name3 + ".mseed")
for _i, filename in enumerate(files):
...
print("Adding file %i of %i ..." % (_i + 1, len(files)))
new_file.add_waveforms(filename, tag="raw_recording")
os.remove(merge_folder + '/' + gparam['project_name'] + str(start_time) + ".mseed")
name = merge_folder + '/' + gparam['project_name'] + str(start_time)
name2 = name.replace('.', '-')
name3 = name2.replace(':', '-')
#merge_folder + '/' + gparam['project_name'] + str(start_time) + ".mseed"
os.remove(name3 + ".mseed")
last_merge += filenumber
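Review note: the same three-line sanitization of the start time (replacing '.' and ':' with '-') is repeated before the mseed write, the ASDF file creation, the glob and the os.remove above. A small helper would keep these call sites consistent; this is a sketch only, and sanitize_path is a hypothetical name, not part of this commit:

def sanitize_path(path):
    # UTCDateTime strings contain '.' and ':', which are awkward in file names
    return path.replace('.', '-').replace(':', '-')

# e.g. sta.write(sanitize_path(merge_folder + '/' + gparam['project_name'] + str(start_time)) + ".mseed")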
......
from obspy import Stream
import pyasdf
import yaml
import numpy as np
from obspy import read
# name of the ASDF input file and of the GMUG files to be generated
name="event1"
n=4096
################
sta = Stream()
sta = read(name + '.h5')
f = open('dug-seis.yaml')
param = yaml.load(f)
stations = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
hr=sta.traces[0].stats["starttime"].hour
minute=sta.traces[0].stats["starttime"].minute
sec=sta.traces[0].stats["starttime"].second
mil=int(sta.traces[0].stats["starttime"].microsecond/1000)
year=sta.traces[0].stats["starttime"].year
month=sta.traces[0].stats["starttime"].month
day=sta.traces[0].stats["starttime"].day
#print(sta.traces[0].stats["delta"])
data = []
inpl=[]
for i in range(0,len(stations)):
data.append(sta.traces[i].data[0:n])
inpl.append(param['Acquisition']['hardware_settings']['gain_selection'][i])
nsamp=n #len(data[0])
nchan=32
nrec=1
sra=sta.traces[0].stats["delta"] * 1e6
inpl=str(inpl)
inpl = inpl.replace(',', '')
inpl = inpl.replace('[', '')
inpl = inpl.replace(']', '')
triglevel=[40,400,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000] #set to 1000?
triglevel=str(triglevel)
triglevel = triglevel.replace(',', '')
triglevel = triglevel.replace('[', '')
triglevel = triglevel.replace(']', '')
digitmultiplier=[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] #set to 1? #[0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] #[
digitmultiplier=str(digitmultiplier)
digitmultiplier = digitmultiplier.replace(',', '')
digitmultiplier = digitmultiplier.replace('[', '')
digitmultiplier = digitmultiplier.replace(']', '')
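Side note: the three str()/replace chains above (inpl, triglevel, digitmultiplier) turn a Python list into the space-separated string written to the GMUG header. An equivalent, more compact form would be a join; a sketch only, with as_header_line a hypothetical helper that is not part of this commit:

def as_header_line(values):
    # space-separated channel values, as written to the GMUG .txt header below
    return ' '.join(str(v) for v in values)

# e.g. triglevel = as_header_line([40, 400] + [1000] * 30)  # replacing the str()/replace chain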
data = np.asarray(data)
data.astype('int16').tofile(name+'.dat')
newfile=open(name+".txt","w")
with open(name+".txt", 'a') as file:
file.write("rate[µs], number of records, number of samples, number of channels\n")
file.write("pre trigger [%], AD resolution [bit]\n")
file.write("input range [+/- MilliVolts], all channels\n")
file.write("trigger level [1/1000], all channels\n")
file.write(str(sra)+"\n") #1.0 or from asdf?
file.write(str(nrec) + "\n")
file.write(str(nsamp) + "\n")
file.write(str(nchan) + "\n")
file.write(str(30) + "\n") #keep 30%?
file.write(str(16) + "\n")
file.write(str(inpl) + "\n")
file.write(str(triglevel) + "\n")
file.write("Trans-Channel: 2; nStack: 1\n")
file.write("digit multiplier\n")
file.write(str(digitmultiplier) + "\n")
file.write("Reserve\n")
file.write("Reserve\n")
file.write(" No. Hour Minute Second Millis. Year Month Day\n")
file.write(" "+str(1)+" "+str(hr)+" "+str(minute)+" "+str(sec)+" "+str(mil)+" "+str(year)+" "+str(month)+" "+str(day)+ "\n")
......@@ -15,9 +15,6 @@ from dug_seis.processing.obs_trigger import coincidence_trigger
import pandas as pd
from obspy.core import UTCDateTime
# from dug_seis.processing.Pphase_picker import FBPicker
# from dug_seis.processing.Pphase_picker import FBSummary
def dug_trigger(sta_total, tparam, event_nr, event_nr_s):
......@@ -96,40 +93,3 @@ def dug_trigger(sta_total, tparam, event_nr, event_nr_s):
trigger_out = pd.DataFrame({'Event_id': event_nr_s, 'Time': time_s, 'Classification': classification_s})
return trigger_out, event_nr
# # print(trig)
# time_s = []
# classification_s = []
# for f in range(len(trig)):
# event_nr += 1 # update cummulative event_id
# event_nr_s = event_nr_s + [event_nr] # create a list of event_ids
# coins_sum = int(trig[f]['coincidence_sum'])
#
# time_min = trig[f]['time'][0] # trigger time of earliest arrival of one event
# time_max = max(trig[f]['time']) # trigger time of latest arrival of one event
#
# diff = time_max-time_min # difference between earliest and latest arrival of one event
# trace_id = trig[f]['trace_ids'] # find out trace ids of one event
# id_int = [int(i[3:6]) for i in trace_id]
# id_int.sort() # sort the trace ids of one event
# # if the difference between earliest and latest arrival is too small, classification=electronic interference
# if diff < float(tparam['classification']['spread_int']):
# cla_s = 'electronic'
# elif id_int[0] == 1: # if the event has an arrival on the first trace, classification=active
# cla_s = 'active'
# else:
# cla_s = 'passive' # if none of the above are true, classification=passive
#
# # write a log file
# data = [event_nr] + [coins_sum] + [time_min.isoformat()] + [cla_s]
# cols = pd.Index(['Event_id', 'Coincidence_sum', 'Time', 'Classification'], name='cols')
# df = pd.DataFrame(data=[data], columns=cols)
# log_file = 'trigger.csv'
# df.to_csv(log_file, mode='a', header=False, index=False)
#
# classification_s.append(cla_s) # make a list of classifications for each event
# time_s.append(UTCDateTime(time_min.isoformat())) # make a list of earliest arrival times for each event
#
# # set up data frame containing list of event ids, trigger times and classifications
# trigger_out = pd.DataFrame({'Event_id': event_nr_s, 'Time': time_s, 'Classification': classification_s})
#
# return trigger_out, event_nr
\ No newline at end of file
......@@ -30,8 +30,9 @@ from obspy.core import *
from dug_seis.processing.Pickers.PhasePApy_Austin_Holland import aicdpicker
from dug_seis.processing.Pickers.PhasePApy_Austin_Holland import ktpicker
from dug_seis.processing.Pickers.P_Phase_Picker_USGS.pphasepicker import pphasepicker
#
#
# this class contains the bandpass, picker, localization, magnitude and visualization modules to be applied to the 20ms waveform.
class Event(ObsPyEvent):
def __init__(self, param, wf_stream, event_id, classification, logger):
super().__init__(resource_id='smi:%s/event/%d' % (param['General']['project_name'], event_id))
......@@ -53,25 +54,18 @@ class Event(ObsPyEvent):
t_start_plot = self.wf_stream.traces[0].stats["starttime"]
self.logger.info('Noise Visualisation at ' + str(t_start_plot)[11:-1] + ', processing started.')
# apply a bandpass filter to all 32 traces.
def bandpass(self):
# bandpass filter using same parameters as for triggering
self.wf_stream.filter("bandpass", freqmin=self.prparam['bandpass_f_min'], freqmax=self.prparam['bandpass_f_max'])
# import numpy as np
# tparam = self.param['Trigger']
# stations = [i - 1 for i in tparam['channels']]
# if self.param['General']['active_trigger_channel'] and self.param['General'][
# 'active_trigger_channel'] - 1 not in stations:
# stations.insert(0, param['General']['active_trigger_channel'] - 1)
# stations.sort()
# for k in stations:
# print(np.amax(np.absolute(self.wf_stream.traces[k].data)))
# apply a picker algorithm on all 32 traces to pick first arrivals.
def pick(self):
piparam = self.prparam['Picking']
df = self.wf_stream[0].stats.sampling_rate
# apply the fb picker.
if piparam['algorithm'] == 'fb':
t_long = 5 / 1000
freqmin = 1
......@@ -108,9 +102,7 @@ class Event(ObsPyEvent):
logging.info('Event ' + str(self.event_id) + ': ' + str(len(self.picks)) + ' picks.')
# apply the kt (kurtosis) picker.
if piparam['algorithm'] == 'kt':
t_win = 1/2000
t_ma = 10/2000
......@@ -129,14 +121,12 @@ class Event(ObsPyEvent):
tr = self.wf_stream[j]
tr.detrend('linear') # Perform a linear detrend on the data
# scnl, picks, polarity, snr, uncert = picker.picks(tr)
scnl, picks, polarity, snr, uncert = chenPicker.picks(tr)
if len(picks):
# t_picks=trig[0][0] / df # if a pick is done on a trace, convert it to seconds
t_pick_UTC = picks[0] # self.wf_stream[0].stats[
# "starttime"] + t_picks # and add the start time of the 20ms snippet
t_pick_UTC = picks[0]
station_id = j + 1
self.picks.append(Pick(time=t_pick_UTC,
resource_id='%s/picks/%d' % (self.resource_id.id, len(self.picks) + 1),
......@@ -147,7 +137,8 @@ class Event(ObsPyEvent):
logging.info('Event ' + str(self.event_id) + ': ' + str(len(self.picks)) + ' picks.')
if piparam['algorithm'] == 'AICD':
# apply the AICD picker.
if piparam['algorithm'] == 'aicd':
t_ma = 3 / 1000
nsigma = 8
t_up = 0.78 / 1000
......@@ -171,9 +162,8 @@ class Event(ObsPyEvent):
if len(picks):
# t_picks=trig[0][0] / df # if a pick is done on a trace, convert it to seconds
t_pick_UTC = picks[0] # self.wf_stream[0].stats[
# "starttime"] + t_picks # and add the start time of the 20ms snippet
t_pick_UTC = picks[0]
station_id = j + 1
picksav.append(t_pick_UTC-self.wf_stream[0].stats[
"starttime"])
......@@ -186,6 +176,7 @@ class Event(ObsPyEvent):
pickaverage=np.mean(picksav)*1000
logging.info('Event ' + str(self.event_id) + ': ' + str(len(self.picks)) + ' picks.')
# apply the STA LTA picker.
if piparam['algorithm'] == 'sta_lta':
for j in range(len(self.wf_stream)): # do picking for each trace in 20ms snippet
......@@ -207,7 +198,8 @@ class Event(ObsPyEvent):
logging.info('Event ' + str(self.event_id) + ': ' + str(len(self.picks)) + ' picks.')
if piparam['algorithm'] == 'Pphase':
# apply the P Phase picker.
if piparam['algorithm'] == 'pphase':
Tn = 0.01
xi = 0.6
......@@ -228,10 +220,13 @@ class Event(ObsPyEvent):
logging.info('Event ' + str(self.event_id) + ': ' + str(len(self.picks)) + ' picks.')
# apply a localization algorithm that makes use of the picks done by the picker module.
def locate(self):
lparam = self.prparam['Locate']
if lparam['algorithm'] == 'hom_aniso':
aparam = lparam['hom_aniso']
# only run the localization if a minimum number of picks is available for the event.
if len(self.picks) < lparam['min_picks']:
return
......@@ -326,6 +321,7 @@ class Event(ObsPyEvent):
self.logger.info('Event ' + str(self.event_id) + ': Location %3.2f %3.2f %3.2f; %i iterations, rms %4.3f ms'
% (loc[0], loc[1], loc[2], nit, rms))
# apply a magnitude estimation that makes use of the picks to find the maximum amplitude of arrivals.
def est_magnitude(self, origin_number=np.inf):
emparam = self.prparam['Magnitude']
if origin_number > len(self.origins):
......@@ -353,22 +349,25 @@ class Event(ObsPyEvent):
self.ts_approx[i] = (self.param['Trigger']['endtime']+self.param['Trigger']['endtime'])*1000
relstartwindow = []
# we create a time window around the expected maximum amplitude of the event arrival at each trace.
# this time window is based on the arrival time predicted by the localization module (ts_approx).
for i in range(len(self.ts_approx)):
relstartwindow.append(self.ts_approx[i] - 2 * righthand[i]) # begin of the segment (time)
relstartwindow.append(self.ts_approx[i] - 2 * righthand[i]) # begin of the segment (in time)
if relstartwindow[i] < 0:
relstartwindow[i] = 0
sample_start = [int(round(i / 1000 * self.wf_stream.traces[0].stats.sampling_rate)) for i in
relstartwindow] # beginning of segment (samples)
relstartwindow] # beginning of segment (in samples)
sample_end = [int(round(i / 1000 * self.wf_stream.traces[0].stats.sampling_rate)) for i in
self.ts_approx] # end of segment (samples)
self.ts_approx] # end of segment (in samples)
maxamp = []
for k in range(len(ind_trig[0])):
# print(abs(self.wf_stream[ind_trig[0][k]].data[sample_start[k]:sample_end[k]]))
maxamp.append(max(abs(self.wf_stream[ind_trig[0][k]].data[
sample_start[k]:sample_end[k]]))) # find maximum amplitude in segment
sample_start[k]:sample_end[k]]))) # find the maximum amplitude in the segment, which is used for the magnitude estimation.
corr_fac = []
mag_exp= []
......@@ -388,8 +387,10 @@ class Event(ObsPyEvent):
type='relative_amplitude'))
self.preferredMagnitudeID = self.magnitudes[-1].resource_id
# generates a visualization of the 32 traces of the 20ms waveform, together with the picks from the picker module and the arrival times resulting from the localization.
def event_plot(self, save_fig=False):
fparam = self.prparam['Folders']
# for name of each saved png, use start and end time of the 20ms snippet
t_start_plot = self.wf_stream.traces[0].stats["starttime"]
t_start_plot=str(t_start_plot)
......@@ -427,7 +428,6 @@ class Event(ObsPyEvent):
trig_ch = [int(i.waveform_id['station_code']) for i in self.picks]
ch_in = [int(i.stats['station']) for i in self.wf_stream.traces]
# finding the correct axes(stations)
# print(self.loc_ind)
ind_trig = np.where(np.isin(ch_in, trig_ch)) # Finds correct axis
trig_time = [(i.time - self.wf_stream.traces[0].stats["starttime"]) * 1000 for i in self.picks]
......@@ -449,28 +449,7 @@ class Event(ObsPyEvent):
(axs[loc_ind_trig[m]].get_ylim()[0],
axs[loc_ind_trig[m]].get_ylim()[1]),
'y--', linewidth=1.0)
# if self.classification != 'noise':
# trig_ch = [int(i.waveform_id['station_code']) for i in self.picks]
# ch_in = [int(i.stats['station']) for i in self.wf_stream.traces]
# # finding the correct axes(stations)
# ind_trig = np.where(np.isin(ch_in, trig_ch)) # Finds correct axis
# trig_time = [(i.time - self.wf_stream.traces[0].stats["starttime"]) * 1000 for i in self.picks]
# loc_ind_trig = ind_trig[0] #why was this? if this line gives error switch between ind_trig[0] and self.loc_ind[-1][:]
#
# # plotting pick time
# for m in range(len(trig_time)):
# axs[ind_trig[0][m]].plot((trig_time[m], trig_time[m]),
# (axs[ind_trig[0][m]].get_ylim()[0], axs[ind_trig[0][m]].get_ylim()[1]),
# 'r-', linewidth=1.0)
# for m in range(len(loc_ind_trig)):
# if len(self.origins):
# axs[loc_ind_trig[m]].plot((self.tcalc[len(self.origins)-1][m], self.tcalc[len(self.origins)-1][m]),
# (axs[loc_ind_trig[m]].get_ylim()[0], axs[loc_ind_trig[m]].get_ylim()[1]),
# 'g-', linewidth=1.0)
# if len(self.magnitudes):
# axs[loc_ind_trig[m]].plot((self.ts_approx[m], self.ts_approx[m]),
# (axs[loc_ind_trig[m]].get_ylim()[0], axs[loc_ind_trig[m]].get_ylim()[1]),
# 'y-', linewidth=1.0)
plt.suptitle(t_title, fontsize=17)
fig.text(0.49, 0.035, 'time [ms]', ha='center', fontsize=14)
......@@ -516,3 +495,8 @@ class Event(ObsPyEvent):
df = pd.DataFrame(data=[outdata], columns=cols)
df.to_csv(filename, mode='a', header=False, index=False)
self.logger.info('Event ' + str(self.event_id) + ': Info saved to %s.', filename)
def event_save_ASDF(self):
self.wf_stream.write('event' + str(self.event_id), 'H5') # declare 'H5' as format
print(read('event' + str(self.event_id) + '.h5'))
......@@ -14,10 +14,19 @@ Version 0.0, 23.10.2018, Joseph Doetsch (doetschj)
from dug_seis.processing.event import Event
from dug_seis.processing.get_waveforms import get_waveforms
from obspy import read
def event_processing(param, load_file, trig_time, event_id, classification, logger):
# get the (standard 20ms) waveform around the trigger time for all 32 channels by passing the name of the data snippet and the trigger time to the get_waveforms script.
wf_stream = get_waveforms(param, load_file, trig_time)
# In case the user wants to create a new ASDF file for each 20ms waveform of all 32 channels.
if param['Processing']['waveform_toASDF'] == True:
wf_stream.write('event' + str(event_id), 'H5') # declare 'H5' as format
# the event-class modules applied to the 20ms waveform, and their order, can be changed below by reordering or commenting out the calls.
event = Event(param, wf_stream, event_id, classification, logger)
if classification == 'noise':
event.event_plot(save_fig=True)
......@@ -31,6 +40,7 @@ def event_processing(param, load_file, trig_time, event_id, classification, log
event.locate()
#event.est_magnitude()
event.event_plot(save_fig=True)
# event.event_save_ASDF()
#event.prparam['Locate']['algorithm'] = 'hom_aniso'
#event.locate()
#event.est_magnitude()
......@@ -44,5 +54,6 @@ def event_processing(param, load_file, trig_time, event_id, classification, log
elif classification == 'active':
event.write('%s/hammer%s.xml' % (param['Processing']['Folders']['quakeml_folder'], event_id), 'quakeml',
validate=True)
event.event_save_csv('events.csv')
logger.info('Finished event processing for event %d' % event_id)
......@@ -30,8 +30,9 @@ def get_waveforms(param, load_file, trig_time):
'active_trigger_channel'] - 1 not in stations:
stations.insert(0, param['General']['active_trigger_channel'] - 1)
stations.sort()
if len(load_file) == 1: # load 2 snippets if event in overlap otherwise load 1
# To create the 20ms waveform, load 2 data snippets if the event lies in the overlap, otherwise load 1.
if len(load_file) == 1:
wf_stream = Stream()
ds = pyasdf.ASDFDataSet(asdf_folder + '/' + load_file[0], mode='r')
......@@ -41,6 +42,8 @@ def get_waveforms(param, load_file, trig_time):
starttime=start_time,
endtime=end_time,
tag="raw_recording")
if tparam['Gainrange'] == 'YAML':
for k in r:
# print(np.amax(np.absolute(wf_stream.traces[k].data)))
......@@ -49,6 +52,7 @@ def get_waveforms(param, load_file, trig_time):
logging.info('Gain range event retrieved from YAML file')
else:
# In this case the 20ms waveform is loaded from 2 data snippets.
ds1 = pyasdf.ASDFDataSet(asdf_folder + '/' + load_file[0], mode='r')
ds2 = pyasdf.ASDFDataSet(asdf_folder + '/' + load_file[1], mode='r') #GR_001
......
......@@ -36,7 +36,7 @@ def processing(param):
os.system('celery -A dug_seis worker --loglevel=debug --concurrency=%i &> celery.log &' %param['Processing']['number_workers'])
from dug_seis.processing.celery_tasks import event_processing_celery
# create folders for processing
# create the processing folders specified in the YAML file if they do not exist yet.
if not os.path.exists(param['Processing']['Folders']['quakeml_folder']):
os.makedirs(param['Processing']['Folders']['quakeml_folder'])
if not os.path.exists(param['Processing']['Folders']['plot_folder_active']):
......@@ -59,13 +59,13 @@ def processing(param):
event_nr = 0
event_nr_s = []
next_noise_vis = 0
# load list of ASDF snippets in folder
sta_overlap = Stream()
# print(int(str(tparam['Time'])[6:8]))
# if tparam['processing time'] == True:
# load list of ASDF snippets in folder.
new_files1 = sorted([f for f in os.listdir(asdf_folder) if f.endswith('.h5')]) # generates a list of the .h5 (ASDF) files in asdf_folder
number2=[]
for j in range(len(new_files1)):
......@@ -83,6 +83,8 @@ def processing(param):
processed_files = []
# Load the data snippet (sta) and combine it with the overlap from the previous snippet to form sta_total.
while 1:
if len(new_files):
current_file = new_files.pop(0)
......@@ -105,11 +107,12 @@ def processing(param):
else:
sta_total = sta_overlap + sta_copy
# Use merge statement for merging
# Merge the data snippet and the overlap (sta and sta_overlap), which together form sta_total.
for tr in sta_total:
tr.stats.delta = sta_total[0].stats.delta
sta_total.merge(method=1, interpolation_samples=0)
# Apply gainrange to all traces in the data snippet
if tparam['Gainrange']=='YAML':
for k in range(0, len(sta_total.traces)):
sta_total.traces[k].data = sta_total.traces[k].data / 32768 * \
......@@ -117,6 +120,8 @@ def processing(param):
logger.info('Gain range trigger retrieved from YAML file')
logger.debug(sta_total)
# Send the loaded snippets including overlap to the trigger script
trigger_out, event_nr = dug_trigger(sta_total, tparam, event_nr, event_nr_s) # run trigger function
overlap = tparam['starttime'] + tparam['endtime'] #+ tparam['endtime']['sta_lta']['lt_window']/sta_total.stats.sampling_rate
......@@ -131,6 +136,8 @@ def processing(param):
classification = [i for i in trigger_out['Classification']]
trig_time=[i for i in trigger_out['Time'][trigger_out['Time'] <t_end-overlap]] #do not store times in overlap
# send the name of the data snippet containing the event, the trigger time, the event id and the event classification to the event processing script.
for l in range(0, len(trig_time)):
if trig_time[l] < sta_copy.traces[0].stats["starttime"] + param['Trigger']['starttime']:
load_file = [processed_files[-1], current_file]
......@@ -140,6 +147,7 @@ def processing(param):
event_processing_celery.delay(param, load_file, trig_time[l], event_id[l], classification[l])
logger.info('Event ' + str(event_id[l]) + ' at ' + str(trig_time[l]) + ' sent to parallel worker.')
else:
event_processing(param, load_file, trig_time[l], event_id[l], classification[l], logger) # run processing for each event
# noise visualization
......@@ -162,6 +170,7 @@ def processing(param):
else:
#check if there are new data snippets that have not been processed yet.
flist1 = sorted([f for f in os.listdir(asdf_folder) if f.endswith('.h5')]) # generates a list of the .h5 (ASDF) files in asdf_folder
number2 = []
for j in range(len(flist1)):
......@@ -176,7 +185,7 @@ def processing(param):
flist = flist1[index_start_approved:]
new_files = [f for f in flist if f not in processed_files]
#print(new_files)
if not len(new_files):
logger.info('Waiting for new files.')
time.sleep(1)