Commit ae0f8d7b authored by doetschj's avatar doetschj
Browse files

Merge branch 'master' into feature/parallel_processing

# Conflicts:
#	config/dug-seis.yaml
#	dug_seis/processing/event.py
#	dug_seis/processing/event_processing.py
#	dug_seis/processing/processing.py
parents aecbc44a 58881ef7
# DUG-Seis parameter file
General:
project_name : Elegancy_CSD
project_location : Mt_Terri
operator : ['J. Doetsch','S. Demir']
project_name : Bedretto_test
project_location : Bedretto
operator : ['J. Doetsch','K. Plenkers']
sensor_count : 32
sensor_coords : [ # in local coordinate system; x=East, y=North, z=elevation
0,0,0,
......@@ -43,24 +43,25 @@ General:
origin_ch1903_north : 247500.0 # ch1903 northing of local coordinate system
origin_elev : 500.0 # elevation of origin of local coordinate system
stats:
network : 'XM'
network : 'XB'
Acquisition:
hardware_settings:
sampling_frequency: 200000 # in Hz
gain_selection : [10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000]
asdf_settings:
use_float32 : False # select between int32 and float32 as asdf data format
data_folder : 'e:/20190206_sw_test/'
data_folder_tmp : 'e:/20190206_sw_test/tmp/'
save_mV : False # save_mV = true uses float32 and results in much larger asdf file sizes
data_folder : 'raw/'
data_folder_tmp : 'raw/tmp_'
compression : 'gzip-3' # default: 'gzip-3', 'gzip-0' to 'gzip-9' or None
file_length_sec : 60.0 # in seconds
file_length_sec : 30.0 # in seconds
station_naming : [1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16, 17, 25, 18, 26, 19, 27, 20, 28, 21, 29, 22, 30, 23, 31, 24, 32]
Trigger:
asdf_folder : 'raw'
channels : [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27]
overlap : 0.05
Gainrange : 'YAML' #other option: 'ASDF'
bandpass_f_min : 1000
bandpass_f_max : 12000
algorithm : 'recstalta' #other option: 'threshold'
......@@ -78,7 +79,8 @@ Trigger:
spread_int : 1e-4
noise_vis : 'True'
noise_vis_interval : 60 # in seconds
time_between_events: 0.0001
Hr : 13 #hour from which to start processing
Min : 49 #min from which to start processing
Processing:
......
......@@ -30,7 +30,7 @@ class Card:
# l_notify_size = c_int32(regs.KILO_B(2 * 1024))
self.l_notify_size = c_int32(param['Acquisition']['bytes_per_transfer'])
self._use_16_bit_mode = param['Acquisition']['hardware_settings']['use_16_bit_mode']
self.use_float32 = param['Acquisition']['asdf_settings']['use_float32']
self.save_mV = param['Acquisition']['asdf_settings']['save_mV']
self.card_nr = card_nr
self.h_card = None
......@@ -100,7 +100,7 @@ class Card:
pass
else:
# some possibilities: int16(no conversion needed), int32, float32, float64
if self.use_float32:
if self.save_mV:
np_data = np.require(np_data, dtype=np.float32, requirements=["C"]) # int32 convert to 32 bit float
for i in range(0, 16):
np_data[i] = np_data[i] * self.scaling_this_card[i]
......
......@@ -101,14 +101,25 @@ def init_card(param, card_nr):
# no clock output
spcm_dwSetParam_i32(h_card, regs.SPC_CLOCKOUT, 0)
# set input range 50, 100, 250, 500, 1000, 2000, 5000, 10000 mV
# read available ranges
range_min = c_int32(0)
range_max = c_int32(0)
for i in range(16):
spcm_dwSetParam_i32(h_card, regs.SPC_AMP0 + i*100, gain_selection_this_card[i])
l_number_of_ranges = c_int32(0)
spcm_dwGetParam_i32(h_card, regs.SPC_READIRCOUNT, byref(l_number_of_ranges))
logging.debug("card {}: nr of available ranges: {}".format(card_nr, l_number_of_ranges.value))
for i in range(l_number_of_ranges.value):
spcm_dwGetParam_i32(h_card, regs.SPC_READRANGEMIN0 + i, byref(range_min))
spcm_dwGetParam_i32(h_card, regs.SPC_READRANGEMAX0 + i, byref(range_max))
logging.info("card {}, channel {}: {}mV to {}mV".format(card_nr, i, range_min.value, range_max.value))
logging.debug("card {}, range nr {}: {}mV to {}mV".format(card_nr, i, range_min.value, range_max.value))
# set input range 50, 100, 250, 500, 1000, 2000, 5000, 10000 mV
selected_range = c_int32(0)
for i in range(16):
spcm_dwSetParam_i32(h_card, regs.SPC_AMP0 + i * 100, gain_selection_this_card[i])
spcm_dwGetParam_i32(h_card, regs.SPC_AMP0 + i * 100, byref(selected_range))
logging.info("card {}, channel {} selected range: {}mV".format(card_nr, i, selected_range.value))
""" define the data buffer """
# we try to use continuous memory if available and big enough
......
......@@ -166,4 +166,97 @@ code
====
np_data = np.require(np_data, dtype=np.float32, requirements=["C"]) # int32 convert to 32 bit
for i in range(0, 16):
np_data[i] = np_data[i] / 10000
\ No newline at end of file
np_data[i] = np_data[i] / 10000
==================================
on the acquisition system 3.4.2019
==================================
2019-04-03 09:35:35,851 INFO DUG-Seis started
2019-04-03 09:35:35,885 INFO Acquisition script started
2019-04-03 09:35:35,885 INFO ==========================
2019-04-03 09:35:35,885 INFO Hardware driver found, running on real hardware
2019-04-03 09:35:35,886 INFO sampling_frequency: 200000
2019-04-03 09:35:35,886 INFO gain_selection: [10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000]
2019-04-03 09:35:35,886 INFO ram_buffer_size: 134217728
2019-04-03 09:35:35,886 INFO timeout: 8000
2019-04-03 09:35:35,887 INFO use_16_bit_mode: False
2019-04-03 09:35:35,887 INFO save_mV: False
2019-04-03 09:35:35,887 INFO data_folder: e:/20190402_sw_test/
2019-04-03 09:35:35,887 INFO data_folder_tmp: e:/20190402_sw_test/tmp/
2019-04-03 09:35:35,888 INFO compression: gzip-3
2019-04-03 09:35:35,888 INFO file_length_sec: 30.0
2019-04-03 09:35:35,888 INFO station_naming: [1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16, 17, 25, 18, 26, 19, 27, 20, 28, 21, 29, 22, 30, 23, 31, 24, 32]
2019-04-03 09:35:35,888 INFO simulation_mode: False
2019-04-03 09:35:35,888 INFO bytes_per_transfer: 33554432
2019-04-03 09:35:35,888 INFO simulation_amount: 1
2019-04-03 09:35:35,889 INFO init card: 0
- amp off
- teamview login
dtype=np.int32
==============
- file size first: 98 898 KB
- load ~ 63% (3.2025)
dtype=np.float32
================
- save_mV: True
- file size first: 131 435 KB
- load ~ 88% (4.5141)
dtype=np.float32
================
- save_mV: True
- compression: None
- file size first: 787 565 KB
- load ~ 9.9% (0.5075)
dtype=np.int32
==============
- compression: None
- file size first: 787 565 KB
- load ~ 7.7% (0.3942)
dtype=np.int32
==============
- compression: None
- 500k sps
- file size first: 1 968 667 KB
- load ~ 20% (0.411)
dtype=np.int32
==============
- compression: None
- 1000k sps
- file size first: 3 806 060 KB
- load ~ 45% (0.4598)
dtype=np.int32
==============
- compression: gzip-1
- 1000k sps
- file size first: 385 653 KB
- load ~ 141% (1.441)
dtype=np.int32
==============
- compression: gzip-1
- 500k sps
- file size first: 277 292 KB
- load ~ 71% (1.463)
dtype=np.int32
==============
- amp's on
- file size first: 109 988 KB
- load ~ 65% (3.3458)
dtype=np.float32
================
- save_mV: True
- amp's on
- file size first: 142 942 KB
- load ~ 89% (4.5725)
\ No newline at end of file
......@@ -59,7 +59,7 @@ def merge(param):
sta.write(merge_folder + '/' + gparam['project_name'] + str(start_time) + ".mseed")
new_file = pyasdf.ASDFDataSet(merge_folder + '/' + gparam['project_name'] + str(start_time) + ".h5",
new_file = pyasdf.ASDFDataSet(merge_folder + '/' + str(start_time) + '_'+ gparam['project_name'] + ".h5",
compression="gzip-3")
files = glob.glob(merge_folder + '/' + gparam['project_name'] + str(start_time) + ".mseed")
......
import pandas as pd
from obspy.core import UTCDateTime
from obspy import Stream
import os
import time
import pyasdf
import numpy as np
import scipy
from obspy import read, Trace
def statelevel(y, n):
    """Estimate the two state levels of a bilevel signal via the histogram method.

    Python port of the mode-based histogram step inside MATLAB's statelevels().

    y : 1-D numpy array of samples (numpy arithmetic is used throughout, so a
        plain Python list will not work — TODO confirm callers pass ndarrays).
    n : number of histogram bins.

    Returns (levels, histogram): levels is a 2-element array holding the lower
    and upper state level; histogram is the n-bin amplitude histogram of y.
    """
    ymax=max(y)
    # eps offset keeps the minimum sample strictly above the lowest bin edge.
    ymin=min(y)-np.finfo(float).eps
    # Bin index per sample, 1..n (MATLAB-style 1-based ceil binning).
    idx = np.ceil(n * (y - ymin) / (ymax - ymin))
    # Keep indices 1..n-1 only (the top bin is dropped — mirrors the original port).
    idx= [x for x in idx if 1 <= x <= n-1]
    histogram = np.zeros(n)
    for i in range(0,len(idx)):
        histogram[int(idx[i])]=histogram[int(idx[i])] + 1
    # Compute Center of Each Bin
    ymin = min(y)
    Ry = ymax - ymin
    dy = Ry / n
    verm=[x-0.5 for x in range(1, n + 1)]
    # NOTE(review): 'bins' is never used below; also float + list only works
    # because ymin is a numpy scalar (broadcasts) — a plain float would raise.
    bins = ymin + [x * dy for x in verm]
    # compute statelevels
    # First and last non-empty bins bound the occupied amplitude region.
    iLowerRegion = next(x[0] for x in enumerate(histogram) if x[1] > 0) #find(histogram > 0, 1, 'first');
    histogram2=histogram[::-1]
    iUpperRegion = len(histogram2) - next(x[0] for x in enumerate(histogram2) if x[1] > 0) - 1 #find(histogram > 0, 1, 'last');
    iLow = iLowerRegion #iLowerRegion[0]
    iHigh = iUpperRegion #iUpperRegion[0]
    # Split the occupied region into a lower and an upper half-histogram.
    lLow = int(iLow)
    lHigh = int(iLow + np.floor((iHigh - iLow)/2))
    uLow = int(iLow + np.floor((iHigh - iLow)/2))
    uHigh = int(iHigh)
    # Upper and lower histograms
    lHist = histogram[lLow:lHigh]
    uHist = histogram[uLow:uHigh]
    levels = np.zeros(2)
    # NOTE(review): MATLAB's statelevels uses the ARG-max (mode bin index) of
    # each half-histogram here; this port takes the max COUNT instead, so the
    # returned levels may deviate from the MATLAB reference — confirm before
    # relying on their absolute values.
    iMax = max(lHist[1:])
    iMin = max(uHist)
    levels[0] = ymin + dy * (lLow + iMax - 1.5)
    levels[1] = ymin + dy * (uLow + iMin - 1.5)
    return(levels,histogram)
import pandas as pd
from obspy.core import UTCDateTime
from obspy import Stream
import os
import time
import pyasdf
import numpy as np
import scipy
from obspy import read, Trace
from dug_seis.processing.Pickers.P_Phase_Picker_USGS.Statelevels import statelevel
# from matplotlib import pyplot as plt
def pphasepicker(input,Tn,xi):
    """Pick the P-phase onset of a trace with the damped-SDF-oscillator method.

    The trace drives a single-degree-of-freedom oscillator (natural period Tn,
    damping ratio xi); the viscous damping-energy integrand Edi is thresholded
    with a two-state histogram level (statelevel), and the pick is the last
    zero crossing of the waveform before the threshold is first exceeded.

    input : obspy Trace (only .stats.sampling_rate and .data are used)
    Tn    : oscillator natural period in seconds
    xi    : viscous damping ratio
    Returns the onset time in seconds from the start of the trace, or the
    string 'nopick' when no zero crossing precedes the first threshold hit.
    """
    # Bug fix: 'combinations' was used below without ever being imported,
    # raising NameError whenever the rank-deficient branch is reached.
    from itertools import combinations

    dt = 1/input.stats.sampling_rate
    # x_d = input.detrend(type='simple')
    x_d=input
    # Construct a fixed-base viscously damped SDF oscillator.
    omegan = 2 * np.pi / Tn  # natural frequency in radian/second
    C = 2 * xi * omegan  # viscous damping term
    K = omegan ** 2  # stiffness term
    y = np.array([[0, 0]])  # response vector [displacement, velocity]
    # Solve second-order ordinary differential equation of motion.
    A = np.array([[0, 1], [-K, -C]])
    # NOTE(review): the module does 'import scipy' only; scipy.linalg may not be
    # importable that way on all scipy versions — confirm 'import scipy.linalg'.
    Ae = scipy.linalg.expm(A * dt)
    # NOTE(review): '*' here is elementwise, not a matrix product — kept exactly
    # as in the original port; confirm against the reference implementation.
    # (The duplicate unused assignment 'koek' of the same expression was removed.)
    equal = (Ae - np.eye(2)) * np.array([0, 1])
    b = equal[:, 1]
    num_vars = A.shape[1]
    rank = np.linalg.matrix_rank(A)
    if rank == num_vars:
        AeB = np.linalg.lstsq(A, b)[0]  # not under-determined
    else:
        # Under-determined system: try every subset of 'rank' variables.
        for nz in combinations(range(num_vars), rank):  # the variables not set to zero
            try:
                AeB = np.zeros((num_vars, 1))
                AeB[nz, :] = np.asarray(np.linalg.solve(A[:, nz], b))
            except np.linalg.LinAlgError:
                pass
    xnew = Stream()
    xnew_trace = Trace(x_d.data)
    xnew.append(xnew_trace)
    # Time-march the oscillator response over the whole trace.
    for k in range(1, len(xnew.traces[0].data)):
        y = np.concatenate((y, [np.matmul(Ae, y[k - 1]) + AeB * xnew.traces[0].data[k]]))  # check
    veloc = y[:, 1]  # relative velocity of mass
    Edi = 2 * xi * omegan * veloc ** 2  # integrand of viscous damping energy
    # Apply histogram (two-state-level) method to threshold the damping energy.
    nbins = int(np.ceil(2 / dt))
    levels, histograms = statelevel(Edi, int(nbins))
    R = levels
    locs = [x for (x, val) in enumerate(Edi) if val > R[0]]  # find(Edi > R(1));
    # Last sign change of the waveform before the threshold is first exceeded.
    indx_1 = xnew.traces[0].data[0:int(locs[0]) - 1] * xnew.traces[0].data[1:int(locs[0])]
    indx = [x for (x, val) in enumerate(indx_1) if val < 0]
    if indx:
        loc = indx[-1] * dt
    else:
        loc='nopick'
    return(loc)
""" phasepapy.phasepicker
This package contains modules to make earthquake phase picks.
"""
#__all__=['fbpicker','ktpicker','aicdpicker','scnl','util']
# Original imports from Chen's way of doing things
from .fbpicker import *
from .ktpicker import *
from .aicdpicker import *
#!/usr/bin/env python
import matplotlib.pyplot as plt
import matplotlib
import copy
from obspy.core import *
from .scnl import *
from .cf_aicd import *
class AICDPicker():
    """Phase picker driven by the derivative of the AIC function.

    The characteristic function is the AIC derivative of the trace; picks are
    declared wherever it exceeds a dynamic, noise-based threshold.
    """

    def __init__(self, t_ma = 3, nsigma = 6, t_up = 0.2, nr_len = 2, nr_coeff = 2, pol_len = 10, pol_coeff = 10, uncert_coeff = 3):
        """Store the picker tuning parameters.

        t_ma         : moving-average window (s) for the dynamic threshold
        nsigma       : threshold-level multiplier that triggers potential picks
        t_up         : minimum allowed spacing (s) between consecutive picks
        nr_len       : window length of the noise-ratio filter around each pick
        nr_coeff     : std/rms ratio threshold used to reject false picks
        pol_len      : window (samples) for the pre-pick standard deviation
        pol_coeff    : factor deciding 'Compression' vs 'Dilation' first motion
        uncert_coeff : floating level factor based on the noise of the CF
        (uncert_len, the rms window used for uncertainty, is tied to t_ma.)
        """
        self.t_ma = t_ma
        self.nsigma = nsigma
        self.t_up = t_up
        self.nr_len = nr_len
        self.nr_coeff = nr_coeff
        self.pol_len = pol_len
        self.pol_coeff = pol_coeff
        self.uncert_len = self.t_ma
        self.uncert_coeff = uncert_coeff

    def picks(self, tr):
        """Return (scnl, picks, polarity, snr, uncertainty) for one trace."""
        cf_summary = AICDSummary(self, tr)
        # Bound-method reference kept for parity with the original flow (unused).
        dyn_threshold = cf_summary.threshold
        scnl, pick_times, trigger, snr = cf_summary.pick_ident()
        pick_uncertainty = cf_summary.uncertainty()
        first_motion = cf_summary.polarity()
        return scnl, pick_times, first_motion, snr, pick_uncertainty
class AICDSummary():
"""
The class calculate CF, threshold level, cleans the false picks, determines uncertainty, polarity
and plot CF.
"""
    def __init__(self,picker, tr):
        """Compute the AIC-derivative CF and its derived products for one trace.

        picker : AICDPicker instance holding the tuning parameters.
        tr     : obspy Trace to pick on.
        """
        self.picker = picker
        self.tr = tr
        self.stats = self.tr.stats
        # Characteristic function: derivative of the AIC of the trace.
        self.cf = AicDeriv(self.tr)
        self.aic, self.aicd = self.cf._statistics()
        # The AIC derivative is the summary CF all later methods work on.
        self.summary = self.aicd
        # Must run before uncertainty(): pick_ident() reads self.thres.
        self.thres = self.threshold() # This method creates the threshold from our picker config
        # NOTE(review): uncertainty() and polarity() each call pick_ident() again,
        # so pick identification runs several times per trace — confirm intended.
        self.uncert = self.uncertainty()
        self.pol = self.polarity()
def threshold(self):
"""
Control the threshold level with nsigma.
"""
dt = self.stats.delta
npts_Tma = int(round(self.picker.t_ma / dt,0))
LEN = self.stats.npts
#print "npts_Tma: ",npts_Tma
threshold = np.zeros(LEN)
threshold[npts_Tma:LEN] = rms(rolling_window(self.summary[0:LEN-1],npts_Tma), -1) * self.picker.nsigma
threshold[0:npts_Tma] = 1
return threshold
    def pick_ident(self):
        """Identify picks on the CF: trigger, de-duplicate, noise-filter, refine.

        Returns (scnl, picks, trigger, SNR):
        scnl    : SCNL identifier built from the trace stats
        picks   : UTCDateTime pick times (CF rolled back to its local minimum)
        trigger : trigger times (s from trace start) that survived cleaning
        SNR     : ratio of the CF local maximum after each trigger to the CF noise
        """
        scnl = SCNL([self.stats.station,self.stats.channel,self.stats.network,self.stats.location])
        dt = self.stats.delta
        # Samples in the moving-average window of the dynamic threshold.
        npts_Tma = int(round(self.picker.t_ma/dt,0))
        LEN = self.stats.npts
        # trigger the earthquakes
        trigger_ptnl_index = np.where(self.summary[npts_Tma:LEN] > self.thres[npts_Tma:LEN])
        # Shift back to absolute sample indices (np.where was taken on a slice).
        trigger_ptnl_index = trigger_ptnl_index + np.array(npts_Tma)
        t = np.arange(0, self.stats.npts / self.stats.sampling_rate, dt)
        trigger_ptnl = t[trigger_ptnl_index][0]
        # clean close picks
        window_t_up = int(round(self.picker.t_up / dt,0))
        trigger_remove1_index = []
        for i in range(0,len(trigger_ptnl) - 1): # second from last
            if (trigger_ptnl[i+1] - trigger_ptnl[i]) <= window_t_up * dt: # avoid consecutive picking
                trigger_remove1_index.append(i+1)
        # delete close picks
        trigger_ptnl=np.delete(trigger_ptnl, trigger_remove1_index)
        # clean_filtering
        trigger_remove2_index = []
        N = self.picker.nr_coeff
        filter_length = self.picker.nr_len
        for i in range(len(trigger_ptnl)):
            # determine filter_length for each pick:
            r, R = self.winlen(i, trigger_ptnl, filter_length, t, dt)
            M = min(r, R)
            # Reject the pick when the pre-pick std (scaled by nr_coeff) is not
            # smaller than the post-pick std, i.e. no clear energy increase.
            # NOTE(review): the pre-window slices self.tr.data but the
            # post-window slices self.tr directly — confirm both index samples.
            if N * np.std(self.tr.data[int(round(trigger_ptnl[i] / dt,0)) - M:int(round(trigger_ptnl[i] / dt,0))]) >= np.std(self.tr[int(round(trigger_ptnl[i] / dt,0)):int(round(trigger_ptnl[i] / dt,0)) + M]):
                trigger_remove2_index.append(i)
        #delete fake picks
        trigger_ptnl = np.delete(trigger_ptnl, trigger_remove2_index)
        # assign potential picks to trigger
        trigger = trigger_ptnl
        # really need to be careful copy list or array, since if just copy like A=B, when any element in B changes, A will change as well
        # roll backward for picking
        picks = []
        for i in range(len(trigger)):
            index = int(round(trigger[i] / dt,0))
            # Walk the CF downhill (backwards) to the local minimum = pick onset.
            while True:
                if self.summary[index] > self.summary[index - 1]:
                    index -= 1
                else:
                    break
            picks.append(UTCDateTime(self.tr.stats.starttime + round(t[index], 3)))
        # really need to be careful copy list or array, since if just copy like A=B, when any element in B changes, A will change as well
        # roll forward for maximum signal values
        maxes = copy.deepcopy(trigger)
        for i in range(len(trigger)):
            index = int(round(trigger[i] / dt,0))
            # Walk the CF uphill (forwards) to its local maximum after the trigger.
            # NOTE(review): no bounds check — a CF still rising at the last sample
            # would index past the end of self.summary; confirm it cannot happen.
            while True:
                if self.summary[index] < self.summary[index + 1]:
                    index += 1
                else:
                    break
            maxes[i] = round(self.summary[index],3)
        # really need to be careful copy list or array, since if just copy like A=B, when any element in B changes, A will change as well
        # Signal noise ration: SNR
        SNR=copy.deepcopy(trigger)
        for i in range(len(picks)):
            index = int(round(trigger[i] / dt,0))
            # Noise level: rms of the CF over the t_ma window before the trigger.
            noise = rms(self.summary[index-npts_Tma:index])
            SNR[i] = round(maxes[i] / noise,1)
        return scnl,picks,trigger,SNR
    def uncertainty(self):
        """Estimate per-pick uncertainty (s) from the noise level of the CF.

        For each trigger, walks backwards while the CF stays above
        uncert_coeff * rms(pre-pick CF); the uncertainty is the time gap
        between that point and the declared pick time.
        """
        scnl,picks,trigger,SNR = self.pick_ident()
        dt = self.stats.delta
        # NOTE(review): window length is uncert_coeff / dt here, although the
        # picker docs tie uncert_len to t_ma — confirm which is intended.
        npts_Tma = int(round(self.picker.uncert_coeff / dt, 0))
        t = np.arange(0, self.stats.npts / self.stats.sampling_rate, dt)
        pick_uncert = copy.deepcopy(trigger)
        for i in range(len(trigger)):
            r, R=self.winlen(i, trigger, npts_Tma, t, dt)
            # index0: sample of the declared pick; index: sample of the trigger.
            index0 = int(round((picks[i] - self.tr.stats.starttime) / dt, 0))
            index = int(round(trigger[i] / dt, 0))
            # Floating noise level of the CF just before the pick.
            uncert_level = self.picker.uncert_coeff * rms(self.summary[index0 - npts_Tma:index0])
            # Walk back while the CF is above the noise level and still falling.
            while True:
                if self.summary[index] > uncert_level and self.summary[index] > self.summary[index - 1]:
                    index -= 1
                else:
                    break
            pick_uncert[i] = round(t[index] - (picks[i] - self.tr.stats.starttime), 3)
        return pick_uncert
def polarity(self):
"""
Determine polarity for declared picks.
"""
dt=self.stats.delta
t = np.arange(0, self.stats.npts/self.stats.sampling_rate, dt)
pol=[]
scnl,picks,trigger,snr=self.pick_ident()
for i in range(len(picks)):