Commit 37c53192 authored by Stelios Karozis

Debugging - Real case use

parent 87b2b8d9
@@ -6,6 +6,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ## [Unreleased]
+## [0.2.0] - 2021-06-04
+### Added
+- None
+### Changed
+- Use pickle5 to save data
+### Removed
+- None
 ## [0.1.2] - 2020-10-29
 ### Added
 - add OrderedDict() function to keep input ordered independent to Python version
@@ -149,4 +159,4 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - None
 ### Removed
-- None
\ No newline at end of file
+- None
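The "Use pickle5 to save data" entry above refers to the pickle5 backport, which brings pickle protocol 5 to Python versions before 3.8. A minimal round-trip sketch of what the modules in this commit switch to; the file name and payload are illustrative only, not from the commit:

```python
# Minimal sketch, assuming the pickle5 backport is installed (pip install pickle5).
import pickle5 as pickle

data = {"frames": list(range(750))}  # hypothetical payload
with open("example_data.pkl", "wb") as handle:
    pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)  # protocol 5

with open("example_data.pkl", "rb") as handle:
    restored = pickle.load(handle)

assert restored == data
```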
@@ -3,13 +3,14 @@ from collections import OrderedDict
 import pandas as pd
 import tooba_f as tbf
 import tooba_gmx as tbgmx
+import gc
 ###################################################
 #NOTICE: resids of head in each subdomain may differ in tail case
 # keep all atoms of group in the first occurent subdomain
 # in case of tail is the one closest to the head, hence
 # the code is a good approximation
 ###################################################
-SYSTEM_NAME='Case4_20190909_5'
+SYSTEM_NAME="20190322_2"
 DISCET=[3.5, 3.5, 3.5]
 NUM_FR=750
 TRAJ=SYSTEM_NAME+'/eq_traj.trr'
@@ -41,29 +42,41 @@ TPR=SYSTEM_NAME+'/eq_run.tpr'
 #
 ###################################################
 GROUPS=OrderedDict()
-GROUPS={'ALL':['index+ALL_ndx',['save', ['pkl'],'index'],'density',['save', ['pkl'],'dens']],
-'HD_GROUP':['surf',['save', ['pkl', 'json'],'surf'],'index+HD_ndx',['save', ['pkl'],'index'],'rdf',['save', ['pkl'],'rdf']],
-'TL_GROUP':['vector',['save', ['pkl'],'vec']],
-'ORDER_NS_SPH':['index_order+ORDER_NS_SPH_ndx',['save', ['pkl'],'index'],'order',['save',['pkl'], 'order']],
-'ORDER_NS_ACYL':['index_order+ORDER_NS_ACYL_ndx',['save', ['pkl'],'index'],'order',['save',['pkl'], 'order']],
-'ORDER_FFA':['index_order+ORDER_FFA_ndx',['save', ['pkl'],'index'],'order',['save',['pkl'], 'order']],
-'ORDER_CHOL':['index_order+ORDER_CHOL_ndx',['save', ['pkl'],'index'],'order',['save',['pkl'], 'order']],
-'COMBINE':[['HD_GROUP','surf'],['TL_GROUP','vector'],['COMB','tilt'],['save', ['pkl'],'tilt']]
-}
+GROUPS['ALL']=['index+ALL_ndx',['save', ['pkl'],'index'],'density',['save', ['pkl'],'dens']]
+GROUPS['HD_GROUP']=['surf',['save', ['pkl'],'surf'],'index+HD_ndx',['save', ['pkl'],'index'],'rdf',['save', ['pkl'],'rdf']]
+GROUPS['TL_GROUP']=['vector',['save', ['pkl'],'vec']]
+GROUPS['ORDER_NS_SPH']=['index_order+ORDER_NS_SPH_ndx',['save', ['pkl'],'index'],'order',['save',['pkl'], 'order']]
+GROUPS['ORDER_NS_ACYL']=['index_order+ORDER_NS_ACYL_ndx',['save', ['pkl'],'index'],'order',['save',['pkl'], 'order']]
+GROUPS['ORDER_FFA']=['index_order+ORDER_FFA_ndx',['save', ['pkl'],'index'],'order',['save',['pkl'], 'order']]
+GROUPS['ORDER_CHOL']=['index_order+ORDER_CHOL_ndx',['save', ['pkl'],'index'],'order',['save',['pkl'], 'order']]
+GROUPS['COMBINE']=[['HD_GROUP','surf'],['TL_GROUP','vector'],['COMB','tilt'],['save', ['pkl'],'tilt']]
 ALL=OrderedDict()
-ALL={'NS':['C1', 'C2', 'C3', 'C4', 'C5','C6', 'Na', 'P4', 'P3', 'C7','C3', 'C4', 'C5', 'C8', 'C9', 'C10'], 'CHOL':['ROH','R1', 'R2', 'R3', 'R4', 'R5'], 'FFA':['AC','C1', 'C2', 'C3', 'C4']}
+ALL['NS']=['C1', 'C2', 'C3', 'C4', 'C5','C6', 'Na', 'P4', 'P3', 'C7','C3', 'C4', 'C5', 'C8', 'C9', 'C10']
+ALL['CHOL']=['ROH','R1', 'R2', 'R3', 'R4', 'R5']
+ALL['FFA']=['AC','C1', 'C2', 'C3', 'C4']
 HD_GROUP=OrderedDict()
-HD_GROUP={'NS':['C6', 'Na', 'P4', 'P3', 'C7'], 'CHOL':['ROH'], 'FFA':['AC']}
+HD_GROUP['NS']=['C6', 'Na', 'P4', 'P3', 'C7']
+HD_GROUP['CHOL']=['ROH']
+HD_GROUP['FFA']=['AC']
 TL_GROUP=OrderedDict()
-TL_GROUP={'NS':['C3', 'C4', 'C5', 'C8', 'C9', 'C10'], 'CHOL':['R1', 'R2', 'R3', 'R4', 'R5'], 'FFA':['C1', 'C2', 'C3', 'C4']}
+TL_GROUP['NS']=['C3', 'C4', 'C5', 'C8', 'C9', 'C10']
+TL_GROUP['CHOL']=['R1', 'R2', 'R3', 'R4', 'R5']
+TL_GROUP['FFA']=['C1', 'C2', 'C3', 'C4']
 ORDER_NS_SPH=OrderedDict()
-ORDER_NS_SPH={'NS':['C1', 'C2', 'C3', 'C4', 'C5']} #propable problem with the same atomname of NS, FFA
+ORDER_NS_SPH['NS']=['C1', 'C2', 'C3', 'C4', 'C5'] #propable problem with the same atomname of NS, FFA
 ORDER_NS_ACYL=OrderedDict()
-ORDER_NS_ACYL={'NS':['C8', 'C9', 'C10']}
+ORDER_NS_ACYL['NS']=['C8', 'C9', 'C10']
 ORDER_FFA=OrderedDict()
-ORDER_FFA={'FFA':['C1', 'C2', 'C3', 'C4']}
+ORDER_FFA['FFA']=['C1', 'C2', 'C3', 'C4']
 ORDER_CHOL=OrderedDict()
-ORDER_CHOL={'CHOL':['R2', 'R3', 'R4', 'R5']}
+ORDER_CHOL['CHOL']=['R2', 'R3', 'R4', 'R5']
 ###################################################
 ###################################################
 print(' ')
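As a reading aid for the GROUPS table rewritten above: apart from 'COMBINE', each value is a flat list alternating an analysis keyword ('surf', 'vector', 'density', 'rdf', 'order', 'index+<ndx folder>', ...) with a ['save', [formats], name-suffix] directive. A hedged sketch of that pairing; walk_tasks is a hypothetical helper, not part of the script:

```python
from collections import OrderedDict

def walk_tasks(task_list):
    # Hypothetical reading aid: pair each analysis keyword with the
    # ['save', [formats], suffix] directive that follows it.
    it = iter(task_list)
    for task in it:
        _, formats, suffix = next(it)
        yield task, formats, suffix

GROUPS = OrderedDict()
GROUPS['TL_GROUP'] = ['vector', ['save', ['pkl'], 'vec']]
for task, formats, suffix in walk_tasks(GROUPS['TL_GROUP']):
    print(task, formats, suffix)  # -> vector ['pkl'] vec
```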
@@ -95,12 +108,15 @@ ST_FR=MAX_FR-NUM_FR
 ###################################################
 #--------------------------------------------------
 if os.path.isfile('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_data.pkl'):
+print('_data.pkl exist !')
 pass
 else:
 #Read .trr file
 data_all=tbf.fr_export(trajfile=TRAJ,num_frames=NUM_FR)
+#tbf.tojson(fl=data_all, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_data')
 tbf.topickle(fl=data_all, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_data')
 del data_all
+gc.collect()
 ###################################################
 #--------------------------------------------------
 #Check save files if exist in order to skip functions
@@ -161,13 +177,17 @@ for i in GROUPS.keys():
 print('++++++++++++++++++++++++')
 group_ndx=tbf.atomid_data(res_num, res_type, atom_type, atom_num, group=locals()[i])
 tbf.topickle(fl=group_ndx, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_ndx')
+#tbf.tojson(fl=group_ndx, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_ndx')
 del group_ndx
+gc.collect()
 #--------------------------------------------------
 if os.path.isfile('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.pkl'):
 pass
 else:
 data_all=tbf.frompickle('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_data.pkl')
+#data_all=tbf.fromjson('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_data.json')
 group_ndx=tbf.frompickle('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_ndx.pkl')
+#group_ndx=tbf.fromjson('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_ndx.json')
 #Create subdomains coordinates
 box_p=tbf.domain_decomposition(data=data_all,dx=DISCET[0],dy=DISCET[1],dz=DISCET[2])
 #Assign desired atoms (from above function) to subdomains
@@ -175,23 +195,31 @@ for i in GROUPS.keys():
 ##result2: {step:{res:{atom_type:{(subX,subYsubZ):[atom_num]}}}}
 #todo keep fixed the initial domain name and the molecules that are grouped for all the steps
 _,box_res=tbf.atom2group(data_all,box_p, group_ndx)
-tbf.topickle(fl=box_res, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box')
 del data_all
 del group_ndx
 del box_p
+gc.collect()
+tbf.topickle(fl=box_res, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box')
+#tbf.tojson(fl=box_res, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box')
 del box_res
+gc.collect()
 ###################################################
 if os.path.isfile('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_FR'+str(NUM_FR)+'.pkl'):
 pass
 else:
 #Creates dictionary with coordinates per subdomain for each frame
 data_all=tbf.frompickle('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_data.pkl')
+#data_all=tbf.fromjson('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_data.json')
 box_res=tbf.frompickle('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.pkl')
+#box_res=tbf.fromjson('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.json')
 _,coord_vector=tbf.sub_coord(box=box_res, data=data_all, res_num=res_num)
-tbf.topickle(fl=coord_vector, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_FR'+str(NUM_FR))
 del data_all
 del box_res
+gc.collect()
+tbf.topickle(fl=coord_vector, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_FR'+str(NUM_FR))
+#tbf.tojson(fl=coord_vector, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_FR'+str(NUM_FR))
 del coord_vector
+gc.collect()
 ###################################################
 for j in GROUPS[i]:
 if len(j) > 1:
@@ -200,57 +228,73 @@ for i in GROUPS.keys():
 surf={}
 #Creates dictionary with c, normal per subdomain for each frame
 coord_vector=tbf.frompickle('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_FR'+str(NUM_FR)+'.pkl')
+#coord_vector=tbf.fromjson('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_FR'+str(NUM_FR)+'.json')
 surf[i]=tbf.coord2norm2cg(coord_vector,img=False)
 del coord_vector
+gc.collect()
 sv_data=surf[i]
 elif j=='surf' and sv_index[i][j]['status']=='exist':
 if j not in locals():
 surf={}
 surf[i]=tbf.frompickle(sv_index[i][j]['name'])
+#surf[i]=tbf.fromjson(sv_index[i][j]['name'])
 #--------------------------------------------------
 if j=='vector' and sv_index[i][j]['status']=='not exist':
 if j not in locals():
 vector={}
 coord_vector=tbf.frompickle('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_FR'+str(NUM_FR)+'.pkl')
+#coord_vector=tbf.fromjson('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_FR'+str(NUM_FR)+'.json')
 vector[i]=tbf.coord2vector(coord_vector)
 del coord_vector
+gc.collect()
 sv_data=vector[i]
 elif j=='vector' and sv_index[i][j]['status']=='exist':
 if j not in locals():
 vector={}
 vector[i]=tbf.frompickle(sv_index[i][j]['name'])
+#vector[i]=tbf.fromjson(sv_index[i][j]['name'])
 #--------------------------------------------------
 #ToDo: make more generic file with ndx files and ndx for order parameter
 #As for now the hash value is generic (system+domain coord), but needs to run for every input group
 if j=='index' and sv_index[i][j]['status']=='not exist':
 box_res=tbf.frompickle('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.pkl')
+#box_res=tbf.fromjson('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.json')
 tbf.togmxndx(box_res, fld='./'+SYSTEM_NAME+'/'+ndx_fl[i][j], sv_name=SYSTEM_NAME+'_'+i)
 del box_res
+gc.collect()
 uniq_id=tbgmx.ndx_index(SYSTEM_NAME,ndx_fl[i][j])
 sv_data=uniq_id
 elif j=='index' and sv_index[i][j]['status']=='exist':
 box_res=tbf.frompickle('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.pkl')
+#box_res=tbf.fromjson('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.json')
 tbf.togmxndx(box_res, fld='./'+SYSTEM_NAME+'/'+ndx_fl[i][j], sv_name=SYSTEM_NAME+'_'+i)
 del box_res
+gc.collect()
 uniq_id=tbf.frompickle(sv_index[i][j]['name'])
+#uniq_id=tbf.fromjson(sv_index[i][j]['name'])
 #--------------------------------------------------
 if j=='index_order' and sv_index[i][j]['status']=='not exist':
 box_res=tbf.frompickle('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.pkl')
+#box_res=tbf.fromjson('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.json')
 for mol, atoms in locals()[i].items():
 tbgmx.order_ndx(box_res, fld='./'+SYSTEM_NAME+'/'+ndx_fl[i][j], atoms=atoms, sv_name=SYSTEM_NAME+'_'+i)
 del box_res
+gc.collect()
 uniq_id=tbgmx.ndx_index(SYSTEM_NAME,ndx_fl[i][j])
 sv_data=uniq_id
 elif j=='index_order' and sv_index[i][j]['status']=='exist':
 box_res=tbf.frompickle('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.pkl')
+#box_res=tbf.fromjson('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.json')
 for mol, atoms in locals()[i].items():
 tbgmx.order_ndx(box_res, fld='./'+SYSTEM_NAME+'/'+ndx_fl[i][j], atoms=atoms, sv_name=SYSTEM_NAME+'_'+i)
 del box_res
+gc.collect()
 uniq_id=tbf.frompickle(sv_index[i][j]['name'])
+#uniq_id=tbf.fromjson(sv_index[i][j]['name'])
 #--------------------------------------------------
 if j=='density' and sv_index[i][j]['status']=='not exist':
 dens_dict={}
@@ -277,10 +321,13 @@ for i in GROUPS.keys():
 sv_data=dens_dict
 mrg_data[i][j]=[dens_dict,[]]
 del dens_dict
+gc.collect()
 elif j=='density' and sv_index[i][j]['status']=='exist':
 dens_dict=tbf.frompickle(sv_index[i][j]['name'])
+#dens_dict=tbf.fromjson(sv_index[i][j]['name'])
 mrg_data[i][j]=[dens_dict,[]]
 del dens_dict
+gc.collect()
 #--------------------------------------------------
 if j=='rdf' and sv_index[i][j]['status']=='not exist':
 rdf_dict={}
@@ -301,17 +348,22 @@ for i in GROUPS.keys():
 cnt2=cnt2+1
 peaks = tbgmx.rdf_peaks(TRR=TRAJ,TPR=TPR,IND=fl,ST=time_index[ST_FR],EN=-1,fld='./'+uniq_id[iidd]['system'],arg1=cnt1,arg2=cnt2,dist_pk=20)
 rdf_nm=mol1+'-'+mol2+'_rdf_'+uniq_id[iidd]['fld']
-if len(peaks)==0:
-peaks.append(0)
+#if len(peaks)==0:
+# print(peaks)
+# print(type(peaks))
+# peaks=np.ndarray((0))
 rdf_dict[iidd][rdf_nm]=peaks
 sv_data=rdf_dict
 mrg_data[i][j]=[rdf_dict,[]]
 del rdf_dict
+gc.collect()
 elif j=='rdf' and sv_index[i][j]['status']=='exist':
 rdf_dict=tbf.frompickle(sv_index[i][j]['name'])
+#rdf_dict=tbf.fromjson(sv_index[i][j]['name'])
 mrg_data[i][j]=[rdf_dict,[]]
 del rdf_dict
+gc.collect()
 #--------------------------------------------------
 if j=='order' and sv_index[i][j]['status']=='not exist':
 order_dict={}
@@ -335,8 +387,10 @@ for i in GROUPS.keys():
 sv_data=order_dict
 mrg_data[i][j]=[order_dict,[]]
 del order_dict
+gc.collect()
 elif j=='order' and sv_index[i][j]['status']=='exist':
 order_dict=tbf.frompickle(sv_index[i][j]['name'])
+#order_dict=tbf.fromjson(sv_index[i][j]['name'])
 mrg_data[i][j]=[order_dict,[]]
 del order_dict
 #--------------------------------------------------
@@ -354,6 +408,7 @@ for i in GROUPS.keys():
 if k=='json':
 tbf.tojson(fl=sv_data, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_'+j[2])
 del sv_data
+gc.collect()
 ###################################################
 #COMBINE section
 else:
@@ -373,6 +428,7 @@ for i in GROUPS.keys():
 #ToDo: check "if str(value['domain']).strip() == str(sub).strip():"
 del surf_inuse
 del vector_inuse
+gc.collect()
 #Loop over timesteps and keep avgs tilts for each step
 avg={}
 for step in tilt.keys():
@@ -399,10 +455,13 @@ for i in GROUPS.keys():
 sv_data=tot_avg
 mrg_data[i][j[1]]=[tot_avg,['Tilt[degrees]']]
 del tot_avg
+gc.collect()
 elif j[1]=='tilt' and sv_index[i][str(j)]['status']=='exist':
 tot_avg=tbf.frompickle(sv_index[i][str(j)]['name'])
+#tot_avg=tbf.fromjson(sv_index[i][str(j)]['name'])
 mrg_data[i][j[1]]=[tot_avg,['Tilt[degrees]']]
 del tot_avg
+gc.collect()
 #--------------------------------------------------
 # Save module
 if len(j)==3:
@@ -417,10 +476,12 @@ for i in GROUPS.keys():
 tbf.topickle(fl=sv_data, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_'+j[2])
 if k=='json':
 tbf.tojson(fl=sv_data, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_'+j[2])
 del sv_data
+gc.collect()
 ###################################################
 #Merge data
 tbf.topickle(fl=mrg_data, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_merge')
+#tbf.tojson(fl=mrg_data, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_merge')
 print(' ')
 print('Merging data of:')
 print('==============================')
@@ -436,6 +497,7 @@ for grp in mrg_data.keys():
 data_df=df.copy()
 continue
 tbf.topickle(fl=data_df, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_dataset')
+#tbf.tojson(fl=data_df, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_dataset')
 print(data_df.head())
 ###################################################
 ###################################################
\ No newline at end of file
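Nearly every script change above is the same cache-then-free pattern: skip a stage when its pickle already exists, otherwise compute, pickle, del the reference, and call gc.collect() so memory is reclaimed before the next stage starts. A minimal sketch of that pattern, assuming tbf.topickle as defined in the helper module below; run_stage and build are hypothetical names, not part of the script:

```python
import gc
import os
import tooba_f as tbf  # as imported at the top of the script

def run_stage(sv_name, build):
    # Hypothetical distillation of the commit's caching pattern.
    if os.path.isfile(sv_name + '.pkl'):
        return  # stage already done; skip recomputation
    result = build()                 # expensive per-frame analysis
    tbf.topickle(fl=result, sv_name=sv_name)
    del result                       # drop the only live reference...
    gc.collect()                     # ...and reclaim memory before the next stage
```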
@@ -3,11 +3,12 @@ import numpy as np
 #from mpl_toolkits.mplot3d import Axes3D
 import matplotlib.pyplot as plt
 import scipy.optimize
-import pickle as pkl
+import pickle5 as pkl
 import json
 import re
 import pandas as pd
 from progress.bar import Bar
+import joblib
 from pytrr import (
 read_trr_header,
@@ -66,15 +67,47 @@ def center_gravity(a):
 cg = np.sum(a)/m
 return cg
+
+class StreamFile(object):
+    def __init__(self, f):
+        self.f = f
+
+    def __getattr__(self, item):
+        return getattr(self.f, item)
+
+    def read(self, n):
+        # print("reading total_bytes=%s" % n, flush=True)
+        if n >= (1 << 31):
+            buffer = bytearray(n)
+            idx = 0
+            while idx < n:
+                batch_size = min(n - idx, 1 << 31 - 1)
+                # print("reading bytes [%s,%s)..." % (idx, idx + batch_size), end="", flush=True)
+                buffer[idx:idx + batch_size] = self.f.read(batch_size)
+                # print("done.", flush=True)
+                idx += batch_size
+            return buffer
+        return self.f.read(n)
+
+    def write(self, buffer):
+        n = len(buffer)
+        print("writing total_bytes=%s..." % n, flush=True)
+        idx = 0
+        while idx < n:
+            batch_size = min(n - idx, 1 << 31 - 1)
+            print("writing bytes [%s, %s)... " % (idx, idx + batch_size), end="", flush=True)
+            self.f.write(buffer[idx:idx + batch_size])
+            print("done.", flush=True)
+            idx += batch_size
+
 def topickle(fl, sv_name):
 print(' ')
 print('Save to pickle |################################| 1/1')
 with open(sv_name+'.pkl', 'wb') as handle:
 pkl.dump(fl, handle, protocol=pkl.HIGHEST_PROTOCOL)
+#joblib.dump(fl, handle)
 def frompickle(fl):
 with open(fl, 'rb') as handle:
 b = pkl.load(handle)
+#b = joblib.load(handle)
 return b
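The new StreamFile wrapper batches read() and write() into sub-2 GiB chunks, presumably to dodge the platform limitation (notably on macOS) where a single I/O call of 2 GiB or more raises OSError. Note that `1 << 31 - 1` parses as `1 << 30` because `-` binds tighter than `<<`, so batches are actually 1 GiB, still safely under the limit. A hedged usage sketch; the wrapper is not yet wired into topickle/frompickle above, and the file names are illustrative:

```python
import pickle5 as pkl

# Illustrative only: route pickle I/O through StreamFile so multi-GiB
# payloads are moved in batches instead of one oversized read/write.
with open('huge_data.pkl', 'rb') as handle:
    data = pkl.load(StreamFile(handle))

with open('huge_copy.pkl', 'wb') as handle:
    pkl.dump(data, StreamFile(handle), protocol=pkl.HIGHEST_PROTOCOL)
```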
@@ -83,6 +116,14 @@ def tojson(fl, sv_name):
 print('Save to json |################################| 1/1')
 with open(sv_name+'.json', 'w') as file:
 file.write(json.dumps(str(fl)))
+#file.write(json.dumps(fl))
+
+def fromjson(fl):
+    print(' ')
+    print('Load to json |################################| 1/1')
+    with open(fl, 'r') as file:
+        data = file.read()
+    b = json.dumps(data)
+    return b
 def plot_surf(data, normal, c, save_name):
 #Plot surface
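As committed, tojson writes json.dumps(str(fl)) and fromjson re-encodes the file's text with json.dumps instead of parsing it, so the pair stores a stringified object and does not round-trip; that is presumably why every tojson/fromjson call site in the script stays commented out. A hedged sketch of a symmetric pair (the *_rt names are hypothetical; it assumes fl is JSON-serializable, which dicts keyed by tuples in this pipeline are not):

```python
import json

def tojson_rt(fl, sv_name):
    # Hypothetical round-trip-safe variant: serialize the object itself,
    # not its str() representation.
    with open(sv_name + '.json', 'w') as file:
        json.dump(fl, file)

def fromjson_rt(fl):
    # Parse the stored JSON back into Python objects (json.load, not dumps).
    with open(fl, 'r') as file:
        return json.load(file)
```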
@@ -866,4 +907,4 @@ def togmxndx(box_res, fld, sv_name):
 bar.finish()
 def dict2pd(d, col=[]):
-return pd.DataFrame.from_dict(d, orient='index', columns=col)
\ No newline at end of file
+return pd.DataFrame.from_dict(d, orient='index', columns=col)
@@ -97,13 +97,18 @@ def rdf_peaks(TRR,TPR,IND,ST,EN,fld,arg1,arg2,dist_pk):
 p.communicate(cmd.encode('UTF-8'))
 p.send_signal('<Ctrl>-D')
 p.wait()
-x,y=read_xvg(XVG=fld+'/tmp.xvg')
-yhat = savgol_filter(y, 15, 4) # window size 15, polynomial order 4
-peaks, _ = find_peaks(yhat, distance=dist_pk)
-pathname = os.path.abspath(os.path.join(fld, 'tmp.xvg'))
-os.remove(pathname)
+try:
+    f = open(fld+'/tmp.xvg')
+    f.close()
+    # Do something with the file
+    x,y=read_xvg(XVG=fld+'/tmp.xvg')
+    yhat = savgol_filter(y, 15, 4) # window size 15, polynomial order 4
+    peaks, _ = find_peaks(yhat, distance=dist_pk)
+    pathname = os.path.abspath(os.path.join(fld, 'tmp.xvg'))
+    os.remove(pathname)
+except IOError:
+    peaks=np.ndarray((0))
 return peaks
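With this change rdf_peaks returns an empty array, np.ndarray((0)), when tmp.xvg is missing, where the old code appended a sentinel 0, so an empty result is now a real signal rather than a fake peak. A caller-side sketch of the new contract; the variable names are illustrative:

```python
import numpy as np

# Illustrative guard for the new empty-array contract of rdf_peaks.
peaks = np.ndarray((0))   # what rdf_peaks returns when tmp.xvg is absent
if peaks.size == 0:
    first_peak = None     # no RDF peak detected for this subdomain/pair
else:
    first_peak = peaks[0] # index of the first detected peak
```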
@@ -190,4 +195,4 @@ def order(TRR,TPR,IND,ST,EN,normal,fld,dist_pk=1):
 pathname = os.path.abspath(os.path.join(fld, 'tmp2.xvg'))
 os.remove(pathname)
-return y
\ No newline at end of file
+return y