import os

import tooba_f as tbf
import tooba_gmx as tbgmx

###################################################
# NOTICE: resids of head in each subdomain may differ in tail case
#         keep all atoms of group in the first occurent subdomain
#         in case of tail is the one closest to the head, hence
#         the code is a good approximation
###################################################
LOG = 0                         # 1: print warnings when cached .pkl files are reused
SYSTEM_NAME = 'test'            # folder holding the trajectory/topology files
DISCET = [6, 6, 6]              # subdomain decomposition size (dx, dy, dz)
NUM_FR = 1                      # number of trajectory frames to export
TRAJ = SYSTEM_NAME+'/eq_traj.trr'
GRO = SYSTEM_NAME+'/eq_final.gro'
TPR = SYSTEM_NAME+'/eq_run.tpr'
# residue type -> itp filename (used by the commented-out weights section below)
ITP_DIC = {'NS': 'CER_SOVOVA.itp', 'FFA': 'FFA_CG.itp', 'CHOL': 'CHOL_CG.itp'}
###################################################
# {NAME:[QUEUE OF PROCESSES]}
#
# NAME: It is user defined. A dictionary must follow with the same name.
#       The dict structure has to be: {res_type:[atom_types]}
#
# if NAME is COMBINE then it needs part or all the info from aforementioned
# groups to execute a process. You cannot use combination as first group.
#
# QUEUE OF PROCESSES: surf, vector, tilt, index, density, gmx_ndx,
#                     [save, [type], save_name]
#
# surf:    Determine surface from atoms (ex. Head of lipid)
# vector:  Determine vector that fits atoms (ex. Tail of lipid)
# tilt:    Use surf and vector result to calculate angle (if NAME is COMBINE)
# index:   Creates unique code (md5) for every subdomain to use in data
#          saving process
# density: Determine density profile of x,y,z and save peaks of directions
#          with the least number
# gmx_ndx: Saves one ndx for every subdomain
# [save, [type], save_name]: Save function and properties aka,
#          type: pkl, json, Name
###################################################
GROUPS = {
    'ALL': ['gmx_ndx', 'index', ['save', ['pkl'], 'index'],
            'density', ['save', ['pkl'], 'dens']],
    'HD_GROUP': ['surf', ['save', ['pkl', 'json'], 'time_domain_c-normal-cg']],
    'TL_GROUP': ['vector'],
    'COMBINE': [['HD_GROUP', 'surf'], ['TL_GROUP', 'vector'], ['COMB', 'tilt']],
}
# Atom selections per residue type; each dict name must match a GROUPS key.
ALL = {
    'NS': ['C6', 'Na', 'P4', 'P3', 'C7', 'C3', 'C4', 'C5', 'C8', 'C9', 'C10'],
    'CHOL': ['ROH', 'R1', 'R2', 'R3', 'R4', 'R5'],
    'FFA': ['AC', 'C1', 'C2', 'C3', 'C4'],
}
HD_GROUP = {
    'NS': ['C6', 'Na', 'P4', 'P3', 'C7'],
    'CHOL': ['ROH'],
    'FFA': ['AC'],
}
TL_GROUP = {
    'NS': ['C3', 'C4', 'C5', 'C8', 'C9', 'C10'],
    'CHOL': ['R1', 'R2', 'R3', 'R4', 'R5'],
    'FFA': ['C1', 'C2', 'C3', 'C4'],
}
###################################################
###################################################
print(' ')
print('================')
print('Starting process')
print('================')
###################################################
###################################################
# Read .gro file: per-atom residue numbers/types and atom types/numbers
_, data_num, _, res_num, res_type, atom_type, atom_num, _ = tbf.read_gro(GRO)
print(' ')
###################################################
# Read .itp files (currently disabled; kept for reference)
#weights={}
#for MOL in ITP_DIC.keys():
#    weights_tmp = tbf.read_itp(SYSTEM_NAME+'/'+ITP_DIC[MOL])
#    weights[MOL]=weights_tmp
#    print(' ')
#print(weights)
###################################################
# Reuse the cached frame data if a previous run already exported it.
if os.path.isfile('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_data.pkl'):
    if LOG == 1:
        print('WARNING: Preprocessing files exist.')
        print(' Erase data.pkl if the system is new.')
        print('--------------------------------------------')
# --- Cached-data load + per-group processing queue ------------------------
# NOTE(review): this chunk is truncated in this view (it ends mid-expression
# at "if len(tmp)"), so the code below is left byte-identical.
# NOTE(review): `if vector not in locals():` reads the name `vector` before it
# is ever assigned, raising NameError on the first pass; the surf branch uses
# the string check `if j not in locals():` — presumably this was meant to be
# `if 'vector' not in locals():`. TODO confirm and fix.
# NOTE(review): `surf[locals()[i]] = ...` uses the group dict itself (e.g.
# HD_GROUP) as a dictionary key; dicts are unhashable, so this raises
# TypeError — `surf[i]` (the group name) looks intended. The same applies to
# `vector[locals()[i]]`. TODO confirm.
# NOTE(review): `uniq_id` is assigned only in the 'index' branch but is read
# in the 'density' branch — relies on 'index' preceding 'density' in the
# GROUPS['ALL'] queue; verify ordering is guaranteed.
# NOTE(review): `if len(j) > 1:` is true for every multi-character process
# string as well as for [save, ...] lists — presumably meant to distinguish
# list entries from strings; verify intent.
data_all=tbf.frompickle('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_data.pkl') else: #Read .trr file data_all=tbf.fr_export(trajfile=TRAJ,num_frames=NUM_FR) tbf.topickle(fl=data_all, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_data') ################################################### for i in GROUPS.keys(): if i!='COMBINE': if os.path.isfile('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_ndx.pkl'): if LOG==1: print('WARNING: Preprocessing files exist.') print(' Erase ndx_HEAD.pkl if the system is new.') print('--------------------------------------------') group_ndx=tbf.frompickle('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_ndx.pkl') else: #Find atom type index in lists created above group_ndx=tbf.atomid_data(res_num, res_type, atom_type, atom_num, group=locals()[i]) tbf.topickle(fl=group_ndx, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_ndx') ################################################### if os.path.isfile('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.pkl'): if LOG==1: print('WARNING: Preprocessing files exist.') print(' Erase box.pkl if the system is new') print(' or new grid is applied !') print('--------------------------------------------') box_res=tbf.frompickle('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.pkl') else: #Create subdomains coordinates box_p=tbf.domain_decomposition(data=data_all,dx=DISCET[0],dy=DISCET[1],dz=DISCET[2]) #Assign desired atoms (from above function) to subdomains ##result1: {step:{res:{atom_type:{atom_num:(subX,subYsubZ)}}}} ##result2: {step:{res:{atom_type:{(subX,subYsubZ):[atom_num]}}}} _,box_res=tbf.atom2grid(data_all,box_p, group_ndx) tbf.topickle(fl=box_res, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box') ################################################### #Creates dictionary with coordinates per subdomain for each frame _,coord_vector=tbf.sub_coord(box=box_res, data=data_all, res_num=res_num) for j in GROUPS[i]: if len(j) > 1: if j=='surf': if j not in locals(): surf={} #Creates dictionary with c, normal per subdomain for each
frame surf[locals()[i]]=tbf.coord2norm2cg(coord_vector,img=False) sv_data=surf[locals()[i]] if j=='vector': if vector not in locals(): vector={} vector[locals()[i]]=tbf.coord2vector(coord_vector) sv_data=vector[locals()[i]] if j=='index': uniq_id=tbgmx.ndx_index(SYSTEM_NAME) sv_data=uniq_id if j=='density': dens_df={} for iidd in uniq_id.keys(): dens_df[iidd]={} fl='./'+uniq_id[iidd]['system']+'/gmx_ndx/'+uniq_id[iidd]['domain'] cnt=-1 for mol in locals()[i].keys(): cnt=cnt+1 for d in ('x','y','z'): peaks = tbgmx.density_picks(TRR=TRAJ,TPR=TPR,IND=fl,SLC=400,ST=1000,EN=-1,normal=d,fld='./'+uniq_id[iidd]['system'],arg=cnt,dist_pk=20) if d=='x': tmp=peaks else: print(len(tmp),len(peaks)) if len(tmp)