Commit 37c53192 authored by Stelios Karozis

Debugging - Real case use

parent 87b2b8d9
......@@ -6,6 +6,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
## [0.2.0] - 2021-06-04
### Added
- None
### Changed
- Use pickle5 to save data
### Removed
- None
## [0.1.2] - 2020-10-29
### Added
- add OrderedDict() function to keep input ordered independent to Python version
......@@ -149,4 +159,4 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- None
### Removed
- None
\ No newline at end of file
- None
......@@ -3,13 +3,14 @@ from collections import OrderedDict
import pandas as pd
import tooba_f as tbf
import tooba_gmx as tbgmx
import gc
###################################################
#NOTICE: resids of the head in each subdomain may differ in the tail case;
#        keep all atoms of a group in the first subdomain where they occur.
#        For tails that subdomain is the one closest to the head, hence
#        the code is a good approximation.
###################################################
SYSTEM_NAME='Case4_20190909_5'
SYSTEM_NAME="20190322_2"
DISCET=[3.5, 3.5, 3.5]
NUM_FR=750
TRAJ=SYSTEM_NAME+'/eq_traj.trr'
......@@ -41,29 +42,41 @@ TPR=SYSTEM_NAME+'/eq_run.tpr'
#
###################################################
GROUPS=OrderedDict()
GROUPS={'ALL':['index+ALL_ndx',['save', ['pkl'],'index'],'density',['save', ['pkl'],'dens']],
'HD_GROUP':['surf',['save', ['pkl', 'json'],'surf'],'index+HD_ndx',['save', ['pkl'],'index'],'rdf',['save', ['pkl'],'rdf']],
'TL_GROUP':['vector',['save', ['pkl'],'vec']],
'ORDER_NS_SPH':['index_order+ORDER_NS_SPH_ndx',['save', ['pkl'],'index'],'order',['save',['pkl'], 'order']],
'ORDER_NS_ACYL':['index_order+ORDER_NS_ACYL_ndx',['save', ['pkl'],'index'],'order',['save',['pkl'], 'order']],
'ORDER_FFA':['index_order+ORDER_FFA_ndx',['save', ['pkl'],'index'],'order',['save',['pkl'], 'order']],
'ORDER_CHOL':['index_order+ORDER_CHOL_ndx',['save', ['pkl'],'index'],'order',['save',['pkl'], 'order']],
'COMBINE':[['HD_GROUP','surf'],['TL_GROUP','vector'],['COMB','tilt'],['save', ['pkl'],'tilt']]
}
GROUPS['ALL']=['index+ALL_ndx',['save', ['pkl'],'index'],'density',['save', ['pkl'],'dens']]
GROUPS['HD_GROUP']=['surf',['save', ['pkl'],'surf'],'index+HD_ndx',['save', ['pkl'],'index'],'rdf',['save', ['pkl'],'rdf']]
GROUPS['TL_GROUP']=['vector',['save', ['pkl'],'vec']]
GROUPS['ORDER_NS_SPH']=['index_order+ORDER_NS_SPH_ndx',['save', ['pkl'],'index'],'order',['save',['pkl'], 'order']]
GROUPS['ORDER_NS_ACYL']=['index_order+ORDER_NS_ACYL_ndx',['save', ['pkl'],'index'],'order',['save',['pkl'], 'order']]
GROUPS['ORDER_FFA']=['index_order+ORDER_FFA_ndx',['save', ['pkl'],'index'],'order',['save',['pkl'], 'order']]
GROUPS['ORDER_CHOL']=['index_order+ORDER_CHOL_ndx',['save', ['pkl'],'index'],'order',['save',['pkl'], 'order']]
GROUPS['COMBINE']=[['HD_GROUP','surf'],['TL_GROUP','vector'],['COMB','tilt'],['save', ['pkl'],'tilt']]
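###################################################
# Editor's sketch (hypothetical helper, not called anywhere): GROUPS acts
# as a small pipeline spec. Each key names a selection dict defined below,
# and its value interleaves action names ('surf', 'vector', 'density',
# 'rdf', 'order', 'index+<ndx file>', ...) with save triplets of the form
# ['save', [formats], tag]. A walker in that spirit could look like:
def _walk_group_spec(steps):
    """Yield ('save', formats, tag) or ('action', step) from a GROUPS entry."""
    for step in steps:
        if isinstance(step, list) and step and step[0] == 'save':
            _, formats, tag = step          # e.g. ['save', ['pkl'], 'dens']
            yield ('save', formats, tag)
        else:
            yield ('action', step)          # e.g. 'density' or 'index+ALL_ndx'
###################################################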
ALL=OrderedDict()
ALL={'NS':['C1', 'C2', 'C3', 'C4', 'C5','C6', 'Na', 'P4', 'P3', 'C7','C3', 'C4', 'C5', 'C8', 'C9', 'C10'], 'CHOL':['ROH','R1', 'R2', 'R3', 'R4', 'R5'], 'FFA':['AC','C1', 'C2', 'C3', 'C4']}
ALL['NS']=['C1', 'C2', 'C3', 'C4', 'C5','C6', 'Na', 'P4', 'P3', 'C7','C3', 'C4', 'C5', 'C8', 'C9', 'C10']
ALL['CHOL']=['ROH','R1', 'R2', 'R3', 'R4', 'R5']
ALL['FFA']=['AC','C1', 'C2', 'C3', 'C4']
HD_GROUP=OrderedDict()
HD_GROUP={'NS':['C6', 'Na', 'P4', 'P3', 'C7'], 'CHOL':['ROH'], 'FFA':['AC']}
HD_GROUP['NS']=['C6', 'Na', 'P4', 'P3', 'C7']
HD_GROUP['CHOL']=['ROH']
HD_GROUP['FFA']=['AC']
TL_GROUP=OrderedDict()
TL_GROUP={'NS':['C3', 'C4', 'C5', 'C8', 'C9', 'C10'], 'CHOL':['R1', 'R2', 'R3', 'R4', 'R5'], 'FFA':['C1', 'C2', 'C3', 'C4']}
TL_GROUP['NS']=['C3', 'C4', 'C5', 'C8', 'C9', 'C10']
TL_GROUP['CHOL']=['R1', 'R2', 'R3', 'R4', 'R5']
TL_GROUP['FFA']=['C1', 'C2', 'C3', 'C4']
ORDER_NS_SPH=OrderedDict()
ORDER_NS_SPH={'NS':['C1', 'C2', 'C3', 'C4', 'C5']} #probable problem with identical atom names in NS, FFA
ORDER_NS_SPH['NS']=['C1', 'C2', 'C3', 'C4', 'C5'] #probable problem with identical atom names in NS, FFA
ORDER_NS_ACYL=OrderedDict()
ORDER_NS_ACYL={'NS':['C8', 'C9', 'C10']}
ORDER_NS_ACYL['NS']=['C8', 'C9', 'C10']
ORDER_FFA=OrderedDict()
ORDER_FFA={'FFA':['C1', 'C2', 'C3', 'C4']}
ORDER_FFA['FFA']=['C1', 'C2', 'C3', 'C4']
ORDER_CHOL=OrderedDict()
ORDER_CHOL={'CHOL':['R2', 'R3', 'R4', 'R5']}
ORDER_CHOL['CHOL']=['R2', 'R3', 'R4', 'R5']
###################################################
###################################################
print(' ')
......@@ -95,12 +108,15 @@ ST_FR=MAX_FR-NUM_FR
###################################################
#--------------------------------------------------
if os.path.isfile('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_data.pkl'):
print('_data.pkl exists!')
pass
else:
#Read .trr file
data_all=tbf.fr_export(trajfile=TRAJ,num_frames=NUM_FR)
#tbf.tojson(fl=data_all, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_data')
tbf.topickle(fl=data_all, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_data')
del data_all
gc.collect()
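###################################################
# Editor's sketch (hypothetical helper, mirroring the block above): the
# script's recurring checkpoint pattern is "skip if the .pkl exists,
# otherwise compute, pickle, then free the memory immediately":
def _checkpoint(path, compute):
    """Run compute() and pickle the result under path, unless path+'.pkl' exists."""
    if os.path.isfile(path + '.pkl'):
        return
    result = compute()
    tbf.topickle(fl=result, sv_name=path)
    del result
    gc.collect()
# e.g. _checkpoint('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_data',
#                  lambda: tbf.fr_export(trajfile=TRAJ, num_frames=NUM_FR))
###################################################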
###################################################
#--------------------------------------------------
#Check whether save files exist, in order to skip the corresponding functions
......@@ -161,13 +177,17 @@ for i in GROUPS.keys():
print('++++++++++++++++++++++++')
group_ndx=tbf.atomid_data(res_num, res_type, atom_type, atom_num, group=locals()[i])
tbf.topickle(fl=group_ndx, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_ndx')
#tbf.tojson(fl=group_ndx, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_ndx')
del group_ndx
gc.collect()
#--------------------------------------------------
if os.path.isfile('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.pkl'):
pass
else:
data_all=tbf.frompickle('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_data.pkl')
#data_all=tbf.fromjson('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_data.json')
group_ndx=tbf.frompickle('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_ndx.pkl')
#group_ndx=tbf.fromjson('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_ndx.json')
#Create subdomain coordinates
box_p=tbf.domain_decomposition(data=data_all,dx=DISCET[0],dy=DISCET[1],dz=DISCET[2])
#Assign desired atoms (from above function) to subdomains
......@@ -175,23 +195,31 @@ for i in GROUPS.keys():
##result2: {step:{res:{atom_type:{(subX,subY,subZ):[atom_num]}}}}
#ToDo: keep the initial domain name and the grouped molecules fixed across all steps
_,box_res=tbf.atom2group(data_all,box_p, group_ndx)
tbf.topickle(fl=box_res, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box')
del data_all
del group_ndx
del box_p
gc.collect()
tbf.topickle(fl=box_res, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box')
#tbf.tojson(fl=box_res, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box')
del box_res
gc.collect()
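#--------------------------------------------------
# Editor's note (assumption): tbf.domain_decomposition is taken to bin the
# simulation box into cells of size DISCET[0] x DISCET[1] x DISCET[2] (in
# the trajectory's length units); the core of such a binning maps each
# coordinate to a cell index, e.g.
#   sub = (int(x // DISCET[0]), int(y // DISCET[1]), int(z // DISCET[2]))
# which yields the (subX,subY,subZ) keys seen in box_res above.
#--------------------------------------------------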
###################################################
if os.path.isfile('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_FR'+str(NUM_FR)+'.pkl'):
pass
else:
#Creates dictionary with coordinates per subdomain for each frame
data_all=tbf.frompickle('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_data.pkl')
#data_all=tbf.fromjson('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_data.json')
box_res=tbf.frompickle('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.pkl')
#box_res=tbf.fromjson('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.json')
_,coord_vector=tbf.sub_coord(box=box_res, data=data_all, res_num=res_num)
tbf.topickle(fl=coord_vector, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_FR'+str(NUM_FR))
del data_all
del box_res
gc.collect()
tbf.topickle(fl=coord_vector, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_FR'+str(NUM_FR))
#tbf.tojson(fl=coord_vector, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_FR'+str(NUM_FR))
del coord_vector
gc.collect()
###################################################
for j in GROUPS[i]:
if len(j) > 1:
......@@ -200,57 +228,73 @@ for i in GROUPS.keys():
surf={}
#Creates dictionary with c, normal per subdomain for each frame
coord_vector=tbf.frompickle('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_FR'+str(NUM_FR)+'.pkl')
#coord_vector=tbf.fromjson('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_FR'+str(NUM_FR)+'.json')
surf[i]=tbf.coord2norm2cg(coord_vector,img=False)
del coord_vector
gc.collect()
sv_data=surf[i]
elif j=='surf' and sv_index[i][j]['status']=='exist':
if j not in locals():
surf={}
surf[i]=tbf.frompickle(sv_index[i][j]['name'])
#surf[i]=tbf.fromjson(sv_index[i][j]['name'])
#--------------------------------------------------
if j=='vector' and sv_index[i][j]['status']=='not exist':
if j not in locals():
vector={}
coord_vector=tbf.frompickle('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_FR'+str(NUM_FR)+'.pkl')
#coord_vector=tbf.fromjson('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_FR'+str(NUM_FR)+'.json')
vector[i]=tbf.coord2vector(coord_vector)
del coord_vector
gc.collect()
sv_data=vector[i]
elif j=='vector' and sv_index[i][j]['status']=='exist':
if j not in locals():
vector={}
vector[i]=tbf.frompickle(sv_index[i][j]['name'])
#vector[i]=tbf.fromjson(sv_index[i][j]['name'])
#--------------------------------------------------
#ToDo: make a more generic file with ndx files and ndx for the order parameter
#For now the hash value is generic (system + domain coord), but it needs to run for every input group
if j=='index' and sv_index[i][j]['status']=='not exist':
box_res=tbf.frompickle('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.pkl')
#box_res=tbf.fromjson('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.json')
tbf.togmxndx(box_res, fld='./'+SYSTEM_NAME+'/'+ndx_fl[i][j], sv_name=SYSTEM_NAME+'_'+i)
del box_res
gc.collect()
uniq_id=tbgmx.ndx_index(SYSTEM_NAME,ndx_fl[i][j])
sv_data=uniq_id
elif j=='index' and sv_index[i][j]['status']=='exist':
box_res=tbf.frompickle('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.pkl')
#box_res=tbf.fromjson('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.json')
tbf.togmxndx(box_res, fld='./'+SYSTEM_NAME+'/'+ndx_fl[i][j], sv_name=SYSTEM_NAME+'_'+i)
del box_res
gc.collect()
uniq_id=tbf.frompickle(sv_index[i][j]['name'])
#uniq_id=tbf.fromjson(sv_index[i][j]['name'])
#--------------------------------------------------
if j=='index_order' and sv_index[i][j]['status']=='not exist':
box_res=tbf.frompickle('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.pkl')
#box_res=tbf.fromjson('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.json')
for mol, atoms in locals()[i].items():
tbgmx.order_ndx(box_res, fld='./'+SYSTEM_NAME+'/'+ndx_fl[i][j], atoms=atoms, sv_name=SYSTEM_NAME+'_'+i)
del box_res
gc.collect()
uniq_id=tbgmx.ndx_index(SYSTEM_NAME,ndx_fl[i][j])
sv_data=uniq_id
elif j=='index_order' and sv_index[i][j]['status']=='exist':
box_res=tbf.frompickle('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.pkl')
#box_res=tbf.fromjson('./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_box.json')
for mol, atoms in locals()[i].items():
tbgmx.order_ndx(box_res, fld='./'+SYSTEM_NAME+'/'+ndx_fl[i][j], atoms=atoms, sv_name=SYSTEM_NAME+'_'+i)
del box_res
gc.collect()
uniq_id=tbf.frompickle(sv_index[i][j]['name'])
#uniq_id=tbf.fromjson(sv_index[i][j]['name'])
#--------------------------------------------------
if j=='density' and sv_index[i][j]['status']=='not exist':
dens_dict={}
......@@ -277,10 +321,13 @@ for i in GROUPS.keys():
sv_data=dens_dict
mrg_data[i][j]=[dens_dict,[]]
del dens_dict
gc.collect()
elif j=='density' and sv_index[i][j]['status']=='exist':
dens_dict=tbf.frompickle(sv_index[i][j]['name'])
#dens_dict=tbf.fromjson(sv_index[i][j]['name'])
mrg_data[i][j]=[dens_dict,[]]
del dens_dict
gc.collect()
#--------------------------------------------------
if j=='rdf' and sv_index[i][j]['status']=='not exist':
rdf_dict={}
......@@ -301,17 +348,22 @@ for i in GROUPS.keys():
cnt2=cnt2+1
peaks = tbgmx.rdf_peaks(TRR=TRAJ,TPR=TPR,IND=fl,ST=time_index[ST_FR],EN=-1,fld='./'+uniq_id[iidd]['system'],arg1=cnt1,arg2=cnt2,dist_pk=20)
rdf_nm=mol1+'-'+mol2+'_rdf_'+uniq_id[iidd]['fld']
if len(peaks)==0:
peaks.append(0)
#if len(peaks)==0:
# print(peaks)
# print(type(peaks))
# peaks=np.ndarray((0))
rdf_dict[iidd][rdf_nm]=peaks
sv_data=rdf_dict
mrg_data[i][j]=[rdf_dict,[]]
del rdf_dict
gc.collect()
elif j=='rdf' and sv_index[i][j]['status']=='exist':
rdf_dict=tbf.frompickle(sv_index[i][j]['name'])
#rdf_dict=tbf.fromjson(sv_index[i][j]['name'])
mrg_data[i][j]=[rdf_dict,[]]
del rdf_dict
gc.collect()
#--------------------------------------------------
if j=='order' and sv_index[i][j]['status']=='not exist':
order_dict={}
......@@ -335,8 +387,10 @@ for i in GROUPS.keys():
sv_data=order_dict
mrg_data[i][j]=[order_dict,[]]
del order_dict
gc.collect()
elif j=='order' and sv_index[i][j]['status']=='exist':
order_dict=tbf.frompickle(sv_index[i][j]['name'])
#order_dict=tbf.fromjson(sv_index[i][j]['name'])
mrg_data[i][j]=[order_dict,[]]
del order_dict
#--------------------------------------------------
......@@ -354,6 +408,7 @@ for i in GROUPS.keys():
if k=='json':
tbf.tojson(fl=sv_data, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_'+j[2])
del sv_data
gc.collect()
###################################################
#COMBINE section
else:
......@@ -373,6 +428,7 @@ for i in GROUPS.keys():
#ToDo: check "if str(value['domain']).strip() == str(sub).strip():"
del surf_inuse
del vector_inuse
gc.collect()
#Loop over timesteps and keep avgs tilts for each step
avg={}
for step in tilt.keys():
......@@ -399,10 +455,13 @@ for i in GROUPS.keys():
sv_data=tot_avg
mrg_data[i][j[1]]=[tot_avg,['Tilt[degrees]']]
del tot_avg
gc.collect()
elif j[1]=='tilt' and sv_index[i][str(j)]['status']=='exist':
tot_avg=tbf.frompickle(sv_index[i][str(j)]['name'])
#tot_avg=tbf.fromjson(sv_index[i][str(j)]['name'])
mrg_data[i][j[1]]=[tot_avg,['Tilt[degrees]']]
del tot_avg
gc.collect()
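# Editor's note (assumption): the tilt combined above is read as the angle
# between a subdomain's surface normal n (HD_GROUP 'surf') and its tail
# vector v (TL_GROUP 'vector'), reported in degrees, i.e. something like
#   cosang = np.dot(n, v) / (np.linalg.norm(n) * np.linalg.norm(v))
#   tilt_deg = np.degrees(np.arccos(np.clip(cosang, -1.0, 1.0)))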
#--------------------------------------------------
# Save module
if len(j)==3:
......@@ -417,10 +476,12 @@ for i in GROUPS.keys():
tbf.topickle(fl=sv_data, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_'+j[2])
if k=='json':
tbf.tojson(fl=sv_data, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_'+i+'_'+j[2])
del sv_data
del sv_data
gc.collect()
###################################################
#Merge data
tbf.topickle(fl=mrg_data, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_merge')
#tbf.tojson(fl=mrg_data, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_merge')
print(' ')
print('Merging data of:')
print('==============================')
......@@ -436,6 +497,7 @@ for grp in mrg_data.keys():
data_df=df.copy()
continue
tbf.topickle(fl=data_df, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_dataset')
#tbf.tojson(fl=data_df, sv_name='./'+SYSTEM_NAME+'/'+SYSTEM_NAME+'_dataset')
print(data_df.head())
###################################################
###################################################
\ No newline at end of file
###################################################
......@@ -3,11 +3,12 @@ import numpy as np
#from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import scipy.optimize
import pickle as pkl
import pickle5 as pkl
import json
import re
import pandas as pd
from progress.bar import Bar
import joblib
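# Editor's note: pickle5 backports pickle protocol 5 (PEP 574) to
# Python < 3.8; on 3.8+ the stdlib pickle exposes the same protocol, so
# "import pickle as pkl" would behave identically there.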
from pytrr import (
read_trr_header,
......@@ -66,15 +67,47 @@ def center_gravity(a):
cg = np.sum(a)/m
return cg
class StreamFile(object):
def __init__(self, f):
self.f = f
def __getattr__(self, item):
return getattr(self.f, item)
def read(self, n):
# print("reading total_bytes=%s" % n, flush=True)
if n >= (1 << 31):
buffer = bytearray(n)
idx = 0
while idx < n:
batch_size = min(n - idx, (1 << 31) - 1)  # <2 GB chunks; note (1<<31)-1, not 1<<(31-1)
# print("reading bytes [%s,%s)..." % (idx, idx + batch_size), end="", flush=True)
buffer[idx:idx + batch_size] = self.f.read(batch_size)
# print("done.", flush=True)
idx += batch_size
return buffer
return self.f.read(n)
def write(self, buffer):
n = len(buffer)
print("writing total_bytes=%s..." % n, flush=True)
idx = 0
while idx < n:
batch_size = min(n - idx, (1 << 31) - 1)  # <2 GB chunks; note (1<<31)-1, not 1<<(31-1)
print("writing bytes [%s, %s)... " % (idx, idx + batch_size), end="", flush=True)
self.f.write(buffer[idx:idx + batch_size])
print("done.", flush=True)
idx += batch_size
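# Editor's note (assumption): StreamFile chunks reads/writes to stay under
# the ~2 GB per-call I/O limit of some platforms (notably macOS). topickle
# below does not wrap its handle yet; the intended use would presumably be:
#   with open(sv_name + '.pkl', 'wb') as handle:
#       pkl.dump(fl, StreamFile(handle), protocol=pkl.HIGHEST_PROTOCOL)
#   with open(fl_name, 'rb') as handle:
#       data = pkl.load(StreamFile(handle))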
def topickle(fl, sv_name):
print(' ')
print('Save to pickle |################################| 1/1')
with open(sv_name+'.pkl', 'wb') as handle:
pkl.dump(fl, handle, protocol=pkl.HIGHEST_PROTOCOL)
#joblib.dump(fl, handle)
def frompickle(fl):
with open(fl, 'rb') as handle:
b = pkl.load(handle)
#b = joblib.load(handle)
return b
......@@ -83,6 +116,14 @@ def tojson(fl, sv_name):
print('Save to json |################################| 1/1')
with open(sv_name+'.json', 'w') as file:
file.write(json.dumps(str(fl)))
#file.write(json.dumps(fl))
def fromjson(fl):
print(' ')
print('Load from json |################################| 1/1')
with open(fl, 'r') as file:
data = file.read()
b = json.loads(data)  # parse, rather than re-encode, the stored JSON
return b
def plot_surf(data, normal, c, save_name):
#Plot surface
......@@ -866,4 +907,4 @@ def togmxndx(box_res, fld, sv_name):
bar.finish()
def dict2pd(d, col=[]):
return pd.DataFrame.from_dict(d, orient='index', columns=col)
\ No newline at end of file
return pd.DataFrame.from_dict(d, orient='index', columns=col)
......@@ -97,13 +97,18 @@ def rdf_peaks(TRR,TPR,IND,ST,EN,fld,arg1,arg2,dist_pk):
p.communicate(cmd.encode('UTF-8'))
p.send_signal('<Ctrl>-D')
p.wait()
x,y=read_xvg(XVG=fld+'/tmp.xvg')
yhat = savgol_filter(y, 15, 4) # window size 15, polynomial order 4
peaks, _ = find_peaks(yhat, distance=dist_pk)
pathname = os.path.abspath(os.path.join(fld, 'tmp.xvg'))
os.remove(pathname)
try:
f = open(fld+'/tmp.xvg')
f.close()
# tmp.xvg exists: smooth the RDF curve and locate its peaks
x,y=read_xvg(XVG=fld+'/tmp.xvg')
yhat = savgol_filter(y, 15, 4) # window size 15, polynomial order 4
peaks, _ = find_peaks(yhat, distance=dist_pk)
pathname = os.path.abspath(os.path.join(fld, 'tmp.xvg'))
os.remove(pathname)
except IOError:
peaks=np.ndarray((0))
return peaks
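# Editor's sketch (synthetic data; same smoothing/peak recipe as above):
#   from scipy.signal import savgol_filter, find_peaks
#   import numpy as np
#   x = np.linspace(0, 4, 200)
#   y = np.exp(-(x - 1.0) ** 2) + 0.05 * np.random.rand(200)
#   yhat = savgol_filter(y, 15, 4)            # window 15, polynomial order 4
#   peaks, _ = find_peaks(yhat, distance=20)  # indices of the smoothed maxima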
......@@ -190,4 +195,4 @@ def order(TRR,TPR,IND,ST,EN,normal,fld,dist_pk=1):
pathname = os.path.abspath(os.path.join(fld, 'tmp2.xvg'))
os.remove(pathname)
return y
\ No newline at end of file
return y