import os
import numpy as np
#from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import scipy.optimize
import pickle as pkl
import json
import re
import pandas as pd
from progress.bar import Bar
from pytrr import (
    read_trr_header,
    read_trr_data,
    skip_trr_data,
    )

def search_pattern(fl, pattern):
    textfile = open(fl, 'r')
    filetext = textfile.read()
    textfile.close()
    matches = re.findall(pattern, filetext)
    if len(matches) == 0:
        ind = 'not exist'
    else:
        ind = 'exist'
    return ind

def fitPlaneLTSQ(XYZ):
    #Source: https://gist.github.com/RustingSword/e22a11e1d391f2ab1f2c
    (rows, cols) = XYZ.shape
    G = np.ones((rows, 3))
    G[:, 0] = XYZ[:, 0]  #X
    G[:, 1] = XYZ[:, 1]  #Y
    Z = XYZ[:, 2]
    (a, b, c), resid, rank, s = np.linalg.lstsq(G, Z, rcond=None)
    normal = (a, b, -1)
    nn = np.linalg.norm(normal)
    normal = normal / nn
    return (c, normal)

def angle_between2D(p1, p2):
    #arctan2 is anticlockwise
    ang1 = np.arctan2(*p1[::-1])
    ang2 = np.arctan2(*p2[::-1])
    angle = np.rad2deg((ang1 - ang2) % (2 * np.pi))  #degrees
    if angle < 180:
        return angle
    else:
        return 360 - angle

def angle_between3D(v1, v2):
    # v1 is your first vector
    # v2 is your second vector
    angle = np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))  #rad
    angle = 180 * angle / np.pi  #degrees
    if angle < 180:
        return angle
    else:
        return 360 - angle

def center_gravity(a):
    #Todo: particles of different mass -> weight by mass?
    m = len(a)
    cg = np.sum(a) / m
    return cg

def topickle(fl, sv_name):
    print(' ')
    print('Save to pickle |################################| 1/1')
    with open(sv_name + '.pkl', 'wb') as handle:
        pkl.dump(fl, handle, protocol=pkl.HIGHEST_PROTOCOL)

def frompickle(fl):
    with open(fl, 'rb') as handle:
        b = pkl.load(handle)
    return b

def tojson(fl, sv_name):
    print(' ')
    print('Save to json |################################| 1/1')
    with open(sv_name + '.json', 'w') as file:
        file.write(json.dumps(str(fl)))

def plot_surf(data, normal, c, save_name):
    #Plot surface
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')  #fig.gca(projection='3d') was removed in newer Matplotlib
    # plot fitted plane
    maxx = np.max(data[:, 0])
    maxy = np.max(data[:, 1])
    minx = np.min(data[:, 0])
    miny = np.min(data[:, 1])
    point = np.array([0.0, 0.0, c])
    d = -point.dot(normal)
    # plot original points
    ax.scatter(data[:, 0], data[:, 1], data[:, 2])
    # compute needed points for plane plotting
    xx, yy = np.meshgrid([minx, maxx], [miny, maxy])
    z = (-normal[0]*xx - normal[1]*yy - d) * 1. / normal[2]
    # plot plane
    ax.plot_surface(xx, yy, z, alpha=0.2)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    #plt.savefig(save_name+'.png')
    plt.show()
    plt.clf()
    plt.cla()
    plt.close()
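# Illustrative sketch (not part of the original pipeline): shows what fitPlaneLTSQ and
# angle_between3D return for a small synthetic, near-planar point cloud. All names in
# this helper are local examples, not module API.
def _example_plane_fit():
    rng = np.random.default_rng(1)
    pts = rng.random((20, 3))
    # make z roughly a linear function of x and y, plus a little noise
    pts[:, 2] = 0.5*pts[:, 0] + 0.1*pts[:, 1] + 0.05*rng.random(20)
    c, normal = fitPlaneLTSQ(pts)                      # plane z = a*x + b*y + c, unit normal ~ (a, b, -1)
    tilt = angle_between3D(normal, (0.0, 0.0, 1.0))    # angle of the fitted plane normal vs the z-axis, degrees
    return c, normal, tilt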
def points2vector(data):
    """
    Defines the line whose direction vector is the eigenvector of the covariance matrix
    corresponding to the largest eigenvalue, that passes through the mean of the data.

    parameters:     data = array[[x y z]]
    output:         vector = [X,Y,Z]
    """
    # Calculate the mean of the points, i.e. the 'center' of the cloud
    datamean = data.mean(axis=0)
    # Do an SVD on the mean-centered data.
    uu, dd, vv = np.linalg.svd(data - datamean)
    # Now vv[0] contains the first principal component, i.e. the direction
    # vector of the 'best fit' line in the least squares sense.
    return vv[0]

def count_frames(trajfile='traj.trr', pr_time=False):
    """
    Count total frames of .trr file

    parameters:     trajfile = [.trr]
    """
    time_index = {}
    cnt_fr = 0
    with open(trajfile, 'rb') as inputfile:
        for i in range(1000000):
            try:
                header = read_trr_header(inputfile)
                skip_trr_data(inputfile, header)
                if pr_time == True:
                    #print('Step: {step}, time: {time}'.format(**header))
                    time_index[cnt_fr] = '{time}'.format(**header)
                cnt_fr = cnt_fr + 1
            except EOFError:
                break  #reached end of file
    if pr_time == True:
        return cnt_fr, time_index
    else:
        return cnt_fr

def fr_export(trajfile='traj.trr', num_frames=1):
    """
    Export frames from gromacs .trr file.

    parameters:     trajfile = [.trr]
                    num_frames = [number of frames to keep, counting from the end of file]
    """
    print(' ')
    print('Read gmx files progress:')
    print('==============================')
    cnt_fr = count_frames(trajfile)
    data_all = {}
    with open(trajfile, 'rb') as inputfile:
        bar = Bar('Skipping frames from .trr', max=cnt_fr - num_frames)
        for i in range(cnt_fr - num_frames):
            header = read_trr_header(inputfile)
            #print('Step: {step}, time: {time}'.format(**header))
            skip_trr_data(inputfile, header)
            bar.next()
        bar.finish()
        bar = Bar('Read frames from .trr', max=num_frames)
        for i in range(cnt_fr - num_frames, cnt_fr):
            header = read_trr_header(inputfile)
            #print('Step: {step}, time: {time}'.format(**header))
            data = read_trr_data(inputfile, header)
            step = '{step}'.format(**header)
            data_all[step] = data
            bar.next()
        bar.finish()
    return data_all

def read_gro(gro):
    """
    Read .gro file and export
    1. system name
    2. data number
    3. box size
    4. residue number
    5. residue type
    6. atom type
    7. atom number
    8. free format data (x,y,z,v,u,w)

    parameters:     gro = [.gro]
    """
    cnt = 0
    data_num = 0
    res_num = []
    res_type = []
    atom_type = []
    atom_num = []
    rest_dt = []
    cnt_atoms = 0
    num_lines = sum(1 for line in open(gro))
    with open(gro, 'r') as F:
        bar = Bar('Read .gro', max=num_lines)
        for line in F:
            cnt = cnt + 1
            #print(cnt)
            if cnt > 2 and cnt <= data_num + 2:
                res_num.append(line[:5])
                res_type.append(line[5:10])
                atom_type.append(line[10:15])
                #Compensate for large .gro files
                #ToDo check if data index is the same in the function that follows
                #atom_num.append(line[15:20])
                cnt_atoms = cnt_atoms + 1
                atom_num.append(str(cnt_atoms))
                rest_dt.append(line[20:])
            elif cnt == 1:
                system = line[:10]
            elif cnt == 2:
                data_num = int(line[:7])
            elif cnt > data_num + 2:
                box_size = line[:50]
            bar.next()
        bar.finish()
    #print(system,data_num,box_size)
    return system, data_num, box_size, res_num, res_type, atom_type, atom_num, rest_dt
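# Usage sketch, assuming GROMACS outputs exist in the working directory. The file names
# 'traj.trr' and 'confout.gro' are placeholders, not fixed by this module.
def _example_read_inputs(num_frames=1):
    data = fr_export(trajfile='traj.trr', num_frames=num_frames)  # keep only the last frames
    system, data_num, box_size, res_num, res_type, atom_type, atom_num, rest_dt = read_gro('confout.gro')
    return data, res_num, res_type, atom_type, atom_num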
def read_itp(itp):
    """
    Read mass and atomtype from .itp file.

    parameters:     itp = [.itp]
    output:         dict {atomtype{mass}}

    ATTENTION: the .itp format is free, but the current function has some restrictions
    1. section headers don't contain spaces (ex. [atoms])
    2. the sections have to be separated by an empty line
    """
    ll = 0
    #mass=[]
    #atomtype=[]
    weights = {}
    num_lines = sum(1 for line in open(itp))
    with open(itp, 'r') as F:
        bar = Bar('Read .itp', max=num_lines)
        for line in F:
            if line.strip() == '[atoms]':
                ll = 1
                #print(line.strip())
                continue
            elif line.strip() == '[atomtypes]':
                ll = 2
                #print(line.strip())
                continue
            elif len(line.strip()) == 0:
                #print(len(line.strip()))
                ll = 0
                continue
            elif line.strip()[0] == ';':
                continue
            if ll == 1:
                #molecule structure itp
                word = line.split()
                try:
                    if float(word[7]) == 0.0:
                        print('WARNING: Atomtype mass of ' + word[4] + ' is 0.0')
                except (IndexError, ValueError):
                    print('Skip line: ' + line.strip())
                    continue
                #print(word[4],word[7])
                weights[word[4]] = word[7]
                #mass.append(word[1])
                #atomtype.append(word[0])
            if ll == 2:
                #molecule forcefield itp
                word = line.split()
                try:
                    if float(word[1]) == 0.0:
                        print('WARNING: Atomtype mass of ' + word[0] + ' is 0.0')
                except (IndexError, ValueError):
                    print('Skip line: ' + line.strip())
                    continue
                weights[word[0]] = word[1]
            bar.next()
        bar.finish()
    return weights

def domain_decomposition(data, dx, dy, dz):
    """
    Identify the subdomains for each frame of the .trr file

    parameters:     data = {dictionary input from 'def fr_export'}
                    dx = desired size X of subdomain {units of input}
                    dy = desired size Y of subdomain {units of input}
                    dz = desired size Z of subdomain {units of input}
    output:         dictionary of x,y,z grid points per step (frame)
    """
    box_p = {}
    print(' ')
    print('Domain decomposition progress:')
    print('==============================')
    for step in data.keys():
        print('Step: ' + step)
        box_p[step] = {}
        xs = int(round(data[step]['box'][0][0] + 0.1))  #to round always to the bigger number
        ys = int(round(data[step]['box'][1][1] + 0.1))
        zs = int(round(data[step]['box'][2][2] + 0.1))
        box_x = int(xs / dx)
        box_y = int(ys / dy)
        box_z = int(zs / dz)
        xx = []
        bar = Bar('X-axis', max=box_x)
        #for i in range(0,xs+1,dx):
        for i in range(0, box_x):
            xx.append(i)
            bar.next()
        box_p[step]['x'] = xx
        bar.finish()
        yy = []
        bar = Bar('Y-axis', max=box_y)
        #for i in range(0,ys+1,dy):
        for i in range(0, box_y):
            yy.append(i)
            bar.next()
        box_p[step]['y'] = yy
        bar.finish()
        zz = []
        bar = Bar('Z-axis', max=box_z)
        #for i in range(0,zs+1,dz):
        for i in range(0, box_z):
            zz.append(i)
            bar.next()
        box_p[step]['z'] = zz
        bar.finish()
        xyz = []
        bar = Bar('XYZ-axis', max=box_x * box_y * box_z)
        #for ix in range(0,xs+1,dx):
        for ix in range(0, box_x):
            #for iy in range(0,ys+1,dy):
            for iy in range(0, box_y):
                #for iz in range(0,zs+1,dz):
                for iz in range(0, box_z):
                    xyz.append([ix, iy, iz])
                    bar.next()
        bar.finish()
        box_p[step]['xyz'] = xyz
    return box_p

def atomid_data(res_num, res_type, atom_type, atom_num, group={}):
    """
    Find the indexes in the lists returned by 'def read_gro' that correspond to the
    atom types in the group dictionary

    parameters:     res_num=[]
                    res_type=[]
                    atom_type=[]
                    atom_num=[]
                    group={}
    output:         dictionary {resid:{res_type:{atom_type:[atom_num]}}}
    """
    print(' ')
    print('Create index progress:')
    print('======================')
    #bar = Bar('Create index step', max=len(group.keys()))
    res_ndx = {}
    for res, atom in group.items():
        res_ndx[res] = []
        for element in atom:
            tmp = [res_num[i].strip() for i, e in enumerate(atom_type) if e.strip() == element.strip() and res.strip() == res_type[i].strip()]
            res_ndx[res].append(tmp)
        #bar.next()
    #bar.finish()
    res_ndx_flat = {}
    for res in res_ndx.keys():
        res_ndx_flat[res] = [y for x in res_ndx[res] for y in x]
    ndx = {}
    for rs in res_ndx_flat.keys():
        bar = Bar(rs, max=len(res_ndx_flat[rs]))
        for resid in res_ndx_flat[rs]:
            ndx[resid.strip()] = {}
            for res, atom in group.items():
                ndx[resid][res] = {}
                for element in atom:
                    tmp = [atom_num[i].strip() for i, e in enumerate(atom_type) if e.strip() == element.strip() and resid.strip() == res_num[i].strip() and res.strip() == res_type[i].strip()]
                    if not tmp:
                        ndx[resid].pop(res, None)
                        pass
                    else:
                        ndx[resid][res][element] = tmp
            bar.next()
        bar.finish()
    return ndx
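# Usage sketch: build an atom-selection index for one residue type. The residue name
# 'DPPC' and atom names 'P8'/'N4' are illustrative placeholders; use the names that
# actually appear in your own .gro/.itp files.
def _example_make_index(res_num, res_type, atom_type, atom_num):
    group = {'DPPC': ['P8', 'N4']}   # {residue type: [atom types to keep]}
    ndx = atomid_data(res_num, res_type, atom_type, atom_num, group=group)
    return ndx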
def atom2grid(data, box_p, ndx):
    """
    Assign the atom numbers that correspond to ndx (see 'def atomid_data') to the
    subdomains created by 'def domain_decomposition'. The output is the box location
    (non zero based) in xyz-space for each atom number.

    parameters:     data = {dictionary input from 'def fr_export'}
                    box_p = {dictionary input from 'def domain_decomposition'}
                    ndx = {dictionary input from 'def atomid_data'}
    output:         dictionaries
                    1. box_res = {step:{resid:{res_type:{atom_type:{atom_num:(subX,subY,subZ)}}}}}
                    2. box_res_rev = {step:{resid:{res:{(subX,subY,subZ):[atom_num]}}}}
    """
    print(' ')
    print('Assign atom to grid progress:')
    print('==============================')
    box_res = {}
    box_res_rev = {}
    for step in data.keys():
        #print('Step: '+step)
        bar = Bar('Step: ' + step, max=len(ndx.keys()))
        box_res[step] = {}
        box_res_rev[step] = {}
        for resid in ndx.keys():
            box_res[step][resid] = {}
            box_res_rev[step][resid] = {}
            for res in ndx[resid].keys():
                box_res[step][resid][res] = {}
                box_res_rev[step][resid][res] = {}
                for atom in ndx[resid][res].keys():
                    box_res[step][resid][res][atom] = {}
                    box_res_rev[step][resid][res][atom] = {}
                    for atomnum in ndx[resid][res][atom]:
                        #data[step]['x'][atom_num-1][x(0),y(1),z(2)]
                        #locate the grid interval that contains each coordinate
                        xx = data[step]['x'][int(atomnum)-1][0]
                        cnt_x = -1
                        prev = -1
                        for ix in box_p[step]['x']:
                            cnt_x = cnt_x + 1
                            if xx < ix and xx > prev:
                                prev = ix
                                break
                        yy = data[step]['x'][int(atomnum)-1][1]
                        cnt_y = -1
                        prev = -1
                        for iy in box_p[step]['y']:
                            cnt_y = cnt_y + 1
                            if yy < iy and yy > prev:
                                prev = iy
                                break
                        zz = data[step]['x'][int(atomnum)-1][2]
                        cnt_z = -1
                        prev = -1
                        for iz in box_p[step]['z']:
                            cnt_z = cnt_z + 1
                            if zz < iz and zz > prev:
                                prev = iz
                                break
                        box_res[step][resid][res][atom][atomnum] = (cnt_x, cnt_y, cnt_z)
                    #Make subdomain position the key and group the residues
                    for key, value in sorted(box_res[step][resid][res][atom].items()):
                        box_res_rev[step][resid][res].setdefault(value, []).append(key.strip())
                    #Remove atom_type as key of dictionary
                    box_res_rev[step][resid][res].pop(atom, None)
                #If atoms of a residue lie in more than one subdomain, put them all in the first one
                if len(box_res_rev[step][resid][res]) > 1:
                    tmp = []
                    flattened_list = []
                    kk = []
                    for sb in box_res_rev[step][resid][res].keys():
                        kk.append(sb)
                        tmp.append(box_res_rev[step][resid][res][sb])
                    #Remove keys
                    for k in kk:
                        box_res_rev[step][resid][res].pop(k, None)
                    #flatten the lists
                    flattened_list = [y for x in tmp for y in x]
                    #Keep first key and results
                    box_res_rev[step][resid][res][kk[0]] = flattened_list
                    #box_res_rev[step][resid][res]=[i for i, e in enumerate(box_res_rev[step][resid][res]) if e.strip() != .strip()]
            bar.next()
        bar.finish()
    return box_res, box_res_rev
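# Usage sketch: decompose the box into subdomains of size 1 (in the length units of the
# .trr data) and map the indexed atoms onto that grid. `data` and `ndx` are assumed to
# come from fr_export / atomid_data above.
def _example_grid_assignment(data, ndx):
    box_p = domain_decomposition(data, 1, 1, 1)
    box_res, box_res_rev = atom2grid(data, box_p, ndx)
    # box_res_rev[step][resid][res] maps a (subX, subY, subZ) tuple to the atom numbers in it
    return box_res, box_res_rev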
def atom2group(data, box_p, ndx):
    """
    Assign the atom numbers that correspond to ndx (see 'def atomid_data') to the
    subdomains created by 'def domain_decomposition', only for the first step. The same
    domain id is then kept for the same group of molecules. The output is the box
    location (non zero based) in xyz-space for each atom number.

    parameters:     data = {dictionary input from 'def fr_export'}
                    box_p = {dictionary input from 'def domain_decomposition'}
                    ndx = {dictionary input from 'def atomid_data'}
    output:         dictionaries
                    1. box_res = {step:{resid:{res_type:{atom_type:{atom_num:(subX,subY,subZ)}}}}}
                    2. box_res_rev = {step:{resid:{res:{(subX,subY,subZ):[atom_num]}}}}
    """
    print(' ')
    print('Assign atom to group progress:')
    print('==============================')
    step_cnt = 0
    domain_list = {}
    box_res = {}
    box_res_rev = {}
    for step in data.keys():
        step_cnt = step_cnt + 1
        #print('Step: '+step)
        bar = Bar('Step: ' + step, max=len(ndx.keys()))
        box_res[step] = {}
        box_res_rev[step] = {}
        for resid in ndx.keys():
            box_res[step][resid] = {}
            box_res_rev[step][resid] = {}
            for res in ndx[resid].keys():
                box_res[step][resid][res] = {}
                box_res_rev[step][resid][res] = {}
                for atom in ndx[resid][res].keys():
                    box_res[step][resid][res][atom] = {}
                    box_res_rev[step][resid][res][atom] = {}
                    for atomnum in ndx[resid][res][atom]:
                        #data[step]['x'][atom_num-1][x(0),y(1),z(2)]
                        if step_cnt == 1:
                            #locate the grid interval that contains each coordinate
                            xx = data[step]['x'][int(atomnum)-1][0]
                            cnt_x = -1
                            prev = -1
                            for ix in box_p[step]['x']:
                                cnt_x = cnt_x + 1
                                if xx < ix and xx > prev:
                                    prev = ix
                                    break
                            yy = data[step]['x'][int(atomnum)-1][1]
                            cnt_y = -1
                            prev = -1
                            for iy in box_p[step]['y']:
                                cnt_y = cnt_y + 1
                                if yy < iy and yy > prev:
                                    prev = iy
                                    break
                            zz = data[step]['x'][int(atomnum)-1][2]
                            cnt_z = -1
                            prev = -1
                            for iz in box_p[step]['z']:
                                cnt_z = cnt_z + 1
                                if zz < iz and zz > prev:
                                    prev = iz
                                    break
                            domain_list[atomnum] = (cnt_x, cnt_y, cnt_z)
                            box_res[step][resid][res][atom][atomnum] = (cnt_x, cnt_y, cnt_z)
                        else:
                            #reuse the subdomain assigned in the first step
                            box_res[step][resid][res][atom][atomnum] = domain_list[atomnum]
                    #Make subdomain position the key and group the residues
                    for key, value in sorted(box_res[step][resid][res][atom].items()):
                        box_res_rev[step][resid][res].setdefault(value, []).append(key.strip())
                    #Remove atom_type as key of dictionary
                    box_res_rev[step][resid][res].pop(atom, None)
                #If atoms of a residue lie in more than one subdomain, put them all in the first one
                if len(box_res_rev[step][resid][res]) > 1:
                    tmp = []
                    flattened_list = []
                    kk = []
                    for sb in box_res_rev[step][resid][res].keys():
                        kk.append(sb)
                        tmp.append(box_res_rev[step][resid][res][sb])
                    #Remove keys
                    for k in kk:
                        box_res_rev[step][resid][res].pop(k, None)
                    #flatten the lists
                    flattened_list = [y for x in tmp for y in x]
                    #Keep first key and results
                    box_res_rev[step][resid][res][kk[0]] = flattened_list
                    #box_res_rev[step][resid][res]=[i for i, e in enumerate(box_res_rev[step][resid][res]) if e.strip() != .strip()]
            bar.next()
        bar.finish()
    return box_res, box_res_rev
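# Usage sketch: same call pattern as atom2grid, but the subdomain assigned in the first
# frame is reused for every later frame, so a residue is tracked as one group over time.
def _example_group_assignment(data, ndx):
    box_p = domain_decomposition(data, 1, 1, 1)
    return atom2group(data, box_p, ndx)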
def sub_coord(box, data, res_num=[]):
    """
    Use the box_res_rev from 'def atom2grid' and data from 'def fr_export' and group
    all the XYZ coordinates per subdomain

    parameters:     box = {step:{resid:{res:{(subX,subY,subZ):[atom_num]}}}}
                    data = {step:{obj:[x,y,z]}}
    output:         norm = {step:{subdomain:[[X1,Y1,Z1] ... [Xn,Yn,Zn]]}}
                    vector = {step:{subdomain:{resid:[[X1,Y1,Z1] ... [Xn,Yn,Zn]]}}}
    """
    print(' ')
    print('Groupby subdomain progress:')
    print('==============================')
    coord_norm = {}
    coord_vector = {}
    for step in box.keys():
        coord_norm[step] = {}
        coord_vector[step] = {}
        for resid in box[step].keys():
            for res in box[step][resid].keys():
                for subdomain in box[step][resid][res].keys():
                    coord_norm[step][subdomain] = []
                    coord_vector[step][subdomain] = {}
        rs = []
        bar = Bar('Step: ' + step, max=len(box[step].keys()))
        for resid in box[step].keys():
            for res in box[step][resid].keys():
                for subdomain in box[step][resid][res].keys():
                    #tmp=[]
                    if (resid, subdomain) not in rs:
                        rs.append((resid, subdomain))
                        coord_vector[step][subdomain][resid] = []
                    for atomnum in box[step][resid][res][subdomain]:
                        #tmp_atom=[]
                        #tmp.append(data[step]['x'][int(atomnum)-1])
                        #tmp_atom.append(list(data[step]['x'][int(atomnum)-1])) #not append to previous
                        coord_norm[step][subdomain].append(list(data[step]['x'][int(atomnum)-1]))
                        coord_vector[step][subdomain][resid].append(list(data[step]['x'][int(atomnum)-1]))
            bar.next()
        bar.finish()
    return coord_norm, coord_vector

def coord2norm(coord, img=True):
    """
    Use the coord from 'def sub_coord' and replace the XYZ coordinates with c, normal of
    the surface that fits the points. It can also plot the surfaces for each subdomain.

    parameters:     coord = {step:{subdomain:[[X1,Y1,Z1] ... [Xn,Yn,Zn]]}}
                    img = True or False
    output:         dictionary = {step:{subdomain:{c:int, normal:array[]}}}
    """
    print(' ')
    print('Finding surface progress:')
    print('==============================')
    surf = {}
    for step in coord.keys():
        surf[step] = {}
        bar = Bar('Step: ' + step, max=len(coord[step].keys()))
        for subdomain in coord[step].keys():
            c, normal = fitPlaneLTSQ(np.array(coord[step][subdomain]))
            surf[step][subdomain] = {'c': c, 'normal': normal}
            #Change the save variable if you want to save the .png elsewhere
            save = 'png/' + str(subdomain)
            if img == True:
                try:
                    plot_surf(np.array(coord[step][subdomain]), normal, c, save)
                except:
                    print("ERROR: Folder png/ doesn't exist in root")
            bar.next()
        bar.finish()
    return surf
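# Usage sketch: gather the coordinates of each subdomain and fit a plane to them.
# img=False skips the 3D plots, which need a png/ folder and a display.
def _example_surfaces(box_res_rev, data):
    coord_norm, coord_vector = sub_coord(box_res_rev, data)
    surf = coord2norm(coord_norm, img=False)   # {step:{subdomain:{'c':..., 'normal':...}}}
    return surf, coord_vector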
def coord2norm2cg(coord_vector, img=True):
    """
    Use the coord_vector from 'def sub_coord' and replace the XYZ coordinates with c,
    normal of the surface that fits the points, and calculate the center of gravity for
    each resid. It can also plot the surfaces for each subdomain.

    parameters:     coord = {step:{subdomain:{resid:[[X1,Y1,Z1] ... [Xn,Yn,Zn]]}}}
                    img = True or False
    output:         dictionary = {step:{subdomain:{c:int, normal:array[], resid:[cgx, cgy, cgz]}}}
    """
    print(' ')
    print('Finding surface and gravity point progress:')
    print('==========================================')
    surf = {}
    for step in coord_vector.keys():
        surf[step] = {}
        bar = Bar('Step: ' + step, max=len(coord_vector[step].keys()))
        for subdomain in coord_vector[step].keys():
            surf[step][subdomain] = {}
            #xx,yy,zz accumulate over all resids of the subdomain
            xx = []
            yy = []
            zz = []
            #surf_points=[]
            res_cg = {}
            cnt = 0
            for resid in coord_vector[step][subdomain].keys():
                cnt = cnt + 1
                for ii in coord_vector[step][subdomain][resid]:
                    #print(ii)
                    #print(ii[0],ii[1],ii[2])
                    #print(coord_vector[step][subdomain][resid][0])
                    xx.append(ii[0])
                    yy.append(ii[1])
                    zz.append(ii[2])
                cgx = center_gravity(xx)
                cgy = center_gravity(yy)
                cgz = center_gravity(zz)
                res_cg[resid] = [cgx, cgy, cgz]
                if cnt == 1:
                    surf_points = coord_vector[step][subdomain][resid]
                else:
                    surf_points = surf_points + coord_vector[step][subdomain][resid]
            c, normal = fitPlaneLTSQ(np.array(surf_points))
            surf[step][subdomain] = {'c': c, 'normal': normal}
            surf[step][subdomain].update(res_cg)
            if img == True:
                fl_save = 'png/'
                save = fl_save + str(subdomain)
                if not os.path.exists(fl_save):
                    os.makedirs(fl_save)
                try:
                    plot_surf(np.array(surf_points), normal, c, save)
                except Exception as e:
                    print(e)
            bar.next()
        bar.finish()
    return surf

def coord2vector(coord_vector):
    """
    Use the coord_vector from 'def sub_coord' and replace the XYZ coordinates with the
    vector of the best fit line

    parameters:     coord = {step:{subdomain:{resid:[[X1,Y1,Z1] ... [Xn,Yn,Zn]]}}}
    output:         dictionary = {step:{subdomain:{resid:[x, y, z]}}}
    """
    print(' ')
    print('Finding vector progress:')
    print('==============================')
    vector = {}
    for step in coord_vector.keys():
        vector[step] = {}
        bar = Bar('Step: ' + step, max=len(coord_vector[step].keys()))
        for subdomain in coord_vector[step].keys():
            vector[step][subdomain] = {}
            for resid in coord_vector[step][subdomain].keys():
                grp = []
                for ii in coord_vector[step][subdomain][resid]:
                    grp.append(ii)
                vv = points2vector(np.array(grp))
                vector[step][subdomain][resid] = vv
            bar.next()
        bar.finish()
    return vector

def SurfVector_angle(surf, vector):
    """
    Use the surf and vector from 'def coord2norm' and 'def coord2vector' and calculate
    the angle between them for every step, subdomain and resid

    parameters:     surf = {step:{subdomain:{c:int, normal:array[]}}}
                    vector = {step:{subdomain:{resid:[x, y, z]}}}
    output:         angle = {step:{subdomain:{resid:angle}}}
    """
    print(' ')
    print('Tilt progress:')
    print('==============================')
    angle = {}
    for step in surf.keys():
        angle[step] = {}
        bar = Bar('Step: ' + step, max=len(surf[step].keys()))
        for subdomain in surf[step].keys():
            try:
                vector[step][subdomain].keys()
            except KeyError:
                #print(step, subdomain)
                continue
            angle[step][subdomain] = {}
            tot = []
            for resid in vector[step][subdomain].keys():
                P1 = tuple(surf[step][subdomain]['normal'])
                P2 = tuple(vector[step][subdomain][resid])
                #print(tbf.angle_between3D(P1,P2))
                angle[step][subdomain][resid] = angle_between3D(P1, P2)
                tot.append(angle_between3D(P1, P2))
            angle[step][subdomain]['avg/frame'] = sum(tot) / len(tot)
            bar.next()
        bar.finish()
    return angle
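# Usage sketch: per-residue tilt angle between the best-fit line of each residue and the
# normal of its subdomain surface; SurfVector_angle itself also stores the per-frame
# average under the 'avg/frame' key.
def _example_tilt(coord_vector):
    surf = coord2norm2cg(coord_vector, img=False)
    vector = coord2vector(coord_vector)
    return SurfVector_angle(surf, vector)   # {step:{subdomain:{resid: angle_in_degrees, 'avg/frame': ...}}}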
def togmxndx(box_res, fld, sv_name):
    print(' ')
    print('Save to ndx progress:')
    print('==============================')
    cnt = 0
    fl_save = fld + '/'
    if not os.path.exists(fl_save):
        os.makedirs(fl_save)
    else:
        print('WARNING: .ndx files exist. Nothing to do!')
        return
    ndx = {}
    for step in box_res.keys():
        if cnt == 1:
            break
        for resid in box_res[step].keys():
            for res in box_res[step][resid].keys():
                for subdomain in box_res[step][resid][res].keys():
                    ndx[subdomain] = {}
        rs = []
        bar = Bar('Create format: ', max=len(box_res[step].keys()))
        for resid in box_res[step].keys():
            for res in box_res[step][resid].keys():
                for subdomain in box_res[step][resid][res]:
                    if (res, subdomain) not in rs:
                        rs.append((res, subdomain))
                        ndx[subdomain][res] = []
                    for atomnum in box_res[step][resid][res][subdomain]:
                        ndx[subdomain][res].append(atomnum)
            bar.next()
        bar.finish()
        cnt = cnt + 1
    bar = Bar('Save file: ', max=len(ndx.keys()))
    for subdomain in ndx.keys():
        save = fl_save + sv_name + '_' + str(subdomain) + '.ndx'
        with open(save, "a") as myfile:
            for res in ndx[subdomain].keys():
                myfile.write("[" + res + "]\n")
                for atomnum in ndx[subdomain][res]:
                    myfile.write(atomnum + '\n')
        bar.next()
    bar.finish()

def dict2pd(d, col=[]):
    return pd.DataFrame.from_dict(d, orient='index', columns=col)
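# End-to-end sketch of how the helpers above chain together. The file names and the
# group selection are placeholders; nothing here runs on import.
def _example_pipeline():
    data = fr_export(trajfile='traj.trr', num_frames=1)
    system, data_num, box_size, res_num, res_type, atom_type, atom_num, rest_dt = read_gro('confout.gro')
    ndx = atomid_data(res_num, res_type, atom_type, atom_num, group={'DPPC': ['P8', 'N4']})
    box_p = domain_decomposition(data, 1, 1, 1)
    box_res, box_res_rev = atom2grid(data, box_p, ndx)
    coord_norm, coord_vector = sub_coord(box_res_rev, data)
    surf = coord2norm2cg(coord_vector, img=False)
    vector = coord2vector(coord_vector)
    angles = SurfVector_angle(surf, vector)
    topickle(angles, 'tilt_angles')   # writes tilt_angles.pkl
    return angles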