coordinf <- coord[toinf]
valinf <- di[toinf]
text(x=coordinf, y=valinf - 0.1, 'i')
- }
+ }
c <- colMeans(coord)
c1 <- c[-1]
c2 <- c[-length(c)]
mn <- round(min(di))
mx <- round(max(di))
for (i in mn:mx) {
- if ((i/d) == (i%%/%%d)) {
+ if ((i/d) == (i%%/%%d)) {
abline(h=i,lty=3)
}
}
par(mar=c(0,0,0,0))
plot(0, axes = FALSE, pch = '')
- legend(x = 'center' , rownames(di), fill = color)
+ legend(x = 'center' , rownames(di) , fill = color)
dev.off()
- """ % (ffr(parametres['rgraph']), parametres['width'], parametres['height'], ffr(parametres['tmpgraph']), parametres['svg'])
+ """ % (ffr(parametres['rgraph']), parametres['width'], parametres['height'], ffr(parametres['tmpgraph']), parametres['svg'])
else:
txt += """
load("%s")
if self.var_mod == {} :
self.var_mod = treat_var_mod([val for val in corpus.actives] + [val for val in corpus.sups])
var_mod = self.var_mod
- with codecs.open(self.Source.pathout['chisqtable'], 'r', corpus.parametres['syscoding']) as f :
+ with open(self.Source.pathout['chisqtable'], 'r', encoding='utf8') as f :
chistable = [line.replace('\n','').replace('\r','').replace('"','').replace(',','.').split(';') for line in f]
title = chistable[0]
title.pop(0)
pathout = ConstructPathOut(self.Source.pathout.dirout, 'simi_classe_%i' %self.cl)
if self.tmpchi is None :
self.tmpchi = os.path.join(pathout,'chi.csv')
- with open(self.tmpchi, 'w') as f:
+ with open(self.tmpchi, 'w', encoding='utf8') as f:
f.write('\n'.join([str(val) for val in self.lchi]))
self.filename = os.path.join(pathout,'mat01.csv')
tableau.printtable(self.filename, tab)
word = self.getColumnText(self.GetFirstSelected(), 6)
if self.tmpchi is None :
self.tmpchi = os.path.join(self.Source.parametres['pathout'],'chi_%i.csv' % self.cl)
- with open(self.tmpchi, 'w') as f:
+ with open(self.tmpchi, 'w', encoding='utf8') as f:
f.write('\n'.join([str(val) for val in self.lchi]))
index = self.la.index(word)
parametres = {'type' : 'clustersimitxt',
def on_graph(self, evt):
if self.tmpchi is None :
self.tmpchi = os.path.join(self.Source.parametres['pathout'],'chi_%i.csv' % self.cl)
- with open(self.tmpchi, 'w') as f:
+ with open(self.tmpchi, 'w', encoding='utf8') as f:
f.write('\n'.join([str(val) for val in self.lchi]))
parametres = {'type' : 'clustersimitxt',
'pathout' : self.Source.parametres['pathout'],
corpus = self.Source.corpus
else :
corpus = self.Source.tableau
- with codecs.open(self.Source.pathout['chisqtable'], 'r', corpus.parametres['syscoding']) as f :
+ with open(self.Source.pathout['chisqtable'], 'r', encoding='utf8') as f :
chistable = [line.replace('\n','').replace('\r','').replace('"','').replace(',','.').split(';') for line in f]
title = chistable[0]
title.pop(0)
corpus = self.Source.corpus
else :
corpus = self.Source.tableau
- with codecs.open(self.Source.pathout['chisqtable'], 'r', corpus.parametres['syscoding']) as f :
+ with open(self.Source.pathout['chisqtable'], 'r', encoding='utf8') as f :
chistable = [line.replace('\n','').replace('\r','').replace('"','').replace(',','.').split(';') for line in f]
title = chistable[0]
title.pop(0)
self.read()
def read(self) :
- with open(self.filein, 'r') as fjson :
+ with open(self.filein, 'r', encoding='utf8') as fjson :
d = json.load(fjson)
# d = shelve.open(self.filein, protocol=1)
self.history = d.get('history', [])
d = {}
d['history'] = self.history
d['matrix'] = self.matrix
- with open(self.filein, 'w') as f :
+ with open(self.filein, 'w', encoding='utf8') as f :
f.write(json.dumps(d, indent=4, default=str))
#d = shelve.open(self.filein, protocol=1)
#d.close()
print(profils)
def read_chd(filein, fileout):
- with open(filein, 'r') as f :
+ with open(filein, 'r', encoding='utf8') as f :
content = f.read()
#content = [line[3:].replace('"',"").replace(' ','') for line in content.splitlines()]
content = [line.split('\t') for line in content.splitlines()]
mere[line[0]]['children'] = [{'name': line[1],'size' : content[i+1][0]}, {'name':line[2], 'size': content[i+1][1]}]
mere[line[1]] = mere[line[0]]['children'][-2]
mere[line[2]] = mere[line[0]]['children'][-1]
- with open(fileout, 'w') as f :
+ with open(fileout, 'w', encoding='utf8') as f :
f.write(json.dumps(chd))
edges = [line.split('\t') for line in content]
except :
edges = None
-
+
with codecs.open(nodesfile, 'r', 'utf8') as f :
content = f.read()
content = content.replace('"','')
nodes = [line.split('\t') for line in content]
graph = {'edges': [], 'nodes' : {}}
-
+
we = titles_edges.index('weight')
if edges is not None :
for edge in edges :
graph['edges'].append({'source' : edge[0], 'target' : edge[1], 'weight' : edge[we]})
-
-
+
coefcoord = parametres.get('coefcoord', 1)
coefweight = parametres.get('coefweight', 1)
-
-
+
for node in nodes :
if zr is not None :
graph['nodes'][node[ni]] = {"location" : [float(node[xr])*coefcoord, float(node[yr])*coefcoord, float(node[zr])*coefcoord], 'weight' : float(node[wr])/coefweight, 'color': (int(node[r]),int(node[g]),int(node[b]))}
x = randint(-150,150)
graph['nodes'][node[ni]] = {"location" : [ x, float(node[xr]), float(node[yr])], 'weight' : float(node[wr]), 'color': (int(node[r]),int(node[g]),int(node[b]))}
- with open(jsonout, 'w') as f :
+ with open(jsonout, 'w', encoding='utf8') as f :
json.dump(graph, f)
if name == 'p' :
self.printsent()
self.fileout.write('\n')
-
+
def characters(self, content) :
if self.name == 'txm:form' :
if content not in ['', ' ', '\n', '\r'] :
files = glob.glob(os.path.join(pathin,'*.xml'))
if len(files) == 0 :
return 'nofile'
- with open(fileout, 'w') as fout :
+ with open(fileout, 'w', encoding='utf8') as fout :
parser.setContentHandler(TXMParser(fout, encodage_out))
for f in files :
- parser.parse(open(f, 'r'))
+ parser.parse(open(f, 'r', encoding='utf8'))
fout.write('\n\n')
return None
# fichier d'historique de Iramuteq
#------------------------------------------------------------------------------------------------
if not os.path.exists(os.path.join(UserConfigPath, 'history.db')) :
- with open(os.path.join(UserConfigPath, 'history.db'), 'w') as f :
+ with open(os.path.join(UserConfigPath, 'history.db'), 'w', encoding='utf8') as f :
f.write('{}')
self.history = History(os.path.join(UserConfigPath, 'history.db'))
# l'extension ".db" est ajoutée automatiquement par le module
txt = barplot('', '', '', self.parent.RscriptsPath['Rgraph'], tmpgraph, intxt = intxt)
# ecriture du script dans un fichier
tmpscript = tempfile.mktemp(dir=self.parent.TEMPDIR)
- with open(tmpscript, 'w') as f :
+ with open(tmpscript, 'w', encoding='utf8') as f :
f.write(txt)
# excution du script
exec_rcode(self.parent.RPath, tmpscript, wait = True)
ira = wx.GetApp().GetTopWindow()
item=self.getColumnText(self.GetFirstSelected(), 0)
wordlist = [val for val in self.tgens[item] if val in corpus.lems]
- print(wordlist)
wordlist = dict(list(zip(wordlist,wordlist)))
- print(wordlist)
- print(self.tgenlem)
res = dict([[val, self.tgenlem[val]] for val in self.tgenlem if self.tgenlem[val][0] in wordlist])
win = ListLexFrame(self, ira, corpus, res, self.etoiles)
win.Show()
files.append(txtdir)
tot = 0
parser = MyHTMLParser()
- with open(fileout,'w') as outf :
+ with open(fileout,'w', encoding='utf8') as outf :
for f in files :
print(f)
parser.doinit(outf)
nf.sort()
files += nf
tot = 0
- with open(fileout,'w') as outf :
+ with open(fileout,'w', encoding='utf8') as outf :
for f in files :
print(f)
- with codecs.open(f, 'r', encodage_in) as infile :
- content = infile.read()
+ with codecs.open(f, 'r', encodage_in) as infile :
+ content = infile.read()
ucis = parsetxtpaste(content)
print_ucis(ucis, outf, encodage_out)
tot += len(ucis)
save.image(file="%s")
""" % ffr(self.pathout['RData'])
tmpfile = tempfile.mktemp(dir=self.parent.TEMPDIR)
- tmpscript = open(tmpfile, 'w')
+ tmpscript = open(tmpfile, 'w', encoding='utf8')
tmpscript.write(txt)
tmpscript.close()
pid = exec_rcode(self.parent.RPath, tmpfile, wait = False)
check_Rresult(self.parent, pid)
temps = time.time() - self.t1
self.minutes, self.seconds = divmod(temps, 60)
- self.hours, self.minutes = divmod(self.minutes, 60)
+ self.hours, self.minutes = divmod(self.minutes, 60)
""" % (ffr(self.parametres['pathout']),ffr(self.OutFrame))
tmpfile=tempfile.mktemp(dir=self.TEMPDIR)
print(tmpfile)
- tmpscript=open(tmpfile,'w')
+ tmpscript=open(tmpfile,'w', encoding='utf8')
tmpscript.write(txt)
tmpscript.close()
pid = exec_rcode(self.RPath, tmpfile, wait = False)
def dolayout(self, option):
ListFile=[False]
- file=open(self.OutFrame,'r')
+ file=open(self.OutFrame,'r', encoding='utf8')
content=file.readlines()
file.close()
lcont = [line.replace('"','').replace('\n','').split(';') for line in content]
if option['contrib'] :
allcoord.append([i for i,chi in enumerate(lcont) if chi[1]=='*contrib*'])
names.append('Contributions a posteriori')
- if option['pourcent'] :
+ if option['pourcent'] :
allcoord.append([i for i,chi in enumerate(lcont) if chi[1]=='*pr*'])
names.append('Pourcentages')
if option['pourcentl'] :
txt = '<br><hr><br>\n'.join(['<br><br>'.join([tab[i] for tab in allhtml]) for i,val in enumerate(res)])
txt = header + pretxt + txt + '\n</body></html>'
fileout=os.path.join(self.parametres['pathout'],'resultats-chi2.html')
- with open(fileout, 'w') as f :
+ with open(fileout, 'w', encoding='utf8') as f :
f.write(txt)
- ListFile.append(fileout)
+ ListFile.append(fileout)
return ListFile
supp = sortedby(supp, 2, 1)
supp = [[i, val] for i, val in enumerate(supp)]
ucesize = self.corpus.getucesize()
- with open(self.pathout['stsize.csv'], 'w') as f :
+ with open(self.pathout['stsize.csv'], 'w', encoding='utf8') as f :
f.write('\n'.join([repr(val) for val in ucesize]))
self.result = {'total' : dict(tot), 'formes_actives' : dict(act), 'formes_supplémentaires' : dict(supp), 'hapax' : dict(hapax), 'glob' : ''}
occurrences = sum([val[1][1] for val in tot]) + len(hapax)