import re
import pinyin
from Pinyin2Hanzi import DefaultDagParams
from Pinyin2Hanzi import dag


class corrector():
    def __init__(self):
        # Matches a single CJK unified ideograph (used to keep Chinese-only tokens)
        self.re_compile = re.compile(r'[\u4e00-\u9fff]')
        self.DAG = DefaultDagParams()
    # Read the vocabulary from the token file
    def getData(self):
        words = []
        with open("/Users/wys/Desktop/token.txt") as f:
            for line in f.readlines():
                word = line.split(" ")[0]
                if word and len(word) > 2:
                    res = self.re_compile.findall(word)
                    if len(res) == len(word):  # keep only tokens made up entirely of Chinese characters
                        words.append(word)
        return words
    # Convert each pinyin syllable into up to 10 homophone candidate characters
    def pinyin_2_hanzi(self, pinyinList):
        result = []
        words = dag(self.DAG, pinyinList, path_num=10)
        for item in words:
            res = item.path  # the converted characters for this path
            result.append(res[0])
        return result
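    # Illustration (hypothetical output; the actual candidates depend on
    # Pinyin2Hanzi's built-in language model):
    #   dag(self.DAG, ['jie'], path_num=10) yields up to 10 scored items, each
    #   with a .path list such as ['街'] or ['接'], so pinyin_2_hanzi(['jie'])
    #   returns roughly ['街', '接', '界', ...].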
    # Build the set of candidate phrases obtained by swapping in homophones
    def getCandidates(self, phrase):
        chars = {}
        for c in phrase:
            chars[c] = self.pinyin_2_hanzi(pinyin.get(c, format='strip', delimiter=',').split(','))
        replaces = []
        for c in phrase:
            for x in chars[c]:
                replaces.append(phrase.replace(c, x))
        return set(replaces)
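    # Example (hedged): for the phrase '姐道', chars maps '姐' to homophones such
    # as ['街', '姐', ...] and '道' to ['道', '到', ...]; the replace() loop then
    # produces single-character substitutions like '街道' and '姐到'.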
    # Return the corrected result for each word
    def getCorrection(self, words):
        result = []
        for word in words:
            # use `candidate` so the outer loop variable `word` is not shadowed
            for candidate in self.getCandidates(word):
                if Tree.search(candidate):  # Tree is the module-level trie built in __main__
                    result.append(candidate)
                    break
        return result
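# Note on getCorrection (illustrative assumption about the data, not guaranteed
# by the code): for an input like '转塘姐道', one generated candidate is
# '转塘街道'; it is accepted as the correction only if the token file contained
# that word and it was therefore inserted into the trie.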

class Node:
    def __init__(self):
        self.word = False  # True if a vocabulary word ends at this node
        self.child = {}

class Trie(object):
    def __init__(self):
        self.root = Node()

    def insert(self, words):
        for word in words:
            cur = self.root
            for w in word:
                if w not in cur.child:
                    cur.child[w] = Node()
                cur = cur.child[w]
            cur.word = True

    def search(self, word):
        cur = self.root
        for w in word:
            if w not in cur.child:
                return False
            cur = cur.child[w]
        if not cur.word:
            return False
        return True
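# Quick sanity check of the trie (illustrative, not part of the original script):
#   t = Trie()
#   t.insert(['街道'])
#   t.search('街道')  # -> True: the full word was inserted
#   t.search('街')    # -> False: '街' is only a prefix, not a stored word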

if __name__ == '__main__':
    # Initialise the corrector
    c = corrector()
    # Load the vocabulary
    words = c.getData()
    # Initialise the prefix trie
    Tree = Trie()
    # Insert every word into the trie
    Tree.insert(words)
    # Test
    print(c.getCorrection(['专塘街道', '转塘姐道', '转塘街到']))
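As a quick standalone check of the two third-party pieces this script relies on (the pinyin package and Pinyin2Hanzi), the short sketch below prints the candidate characters produced for a single input character. It is illustrative only and assumes both packages are installed; the exact candidates and scores depend on Pinyin2Hanzi's built-in language model.

import pinyin
from Pinyin2Hanzi import DefaultDagParams, dag

params = DefaultDagParams()
# Convert one character to its pinyin, then ask the DAG model for homophones
syllables = pinyin.get('姐', format='strip', delimiter=',').split(',')
for item in dag(params, syllables, path_num=10):
    print(item.score, item.path)  # each path is a list of candidate characters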
This concludes this article on a simple implementation of Chinese text correction in Python.