Python LDA Implementation in Detail
LDA (Latent Dirichlet Allocation) is a widely used, general-purpose probabilistic topic model. It is usually implemented with either variational inference or Gibbs sampling; when the model's authors proposed LDA they released C source code for its variational inference (a class adapted to C++ will be posted later). This article presents an LDA class and implementation built on a third-party Python module, `lda`, which uses collapsed Gibbs sampling.
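The `lda` package exposes a small scikit-learn-style interface: you hand `lda.LDA.fit()` a document-term matrix of integer token counts and read the fitted distributions from `topic_word_` and `doc_topic_`. As a quick orientation before the full class below, here is a minimal sketch of that interface using the Reuters sample data bundled with the package; the loaders and attributes shown are the package's own API, while the parameter values are merely illustrative:

import lda
import lda.datasets

# bundled sample data: a document-term count matrix plus its vocabulary
X = lda.datasets.load_reuters()
vocab = lda.datasets.load_reuters_vocab()
print(X.shape)  # (n_documents, n_vocabulary_words)

# fit 20 topics with collapsed Gibbs sampling
model = lda.LDA(n_topics=20, n_iter=1500, random_state=1)
model.fit(X)

# topic_word_: (n_topics, n_vocab) word distribution per topic
# doc_topic_:  (n_docs, n_topics) topic distribution per document
top_word_ids = model.topic_word_[0].argsort()[::-1][:8]
print([vocab[w] for w in top_word_ids])  # top words of topic 0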
# coding:utf-8
import numpy as np
import lda
import jieba
import codecs

class LDA_v20161130():
    def __init__(self, topics=2):
        self.n_topic = topics
        self.corpus = None
        self.vocab = None
        self.ppCountMatrix = None
        self.stop_words = [u',', u'。', u'、', u'(', u')', u'·', u'!', u' ', u':', u'“', u'”', u'\n']
        self.model = None

    def loadCorpusFromFile(self, fn):
        # segment the Chinese text with jieba
        f = codecs.open(fn, 'r', 'utf-8')
        text = f.readlines()
        text = r' '.join(text)
        seg_generator = jieba.cut(text)
        seg_list = [i for i in seg_generator if i not in self.stop_words]
        seg_list = r' '.join(seg_list)
        # collect every distinct token into the vocabulary
        seglist = seg_list.split(" ")
        self.vocab = []
        for word in seglist:
            if word != u' ' and word not in self.vocab:
                self.vocab.append(word)
        CountMatrix = []
        f.seek(0, 0)
        # count term frequencies per document (one document per line)
        for line in f:
            # reset the counts for this document
            count = np.zeros(len(self.vocab), dtype=int)
            text = line.strip()
            # each line still has to be segmented first
            seg_generator = jieba.cut(text)
            seg_list = [i for i in seg_generator if i not in self.stop_words]
            seg_list = r' '.join(seg_list)
            seglist = seg_list.split(" ")
            # count how often each vocabulary word occurs in this document
            for word in seglist:
                if word in self.vocab:
                    count[self.vocab.index(word)] += 1
            CountMatrix.append(count)
        f.close()
        self.ppCountMatrix = np.array(CountMatrix)
        print("load corpus from %s success!" % fn)

    def setStopWords(self, word_list):
        self.stop_words = word_list

    def fitModel(self, n_iter=1500, _alpha=0.1, _eta=0.01):
        self.model = lda.LDA(n_topics=self.n_topic, n_iter=n_iter, alpha=_alpha, eta=_eta, random_state=1)
        self.model.fit(self.ppCountMatrix)

    def printTopic_Word(self, n_top_word=8):
        for i, topic_dist in enumerate(self.model.topic_word_):
            # the n_top_word highest-probability words of this topic
            topic_words = np.array(self.vocab)[np.argsort(topic_dist)][:-(n_top_word + 1):-1]
            print("Topic: %d\t%s" % (i, ' '.join(topic_words)))

    def printDoc_Topic(self):
        for i in range(len(self.ppCountMatrix)):
            print("Doc %d:((top topic:%s) topic distribution:%s)" % (i, self.model.doc_topic_[i].argmax(), self.model.doc_topic_[i]))

    def printVocabulary(self):
        print("vocabulary:")
        print(' '.join(self.vocab))

    def saveVocabulary(self, fn):
        f = codecs.open(fn, 'w', 'utf-8')
        for word in self.vocab:
            f.write("%s\n" % word)
        f.close()

    def saveTopic_Words(self, fn, n_top_word=-1):
        if n_top_word == -1:
            n_top_word = len(self.vocab)
        f = codecs.open(fn, 'w', 'utf-8')
        for i, topic_dist in enumerate(self.model.topic_word_):
            topic_words = np.array(self.vocab)[np.argsort(topic_dist)][:-(n_top_word + 1):-1]
            f.write("Topic:%d\t" % i)
            for word in topic_words:
                f.write("%s " % word)
            f.write("\n")
        f.close()

    def saveDoc_Topic(self, fn):
        f = codecs.open(fn, 'w', 'utf-8')
        for i in range(len(self.ppCountMatrix)):
            f.write("Doc %d:((top topic:%s) topic distribution:%s)\n" % (i, self.model.doc_topic_[i].argmax(), self.model.doc_topic_[i]))
        f.close()
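One caveat about the class above: `self.vocab.index(word)` performs a linear scan for every token, so building the count matrix costs roughly O(total tokens × vocabulary size). For larger corpora, a dictionary lookup is the usual fix. The helper below is a hypothetical sketch (not part of the original class) showing the same counting step with a precomputed word-to-index map:

import numpy as np

def build_count_matrix(docs_tokens, vocab):
    # docs_tokens: list of token lists, one per document
    # vocab: list of distinct words, as produced by loadCorpusFromFile
    word2id = {w: i for i, w in enumerate(vocab)}  # O(1) lookups instead of list.index
    matrix = np.zeros((len(docs_tokens), len(vocab)), dtype=int)
    for d, tokens in enumerate(docs_tokens):
        for word in tokens:
            idx = word2id.get(word)
            if idx is not None:  # skip tokens outside the vocabulary
                matrix[d, idx] += 1
    return matrix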
Algorithm demo:

For example, take scraped BBC news coverage of Trump's election win as the corpus and run the following code:
if __name__ == "__main__":
    _lda = LDA_v20161130(topics=20)
    stop = [u'!', u'@', u'#', u',', u'.', u'/', u';', u' ', u'[', u']', u'$',
            u'%', u'^', u'&', u'*', u'(', u')', u'"', u':', u'<', u'>']
    _lda.setStopWords(stop)
    # the rest of the original listing was lost in extraction; the calls below
    # follow the class API above, and the file names are placeholders
    _lda.loadCorpusFromFile('bbc_trump_news.txt')
    _lda.fitModel(n_iter=1500)
    _lda.printTopic_Word(n_top_word=10)
    _lda.printDoc_Topic()
    _lda.saveTopic_Words('topic_word.txt')
    _lda.saveDoc_Topic('doc_topic.txt')

Summary

That concludes this detailed walkthrough of implementing LDA in Python; we hope it is helpful. Interested readers can also refer to other posts on this site: "python+mongodb data scraping in detail", "Exploring Python: creating a binary tree", and "Exploring Python: modifying the Python search path". Feel free to leave a comment with any questions; everyone is welcome to discuss. Thanks for supporting this site!