As shown below:
Text filtering
result = re.sub(r'[^\u4e00-\u9fa5,。?!,、;:“”‘’()《》〈〉]', "", content)  # keep only Chinese characters and punctuation
result = re.sub(r'[^\u4e00-\u9fa5]', "", content)  # keep only Chinese characters
result = re.sub(r'[^0-9.\u4e00-\u9fa5,。?!,、;:“”‘’()《》〈〉]', "", content)  # keep Chinese characters, punctuation and digits
result = re.sub(r'[^\u4e00-\u9fa5,A-Za-z0-9]', "", content)  # keep Chinese characters, English letters and digits
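For reference, a minimal usage sketch of the Chinese-only filter; the sample string is made up for illustration:

import re

content = "Hello,世界!123 这是一段测试文本。"
print(re.sub(r'[^\u4e00-\u9fa5]', "", content))   # 世界这是一段测试文本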
Removing runs of two or more whitespace characters from text
content = re.sub(r'\s{2,}', '', content)  # delete any run of two or more whitespace characters
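A quick check of this rule, assuming the intent is to drop runs of two or more whitespace characters entirely while single spaces are left alone (sample string is illustrative):

import re

content = "文本  中有   多余的空格"
print(re.sub(r'\s{2,}', '', content))   # 文本中有多余的空格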
Decoding base64 into Chinese text
import base64
import re

def bas4_decode(bas4_content):
    decodestr = base64.b64decode(bas4_content)
    # keep only Chinese characters, punctuation and digits
    result = re.sub(r'[^0-9.\u4e00-\u9fa5,。?!,、;:“”‘’()《》〈〉]', "", decodestr.decode())
    return result
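A hypothetical round trip to sanity-check the decoder: encode a Chinese string with the standard base64 module, then pass the result to bas4_decode (the sample string is illustrative):

import base64

encoded = base64.b64encode("你好,世界!".encode("utf-8"))
print(bas4_decode(encoded))   # 你好,世界!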
Removing stop words from text
import re
import codecs
import jieba

def text_to_wordlist(text):
    result = re.sub(r'[^\u4e00-\u9fa5]', "", text)  # keep only Chinese characters
    # a user dictionary can be added to cover words missing from jieba's default dictionary, improving accuracy
    f1_seg_list = jieba.cut(result)
    f_stop = codecs.open("stopword.txt", "r", "utf-8")
    try:
        f_stop_text = f_stop.read()
    finally:
        f_stop.close()
    f_stop_seg_list = f_stop_text.split()
    test_words = []
    for myword in f1_seg_list:
        if myword not in f_stop_seg_list:
            test_words.append(myword)
    return test_words
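The comment above about supplementing jieba's dictionary can be handled with jieba.load_userdict before segmenting. A minimal sketch, assuming a user dictionary file userdict.txt (one word per line) and the stopword.txt file used above; both file names are illustrative:

import jieba

jieba.load_userdict("userdict.txt")   # optional: add domain words jieba does not know
print(text_to_wordlist("我们今天来学习机器学习的基础知识。"))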
import re
import jieba
import jieba.analyse

def Textrank(content):
    result = re.sub(r'[^\u4e00-\u9fa5]', "", content)  # keep only Chinese characters
    seg = jieba.cut(result)
    jieba.analyse.set_stop_words('stopword.txt')
    keyList = jieba.analyse.textrank('|'.join(seg), topK=10, withWeight=False)
    return keyList

def TF_IDF(content):
    result = re.sub(r'[^\u4e00-\u9fa5]', "", content)  # keep only Chinese characters
    seg = jieba.cut(result)
    jieba.analyse.set_stop_words('stopword.txt')
    # keyword extraction; the author notes that jieba's tfidf.py was modified here
    keyWord = jieba.analyse.extract_tags(
        '|'.join(seg), topK=10, withWeight=False, allowPOS=())
    return keyWord
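An illustrative call to both extractors, assuming stopword.txt sits in the working directory; with topK=10 each method returns at most ten keywords (the input text is made up):

text = "自然语言处理是人工智能领域的重要方向,文本预处理通常是自然语言处理任务的第一步。"
print(Textrank(text))
print(TF_IDF(text))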
The examples above of using Python for text preprocessing and feature extraction are everything the editor has to share; hopefully they serve as a useful reference, and we hope you will continue to support 服务器之家.
Original article: https://blog.csdn.net/Johline/article/details/78802381