Plain labeling (direct pandas operations)
1
2
3
4
5
|
# Mark every account that is overdue in any aging bucket (AGE3..AGE6 > 0)
# with label 1, at account (XACCOUNT) granularity.
odue_df = df_train_stmt.loc[
    (df_train_stmt.AGE3 > 0)
    | (df_train_stmt.AGE4 > 0)
    | (df_train_stmt.AGE5 > 0)
    | (df_train_stmt.AGE6 > 0),
    ['XACCOUNT'],
].drop_duplicates()
odue_df['label'] = 1
# Customer-to-account mapping, deduplicated.
cust_df = df_acct[['CUSTR_NBR', 'XACCOUNT']].drop_duplicates()
# Roll the account-level label up to customer level: a customer is
# positive (1) if ANY of their accounts is overdue ('max' over the group);
# accounts with no overdue match come through the left join as NaN and
# become 0 via fillna.
df_y = (
    pd.merge(cust_df, odue_df, how='left', on='XACCOUNT')
    .groupby('CUSTR_NBR')
    .agg({'label': 'max'})
    .reset_index()
    .fillna(0)
)
|
Labeling with a function
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
|
# Assign the training label to one raw coupon record.
def label(row):
    """Return the label (-1/0/1) for one offline coupon record.

    `row` must provide 'Date_received' (coupon received date) and 'Date'
    (consumption date) as 'YYYYMMDD' strings, with the literal string
    'null' marking a missing value.

    Returns:
        -1 if no coupon was received (record unusable as a sample);
         1 if the coupon was used within 15 days of receipt;
         0 otherwise (coupon received but not redeemed in time).
    """
    if row['Date_received'] == 'null':
        return -1
    if row['Date'] != 'null':
        # Time between consumption and coupon receipt.
        td = pd.to_datetime(row['Date'], format='%Y%m%d') - pd.to_datetime(
            row['Date_received'], format='%Y%m%d')
        if td <= pd.Timedelta(15, 'D'):
            return 1
    return 0
# Build the target column by applying the labeling function row-wise.
dfoff['label'] = dfoff.apply(label, axis=1)
# Assign a label from the day gap between two dates packed in one string.
def get_label(s):
    """Return the label (0/1/-1) for a 'YYYYMMDD:YYYYMMDD' date pair.

    The first field is the consumption date, the second the coupon
    received date; the literal string 'null' in the first field marks
    a missing consumption date.

    Returns:
         0 if the first field is 'null';
         1 if (first date - second date) is at most 15 days;
        -1 otherwise.
    """
    parts = s.split(':')
    if parts[0] == 'null':
        return 0
    used = date(int(parts[0][0:4]), int(parts[0][4:6]), int(parts[0][6:8]))
    received = date(int(parts[1][0:4]), int(parts[1][4:6]), int(parts[1][6:8]))
    if (used - received).days <= 15:
        return 1
    return -1
# Overwrite the raw packed-date column with the computed 0/1/-1 labels.
dataset2.label = dataset2.label.apply(get_label)
|
Supplement: fetching tag content by tag name in Python
看代码吧~
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
|
import re
import json

import requests
import lxml.html
from bs4 import BeautifulSoup
from lxml import etree

# Download a sample page and keep a local copy for offline inspection.
result = requests.get('http://example.webscraping.com/places/default/view/Algeria-4')
with open('123.html', 'wb') as f:
    f.write(result.content)

# Inline HTML fixture used to demonstrate XPath selection below.
test_data = """
<div>
<ul>
<li class="item-0"><a href="link1.html" rel="external nofollow" rel="external nofollow" id="places_neighbours__row">9,596,960first item</a></li>
<li class="item-1"><a href="link2.html" rel="external nofollow" >second item</a></li>
<li class="item-inactive"><a href="link3.html" rel="external nofollow" >third item</a></li>
<li class="item-1"><a href="link4.html" rel="external nofollow" id="places_neighbours__row">fourth item</a></li>
<li class="item-0"><a href="link5.html" rel="external nofollow" rel="external nofollow" >fifth item</a></li>
<li class="good-0"><a href="link5.html" rel="external nofollow" rel="external nofollow" >fifth item</a></li>
</ul>
<book>
<title lang="aaengbb">Harry Potter</title>
<price id="places_neighbours__row">29.99</price>
</book>
<book>
<title lang="zh">Learning XML</title>
<price>39.95</price>
</book>
<book>
<title>Python</title>
<price>40</price>
</book>
</div>
"""

# XPath quick reference (for the fixture above):
#   //div/ul/li/a[@id]   <a> elements under <li> that carry an id attribute
#   //div/ul/li/a        every such <a>
#   //div/ul/li[2]/a     the <a> inside the second <li>
#
#   /        from the root; strict parent/child relationship required
#   //       from the current node; any descendant at any depth matches
#   *        wildcard, matches any element
#   //div/book[1]/title              title of the first book under div
#   //div/book[1]/title[@lang="zh"]  same, restricted to lang="zh"
#   //div/book/title, //book/title, //title   same result via different paths
#   //book/title/@*                  all attribute values of title
#   //book/title/text()              text content of title (built-in function)
#   //a[@href="link1.html" and @id="places_neighbours__row"]  attribute conjunction
#   //div/book[last()]/title/text()  title text of the last book
#   //div/book[price > 39]/title/text()  books whose price value exceeds 39
#   //li[starts-with(@class,'item')] <li> whose class starts with "item"
#   //title[contains(@lang,"eng")]   <title> whose lang contains "eng"

# Parse the fixture and select <title> elements whose lang contains "eng".
html = lxml.html.fromstring(test_data)  # build an element tree from a string
html_data = html.xpath('//title[contains(@lang,"eng")]')
print(html_data)
for node in html_data:
    print(node.text)
|
以上为个人经验,希望能给大家一个参考,也希望大家多多支持服务器之家。
原文链接:https://blog.csdn.net/wj1298250240/article/details/103146274