The script below splits a large SQL dump into smaller files of at most 150,000 lines each, written out as 0.sql, 1.sql, and so on:
# -*- coding: utf-8 -*-
import io

LIMIT = 150000          # maximum number of lines per output file
file_count = 0
url_list = []

# Read the large SQL dump line by line (the source file is UTF-16 encoded)
with io.open(r'D:\DB_NEW_bak\DB_NEW_20171009_bak.sql', 'r', encoding='utf-16') as f:
    for line in f:
        url_list.append(line)
        if len(url_list) < LIMIT:
            continue
        # The buffer is full: flush it to the next numbered chunk (0.sql, 1.sql, ...)
        file_name = str(file_count) + ".sql"
        with io.open(file_name, 'w', encoding='utf-16') as out:
            for url in url_list[:-1]:
                out.write(url)
            out.write(url_list[-1].strip())   # drop the trailing newline of the chunk
        url_list = []
        file_count += 1

# Write whatever is left over after the loop ends
if url_list:
    file_name = str(file_count) + ".sql"
    with io.open(file_name, 'w', encoding='utf-16') as out:
        for url in url_list:
            out.write(url)
print('done')
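As a quick sanity check (not part of the original script), the sketch below re-reads the generated chunks and counts their total number of lines, which should match the line count of the source dump. The numeric file names and the UTF-16 encoding simply mirror the assumptions made by the script above.

import glob
import io

# Collect only the numbered chunk files (0.sql, 1.sql, ...) written to the current directory.
chunks = sorted(glob.glob('[0-9]*.sql'), key=lambda name: int(name.split('.')[0]))

total = 0
for name in chunks:
    with io.open(name, 'r', encoding='utf-16') as chunk:
        total += sum(1 for _ in chunk)   # count the lines in this chunk

print(total)   # should equal the number of lines in the original SQL dump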
This is all very simple. Here are three ways to read a file line by line:
Method 1:
f = open("foo.txt")          # returns a file object
line = f.readline()          # read one line with the file's readline() method
while line:
    print(line, end='')      # end='' avoids adding a second newline
    # In Python 2: print line,  (the trailing comma suppresses the extra newline)
    line = f.readline()
f.close()
Method 2:
for line in open("foo.txt"):
    print(line, end='')
Method 3:
f = open("c:\\1.txt", "r")
lines = f.readlines()        # read the whole file into a list of lines at once
for line in lines:
    print(line, end='')
f.close()
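For completeness: on Python 3, the usual idiom today combines Method 2 with a with statement, so the file is closed automatically and only one line is held in memory at a time, which is what makes it suitable for large files like the SQL dump above. A minimal sketch (the file name and encoding are placeholder assumptions):

# Iterate over the file lazily; the with block closes the file automatically.
with open("foo.txt", encoding="utf-8") as f:   # encoding is an assumption
    for line in f:
        print(line, end="")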
Summary
That covers this editor's walkthrough of splitting a large txt file line by line with Python. I hope it helps. If you have any questions, please leave me a message and I will reply as soon as I can. Many thanks as well for your support of the 服务器之家 site!