# -*- coding: utf-8 -*-
import io

LIMIT = 150000            # lines per output file
file_count = 0
url_list = []

with io.open(r'D:\DB_NEW_bak\DB_NEW_20171009_bak.sql', 'r', encoding='utf-16') as f:
    for line in f:
        url_list.append(line)
        if len(url_list) < LIMIT:
            continue
        # the buffer is full: flush it to <file_count>.sql
        file_name = str(file_count) + ".sql"
        with io.open(file_name, 'w', encoding='utf-16') as out:
            for url in url_list[:-1]:
                out.write(url)
            out.write(url_list[-1].strip())   # last line without its trailing newline
        url_list = []
        file_count += 1

# write out whatever is left over after the loop
if url_list:
    file_name = str(file_count) + ".sql"
    with io.open(file_name, 'w', encoding='utf-16') as out:
        for url in url_list:
            out.write(url)

print('done')
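The same chunking can be written a bit more compactly in Python 3. The sketch below is not part of the original script; it assumes the same UTF-16 input file and the same 150000-line limit, and the split_file helper name is purely illustrative:

from itertools import islice

def split_file(path, limit=150000, encoding='utf-16'):
    """Write `path` out as numbered .sql chunks of at most `limit` lines each."""
    with open(path, 'r', encoding=encoding) as src:
        count = 0
        while True:
            chunk = list(islice(src, limit))        # read up to `limit` lines
            if not chunk:
                break
            with open(str(count) + '.sql', 'w', encoding=encoding) as dst:
                dst.writelines(chunk)
            count += 1

# split_file(r'D:\DB_NEW_bak\DB_NEW_20171009_bak.sql')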
For reference, here are the usual ways to read a text file line by line (the examples use Python 2's print statement; the Python 3 form is shown in the comment):

f = open("foo.txt")          # returns a file object
line = f.readline()          # call the file object's readline() method
while line:
    print line,              # the trailing ',' suppresses the newline
    # print(line, end='')    # use this form in Python 3
    line = f.readline()
f.close()
for line in open("foo.txt"):
    print line,

f = open("c:\\1.txt", "r")
lines = f.readlines()        # read the whole file into memory as a list of lines
for line in lines:
    print line
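In Python 3 the idiomatic form combines the iteration with a with statement so the file is closed automatically; a minimal sketch, with the file name and encoding as placeholders:

with open("foo.txt", encoding="utf-8") as f:   # file name and encoding are illustrative
    for line in f:
        print(line, end="")                    # end="" avoids doubling the newline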
Summary

That concludes this walkthrough of splitting a large txt file line by line in Python.