Reading a very large CSV
import pandas as pd

def get_df(file):
    # Read the CSV in 100,000-row chunks and concatenate them into one DataFrame.
    mylist = []
    for chunk in pd.read_csv(file, engine='python', chunksize=100000):
        mylist.append(chunk)
    temp_df = pd.concat(mylist, axis=0).reset_index(drop=True)
    del mylist
    return temp_df
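A minimal usage sketch, assuming the CSV sits at a hypothetical path such as 'data/very_large_file.csv':

big_df = get_df('data/very_large_file.csv')  # hypothetical file path
print(big_df.shape)                          # confirm how many rows were loaded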
Writing a very large DataFrame to the database in batches
n = len(df) // 100000
for i in range(n + 1):
    # Slice into a separate variable; reassigning df here would corrupt the later batches.
    chunk = df.iloc[i * 100000:(i + 1) * 100000]
    print(f'df: {n + 1} batches in total, writing rows {i * 100000}-{(i + 1) * 100000}')
    chunk.to_sql('df', engine1, if_exists='append', index=False)
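The engine1 object above is assumed to be a SQLAlchemy engine; a sketch of how it might be created, with a hypothetical MySQL connection string, follows. Note that to_sql can also batch the insert on its own via its chunksize argument, which avoids the manual loop.

from sqlalchemy import create_engine

# Hypothetical connection string; adjust the driver, credentials and database name.
engine1 = create_engine('mysql+pymysql://user:password@localhost:3306/testdb')

# Alternative to the manual loop: let pandas batch the insert itself.
df.to_sql('df', engine1, if_exists='append', index=False, chunksize=100000)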