I have a list of image URLs stored in a Pandas DataFrame. I want to download all of these images and store them locally.
The code I use to do so is:
import os
import requests

def load(df, output_folder):
    print("Ready to load " + str(len(df.index)) + " images.")
    for i, row in df.iterrows():
        print("Image " + str(i))
        save_image_from_url(row["image_url"], os.path.join(output_folder, row["image_name"]))

def save_image_from_url(url, output_path):
    '''From a given URL, download the image and store it at the given path.'''
    image = requests.get(url)
    with open(output_path, 'wb') as f:
        f.write(image.content)
The problem is that the process is very slow (from 0.5 to 4 seconds per image). Is there a way to do it faster?
The obvious way is to parallelize the downloads; there is a clear example in the concurrent.futures docs (the ThreadPoolExecutor example at https://docs.python.org/3/library/concurrent.futures.html#threadpoolexecutor-example).
For your case, try this approach:
import concurrent.futures
import os
import requests

def save_image_from_url(row, output_folder):
    # Download one image and store it under the name given in the row.
    image = requests.get(row.image_url)
    output_path = os.path.join(output_folder, row.image_name)
    with open(output_path, "wb") as f:
        f.write(image.content)

def load(df, output_folder):
    # Submit one download per row; a small thread pool runs them concurrently.
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        future_to_row = {
            executor.submit(save_image_from_url, row, output_folder): row
            for _, row in df.iterrows()
        }
        for future in concurrent.futures.as_completed(future_to_row):
            row = future_to_row[future]
            try:
                future.result()
            except Exception as exc:
                print("%r generated an exception: %s" % (row.image_url, exc))
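If it is still not fast enough, much of the remaining per-request cost is usually the TCP/TLS handshake, which a bare requests.get repeats for every image. A common refinement is to give each worker thread its own requests.Session so connections to the same host are pooled and reused. Below is a minimal sketch of that variant; the thread_local helper and the timeout value are my additions, not part of the original code, and load can stay exactly as above:

import os
import threading
import requests

thread_local = threading.local()

def get_session():
    # Each worker thread lazily creates and then reuses its own Session,
    # so HTTP connections are kept alive instead of reopened per image.
    if not hasattr(thread_local, "session"):
        thread_local.session = requests.Session()
    return thread_local.session

def save_image_from_url(row, output_folder):
    session = get_session()
    response = session.get(row.image_url, timeout=10)
    response.raise_for_status()  # fail loudly instead of saving an error page
    with open(os.path.join(output_folder, row.image_name), "wb") as f:
        f.write(response.content)

You can also experiment with max_workers: for I/O-bound work like this, values above 5 (say 16 or 32) are often fine, though very high values risk getting rate-limited by the server.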