Optimized image downloads
parent 2819b504a3
commit 4feb1b65d6
1 changed file with 30 additions and 16 deletions
@@ -1,11 +1,12 @@
-import json
 from pickletools import optimize
+from turtle import down
 from weakref import finalize
 from PIL import Image, ImageOps, ImageFilter
-import requests
+from requests import get
 from io import BytesIO
-import io
 import base64
+from multiprocessing.pool import ThreadPool
+from time import time as timer
 
 # find the highest res image in an array of images
 def findImageWithMostPixels(imageArray):
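Three of the new imports carry the optimization: get for the HTTP fetch, ThreadPool to fan the downloads out over worker threads, and time as timer to bracket each phase. A minimal standalone sketch of that pattern, with placeholder URLs that are not from this repo:

from multiprocessing.pool import ThreadPool
from time import time as timer
from requests import get

def fetchBytes(url):
    # one blocking HTTP request per worker thread
    return get(url).content

if __name__ == "__main__":
    urls = ["https://example.com/a.png", "https://example.com/b.png"]  # placeholders
    start = timer()
    # map() blocks until every URL has been fetched and returns results in input order
    payloads = ThreadPool(8).map(fetchBytes, urls)
    print(f"Downloaded {len(payloads)} responses in {timer() - start}s")

The ordering guarantee of map() is useful here: results come back aligned with the input list, so downstream layout code sees the images in the same order the URLs were given.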
@@ -27,15 +28,21 @@ def getTotalImgSize(imageArray): # take the image with the most pixels, multiply
     else:
         return (maxImage.size[0] * 2, maxImage.size[1]*2)
 
+def scaleImageIterable(args):
+    image = args[0]
+    targetWidth = args[1]
+    targetHeight = args[2]
+    pad=args[3]
+    if pad:
+        image = image.convert('RGBA')
+        newImg = ImageOps.pad(image, (targetWidth, targetHeight),color=(0, 0, 0, 0))
+    else:
+        newImg = ImageOps.fit(image, (targetWidth, targetHeight)) # scale + crop
+    return newImg
+
 def scaleAllImagesToSameSize(imageArray,targetWidth,targetHeight,pad=True): # scale all images in the array to the same size, preserving aspect ratio
     newImageArray = []
-    for image in imageArray:
-        if pad:
-            image = image.convert('RGBA')
-            newImg = ImageOps.pad(image, (targetWidth, targetHeight),color=(0, 0, 0, 0))
-        else:
-            newImg = ImageOps.fit(image, (targetWidth, targetHeight)) # scale + crop
-        newImageArray.append(newImg)
+    newImageArray=[scaleImageIterable([image,targetWidth,targetHeight,pad]) for image in imageArray]
    return newImageArray
 
 def blurImage(image, radius):
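scaleImageIterable packs all of its parameters into a single argument, which is the calling convention ThreadPool.map expects. The commit only drives it from a list comprehension, so the wrapper below is a sketch of a possible follow-up rather than something the commit adds; the scaleAllImagesParallel name and the pool size of 4 are assumptions.

from multiprocessing.pool import ThreadPool

def scaleAllImagesParallel(imageArray, targetWidth, targetHeight, pad=True):
    # hypothetical follow-up: same work as scaleAllImagesToSameSize, spread over threads
    argList = [(image, targetWidth, targetHeight, pad) for image in imageArray]
    with ThreadPool(4) as pool:
        # results come back in the same order as imageArray
        return pool.map(scaleImageIterable, argList)

Whether threads actually help here depends on how much of the Pillow resize work releases the GIL, so it is worth timing against the plain list comprehension before switching.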
@@ -83,22 +90,29 @@ def saveImage(image, name):
 
 # combine up to four images into a single image
 def genImage(imageArray):
-    combined = combineImages(imageArray, *getTotalImgSize(imageArray))
-    combinedBG = combineImages(imageArray, *getTotalImgSize(imageArray),False)
+    totalSize=getTotalImgSize(imageArray)
+    combined = combineImages(imageArray, *totalSize)
+    combinedBG = combineImages(imageArray, *totalSize,False)
     combinedBG = blurImage(combinedBG,50)
     finalImg = Image.alpha_composite(combinedBG,combined)
     finalImg = ImageOps.pad(finalImg, findImageWithMostPixels(imageArray).size,color=(0, 0, 0, 0))
     finalImg = finalImg.convert('RGB')
     return finalImg
 
+def downloadImage(url):
+    return Image.open(BytesIO(get(url).content))
+
 def genImageFromURL(urlArray):
     # this method avoids storing the images in disk, instead they're stored in memory
     # no cache means that they'll have to be downloaded again if the image is requested again
     # TODO: cache?
-    imageArray = []
-    for url in urlArray:
-        imageArray.append(Image.open(BytesIO(requests.get(url).content)))
-    return genImage(imageArray)
+    start = timer()
+    imageArray = ThreadPool(8).map(downloadImage,urlArray)
+    print(f"Images downloaded in: {timer() - start}s")
+    start = timer()
+    finalImg = genImage(imageArray)
+    print(f"Image generated in: {timer() - start}s")
+    return finalImg
 
 def lambda_handler(event, context):
     # TODO implement
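ThreadPool(8).map suits this step because each downloadImage call spends nearly all of its time waiting on the network, so eight requests can overlap. What the commit's version does not do is bound or survive a bad URL: one failed request raises inside the pool and aborts the whole batch. A defensive variant is sketched below; the timeout value, the skip-on-failure policy, and the helper names are assumptions, not part of this commit.

from io import BytesIO
from multiprocessing.pool import ThreadPool
import requests
from PIL import Image

def downloadImageSafe(url, timeout=10):
    # hypothetical variant of downloadImage: bounded wait, None on failure
    try:
        response = requests.get(url, timeout=timeout)
        response.raise_for_status()
        return Image.open(BytesIO(response.content))
    except (requests.RequestException, OSError):
        return None

def downloadAllImages(urlArray):
    # closing the pool via the with-block releases the worker threads promptly
    with ThreadPool(8) as pool:
        images = pool.map(downloadImageSafe, urlArray)
    return [image for image in images if image is not None]

Using the pool as a context manager also closes it explicitly instead of leaving the throwaway ThreadPool(8) to be cleaned up by garbage collection.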