Compare commits: main ... serverless
6 commits

Commits (SHA1):
f2eefbe5a5
32e5376b98
dee62f812a
43d69a1d87
96246aa921
2c9563fafe

17 changed files with 11008 additions and 536 deletions
.gitignore (vendored): 1 line changed

@@ -8,4 +8,3 @@ admin.env
 .env
 _meta
 .serverless
-db/
Dockerfile: 19 lines changed (file deleted)

@@ -1,19 +0,0 @@
FROM python:3.6-alpine AS build
RUN apk add build-base python3-dev linux-headers pcre-dev jpeg-dev zlib-dev
RUN pip install --upgrade pip
RUN pip install yt-dlp pillow uwsgi

FROM python:3.6-alpine AS deps
WORKDIR /twitfix
COPY requirements.txt requirements.txt
COPY --from=build /usr/local/lib/python3.6/site-packages /usr/local/lib/python3.6/site-packages
RUN pip install -r requirements.txt

FROM python:3.6-alpine AS runner
EXPOSE 9000
RUN apk add pcre-dev jpeg-dev zlib-dev
WORKDIR /twitfix
CMD ["uwsgi", "twitfix.ini"]
COPY --from=build /usr/local/bin/uwsgi /usr/local/bin/uwsgi
COPY --from=deps /usr/local/lib/python3.6/site-packages /usr/local/lib/python3.6/site-packages
COPY . .
(file name not captured): 34 lines changed (deleted Dockerfile based on the AWS Lambda Python image)

@@ -1,34 +0,0 @@
FROM public.ecr.aws/lambda/python:3.8
RUN yum -y update
RUN yum -y install git && yum clean all
RUN yum -y install tar gzip zlib freetype-devel \
    gcc \
    ghostscript \
    lcms2-devel \
    libffi-devel \
    libimagequant-devel \
    libjpeg-devel \
    libraqm-devel \
    libtiff-devel \
    libwebp-devel \
    make \
    openjpeg2-devel \
    rh-python36 \
    rh-python36-python-virtualenv \
    sudo \
    tcl-devel \
    tk-devel \
    tkinter \
    which \
    xorg-x11-server-Xvfb \
    zlib-devel \
    && yum clean all
RUN pip install -U --force-reinstall pillow-simd
RUN pip install requests


# Copy function code
COPY __init__.py ${LAMBDA_TASK_ROOT}/app.py

# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile)
CMD [ "app.lambda_handler" ]
(file name not captured): 140 lines changed (deleted Python image-combining module)

@@ -1,140 +0,0 @@
from PIL import Image, ImageOps, ImageFilter
from requests import get
from io import BytesIO
import base64
import concurrent.futures
from time import time as timer

# find the highest res image in an array of images
def findImageWithMostPixels(imageArray):
    maxPixels = 0
    maxImage = None
    for image in imageArray:
        pixels = image.size[0] * image.size[1]
        if pixels > maxPixels:
            maxPixels = pixels
            maxImage = image
    return maxImage

def getTotalImgSize(imageArray): # take the image with the most pixels, multiply it by the number of images, and return the width and height
    maxImage = findImageWithMostPixels(imageArray)
    if (len(imageArray) == 1):
        return (maxImage.size[0], maxImage.size[1])
    elif (len(imageArray) == 2):
        return (maxImage.size[0] * 2, maxImage.size[1])
    else:
        return (maxImage.size[0] * 2, maxImage.size[1]*2)

def scaleImageIterable(args):
    image = args[0]
    targetWidth = args[1]
    targetHeight = args[2]
    pad=args[3]
    if pad:
        image = image.convert('RGBA')
        newImg = ImageOps.pad(image, (targetWidth, targetHeight),color=(0, 0, 0, 0))
    else:
        newImg = ImageOps.fit(image, (targetWidth, targetHeight)) # scale + crop
    return newImg

def scaleAllImagesToSameSize(imageArray,targetWidth,targetHeight,pad=True): # scale all images in the array to the same size, preserving aspect ratio
    newImageArray = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
        newImageArray = [executor.submit(scaleImageIterable, (image, targetWidth, targetHeight,pad)) for image in imageArray]
        newImageArray = [future.result() for future in newImageArray]
    return newImageArray

def blurImage(image, radius):
    return image.filter(ImageFilter.GaussianBlur(radius=radius))

def combineImages(imageArray, totalWidth, totalHeight,pad=True):
    x = 0
    y = 0
    if (len(imageArray) == 1): # if there is only one image, just return it
        return imageArray[0]
    # image generation is needed
    topImg = findImageWithMostPixels(imageArray)
    newImage = Image.new("RGBA", (totalWidth, totalHeight),(0, 0, 0, 0))
    imageArray = scaleAllImagesToSameSize(imageArray,topImg.size[0],topImg.size[1],pad)
    if (len(imageArray) == 2): # if there are two images, combine them horizontally
        for image in imageArray:
            newImage.paste(image, (x, y))
            x += image.size[0]
    elif (len(imageArray) == 3): # the elusive 3 image upload
        # if there are three images, combine the first two horizontally, then combine the last one vertically
        imageArray[2] = scaleAllImagesToSameSize([imageArray[2]],totalWidth,topImg.size[1],pad)[0] # take the last image, treat it like an image array and scale it to the total width, but same height as all individual images
        for image in imageArray[0:2]:
            newImage.paste(image, (x, y))
            x += image.size[0]
        y += imageArray[0].size[1]
        x = 0
        newImage.paste(imageArray[2], (x, y))
    elif (len(imageArray) == 4): # if there are four images, combine the first two horizontally, then combine the last two vertically
        for image in imageArray[0:2]:
            newImage.paste(image, (x, y))
            x += image.size[0]
        y += imageArray[0].size[1]
        x = 0
        for image in imageArray[2:4]:
            newImage.paste(image, (x, y))
            x += image.size[0]
    else:
        for image in imageArray:
            newImage.paste(image, (x, y))
            x += image.size[0]
    return newImage

def saveImage(image, name):
    image.save(name)

# combine up to four images into a single image
def genImage(imageArray):
    totalSize=getTotalImgSize(imageArray)
    combined = combineImages(imageArray, *totalSize)
    combinedBG = combineImages(imageArray, *totalSize,False)
    combinedBG = blurImage(combinedBG,50)
    finalImg = Image.alpha_composite(combinedBG,combined)
    finalImg = ImageOps.pad(finalImg, findImageWithMostPixels(imageArray).size,color=(0, 0, 0, 0))
    finalImg = finalImg.convert('RGB')
    return finalImg

def downloadImage(url):
    return Image.open(BytesIO(get(url).content))

def genImageFromURL(urlArray):
    # this method avoids storing the images in disk, instead they're stored in memory
    # no cache means that they'll have to be downloaded again if the image is requested again
    # TODO: cache?
    start = timer()
    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
        imageArray = [executor.submit(downloadImage, url) for url in urlArray]
        imageArray = [future.result() for future in imageArray]
    print(f"Images downloaded in: {timer() - start}s")
    start = timer()
    finalImg = genImage(imageArray)
    print(f"Image generated in: {timer() - start}s")
    return finalImg

def lambda_handler(event, context):
    if ("queryStringParameters" not in event):
        return {
            "statusCode": 400,
            "body": "Invalid request."
        }
    images = event["queryStringParameters"].get("imgs","").split(",")
    for img in images:
        if not img.startswith("https://pbs.twimg.com"):
            return {'statusCode':400,'body':'Invalid image URL'}
    combined = genImageFromURL(images)
    buffered = BytesIO()
    combined.save(buffered,format="JPEG",quality=60)
    combined_str=base64.b64encode(buffered.getvalue()).decode('ascii')
    return {
        'statusCode': 200,
        "headers":
        {
            "Content-Type": "image/jpeg"
        },
        'body': combined_str,
        'isBase64Encoded': True
    }
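Note: the removed Lambda handler above takes a comma-separated `imgs` query parameter of pbs.twimg.com image URLs and returns the stitched picture as a base64-encoded JPEG. A minimal local smoke test, assuming the deleted module is importable as `combineImg` (its file name is not captured here) and using placeholder image links that would need to be swapped for real media URLs:

```python
import base64
from combineImg import lambda_handler  # assumption: the deleted module imported under this name

# Placeholder pbs.twimg.com URLs; the handler rejects anything outside that host.
event = {"queryStringParameters": {"imgs": "https://pbs.twimg.com/media/a.jpg,https://pbs.twimg.com/media/b.jpg"}}

resp = lambda_handler(event, None)
if resp["statusCode"] == 200:
    with open("combined.jpg", "wb") as out:
        out.write(base64.b64decode(resp["body"]))  # body is base64 because isBase64Encoded is set
else:
    print(resp["body"])
```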
(file name not captured): 29 lines changed (deleted Docker Compose file)

@@ -1,29 +0,0 @@
version: "3"

services:
  proxy:
    image: nginx:alpine
    container_name: twitfix_proxy
    volumes:
      - "./twitfix_proxy.conf:/etc/nginx/conf.d/default.conf"
    ports:
      - 8088:80
    depends_on:
      - twitfix

  twitfix:
    image: twitfix
    build: .
    container_name: twitfix_main
    volumes:
      - "./twitfix.ini:/twitfix/twitfix.ini:ro"
      - "./config.json:/twitfix/config.json:ro"
    depends_on:
      - db

  db:
    image: mongo:5.0.9
    container_name: twitfix_db
    volumes:
      - "./db:/data/db"
docker.md: 40 lines changed (file deleted)

@@ -1,40 +0,0 @@
# vxTwitter Docker

## Configuration

Setup mongodb in `config.json`:
```json
{
    "config":{
        "link_cache":"db",
        "database":"mongodb://twitfix_db:27017/",
        [...]
    },
    [...]
}
```

Use TCP socket for uwsgi `twitfix.ini`:
```ini
[uwsgi]
module = wsgi:app

master = true
processes = 5

socket = 0.0.0.0:9000
buffer-size = 8192
#socket = /var/run/twitfix.sock
#chmod-socket = 660
vacuum = true

die-on-term = true
```

## Run

To run and build, use this command:
```bash
docker-compose up -d --build
```
msgs.py: 1 line changed (file deleted)

@@ -1 +0,0 @@
failedToScan="Failed to scan your link! This may be due to an incorrect link, private account, or the twitter API itself might be having issues (Check here: https://api.twitterstat.us/)\nIt's also possible that Twitter is API limiting me, in which case I can't do anything about it."
package-lock.json (generated, new file): 10771 lines; file diff suppressed because it is too large
package.json (new file): 28 lines changed

@@ -0,0 +1,28 @@
{
  "name": "bettertwitfix",
  "version": "1.0.0",
  "description": "(A fork of TwitFix)\r Basic flask server that serves fixed twitter video embeds to desktop discord by using either the Twitter API or Youtube-DL to grab tweet video information. This also automatically embeds the first link in the text of non video tweets (API Only)",
  "main": "index.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "repository": {
    "type": "git",
    "url": "git+https://github.com/dylanpdx/BetterTwitFix.git"
  },
  "keywords": [],
  "author": "",
  "license": "ISC",
  "bugs": {
    "url": "https://github.com/dylanpdx/BetterTwitFix/issues"
  },
  "homepage": "https://github.com/dylanpdx/BetterTwitFix#readme",
  "dependencies": {
    "serverless-wsgi": "^3.0.0"
  },
  "devDependencies": {
    "serverless-plugin-common-excludes": "^4.0.0",
    "serverless-plugin-include-dependencies": "^5.0.0",
    "serverless-python-requirements": "^5.4.0"
  }
}
(file name not captured): Markdown readme, modified

@@ -2,11 +2,6 @@
 (A fork of TwitFix)
 Basic flask server that serves fixed twitter video embeds to desktop discord by using either the Twitter API or Youtube-DL to grab tweet video information. This also automatically embeds the first link in the text of non video tweets (API Only)
-## Differences from fxtwitter
-
-fxtwitter exposed all recently processed tweets publicly via a "latest" and "top" page.
-
-Even though Tweets are public, it was a personal concern for me that a tweet with potentially sensitive information in it could suddenly be shown to however many people were browsing the latest tweets page, and could be used as a tool for harassment. This was removed in [The following commit](https://github.com/dylanpdx/BetterTwitFix/commit/87ba86ba502e73ddb370bd4e5b964548d3272400#diff-a11c36d9b2d53672d6b3d781dca5bef9129159947de66bc3ffaec5fab389d80cL115)
 
 ## How to use (discord side)
 
 just put the url to the server, and directly after, the full URL to the tweet you want to embed
@@ -21,8 +16,6 @@ You can also simply type out 'vx' directly before 'twitter.com' in any valid twi
 
 **Note**: If you enjoy this service, please considering donating via [Ko-Fi](https://ko-fi.com/dylanpdx) to help cover server costs
 
-I do not monitor any tweets processed by this server. Additionally, if you plan on hosting the code yourself and are concerned about this, be sure to check how to disable logging on the web server you are using (i.e Nginx)
-
 ## How to run (server side)
 
 this script uses the youtube-dl python module, along with flask, twitter and pymongo, so install those with pip (you can use `pip install -r requirements.txt`) and start the server with `python twitfix.py`
@@ -57,8 +50,6 @@ vxTwitter generates a config.json in its root directory the first time you run i
 
 **url** - used to tell the user where to look for the oembed endpoint, make sure to set this to your public facing url
 
-**combination_method** - using c.vxtwitter as the url causes vxTwitter to combine all images in the post into one. This is CPU intensive, so you might not want it running on the same machine that's serving requests. When `combination_method` is set to `local`, it will use the local machine to combine the images. This requires pillow to be installed. If you want to use another server, replace `local` with the URL to the endpoint which combines images. Both methods use the code in the `combineImg` module. Inside, there's also a `Dockerfile` intended to be deployed as a combination endpoint on an [AWS Lambda function](https://docs.aws.amazon.com/lambda/latest/dg/images-create.html).
-
 This project is licensed under the **Do What The Fuck You Want Public License**
 
 
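Note: the `combination_method` setting removed from the readme above is backed by the `/rendercombined.jpg` route that this compare also deletes from `twitfix.py`. A sketch of how that removed flow assembled the combined-image URL, with placeholder image links:

```python
# Mirrors the deleted embedCombinedVnf logic; the pbs.twimg.com links are placeholders.
imgs = [
    "https://pbs.twimg.com/media/a.jpg",
    "https://pbs.twimg.com/media/b.jpg",
]
combined_url = "https://vxtwitter.com/rendercombined.jpg?imgs=" + ",".join(imgs)
print(combined_url)  # the removed embed code used this URL as the preview image
```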
(file name not captured): pip requirements file, modified

@@ -1,20 +1,25 @@
-certifi==2021.10.8
+boto3==1.23.0
+botocore==1.26.0
+certifi==2021.10.8
 charset-normalizer==2.0.12
 click==8.0.4
 colorama==0.4.4
-dataclasses==0.8; python_version < '3.8'
+dataclasses==0.8
 Flask==2.0.3
 Flask-Cors==3.0.10
 idna==3.3
 importlib-metadata==4.8.3
 itsdangerous==2.0.1
 Jinja2==3.0.3
+jmespath==0.10.0
 MarkupSafe==2.0.1
 pymongo==4.1.1
+python-dateutil==2.8.2
 requests==2.27.1
+s3transfer==0.5.2
 six==1.16.0
 twitter==1.19.3
-typing-extensions==4.1.1
+typing_extensions==4.1.1
 urllib3==1.26.9
 Werkzeug==2.0.3
 youtube-dl==2021.12.17
serverless.yml (new file): 79 lines changed

@@ -0,0 +1,79 @@
service: vxTwitter

provider:
  name: aws
  runtime: python3.6
  stage: dev
  iamRoleStatements:
    - Effect: Allow
      Action:
        - dynamodb:Query
        - dynamodb:Scan
        - dynamodb:GetItem
        - dynamodb:PutItem
        - dynamodb:UpdateItem
        - dynamodb:DeleteItem
      Resource:
        - { "Fn::GetAtt": ["vxTwitterDynamoTable", "Arn" ] }
  environment:
    CACHE_TABLE: ${self:custom.tableName}
    RUNNING_SERVERLESS: 1
    VXTWITTER_LINK_CACHE: dynamodb
    VXTWITTER_DATABASE: none
    VXTWITTER_DATABASE_TABLE: none
    VXTWITTER_METHOD: youtube-dl
    VXTWITTER_COLOR: \#43B581
    VXTWITTER_APP_NAME: vxTwitter
    VXTWITTER_REPO: https://github.com/dylanpdx/BetterTwitFix
    VXTWITTER_URL: https://vxtwitter.com
    # Twitter API keys
    VXTWITTER_TWITTER_API_KEY: none
    VXTWITTER_TWITTER_API_SECRET: none
    VXTWITTER_TWITTER_ACCESS_TOKEN: none
    VXTWITTER_TWITTER_ACCESS_SECRET: none

package:
  patterns:
    - '!node_modules/**'
    - '!venv/**'

plugins:
  - serverless-wsgi
  - serverless-python-requirements
  - serverless-plugin-common-excludes
  - serverless-plugin-include-dependencies

functions:
  vxTwitterApp:
    handler: wsgi_handler.handler
    url: true
    layers:
      - Ref: PythonRequirementsLambdaLayer

custom:
  tableName: 'users-table-${self:provider.stage}'
  wsgi:
    app: twitfix.app
  pythonRequirements:
    layer: true
    dockerizePip: true

resources:
  Resources:
    vxTwitterDynamoTable:
      Type: 'AWS::DynamoDB::Table'
      Properties:
        AttributeDefinitions:
          -
            AttributeName: tweet
            AttributeType: S
        KeySchema:
          -
            AttributeName: tweet
            KeyType: HASH
        ProvisionedThroughput:
          ReadCapacityUnits: 1
          WriteCapacityUnits: 1
        TableName: ${self:custom.tableName}
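Note: `serverless.yml` wires the Flask app to a Lambda function URL through serverless-wsgi and caches tweet lookups in the DynamoDB table declared under `resources`. For local testing, an equivalent table could be created by hand; this boto3 sketch assumes the `dev` stage (so `${self:custom.tableName}` resolves to `users-table-dev`), an instance of DynamoDB Local on port 8000, and AWS credentials/region taken from the environment:

```python
import boto3

# Assumption: pointing at DynamoDB Local; drop endpoint_url to target real AWS.
dynamodb = boto3.client("dynamodb", endpoint_url="http://localhost:8000")
dynamodb.create_table(
    TableName="users-table-dev",  # custom.tableName with provider.stage = dev
    AttributeDefinitions=[{"AttributeName": "tweet", "AttributeType": "S"}],
    KeySchema=[{"AttributeName": "tweet", "KeyType": "HASH"}],
    ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1},
)
```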
(file name not captured): HTML embed template, modified

@@ -19,7 +19,6 @@
 <meta property="og:video:height" content="480" />
 <meta name="twitter:title" content="{{ user }} (@{{ screenName }})" />
 <meta property="og:image" content="{{ pic }}" />
-<meta property="og:description" content="{{ desc }}" />
 
 <link rel="alternate" href="{{ url }}/oembed.json?desc={{ urlUser }}&user={{ urlDesc }}&link={{ urlLink }}&ttype=video" type="application/json+oembed" title="{{ user }}">
 <meta http-equiv="refresh" content="0; url = {{ tweetLink }}" /> {% endblock %} {% block body %} Redirecting you to the tweet in a moment. <a href="{{ tweetLink }}">Or click here.</a> {% endblock %}
twExtract.py: 18 lines changed (file deleted)

@@ -1,18 +0,0 @@
import imp
import yt_dlp
from yt_dlp.extractor import twitter
import json

def extractStatus(url):
    twIE = twitter.TwitterIE()
    twIE.set_downloader(yt_dlp.YoutubeDL())
    twid = twIE._match_id(url)
    status = twIE._call_api(
        'statuses/show/%s.json' % twid, twid, {
            'cards_platform': 'Web-12',
            'include_cards': 1,
            'include_reply_count': 1,
            'include_user_entities': 0,
            'tweet_mode': 'extended',
        })
    return status
twitfix.py: 251 lines changed

@@ -1,7 +1,6 @@
-from weakref import finalize
-from flask import Flask, render_template, request, redirect, abort, Response, send_from_directory, url_for, send_file, make_response, jsonify
+from flask import Flask, render_template, request, redirect, Response, send_from_directory, url_for, send_file, make_response, jsonify
 from flask_cors import CORS
-import yt_dlp
+import youtube_dl
 import textwrap
 import twitter
 import pymongo
@@ -11,11 +10,8 @@ import re
 import os
 import urllib.parse
 import urllib.request
-import combineImg
-from datetime import date,datetime, timedelta
-from io import BytesIO
-import msgs
-import twExtract
+from datetime import date
+import boto3
 
 app = Flask(__name__)
 CORS(app)
@@ -38,18 +34,19 @@ generate_embed_user_agents = [
 
 # Read config from config.json. If it does not exist, create new.
 if not os.path.exists("config.json"):
+    serverless_check = os.environ.get('RUNNING_SERVERLESS')
+    if serverless_check == None: # Running on local pc, therefore we can access the filesystem
     with open("config.json", "w") as outfile:
         default_config = {
             "config":{
-                "link_cache":"json",
+                "link_cache":"dynamodb",
                 "database":"[url to mongo database goes here]",
                 "table":"TwiFix",
                 "method":"youtube-dl",
                 "color":"#43B581",
                 "appname": "vxTwitter",
                 "repo": "https://github.com/dylanpdx/BetterTwitFix",
-                "url": "https://vxtwitter.com",
-                "combination_method": "local" # can either be 'local' or a URL to a server handling requests in the same format
+                "url": "https://vxtwitter.com"
             },
             "api":{"api_key":"[api_key goes here]",
                 "api_secret":"[api_secret goes here]",
@@ -57,8 +54,26 @@ if not os.path.exists("config.json"):
                 "access_secret":"[access_secret goes here]"
             }
         }
 
         json.dump(default_config, outfile, indent=4, sort_keys=True)
+    else: # Running on serverless, therefore we cannot access the filesystem and must use environment variables
+        default_config = {
+            "config":{
+                "link_cache":os.environ['VXTWITTER_LINK_CACHE'],
+                "database":os.environ['VXTWITTER_DATABASE'],
+                "table":os.environ['VXTWITTER_DATABASE_TABLE'],
+                "method":os.environ['VXTWITTER_METHOD'],
+                "color":os.environ['VXTWITTER_COLOR'],
+                "appname": os.environ['VXTWITTER_APP_NAME'],
+                "repo": os.environ['VXTWITTER_REPO'],
+                "url": os.environ['VXTWITTER_URL'],
+            },
+            "api":{
+                "api_key":os.environ['VXTWITTER_TWITTER_API_KEY'],
+                "api_secret":os.environ['VXTWITTER_TWITTER_API_SECRET'],
+                "access_token":os.environ['VXTWITTER_TWITTER_ACCESS_TOKEN'],
+                "access_secret":os.environ['VXTWITTER_TWITTER_ACCESS_SECRET']
+            }
+        }
 
     config = default_config
 else:
@@ -72,6 +87,9 @@ if config['config']['method'] in ('api', 'hybrid'):
     twitter_api = twitter.Twitter(auth=auth)
 
 link_cache_system = config['config']['link_cache']
+DYNAMO_CACHE_TBL=None
+if link_cache_system=="dynamodb":
+    DYNAMO_CACHE_TBL=os.environ['CACHE_TABLE']
 
 if link_cache_system == "json":
     link_cache = {}
@@ -80,19 +98,15 @@ if link_cache_system == "json":
             default_link_cache = {"test":"test"}
             json.dump(default_link_cache, outfile, indent=4, sort_keys=True)
 
-    try:
-        with open('links.json', "r") as f:
-            link_cache = json.load(f)
-    except (json.decoder.JSONDecodeError, FileNotFoundError):
-        print(" ➤ [ X ] Failed to load cache JSON file. Creating new file.")
-        with open('links.json', "w") as f:
-            link_cache = {}
-            json.dump(link_cache, f)
+    f = open('links.json',)
+    link_cache = json.load(f)
+    f.close()
 
 elif link_cache_system == "db":
     client = pymongo.MongoClient(config['config']['database'], connect=False)
     table = config['config']['table']
     db = client[table]
+elif link_cache_system == "dynamodb":
+    client = boto3.resource('dynamodb')
 
 @app.route('/') # If the useragent is discord, return the embed, if not, redirect to configured repo directly
 def default():
@@ -117,28 +131,16 @@ def twitfix(sub_path):
     print(request.url)
 
     if request.url.startswith("https://d.vx"): # Matches d.fx? Try to give the user a direct link
-        if match.start() == 0:
-            twitter_url = "https://twitter.com/" + sub_path
         if user_agent in generate_embed_user_agents:
             print( " ➤ [ D ] d.vx link shown to discord user-agent!")
             if request.url.endswith(".mp4") and "?" not in request.url:
-                return redirect(direct_video_link(twitter_url),302)
+                return dl(sub_path)
             else:
                 return message("To use a direct MP4 link in discord, remove anything past '?' and put '.mp4' at the end")
         else:
            print(" ➤ [ R ] Redirect to MP4 using d.fxtwitter.com")
            return dir(sub_path)
-    elif request.url.startswith("https://c.vx"):
-        twitter_url = sub_path
-
-        if match.start() == 0:
-            twitter_url = "https://twitter.com/" + sub_path
-
-        if user_agent in generate_embed_user_agents:
-            return embedCombined(twitter_url)
-        else:
-            print(" ➤ [ R ] Redirect to " + twitter_url)
-            return redirect(twitter_url, 301)
     elif request.url.endswith(".mp4") or request.url.endswith("%2Emp4"):
         twitter_url = "https://twitter.com/" + sub_path
 
@@ -147,7 +149,7 @@ def twitfix(sub_path):
         else:
             clean = twitter_url
 
-        return redirect(direct_video_link(clean),302)
+        return dl(clean)
 
     # elif request.url.endswith(".json") or request.url.endswith("%2Ejson"):
     #     twitter_url = "https://twitter.com/" + sub_path
@@ -220,33 +222,6 @@ def favicon():
     return send_from_directory(os.path.join(app.root_path, 'static'),
                                'favicon.ico',mimetype='image/vnd.microsoft.icon')
 
-@app.route("/rendercombined.jpg")
-def rendercombined():
-    # get "imgs" from request arguments
-    imgs = request.args.get("imgs", "")
-
-    if 'combination_method' in config['config'] and config['config']['combination_method'] != "local":
-        url = config['config']['combination_method'] + "/rendercombined.jpg?imgs=" + imgs
-        return redirect(url, 302)
-        # Redirecting here instead of setting the embed URL directly to this because if the config combination_method changes in the future, old URLs will still work
-
-    imgs = imgs.split(",")
-    if (len(imgs) == 0 or len(imgs)>4):
-        abort(400)
-    #check that each image starts with "https://pbs.twimg.com"
-    for img in imgs:
-        if not img.startswith("https://pbs.twimg.com"):
-            abort(400)
-    finalImg= combineImg.genImageFromURL(imgs)
-    imgIo = BytesIO()
-    finalImg = finalImg.convert("RGB")
-    finalImg.save(imgIo, 'JPEG',quality=70)
-    imgIo.seek(0)
-    return send_file(imgIo, mimetype='image/jpeg')
-
-def getDefaultTTL():
-    return datetime.today().replace(microsecond=0) + timedelta(days=1)
-
 def direct_video(video_link): # Just get a redirect to a MP4 link from any tweet link
     cached_vnf = getVnfFromLinkCache(video_link)
     if cached_vnf == None:
@@ -257,7 +232,7 @@ def direct_video(video_link): # Just get a redirect to a MP4 link from any tweet
             print(" ➤ [ D ] Redirecting to direct URL: " + vnf['url'])
         except Exception as e:
             print(e)
-            return message(msgs.failedToScan)
+            return message("Failed to scan your link!")
     else:
         return redirect(cached_vnf['url'], 301)
         print(" ➤ [ D ] Redirecting to direct URL: " + vnf['url'])
@@ -272,7 +247,7 @@ def direct_video_link(video_link): # Just get a redirect to a MP4 link from any
             print(" ➤ [ D ] Redirecting to direct URL: " + vnf['url'])
         except Exception as e:
             print(e)
-            return message(msgs.failedToScan)
+            return message("Failed to scan your link!")
     else:
         return cached_vnf['url']
         print(" ➤ [ D ] Redirecting to direct URL: " + vnf['url'])
@@ -288,13 +263,11 @@ def embed_video(video_link, image=0): # Return Embed from any tweet link
 
         except Exception as e:
             print(e)
-            return message(msgs.failedToScan)
+            return message("Failed to scan your link!")
     else:
         return embed(video_link, cached_vnf, image)
 
-def tweetInfo(url, tweet="", desc="", thumb="", uploader="", screen_name="", pfp="", tweetType="", images="", hits=0, likes=0, rts=0, time="", qrt={}, nsfw=False,ttl=None): # Return a dict of video info with default values
-    if (ttl==None):
-        ttl = getDefaultTTL()
+def tweetInfo(url, tweet="", desc="", thumb="", uploader="", screen_name="", pfp="", tweetType="", images="", hits=0, likes=0, rts=0, time="", qrt={}, nsfw=False): # Return a dict of video info with default values
     vnf = {
         "tweet" : tweet,
         "url" : url,
@@ -310,19 +283,16 @@ def tweetInfo(url, tweet="", desc="", thumb="", uploader="", screen_name="", pfp
         "rts" : rts,
         "time" : time,
         "qrt" : qrt,
-        "nsfw" : nsfw,
-        "ttl" : ttl
+        "nsfw" : nsfw
     }
     return vnf
 
-def get_tweet_data_from_api(video_link):
+def link_to_vnf_from_api(video_link):
     print(" ➤ [ + ] Attempting to download tweet info from Twitter API")
     twid = int(re.sub(r'\?.*$','',video_link.rsplit("/", 1)[-1])) # gets the tweet ID as a int from the passed url
     tweet = twitter_api.statuses.show(_id=twid, tweet_mode="extended")
-    #print(tweet) # For when I need to poke around and see what a tweet looks like
-    return tweet
+    # For when I need to poke around and see what a tweet looks like
+    print(tweet)
 
-def link_to_vnf_from_tweet_data(tweet,video_link):
     imgs = ["","","","", ""]
     print(" ➤ [ + ] Tweet Type: " + tweetType(tweet))
     # Check to see if tweet has a video, if not, make the url passed to the VNF the first t.co link in the tweet
@@ -333,7 +303,6 @@ def link_to_vnf_from_tweet_data(tweet,video_link):
         for video in tweet['extended_entities']['media'][0]['video_info']['variants']:
             if video['content_type'] == "video/mp4" and video['bitrate'] > best_bitrate:
                 url = video['url']
-                best_bitrate = video['bitrate']
     elif tweetType(tweet) == "Text":
         url = ""
         thumb = ""
@@ -382,21 +351,9 @@ def link_to_vnf_from_tweet_data(tweet,video_link):
 
     return vnf
 
-def link_to_vnf_from_unofficial_api(video_link):
-    print(" ➤ [ + ] Attempting to download tweet info from UNOFFICIAL Twitter API")
-    tweet = twExtract.extractStatus(video_link)
-    print (" ➤ [ ✔ ] Unofficial API Success")
-    return link_to_vnf_from_tweet_data(tweet,video_link)
-
-def link_to_vnf_from_api(video_link):
-    tweet = get_tweet_data_from_api(video_link)
-    return link_to_vnf_from_tweet_data(tweet,video_link)
-
 def link_to_vnf_from_youtubedl(video_link):
     print(" ➤ [ X ] Attempting to download tweet info via YoutubeDL: " + video_link)
-    with yt_dlp.YoutubeDL({'outtmpl': '%(id)s.%(ext)s'}) as ydl:
+    with youtube_dl.YoutubeDL({'outtmpl': '%(id)s.%(ext)s'}) as ydl:
         result = ydl.extract_info(video_link, download=False)
         vnf = tweetInfo(result['url'], video_link, result['description'].rsplit(' ',1)[0], result['thumbnail'], result['uploader'])
         return vnf
@@ -408,13 +365,7 @@ def link_to_vnf(video_link): # Return a VideoInfo object or die trying
         except Exception as e:
             print(" ➤ [ !!! ] API Failed")
             print(e)
-            try:
-                return link_to_vnf_from_unofficial_api(video_link)
-            except Exception as e:
-                print(" ➤ [ !!! ] UNOFFICIAL API Failed")
-                print(e)
-                return link_to_vnf_from_youtubedl(video_link) # This is the last resort, will only work for videos
-
+            return link_to_vnf_from_youtubedl(video_link)
     elif config['config']['method'] == 'api':
         try:
             return link_to_vnf_from_api(video_link)
@@ -456,26 +407,45 @@ def getVnfFromLinkCache(video_link):
         else:
             print(" ➤ [ X ] Link not in json cache")
             return None
+    elif link_cache_system == "dynamodb":
+        table = client.Table(DYNAMO_CACHE_TBL)
+        response = table.get_item(
+            Key={
+                'tweet': video_link
+            }
+        )
+        if 'Item' in response:
+            print("Link located in dynamodb cache")
+            vnf = response['Item']['vnf']
+            return vnf
+        else:
+            print(" ➤ [ X ] Link not in dynamodb cache")
+            return None
 
-def serializeUnknown(obj):
-    if isinstance(obj, (datetime, date)):
-        return obj.isoformat()
-    raise TypeError ("Type %s not serializable" % type(obj))
-
 def addVnfToLinkCache(video_link, vnf):
-    try:
-        if link_cache_system == "db":
+    if link_cache_system == "db":
+        try:
             out = db.linkCache.insert_one(vnf)
             print(" ➤ [ + ] Link added to DB cache ")
             return True
-        elif link_cache_system == "json":
-            link_cache[video_link] = vnf
-            with open("links.json", "w") as outfile:
-                json.dump(link_cache, outfile, indent=4, sort_keys=True, default=serializeUnknown)
-            return None
-    except Exception:
-        print(" ➤ [ X ] Failed to add link to DB cache")
-        return None
+        except Exception:
+            print(" ➤ [ X ] Failed to add link to DB cache")
+            return None
+    elif link_cache_system == "json":
+        link_cache[video_link] = vnf
+        with open("links.json", "w") as outfile:
+            json.dump(link_cache, outfile, indent=4, sort_keys=True)
+        return None
+    elif link_cache_system == "dynamodb":
+        table = client.Table(DYNAMO_CACHE_TBL)
+        table.put_item(
+            Item={
+                'tweet': video_link,
+                'vnf': vnf
+            }
+        )
+        print(" ➤ [ + ] Link added to dynamodb cache ")
+        return True
 
 def message(text):
     return render_template(
@@ -487,6 +457,7 @@ def message(text):
         url = config['config']['url'] )
 
 def embed(video_link, vnf, image):
+    print(vnf)
     print(" ➤ [ E ] Embedding " + vnf['type'] + ": " + vnf['url'])
 
     desc = re.sub(r' http.*t\.co\S+', '', vnf['description'])
@@ -508,12 +479,10 @@ def embed(video_link, vnf, image):
     except:
         vnf['likes'] = 0; vnf['rts'] = 0; vnf['time'] = 0
         print(' ➤ [ X ] Failed QRT check - old VNF object')
-    appNamePost = ""
     if vnf['type'] == "Text": # Change the template based on tweet type
         template = 'text.html'
     if vnf['type'] == "Image":
-        if vnf['images'][4]!="1":
-            appNamePost = " - Image " + str(image+1) + "/" + str(vnf['images'][4])
         image = vnf['images'][image]
         template = 'image.html'
     if vnf['type'] == "Video":
@@ -542,7 +511,7 @@ def embed(video_link, vnf, image):
         user = vnf['uploader'],
         video_link = video_link,
         color = color,
-        appname = config['config']['appname']+appNamePost,
+        appname = config['config']['appname'],
         repo = config['config']['repo'],
         url = config['config']['url'],
         urlDesc = urlDesc,
@@ -550,68 +519,6 @@ def embed(video_link, vnf, image):
         urlLink = urlLink,
         tweetLink = vnf['tweet'] )
 
-
-def embedCombined(video_link):
-    cached_vnf = getVnfFromLinkCache(video_link)
-
-    if cached_vnf == None:
-        try:
-            vnf = link_to_vnf(video_link)
-            addVnfToLinkCache(video_link, vnf)
-            return embedCombinedVnf(video_link, vnf)
-
-        except Exception as e:
-            print(e)
-            return message(msgs.failedToScan)
-    else:
-        return embedCombinedVnf(video_link, cached_vnf)
-
-def embedCombinedVnf(video_link,vnf):
-    if vnf['type'] != "Image" or vnf['images'][4] == "1":
-        return embed(video_link, vnf, 0)
-    desc = re.sub(r' http.*t\.co\S+', '', vnf['description'])
-    urlUser = urllib.parse.quote(vnf['uploader'])
-    urlDesc = urllib.parse.quote(desc)
-    urlLink = urllib.parse.quote(video_link)
-    likeDisplay = ("\n\n💖 " + str(vnf['likes']) + " 🔁 " + str(vnf['rts']) + "\n")
-
-    if vnf['qrt'] == {}: # Check if this is a QRT and modify the description
-        desc = (desc + likeDisplay)
-    else:
-        qrtDisplay = ("\n─────────────\n ➤ QRT of " + vnf['qrt']['handle'] + " (@" + vnf['qrt']['screen_name'] + "):\n─────────────\n'" + vnf['qrt']['desc'] + "'")
-        desc = (desc + qrtDisplay + likeDisplay)
-
-    color = "#7FFFD4" # Green
-
-    if vnf['nsfw'] == True:
-        color = "#800020" # Red
-    image = "https://vxtwitter.com/rendercombined.jpg?imgs="
-    for i in range(0,int(vnf['images'][4])):
-        image = image + vnf['images'][i] + ","
-    image = image[:-1] # Remove last comma
-    return render_template(
-        'image.html',
-        likes = vnf['likes'],
-        rts = vnf['rts'],
-        time = vnf['time'],
-        screenName = vnf['screen_name'],
-        vidlink = vnf['url'],
-        pfp = vnf['pfp'],
-        vidurl = vnf['url'],
-        desc = desc,
-        pic = image,
-        user = vnf['uploader'],
-        video_link = video_link,
-        color = color,
-        appname = config['config']['appname'] + " - View original tweet for full quality",
-        repo = config['config']['repo'],
-        url = config['config']['url'],
-        urlDesc = urlDesc,
-        urlUser = urlUser,
-        urlLink = urlLink,
-        tweetLink = vnf['tweet'] )
-
 def tweetType(tweet): # Are we dealing with a Video, Image, or Text tweet?
     if 'extended_entities' in tweet:
         if 'video_info' in tweet['extended_entities']['media'][0]:
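Note: with the link cache set to `dynamodb`, each entry is stored with the tweet URL as the partition key (`tweet`) and the whole vnf dict as a single `vnf` attribute, matching the table declared in `serverless.yml`. A sketch for inspecting a cached entry outside the app; the table name assumes the `dev` stage and the tweet URL is a placeholder:

```python
import boto3

# Assumes AWS credentials and region come from the environment.
table = boto3.resource("dynamodb").Table("users-table-dev")
item = table.get_item(Key={"tweet": "https://twitter.com/someuser/status/123"}).get("Item")
print(item["vnf"] if item else "not cached")
```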
(file name not captured): systemd unit file, modified

@@ -8,8 +8,6 @@ Group=dylan
 WorkingDirectory=/home/dylan/BetterTwitFix
 Environment="PATH=/home/dylan/BetterTwitFix/venv/bin"
 ExecStart=/home/dylan/BetterTwitFix/venv/bin/uwsgi --ini twitfix.ini
-Restart=always
-RestartSec=3
 
 [Install]
 WantedBy=multi-user.target
(file name not captured): 24 lines changed (deleted nginx site config)

@@ -1,24 +0,0 @@
server {
    listen 80;
    server_name localhost;

    #access_log /var/log/nginx/host.access.log main;

    location / {
        try_files $uri @twitfix;
    }

    location @twitfix {
        include uwsgi_params;
        uwsgi_pass uwsgi://twitfix_main:9000;
    }

    #error_page 404 /404.html;

    # redirect server error pages to the static page /50x.html
    #
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }
}