Compare commits
41 commits
serverless...main
b27296e317
40f4770e17
e28789a09d
faed7482f4
a5b2eb6cd0
16dfc509d3
174a20d896
e64dee9349
f0f709f62e
0f77b2e0a4
285c8f70f8
ffcde09b97
891db049af
797b670d0e
728785278b
ec1f4dc284
77b972a1b2
4feb1b65d6
2819b504a3
2399788fdc
7a021916f0
a06a89825c
6717fd464a
022edb5122
d7e0cb9089
55ea554357
b457760fae
39a9ffd512
a6efd26447
e81beb975c
55c956ad4e
014f9d0e8d
f640ac69a1
08d686c652
7eff745550
8df211090c
5a2a4e6316
681f191502
82fe66af48
fba1256efd
85c682ab11
17 changed files with 536 additions and 11008 deletions
1 .gitignore vendored

@@ -8,3 +8,4 @@ admin.env
.env
_meta
.serverless
db/
19 Dockerfile Normal file

@@ -0,0 +1,19 @@
FROM python:3.6-alpine AS build
RUN apk add build-base python3-dev linux-headers pcre-dev jpeg-dev zlib-dev
RUN pip install --upgrade pip
RUN pip install yt-dlp pillow uwsgi

FROM python:3.6-alpine AS deps
WORKDIR /twitfix
COPY requirements.txt requirements.txt
COPY --from=build /usr/local/lib/python3.6/site-packages /usr/local/lib/python3.6/site-packages
RUN pip install -r requirements.txt

FROM python:3.6-alpine AS runner
EXPOSE 9000
RUN apk add pcre-dev jpeg-dev zlib-dev
WORKDIR /twitfix
CMD ["uwsgi", "twitfix.ini"]
COPY --from=build /usr/local/bin/uwsgi /usr/local/bin/uwsgi
COPY --from=deps /usr/local/lib/python3.6/site-packages /usr/local/lib/python3.6/site-packages
COPY . .
34 combineImg/Dockerfile Normal file

@@ -0,0 +1,34 @@
FROM public.ecr.aws/lambda/python:3.8
RUN yum -y update
RUN yum -y install git && yum clean all
RUN yum -y install tar gzip zlib freetype-devel \
    gcc \
    ghostscript \
    lcms2-devel \
    libffi-devel \
    libimagequant-devel \
    libjpeg-devel \
    libraqm-devel \
    libtiff-devel \
    libwebp-devel \
    make \
    openjpeg2-devel \
    rh-python36 \
    rh-python36-python-virtualenv \
    sudo \
    tcl-devel \
    tk-devel \
    tkinter \
    which \
    xorg-x11-server-Xvfb \
    zlib-devel \
    && yum clean all
RUN pip install -U --force-reinstall pillow-simd
RUN pip install requests


# Copy function code
COPY __init__.py ${LAMBDA_TASK_ROOT}/app.py

# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile)
CMD [ "app.lambda_handler" ]
140 combineImg/__init__.py Normal file

@@ -0,0 +1,140 @@
from PIL import Image, ImageOps, ImageFilter
from requests import get
from io import BytesIO
import base64
import concurrent.futures
from time import time as timer

# find the highest res image in an array of images
def findImageWithMostPixels(imageArray):
    maxPixels = 0
    maxImage = None
    for image in imageArray:
        pixels = image.size[0] * image.size[1]
        if pixels > maxPixels:
            maxPixels = pixels
            maxImage = image
    return maxImage

def getTotalImgSize(imageArray): # take the image with the most pixels, multiply it by the number of images, and return the width and height
    maxImage = findImageWithMostPixels(imageArray)
    if (len(imageArray) == 1):
        return (maxImage.size[0], maxImage.size[1])
    elif (len(imageArray) == 2):
        return (maxImage.size[0] * 2, maxImage.size[1])
    else:
        return (maxImage.size[0] * 2, maxImage.size[1]*2)

def scaleImageIterable(args):
    image = args[0]
    targetWidth = args[1]
    targetHeight = args[2]
    pad=args[3]
    if pad:
        image = image.convert('RGBA')
        newImg = ImageOps.pad(image, (targetWidth, targetHeight),color=(0, 0, 0, 0))
    else:
        newImg = ImageOps.fit(image, (targetWidth, targetHeight)) # scale + crop
    return newImg

def scaleAllImagesToSameSize(imageArray,targetWidth,targetHeight,pad=True): # scale all images in the array to the same size, preserving aspect ratio
    newImageArray = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
        newImageArray = [executor.submit(scaleImageIterable, (image, targetWidth, targetHeight,pad)) for image in imageArray]
        newImageArray = [future.result() for future in newImageArray]
    return newImageArray

def blurImage(image, radius):
    return image.filter(ImageFilter.GaussianBlur(radius=radius))

def combineImages(imageArray, totalWidth, totalHeight,pad=True):
    x = 0
    y = 0
    if (len(imageArray) == 1): # if there is only one image, just return it
        return imageArray[0]
    # image generation is needed
    topImg = findImageWithMostPixels(imageArray)
    newImage = Image.new("RGBA", (totalWidth, totalHeight),(0, 0, 0, 0))
    imageArray = scaleAllImagesToSameSize(imageArray,topImg.size[0],topImg.size[1],pad)
    if (len(imageArray) == 2): # if there are two images, combine them horizontally
        for image in imageArray:
            newImage.paste(image, (x, y))
            x += image.size[0]
    elif (len(imageArray) == 3): # the elusive 3 image upload
        # if there are three images, combine the first two horizontally, then combine the last one vertically
        imageArray[2] = scaleAllImagesToSameSize([imageArray[2]],totalWidth,topImg.size[1],pad)[0] # take the last image, treat it like an image array and scale it to the total width, but same height as all individual images
        for image in imageArray[0:2]:
            newImage.paste(image, (x, y))
            x += image.size[0]
        y += imageArray[0].size[1]
        x = 0
        newImage.paste(imageArray[2], (x, y))
    elif (len(imageArray) == 4): # if there are four images, combine the first two horizontally, then combine the last two vertically
        for image in imageArray[0:2]:
            newImage.paste(image, (x, y))
            x += image.size[0]
        y += imageArray[0].size[1]
        x = 0
        for image in imageArray[2:4]:
            newImage.paste(image, (x, y))
            x += image.size[0]
    else:
        for image in imageArray:
            newImage.paste(image, (x, y))
            x += image.size[0]
    return newImage

def saveImage(image, name):
    image.save(name)

# combine up to four images into a single image
def genImage(imageArray):
    totalSize=getTotalImgSize(imageArray)
    combined = combineImages(imageArray, *totalSize)
    combinedBG = combineImages(imageArray, *totalSize,False)
    combinedBG = blurImage(combinedBG,50)
    finalImg = Image.alpha_composite(combinedBG,combined)
    finalImg = ImageOps.pad(finalImg, findImageWithMostPixels(imageArray).size,color=(0, 0, 0, 0))
    finalImg = finalImg.convert('RGB')
    return finalImg

def downloadImage(url):
    return Image.open(BytesIO(get(url).content))

def genImageFromURL(urlArray):
    # this method avoids storing the images in disk, instead they're stored in memory
    # no cache means that they'll have to be downloaded again if the image is requested again
    # TODO: cache?
    start = timer()
    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
        imageArray = [executor.submit(downloadImage, url) for url in urlArray]
        imageArray = [future.result() for future in imageArray]
    print(f"Images downloaded in: {timer() - start}s")
    start = timer()
    finalImg = genImage(imageArray)
    print(f"Image generated in: {timer() - start}s")
    return finalImg

def lambda_handler(event, context):
    if ("queryStringParameters" not in event):
        return {
            "statusCode": 400,
            "body": "Invalid request."
        }
    images = event["queryStringParameters"].get("imgs","").split(",")
    for img in images:
        if not img.startswith("https://pbs.twimg.com"):
            return {'statusCode':400,'body':'Invalid image URL'}
    combined = genImageFromURL(images)
    buffered = BytesIO()
    combined.save(buffered,format="JPEG",quality=60)
    combined_str=base64.b64encode(buffered.getvalue()).decode('ascii')
    return {
        'statusCode': 200,
        "headers":
        {
            "Content-Type": "image/jpeg"
        },
        'body': combined_str,
        'isBase64Encoded': True
    }
29 docker-compose.yml Normal file

@@ -0,0 +1,29 @@
version: "3"

services:
  proxy:
    image: nginx:alpine
    container_name: twitfix_proxy
    volumes:
      - "./twitfix_proxy.conf:/etc/nginx/conf.d/default.conf"
    ports:
      - 8088:80
    depends_on:
      - twitfix

  twitfix:
    image: twitfix
    build: .
    container_name: twitfix_main
    volumes:
      - "./twitfix.ini:/twitfix/twitfix.ini:ro"
      - "./config.json:/twitfix/config.json:ro"
    depends_on:
      - db

  db:
    image: mongo:5.0.9
    container_name: twitfix_db
    volumes:
      - "./db:/data/db"
40 docker.md Normal file

@@ -0,0 +1,40 @@
# vxTwitter Docker

## Configuration

Set up MongoDB in `config.json`:
```json
{
    "config":{
        "link_cache":"db",
        "database":"mongodb://twitfix_db:27017/",
        [...]
    },
    [...]
}
```

Use a TCP socket for uWSGI in `twitfix.ini`:
```ini
[uwsgi]
module = wsgi:app

master = true
processes = 5

socket = 0.0.0.0:9000
buffer-size = 8192
#socket = /var/run/twitfix.sock
#chmod-socket = 660
vacuum = true

die-on-term = true
```

## Run

To build and run, use this command:
```bash
docker-compose up -d --build
```
1 msgs.py Normal file

@@ -0,0 +1 @@
failedToScan="Failed to scan your link! This may be due to an incorrect link, private account, or the twitter API itself might be having issues (Check here: https://api.twitterstat.us/)\nIt's also possible that Twitter is API limiting me, in which case I can't do anything about it."
10771 package-lock.json generated

File diff suppressed because it is too large
28 package.json

@@ -1,28 +0,0 @@
{
  "name": "bettertwitfix",
  "version": "1.0.0",
  "description": "(A fork of TwitFix)\r Basic flask server that serves fixed twitter video embeds to desktop discord by using either the Twitter API or Youtube-DL to grab tweet video information. This also automatically embeds the first link in the text of non video tweets (API Only)",
  "main": "index.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "repository": {
    "type": "git",
    "url": "git+https://github.com/dylanpdx/BetterTwitFix.git"
  },
  "keywords": [],
  "author": "",
  "license": "ISC",
  "bugs": {
    "url": "https://github.com/dylanpdx/BetterTwitFix/issues"
  },
  "homepage": "https://github.com/dylanpdx/BetterTwitFix#readme",
  "dependencies": {
    "serverless-wsgi": "^3.0.0"
  },
  "devDependencies": {
    "serverless-plugin-common-excludes": "^4.0.0",
    "serverless-plugin-include-dependencies": "^5.0.0",
    "serverless-python-requirements": "^5.4.0"
  }
}
@@ -2,6 +2,11 @@
(A fork of TwitFix)
Basic flask server that serves fixed twitter video embeds to desktop discord by using either the Twitter API or Youtube-DL to grab tweet video information. This also automatically embeds the first link in the text of non video tweets (API Only)

## Differences from fxtwitter
fxtwitter exposed all recently processed tweets publicly via a "latest" and "top" page.

Even though Tweets are public, it was a personal concern for me that a tweet with potentially sensitive information in it could suddenly be shown to however many people were browsing the latest tweets page, and could be used as a tool for harassment. This was removed in [the following commit](https://github.com/dylanpdx/BetterTwitFix/commit/87ba86ba502e73ddb370bd4e5b964548d3272400#diff-a11c36d9b2d53672d6b3d781dca5bef9129159947de66bc3ffaec5fab389d80cL115).

## How to use (discord side)

Just put the URL to the server, and directly after it, the full URL of the tweet you want to embed.
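For illustration only (the tweet URL below is just a placeholder), both of the following forms point the server at the same tweet:

```
https://vxtwitter.com/https://twitter.com/jack/status/20
https://vxtwitter.com/jack/status/20
```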
@@ -16,6 +21,8 @@ You can also simply type out 'vx' directly before 'twitter.com' in any valid twi

**Note**: If you enjoy this service, please consider donating via [Ko-Fi](https://ko-fi.com/dylanpdx) to help cover server costs.

I do not monitor any tweets processed by this server. Additionally, if you plan on hosting the code yourself and are concerned about this, be sure to check how to disable logging on the web server you are using (e.g. Nginx).

## How to run (server side)

This script uses the youtube-dl Python module, along with flask, twitter and pymongo, so install those with pip (you can use `pip install -r requirements.txt`) and start the server with `python twitfix.py`.
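A minimal sketch of that sequence, assuming you are in the repository root with pip available:

```bash
# install flask, twitter, pymongo and the rest of the pinned dependencies
pip install -r requirements.txt

# start the server; a default config.json is generated on first run
python twitfix.py
```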
@@ -50,6 +57,8 @@ vxTwitter generates a config.json in its root directory the first time you run i

**url** - used to tell the user where to look for the oembed endpoint; make sure to set this to your public-facing URL.

**combination_method** - using c.vxtwitter as the url causes vxTwitter to combine all images in the post into one. This is CPU intensive, so you might not want it running on the same machine that's serving requests. When `combination_method` is set to `local`, it will use the local machine to combine the images. This requires pillow to be installed. If you want to use another server, replace `local` with the URL to the endpoint which combines images. Both methods use the code in the `combineImg` module. Inside, there's also a `Dockerfile` intended to be deployed as a combination endpoint on an [AWS Lambda function](https://docs.aws.amazon.com/lambda/latest/dg/images-create.html). An example of the remote form is shown below.

This project is licensed under the **Do What The Fuck You Want Public License**
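For illustration, the relevant `config.json` entry could look like this; `"local"` is the other valid value, and the endpoint URL below is just a placeholder for wherever the `combineImg` service is deployed:

```json
{
    "config": {
        "combination_method": "https://image-combiner.example.com"
    }
}
```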
@@ -1,25 +1,20 @@
boto3==1.23.0
botocore==1.26.0
certifi==2021.10.8
certifi==2021.10.8
charset-normalizer==2.0.12
click==8.0.4
colorama==0.4.4
dataclasses==0.8
dataclasses==0.8; python_version < '3.8'
Flask==2.0.3
Flask-Cors==3.0.10
idna==3.3
importlib-metadata==4.8.3
itsdangerous==2.0.1
Jinja2==3.0.3
jmespath==0.10.0
MarkupSafe==2.0.1
pymongo==4.1.1
python-dateutil==2.8.2
requests==2.27.1
s3transfer==0.5.2
six==1.16.0
twitter==1.19.3
typing_extensions==4.1.1
typing-extensions==4.1.1
urllib3==1.26.9
Werkzeug==2.0.3
youtube-dl==2021.12.17
@@ -1,79 +0,0 @@
service: vxTwitter

provider:
  name: aws
  runtime: python3.6
  stage: dev
  iamRoleStatements:
    - Effect: Allow
      Action:
        - dynamodb:Query
        - dynamodb:Scan
        - dynamodb:GetItem
        - dynamodb:PutItem
        - dynamodb:UpdateItem
        - dynamodb:DeleteItem
      Resource:
        - { "Fn::GetAtt": ["vxTwitterDynamoTable", "Arn" ] }
  environment:
    CACHE_TABLE: ${self:custom.tableName}
    RUNNING_SERVERLESS: 1
    VXTWITTER_LINK_CACHE: dynamodb
    VXTWITTER_DATABASE: none
    VXTWITTER_DATABASE_TABLE: none
    VXTWITTER_METHOD: youtube-dl
    VXTWITTER_COLOR: \#43B581
    VXTWITTER_APP_NAME: vxTwitter
    VXTWITTER_REPO: https://github.com/dylanpdx/BetterTwitFix
    VXTWITTER_URL: https://vxtwitter.com
    # Twitter API keys
    VXTWITTER_TWITTER_API_KEY: none
    VXTWITTER_TWITTER_API_SECRET: none
    VXTWITTER_TWITTER_ACCESS_TOKEN: none
    VXTWITTER_TWITTER_ACCESS_SECRET: none

package:
  patterns:
    - '!node_modules/**'
    - '!venv/**'

plugins:
  - serverless-wsgi
  - serverless-python-requirements
  - serverless-plugin-common-excludes
  - serverless-plugin-include-dependencies

functions:
  vxTwitterApp:
    handler: wsgi_handler.handler
    url: true
    layers:
      - Ref: PythonRequirementsLambdaLayer


custom:
  tableName: 'users-table-${self:provider.stage}'
  wsgi:
    app: twitfix.app
  pythonRequirements:
    layer: true
    dockerizePip: true


resources:
  Resources:
    vxTwitterDynamoTable:
      Type: 'AWS::DynamoDB::Table'
      Properties:
        AttributeDefinitions:
          -
            AttributeName: tweet
            AttributeType: S
        KeySchema:
          -
            AttributeName: tweet
            KeyType: HASH
        ProvisionedThroughput:
          ReadCapacityUnits: 1
          WriteCapacityUnits: 1
        TableName: ${self:custom.tableName}
@@ -19,6 +19,7 @@
<meta property="og:video:height" content="480" />
<meta name="twitter:title" content="{{ user }} (@{{ screenName }})" />
<meta property="og:image" content="{{ pic }}" />
<meta property="og:description" content="{{ desc }}" />

<link rel="alternate" href="{{ url }}/oembed.json?desc={{ urlUser }}&user={{ urlDesc }}&link={{ urlLink }}&ttype=video" type="application/json+oembed" title="{{ user }}">
<meta http-equiv="refresh" content="0; url = {{ tweetLink }}" /> {% endblock %} {% block body %} Redirecting you to the tweet in a moment. <a href="{{ tweetLink }}">Or click here.</a> {% endblock %}
18 twExtract.py Normal file

@@ -0,0 +1,18 @@
import imp
import yt_dlp
from yt_dlp.extractor import twitter
import json

def extractStatus(url):
    twIE = twitter.TwitterIE()
    twIE.set_downloader(yt_dlp.YoutubeDL())
    twid = twIE._match_id(url)
    status = twIE._call_api(
        'statuses/show/%s.json' % twid, twid, {
            'cards_platform': 'Web-12',
            'include_cards': 1,
            'include_reply_count': 1,
            'include_user_entities': 0,
            'tweet_mode': 'extended',
        })
    return status
249 twitfix.py

@@ -1,6 +1,7 @@
from flask import Flask, render_template, request, redirect, Response, send_from_directory, url_for, send_file, make_response, jsonify
from weakref import finalize
from flask import Flask, render_template, request, redirect, abort, Response, send_from_directory, url_for, send_file, make_response, jsonify
from flask_cors import CORS
import youtube_dl
import yt_dlp
import textwrap
import twitter
import pymongo

@@ -10,8 +11,11 @@ import re
import os
import urllib.parse
import urllib.request
from datetime import date
import boto3
import combineImg
from datetime import date,datetime, timedelta
from io import BytesIO
import msgs
import twExtract

app = Flask(__name__)
CORS(app)
@@ -34,19 +38,18 @@ generate_embed_user_agents = [

# Read config from config.json. If it does not exist, create new.
if not os.path.exists("config.json"):
    serverless_check = os.environ.get('RUNNING_SERVERLESS')
    if serverless_check == None: # Running on local pc, therefore we can access the filesystem
        with open("config.json", "w") as outfile:
            default_config = {
                "config":{
                    "link_cache":"dynamodb",
                    "link_cache":"json",
                    "database":"[url to mongo database goes here]",
                    "table":"TwiFix",
                    "method":"youtube-dl",
                    "color":"#43B581",
                    "appname": "vxTwitter",
                    "repo": "https://github.com/dylanpdx/BetterTwitFix",
                    "url": "https://vxtwitter.com"
                    "url": "https://vxtwitter.com",
                    "combination_method": "local" # can either be 'local' or a URL to a server handling requests in the same format
                },
                "api":{"api_key":"[api_key goes here]",
                    "api_secret":"[api_secret goes here]",

@@ -54,26 +57,8 @@ if not os.path.exists("config.json"):
                    "access_secret":"[access_secret goes here]"
                }
            }

            json.dump(default_config, outfile, indent=4, sort_keys=True)
    else: # Running on serverless, therefore we cannot access the filesystem and must use environment variables
        default_config = {
            "config":{
                "link_cache":os.environ['VXTWITTER_LINK_CACHE'],
                "database":os.environ['VXTWITTER_DATABASE'],
                "table":os.environ['VXTWITTER_DATABASE_TABLE'],
                "method":os.environ['VXTWITTER_METHOD'],
                "color":os.environ['VXTWITTER_COLOR'],
                "appname": os.environ['VXTWITTER_APP_NAME'],
                "repo": os.environ['VXTWITTER_REPO'],
                "url": os.environ['VXTWITTER_URL'],
            },
            "api":{
                "api_key":os.environ['VXTWITTER_TWITTER_API_KEY'],
                "api_secret":os.environ['VXTWITTER_TWITTER_API_SECRET'],
                "access_token":os.environ['VXTWITTER_TWITTER_ACCESS_TOKEN'],
                "access_secret":os.environ['VXTWITTER_TWITTER_ACCESS_SECRET']
            }
        }

    config = default_config
else:
@@ -87,9 +72,6 @@ if config['config']['method'] in ('api', 'hybrid'):
    twitter_api = twitter.Twitter(auth=auth)

link_cache_system = config['config']['link_cache']
DYNAMO_CACHE_TBL=None
if link_cache_system=="dynamodb":
    DYNAMO_CACHE_TBL=os.environ['CACHE_TABLE']

if link_cache_system == "json":
    link_cache = {}

@@ -98,15 +80,19 @@ if link_cache_system == "json":
        default_link_cache = {"test":"test"}
        json.dump(default_link_cache, outfile, indent=4, sort_keys=True)

    f = open('links.json',)
    try:
        with open('links.json', "r") as f:
            link_cache = json.load(f)
        f.close()
    except (json.decoder.JSONDecodeError, FileNotFoundError):
        print(" ➤ [ X ] Failed to load cache JSON file. Creating new file.")
        with open('links.json', "w") as f:
            link_cache = {}
            json.dump(link_cache, f)

elif link_cache_system == "db":
    client = pymongo.MongoClient(config['config']['database'], connect=False)
    table = config['config']['table']
    db = client[table]
elif link_cache_system == "dynamodb":
    client = boto3.resource('dynamodb')

@app.route('/') # If the useragent is discord, return the embed, if not, redirect to configured repo directly
def default():
@@ -131,16 +117,28 @@ def twitfix(sub_path):
    print(request.url)

    if request.url.startswith("https://d.vx"): # Matches d.fx? Try to give the user a direct link
        if match.start() == 0:
            twitter_url = "https://twitter.com/" + sub_path
        if user_agent in generate_embed_user_agents:
            print( " ➤ [ D ] d.vx link shown to discord user-agent!")
            if request.url.endswith(".mp4") and "?" not in request.url:
                return dl(sub_path)
                return redirect(direct_video_link(twitter_url),302)
            else:
                return message("To use a direct MP4 link in discord, remove anything past '?' and put '.mp4' at the end")
        else:
            print(" ➤ [ R ] Redirect to MP4 using d.fxtwitter.com")
            return dir(sub_path)
    elif request.url.startswith("https://c.vx"):
        twitter_url = sub_path

        if match.start() == 0:
            twitter_url = "https://twitter.com/" + sub_path

        if user_agent in generate_embed_user_agents:
            return embedCombined(twitter_url)
        else:
            print(" ➤ [ R ] Redirect to " + twitter_url)
            return redirect(twitter_url, 301)
    elif request.url.endswith(".mp4") or request.url.endswith("%2Emp4"):
        twitter_url = "https://twitter.com/" + sub_path

@@ -149,7 +147,7 @@ def twitfix(sub_path):
        else:
            clean = twitter_url

        return dl(clean)
        return redirect(direct_video_link(clean),302)

    # elif request.url.endswith(".json") or request.url.endswith("%2Ejson"):
    #     twitter_url = "https://twitter.com/" + sub_path
@@ -222,6 +220,33 @@ def favicon():
    return send_from_directory(os.path.join(app.root_path, 'static'),
                          'favicon.ico',mimetype='image/vnd.microsoft.icon')

@app.route("/rendercombined.jpg")
def rendercombined():
    # get "imgs" from request arguments
    imgs = request.args.get("imgs", "")

    if 'combination_method' in config['config'] and config['config']['combination_method'] != "local":
        url = config['config']['combination_method'] + "/rendercombined.jpg?imgs=" + imgs
        return redirect(url, 302)
        # Redirecting here instead of setting the embed URL directly to this because if the config combination_method changes in the future, old URLs will still work

    imgs = imgs.split(",")
    if (len(imgs) == 0 or len(imgs)>4):
        abort(400)
    #check that each image starts with "https://pbs.twimg.com"
    for img in imgs:
        if not img.startswith("https://pbs.twimg.com"):
            abort(400)
    finalImg= combineImg.genImageFromURL(imgs)
    imgIo = BytesIO()
    finalImg = finalImg.convert("RGB")
    finalImg.save(imgIo, 'JPEG',quality=70)
    imgIo.seek(0)
    return send_file(imgIo, mimetype='image/jpeg')

def getDefaultTTL():
    return datetime.today().replace(microsecond=0) + timedelta(days=1)

def direct_video(video_link): # Just get a redirect to a MP4 link from any tweet link
    cached_vnf = getVnfFromLinkCache(video_link)
    if cached_vnf == None:
@@ -232,7 +257,7 @@ def direct_video(video_link): # Just get a redirect to a MP4 link from any tweet
            print(" ➤ [ D ] Redirecting to direct URL: " + vnf['url'])
        except Exception as e:
            print(e)
            return message("Failed to scan your link!")
            return message(msgs.failedToScan)
    else:
        return redirect(cached_vnf['url'], 301)
        print(" ➤ [ D ] Redirecting to direct URL: " + vnf['url'])

@@ -247,7 +272,7 @@ def direct_video_link(video_link): # Just get a redirect to a MP4 link from any
            print(" ➤ [ D ] Redirecting to direct URL: " + vnf['url'])
        except Exception as e:
            print(e)
            return message("Failed to scan your link!")
            return message(msgs.failedToScan)
    else:
        return cached_vnf['url']
        print(" ➤ [ D ] Redirecting to direct URL: " + vnf['url'])

@@ -263,11 +288,13 @@ def embed_video(video_link, image=0): # Return Embed from any tweet link

        except Exception as e:
            print(e)
            return message("Failed to scan your link!")
            return message(msgs.failedToScan)
    else:
        return embed(video_link, cached_vnf, image)

def tweetInfo(url, tweet="", desc="", thumb="", uploader="", screen_name="", pfp="", tweetType="", images="", hits=0, likes=0, rts=0, time="", qrt={}, nsfw=False): # Return a dict of video info with default values
def tweetInfo(url, tweet="", desc="", thumb="", uploader="", screen_name="", pfp="", tweetType="", images="", hits=0, likes=0, rts=0, time="", qrt={}, nsfw=False,ttl=None): # Return a dict of video info with default values
    if (ttl==None):
        ttl = getDefaultTTL()
    vnf = {
        "tweet" : tweet,
        "url" : url,
@@ -283,16 +310,19 @@ def tweetInfo(url, tweet="", desc="", thumb="", uploader="", screen_name="", pfp
        "rts" : rts,
        "time" : time,
        "qrt" : qrt,
        "nsfw" : nsfw
        "nsfw" : nsfw,
        "ttl" : ttl
    }
    return vnf

def link_to_vnf_from_api(video_link):
def get_tweet_data_from_api(video_link):
    print(" ➤ [ + ] Attempting to download tweet info from Twitter API")
    twid = int(re.sub(r'\?.*$','',video_link.rsplit("/", 1)[-1])) # gets the tweet ID as a int from the passed url
    tweet = twitter_api.statuses.show(_id=twid, tweet_mode="extended")
    # For when I need to poke around and see what a tweet looks like
    print(tweet)
    #print(tweet) # For when I need to poke around and see what a tweet looks like
    return tweet

def link_to_vnf_from_tweet_data(tweet,video_link):
    imgs = ["","","","", ""]
    print(" ➤ [ + ] Tweet Type: " + tweetType(tweet))
    # Check to see if tweet has a video, if not, make the url passed to the VNF the first t.co link in the tweet

@@ -303,6 +333,7 @@ def link_to_vnf_from_api(video_link):
        for video in tweet['extended_entities']['media'][0]['video_info']['variants']:
            if video['content_type'] == "video/mp4" and video['bitrate'] > best_bitrate:
                url = video['url']
                best_bitrate = video['bitrate']
    elif tweetType(tweet) == "Text":
        url = ""
        thumb = ""

@@ -351,9 +382,21 @@ def link_to_vnf_from_api(video_link):

    return vnf


def link_to_vnf_from_unofficial_api(video_link):
    print(" ➤ [ + ] Attempting to download tweet info from UNOFFICIAL Twitter API")
    tweet = twExtract.extractStatus(video_link)
    print (" ➤ [ ✔ ] Unofficial API Success")
    return link_to_vnf_from_tweet_data(tweet,video_link)


def link_to_vnf_from_api(video_link):
    tweet = get_tweet_data_from_api(video_link)
    return link_to_vnf_from_tweet_data(tweet,video_link)

def link_to_vnf_from_youtubedl(video_link):
    print(" ➤ [ X ] Attempting to download tweet info via YoutubeDL: " + video_link)
    with youtube_dl.YoutubeDL({'outtmpl': '%(id)s.%(ext)s'}) as ydl:
    with yt_dlp.YoutubeDL({'outtmpl': '%(id)s.%(ext)s'}) as ydl:
        result = ydl.extract_info(video_link, download=False)
        vnf = tweetInfo(result['url'], video_link, result['description'].rsplit(' ',1)[0], result['thumbnail'], result['uploader'])
        return vnf
@@ -365,7 +408,13 @@ def link_to_vnf(video_link): # Return a VideoInfo object or die trying
        except Exception as e:
            print(" ➤ [ !!! ] API Failed")
            print(e)
            return link_to_vnf_from_youtubedl(video_link)
            try:
                return link_to_vnf_from_unofficial_api(video_link)
            except Exception as e:
                print(" ➤ [ !!! ] UNOFFICIAL API Failed")
                print(e)
                return link_to_vnf_from_youtubedl(video_link) # This is the last resort, will only work for videos

    elif config['config']['method'] == 'api':
        try:
            return link_to_vnf_from_api(video_link)
@@ -407,45 +456,26 @@ def getVnfFromLinkCache(video_link):
        else:
            print(" ➤ [ X ] Link not in json cache")
            return None
    elif link_cache_system == "dynamodb":
        table = client.Table(DYNAMO_CACHE_TBL)
        response = table.get_item(
            Key={
                'tweet': video_link
            }
        )
        if 'Item' in response:
            print("Link located in dynamodb cache")
            vnf = response['Item']['vnf']
            return vnf
        else:
            print(" ➤ [ X ] Link not in dynamodb cache")
            return None

def serializeUnknown(obj):
    if isinstance(obj, (datetime, date)):
        return obj.isoformat()
    raise TypeError ("Type %s not serializable" % type(obj))

def addVnfToLinkCache(video_link, vnf):
    if link_cache_system == "db":
        try:
    if link_cache_system == "db":
            out = db.linkCache.insert_one(vnf)
            print(" ➤ [ + ] Link added to DB cache ")
            return True
        except Exception:
            print(" ➤ [ X ] Failed to add link to DB cache")
            return None
    elif link_cache_system == "json":
        link_cache[video_link] = vnf
        with open("links.json", "w") as outfile:
            json.dump(link_cache, outfile, indent=4, sort_keys=True)
            json.dump(link_cache, outfile, indent=4, sort_keys=True, default=serializeUnknown)
            return None
        except Exception:
            print(" ➤ [ X ] Failed to add link to DB cache")
            return None
    elif link_cache_system == "dynamodb":
        table = client.Table(DYNAMO_CACHE_TBL)
        table.put_item(
            Item={
                'tweet': video_link,
                'vnf': vnf
            }
        )
        print(" ➤ [ + ] Link added to dynamodb cache ")
        return True

def message(text):
    return render_template(
@@ -457,7 +487,6 @@ def message(text):
        url = config['config']['url'] )

def embed(video_link, vnf, image):
    print(vnf)
    print(" ➤ [ E ] Embedding " + vnf['type'] + ": " + vnf['url'])

    desc = re.sub(r' http.*t\.co\S+', '', vnf['description'])

@@ -479,10 +508,12 @@ def embed(video_link, vnf, image):
    except:
        vnf['likes'] = 0; vnf['rts'] = 0; vnf['time'] = 0
        print(' ➤ [ X ] Failed QRT check - old VNF object')

    appNamePost = ""
    if vnf['type'] == "Text": # Change the template based on tweet type
        template = 'text.html'
    if vnf['type'] == "Image":
        if vnf['images'][4]!="1":
            appNamePost = " - Image " + str(image+1) + "/" + str(vnf['images'][4])
        image = vnf['images'][image]
        template = 'image.html'
    if vnf['type'] == "Video":

@@ -511,7 +542,7 @@ def embed(video_link, vnf, image):
        user = vnf['uploader'],
        video_link = video_link,
        color = color,
        appname = config['config']['appname'],
        appname = config['config']['appname']+appNamePost,
        repo = config['config']['repo'],
        url = config['config']['url'],
        urlDesc = urlDesc,
@@ -519,6 +550,68 @@ def embed(video_link, vnf, image):
        urlLink = urlLink,
        tweetLink = vnf['tweet'] )


def embedCombined(video_link):
    cached_vnf = getVnfFromLinkCache(video_link)

    if cached_vnf == None:
        try:
            vnf = link_to_vnf(video_link)
            addVnfToLinkCache(video_link, vnf)
            return embedCombinedVnf(video_link, vnf)

        except Exception as e:
            print(e)
            return message(msgs.failedToScan)
    else:
        return embedCombinedVnf(video_link, cached_vnf)

def embedCombinedVnf(video_link,vnf):
    if vnf['type'] != "Image" or vnf['images'][4] == "1":
        return embed(video_link, vnf, 0)
    desc = re.sub(r' http.*t\.co\S+', '', vnf['description'])
    urlUser = urllib.parse.quote(vnf['uploader'])
    urlDesc = urllib.parse.quote(desc)
    urlLink = urllib.parse.quote(video_link)
    likeDisplay = ("\n\n💖 " + str(vnf['likes']) + " 🔁 " + str(vnf['rts']) + "\n")

    if vnf['qrt'] == {}: # Check if this is a QRT and modify the description
        desc = (desc + likeDisplay)
    else:
        qrtDisplay = ("\n─────────────\n ➤ QRT of " + vnf['qrt']['handle'] + " (@" + vnf['qrt']['screen_name'] + "):\n─────────────\n'" + vnf['qrt']['desc'] + "'")
        desc = (desc + qrtDisplay + likeDisplay)

    color = "#7FFFD4" # Green

    if vnf['nsfw'] == True:
        color = "#800020" # Red
    image = "https://vxtwitter.com/rendercombined.jpg?imgs="
    for i in range(0,int(vnf['images'][4])):
        image = image + vnf['images'][i] + ","
    image = image[:-1] # Remove last comma
    return render_template(
        'image.html',
        likes = vnf['likes'],
        rts = vnf['rts'],
        time = vnf['time'],
        screenName = vnf['screen_name'],
        vidlink = vnf['url'],
        pfp = vnf['pfp'],
        vidurl = vnf['url'],
        desc = desc,
        pic = image,
        user = vnf['uploader'],
        video_link = video_link,
        color = color,
        appname = config['config']['appname'] + " - View original tweet for full quality",
        repo = config['config']['repo'],
        url = config['config']['url'],
        urlDesc = urlDesc,
        urlUser = urlUser,
        urlLink = urlLink,
        tweetLink = vnf['tweet'] )


def tweetType(tweet): # Are we dealing with a Video, Image, or Text tweet?
    if 'extended_entities' in tweet:
        if 'video_info' in tweet['extended_entities']['media'][0]:
@@ -8,6 +8,8 @@ Group=dylan
WorkingDirectory=/home/dylan/BetterTwitFix
Environment="PATH=/home/dylan/BetterTwitFix/venv/bin"
ExecStart=/home/dylan/BetterTwitFix/venv/bin/uwsgi --ini twitfix.ini
Restart=always
RestartSec=3

[Install]
WantedBy=multi-user.target
24 twitfix_proxy.conf Normal file

@@ -0,0 +1,24 @@
server {
    listen 80;
    server_name localhost;

    #access_log /var/log/nginx/host.access.log main;

    location / {
        try_files $uri @twitfix;
    }

    location @twitfix {
        include uwsgi_params;
        uwsgi_pass uwsgi://twitfix_main:9000;
    }

    #error_page 404 /404.html;

    # redirect server error pages to the static page /50x.html
    #
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }
}