Mirror of https://github.com/KevinMidboe/bulk-downloader-for-reddit.git, synced 2025-10-29 09:30:15 +00:00
Merge branch 'master' of https://github.com/aliparlakci/bulk-downloader-for-reddit
@@ -1,13 +1,15 @@
 import io
+import json
 import os
 import sys
 import urllib.request
 from html.parser import HTMLParser
 from multiprocessing import Queue
 from pathlib import Path
 from urllib.error import HTTPError
 
 import imgurpython
+from bs4 import BeautifulSoup
 
 from src.errors import (AlbumNotDownloadedCompletely, FileAlreadyExistsError,
                         FileNameTooLong, ImgurLoginError,
@@ -442,24 +444,16 @@ class Gfycat:
 
         url = "https://gfycat.com/" + url.split('/')[-1]
 
-        pageSource = (urllib.request.urlopen(url).read().decode().split('\n'))
+        pageSource = (urllib.request.urlopen(url).read().decode())
 
-        theLine = pageSource[lineNumber]
-        lenght = len(query)
-        link = []
-
-        for i in range(len(theLine)):
-            if theLine[i:i+lenght] == query:
-                cursor = (i+lenght)+1
-                while not theLine[cursor] == '"':
-                    link.append(theLine[cursor])
-                    cursor += 1
-                break
+        soup = BeautifulSoup(pageSource, "html.parser")
+        attributes = {"data-react-helmet":"true","type":"application/ld+json"}
+        content = soup.find("script",attrs=attributes)
 
-        if "".join(link) == "":
+        if content is None:
             raise NotADownloadableLinkError("Could not read the page source")
 
-        return "".join(link)
+        return json.loads(content.text)["video"]["contentUrl"]
 
 class Direct:
     def __init__(self,directory,POST):
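
For context, the rewritten Gfycat.getLink stops scanning the raw page source character by character and instead parses the JSON-LD metadata block (a schema.org VideoObject) that Gfycat embedded in each page. A minimal standalone sketch of the same technique follows; the helper name get_video_url and the example URL are illustrative only, not part of the project, and since gfycat.com has since shut down, the request itself will no longer succeed.

import json
import urllib.request

from bs4 import BeautifulSoup


def get_video_url(page_url):
    """Fetch a page and read the direct video link out of its JSON-LD
    metadata, mirroring the approach the new getLink takes."""
    page_source = urllib.request.urlopen(page_url).read().decode()

    soup = BeautifulSoup(page_source, "html.parser")
    # Gfycat tagged its JSON-LD script block with these attributes; a
    # plain {"type": "application/ld+json"} lookup works on many sites.
    attributes = {"data-react-helmet": "true", "type": "application/ld+json"}
    content = soup.find("script", attrs=attributes)

    if content is None:
        raise ValueError("Could not find JSON-LD metadata in the page source")

    # Gfycat's JSON-LD nested a schema.org VideoObject under "video",
    # with the direct file URL in its contentUrl field.
    return json.loads(content.text)["video"]["contentUrl"]


# Hypothetical usage (gfycat.com is no longer online):
# print(get_video_url("https://gfycat.com/SomeGfyId"))

Parsing the structured metadata is considerably more robust than the removed approach, which assumed the link appeared on a fixed line (lineNumber) of the page source and broke whenever Gfycat reshuffled its markup.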