Mirror of https://github.com/KevinMidboe/bulk-downloader-for-reddit.git
Synced 2026-01-10 19:25:41 +00:00

## Compare commits (16 commits)
| Author | SHA1 | Date |
|---|---|---|
| | aece2273fb | |
| | f807efe4d5 | |
| | 743d887927 | |
| | da5492858c | |
| | cebfc713d2 | |
| | f522154214 | |
| | 27cd3ee991 | |
| | 29873331e6 | |
| | 8a3dcd68a3 | |
| | ac323f2abe | |
| | 32d26fa956 | |
| | 137481cf3e | |
| | 9b63c55d3e | |
| | 3a6954c7d3 | |
| | 9a59da0c5f | |
| | d56efed1c6 | |
```diff
@@ -1,4 +1,11 @@
 # Changes on *master*
 
+## [28/08/2018](https://github.com/aliparlakci/bulk-downloader-for-reddit/tree/d56efed1c6833a66322d9158523b89d0ce57f5de)
+- Adjusted algorithm used for extracting gfycat links because of gfycat's design change
+- Ignore space at the end of the given directory
+
+## [16/08/2018](https://github.com/aliparlakci/bulk-downloader-for-reddit/tree/d56efed1c6833a66322d9158523b89d0ce57f5de)
+- Fix the bug that prevents downloading imgur videos
+
 ## [15/08/2018](https://github.com/aliparlakci/bulk-downloader-for-reddit/tree/adccd8f3ba03ad124d58643d78dab287a4123a6f)
 - Prints out the title of posts that are already downloaded
```
```diff
@@ -1,3 +1,4 @@
+bs4
 requests
 praw
 imgurpython
```
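With `bs4` newly added to the requirements (it powers the rewritten gfycat parser below), an existing checkout needs its dependencies refreshed before this version runs, e.g. `python -m pip install -r requirements.txt`.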
**script.py** (26 lines changed)
```diff
@@ -23,7 +23,7 @@ from src.tools import (GLOBAL, createLogFile, jsonFile, nameCorrector,
 
 __author__ = "Ali Parlakci"
 __license__ = "GPL"
-__version__ = "1.6.2"
+__version__ = "1.6.4"
 __maintainer__ = "Ali Parlakci"
 __email__ = "parlakciali@gmail.com"
 
```
```diff
@@ -569,7 +569,9 @@ def download(submissions):
         print(f" – {submissions[i]['postType'].upper()}",end="",noPrint=True)
 
         if isPostExists(submissions[i]):
-            print(f"\n{nameCorrector(submissions[i]['postTitle'])}")
+            print(f"\n" \
+                  f"{submissions[i]['postSubmitter']}_"
+                  f"{nameCorrector(submissions[i]['postTitle'])}")
             print("It already exists")
             duplicates += 1
             downloadedCount -= 1
```
```diff
@@ -633,23 +635,33 @@ def download(submissions):
             downloadedCount -= 1
 
     if duplicates:
-        print("\n There was {} duplicates".format(duplicates))
+        print(f"\nThere {'were' if duplicates > 1 else 'was'} " \
+              f"{duplicates} duplicate{'s' if duplicates > 1 else ''}")
 
     if downloadedCount == 0:
-        print(" Nothing downloaded :(")
+        print("Nothing downloaded :(")
 
     else:
-        print(" Total of {} links downloaded!".format(downloadedCount))
+        print(f"Total of {downloadedCount} " \
+              f"link{'s' if downloadedCount > 1 else ''} downloaded!")
 
 def main():
 
+    VanillaPrint(
+        f" Bulk Downloader for Reddit v{__version__}\n" \
+        f" Written by Ali PARLAKCI – parlakciali@gmail.com\n\n" \
+        f" https://github.com/aliparlakci/bulk-downloader-for-reddit/"
+    )
     GLOBAL.arguments = parseArguments()
 
     if GLOBAL.arguments.directory is not None:
-        GLOBAL.directory = Path(GLOBAL.arguments.directory)
+        GLOBAL.directory = Path(GLOBAL.arguments.directory.strip())
     else:
-        GLOBAL.directory = Path(input("download directory: "))
+        GLOBAL.directory = Path(input("download directory: ").strip())
 
+    print("\n"," ".join(sys.argv),"\n",noPrint=True)
+    print(f"Bulk Downloader for Reddit v{__version__}\n",noPrint=True
+    )
 
     try:
         checkConflicts()
```
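The two `.strip()` calls implement the "Ignore space at the end of the given directory" changelog entry. A minimal sketch of the failure mode they fix, with a made-up path: a trailing space in the typed directory silently becomes part of the folder name.

```python
from pathlib import Path

raw = "/home/me/downloads "              # typed path with a stray trailing space (made-up example)
print(repr(Path(raw).name))              # 'downloads ' (the space survives into the folder name)
print(repr(Path(raw.strip()).name))      # 'downloads' (the directory the user actually meant)
```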
**src/downloader.py**

```diff
@@ -1,13 +1,15 @@
 import io
+import json
 import os
 import sys
 import urllib.request
 from html.parser import HTMLParser
+from multiprocessing import Queue
 from pathlib import Path
 from urllib.error import HTTPError
 
 import imgurpython
-from multiprocessing import Queue
+from bs4 import BeautifulSoup
 
 from src.errors import (AlbumNotDownloadedCompletely, FileAlreadyExistsError,
                         FileNameTooLong, ImgurLoginError,
```
```diff
@@ -66,7 +68,8 @@ def getFile(fileDir,tempDir,imageURL,indent=0):
     ]
 
     opener = urllib.request.build_opener()
-    opener.addheaders = headers
+    if not "imgur" in imageURL:
+        opener.addheaders = headers
     urllib.request.install_opener(opener)
 
     if not (os.path.isfile(fileDir)):
```
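For context: `urllib.request.install_opener` makes the custom headers apply to every later `urllib.request` call in the process, so skipping `addheaders` for imgur URLs lets those requests fall back to urllib's default User-Agent. A standalone sketch of the same pattern, with a made-up header list and URL (the real values live inside `getFile`):

```python
import urllib.request

headers = [("User-agent", "Mozilla/5.0")]      # made-up header list for illustration
imageURL = "https://i.imgur.com/example.jpg"   # hypothetical URL being downloaded

opener = urllib.request.build_opener()
if "imgur" not in imageURL:
    opener.addheaders = headers                # spoofed UA only for non-imgur hosts
urllib.request.install_opener(opener)          # later urlopen/urlretrieve calls use this opener
```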
```diff
@@ -441,24 +444,16 @@ class Gfycat:
 
         url = "https://gfycat.com/" + url.split('/')[-1]
 
-        pageSource = (urllib.request.urlopen(url).read().decode().split('\n'))
+        pageSource = (urllib.request.urlopen(url).read().decode())
 
-        theLine = pageSource[lineNumber]
-        lenght = len(query)
-        link = []
+        soup = BeautifulSoup(pageSource, "html.parser")
+        attributes = {"data-react-helmet":"true","type":"application/ld+json"}
+        content = soup.find("script",attrs=attributes)
 
-        for i in range(len(theLine)):
-            if theLine[i:i+lenght] == query:
-                cursor = (i+lenght)+1
-                while not theLine[cursor] == '"':
-                    link.append(theLine[cursor])
-                    cursor += 1
-                break
-
-        if "".join(link) == "":
+        if content is None:
             raise NotADownloadableLinkError("Could not read the page source")
 
-        return "".join(link)
+        return json.loads(content.text)["video"]["contentUrl"]
 
 class Direct:
     def __init__(self,directory,POST):
```
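This is the gfycat change the changelog describes: instead of scanning a hard-coded line of HTML character by character, the new lookup reads the schema.org metadata gfycat embedded in a `<script type="application/ld+json">` tag and takes the video's `contentUrl`. A standalone sketch of the same idea, assuming the 2018-era page layout (gfycat.com has since shut down) and a hypothetical clip URL:

```python
import json
import urllib.request

from bs4 import BeautifulSoup

url = "https://gfycat.com/exampleclip"       # hypothetical clip URL
pageSource = urllib.request.urlopen(url).read().decode()

soup = BeautifulSoup(pageSource, "html.parser")
content = soup.find("script", attrs={"data-react-helmet": "true",
                                     "type": "application/ld+json"})
if content is None:
    raise RuntimeError("Could not read the page source")

metadata = json.loads(content.text)          # schema.org VideoObject describing the clip
print(metadata["video"]["contentUrl"])       # direct video URL suitable for downloading
```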