Moved old webpage, app & client into .archive folder

2022-08-19 00:44:25 +02:00
parent 851af204ab
commit 0efc109992
66 changed files with 0 additions and 0 deletions

108
.archive/app/.gitignore vendored Normal file

@@ -0,0 +1,108 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
.static_storage/
.media/
local_settings.py
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
# - - - - -
# My own gitignore files and folders
env_variables.py

224
.archive/app/classedStray.py Executable file

@@ -0,0 +1,224 @@
#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
# @Author: KevinMidboe
# @Date: 2017-04-05 18:40:11
# @Last Modified by: KevinMidboe
# @Last Modified time: 2018-04-03 22:58:20
import os.path, hashlib, time, glob, sqlite3, re, json, tweepy
import logging
from functools import reduce
from fuzzywuzzy import process
from langdetect import detect
from time import sleep
import env_variables as env
dirHash = None
class twitter(object):
def __init__(self):
if '' in [env.consumer_key, env.consumer_secret, env.access_token, env.access_token_secret]:
logging.warning('Twitter api keys not set!')
self.consumer_key = env.consumer_key
self.consumer_secret = env.consumer_secret
self.access_token = env.access_token
self.access_token_secret = env.access_token_secret
self.authenticate()
def authenticate(self):
auth = tweepy.OAuthHandler(self.consumer_key, self.consumer_secret)
auth.set_access_token(self.access_token, self.access_token_secret)
self.api_token = tweepy.API(auth)
def api(self):
return self.api_token
def dm(self, message, user='kevinmidboe'):
response = self.api_token.send_direct_message(user, text=message)
class strayEpisode(object):
def __init__(self, parent, childrenList):
self.parent = parent
self.children = childrenList
self._id = self.getUniqueID()
self.showName = self.findSeriesName()
self.season = self.getSeasonNumber()
self.episode = self.getEpisodeNumber()
self.videoFiles = []
self.subtitles = []
self.trash = []
self.sortMediaItems()
if self.saveToDB():
self.notifyInsert()
def getUniqueID(self):
# conn = sqlite3.connect(env.db_path)
# c = conn.cursor()
# c.execute("SELECT id FROM stray_eps WHERE id is " + )
return hashlib.md5("b'{}'".format(self.parent).encode()).hexdigest()[:8]
def findSeriesName(self):
find = re.compile("^[a-zA-Z0-9. ]*")
m = re.match(find, self.parent)
if m:
name, hit = process.extractOne(m.group(0), getShowNames().keys())
if hit >= 60:
return name
else:
# This should be logged or handled somehow
return 'Unmatched!'
def getSeasonNumber(self):
m = re.search('[sS][0-9]{1,2}', self.parent)
if m:
return re.sub('[sS]', '', m.group(0))
def getEpisodeNumber(self):
m = re.search('[eE][0-9]{1,2}', self.parent)
if m:
return re.sub('[eE]', '', m.group(0))
def removeUploadSign(self, file):
match = re.search('-[a-zA-Z\[\]\-]*.[a-z]{3}', file)
if match:
uploader = match.group(0)[:-4]
return re.sub(uploader, '', file)
return file
def analyseSubtitles(self, subFile):
        subtitlePath = os.path.join(env.input_dir, self.parent, subFile)
        if not os.path.isfile(subtitlePath):
            # Fall back to only stripping the uploader tag if the subtitle cannot be read.
            return self.removeUploadSign(subFile)
        f = open(subtitlePath, 'r', encoding='ISO-8859-15')
language = detect(f.read())
f.close()
file = self.removeUploadSign(subFile)
if 'sdh' in subFile.lower():
return '.'.join([file[:-4], 'sdh', language, file[-3:]])
return '.'.join([file[:-4], language, file[-3:]])
def sortMediaItems(self):
for child in self.children:
if child[-3:] in env.mediaExt and child[:-4] not in env.mediaExcluders:
self.videoFiles.append([child, self.removeUploadSign(child)])
elif child[-3:] in env.subExt:
self.subtitles.append([child, self.analyseSubtitles(child)])
else:
self.trash.append(child)
def notifyInsert(self):
# Send unique id. (time)
tweetObj = twitter()
if self.showName is None:
message = 'Error adding ep: ' + self._id
else:
message = 'Added episode:\n' + self.showName + ' S' + self.season\
+ 'E' + self.episode + '\nDetails: \n https://kevinmidboe.com/seasoned/verified.html?id=' + self._id
tweetObj.dm(message)
def saveToDB(self):
# TODO Setup script
conn = sqlite3.connect(env.db_path)
c = conn.cursor()
path = '/'.join([env.input_dir, self.parent])
video_files = json.dumps(self.videoFiles)
subtitles = json.dumps(self.subtitles)
trash = json.dumps(self.trash)
try:
c.execute("INSERT INTO stray_eps ('id', 'parent', 'path', 'name', 'season', 'episode', 'video_files', 'subtitles', 'trash') VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)", \
[self._id, self.parent, path, self.showName, self.season, self.episode, video_files, subtitles, trash])
except sqlite3.IntegrityError:
logging.info(self._id + ': episode already registered')
return False
conn.commit()
conn.close()
return True
def getDirContent(dir=env.input_dir):
# TODO What if item in db is not in this list?
try:
return [d for d in os.listdir(dir) if d[0] != '.']
except FileNotFoundError:
# TODO Log to error file
logging.info('Error: "' + dir + '" is not a directory.')
# Hashes the contents of media folder to easily check for changes.
def directoryChecksum():
dirList = getDirContent()
# Creates a string of all the list items.
dirConcat = reduce(lambda x, y: x + y, dirList, "")
m = hashlib.md5()
m.update(bytes(dirConcat, 'utf-16be')) # String to byte conversion.
global dirHash
if dirHash != m.digest():
dirHash = m.digest()
return True
return False
def getShowNames():
conn = sqlite3.connect(env.db_path)
c = conn.cursor()
c.execute('SELECT show_names, date_added, date_modified FROM shows')
returnList = {}
for name, added, modified in c.fetchall():
returnList[name] = [added, modified]
conn.close()
return returnList
def XOR(list1, list2):
return set(list1) ^ set(list2)
def filterChildItems(parent):
try:
children = getDirContent('/'.join([env.input_dir, parent]))
if children:
strayEpisode(parent, children)
except FileNotFoundError:
# TODO Log to error file
logging.info('Error: "' + '/'.join([env.input_dir, parent]) + '" is not a valid directory.')
def getNewItems():
newItems = XOR(getDirContent(), getShowNames())
for item in newItems:
filterChildItems(item)
def main():
# TODO Verify env variables (showDir)
start_time = time.time()
if directoryChecksum():
getNewItems()
logging.debug("--- %s seconds ---" % '{0:.4f}'.format((time.time() - start_time)))
if __name__ == '__main__':
if (os.path.exists(env.logfile)):
logging.basicConfig(filename=env.logfile, level=logging.DEBUG)
else:
        print('Logfile could not be found at ' + env.logfile + '. Verify presence or disable logging in config.')
exit(0)
while True:
main()
sleep(30)
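For reference, the change detection above boils down to hashing the concatenated directory listing and comparing digests between polls. A minimal, self-contained sketch of that idea (media_dir is a placeholder path, not the project's env.input_dir):

import hashlib, os, time
from functools import reduce

media_dir = '/tmp/media'   # placeholder, stands in for env.input_dir
previous_digest = None

def listing_digest(path):
    # Concatenate the visible entries and hash them, mirroring directoryChecksum().
    entries = [d for d in os.listdir(path) if not d.startswith('.')]
    concat = reduce(lambda x, y: x + y, entries, "")
    return hashlib.md5(bytes(concat, 'utf-16be')).digest()

while True:
    digest = listing_digest(media_dir)
    if digest != previous_digest:
        previous_digest = digest
        print('directory changed, rescanning')
    time.sleep(30)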

279
.archive/app/core.py Executable file

@@ -0,0 +1,279 @@
#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
# @Author: KevinMidboe
# @Date: 2017-08-25 23:22:27
# @Last Modified by: KevinMidboe
# @Last Modified time: 2017-10-12 22:44:27
from guessit import guessit
import os, errno
import logging
import tvdb_api
from pprint import pprint
import env_variables as env
from video import VIDEO_EXTENSIONS, Episode, Movie, Video
from subtitle import SUBTITLE_EXTENSIONS, Subtitle, get_subtitle_path
from utils import sanitize
logging.basicConfig(filename=os.path.dirname(__file__) + '/' + env.logfile, level=logging.INFO)
from datetime import datetime
#: Supported archive extensions
ARCHIVE_EXTENSIONS = ('.rar',)
def scan_video(path):
"""Scan a video from a `path`.
:param str path: existing path to the video.
:return: the scanned video.
:rtype: :class:`~subliminal.video.Video`
"""
# check for non-existing path
if not os.path.exists(path):
raise ValueError('Path does not exist')
# check video extension
# if not path.endswith(VIDEO_EXTENSIONS):
# raise ValueError('%r is not a valid video extension' % os.path.splitext(path)[1])
dirpath, filename = os.path.split(path)
logging.info('Scanning video %r in %r', filename, dirpath)
# guess
    parent_path = dirpath
# video = Video.fromguess(filename, parent_path, guessit(path))
video = Video(filename)
# guessit(path)
return video
def scan_subtitle(path):
if not os.path.exists(path):
raise ValueError('Path does not exist')
dirpath, filename = os.path.split(path)
logging.info('Scanning subtitle %r in %r', filename, dirpath)
# guess
    parent_path = dirpath
subtitle = Subtitle.fromguess(filename, parent_path, guessit(path))
return subtitle
def scan_files(path, age=None, archives=True):
"""Scan `path` for videos and their subtitles.
See :func:`refine` to find additional information for the video.
:param str path: existing directory path to scan.
:param datetime.timedelta age: maximum age of the video or archive.
:param bool archives: scan videos in archives.
:return: the scanned videos.
:rtype: list of :class:`~subliminal.video.Video`
"""
# check for non-existing path
if not os.path.exists(path):
raise ValueError('Path does not exist')
# check for non-directory path
if not os.path.isdir(path):
raise ValueError('Path is not a directory')
name_dict = {}
# walk the path
mediafiles = []
for dirpath, dirnames, filenames in os.walk(path):
logging.debug('Walking directory %r', dirpath)
# remove badly encoded and hidden dirnames
for dirname in list(dirnames):
if dirname.startswith('.'):
logging.debug('Skipping hidden dirname %r in %r', dirname, dirpath)
dirnames.remove(dirname)
# scan for videos
for filename in filenames:
# filter on videos and archives
if not (filename.endswith(VIDEO_EXTENSIONS) or filename.endswith(SUBTITLE_EXTENSIONS) or archives and filename.endswith(ARCHIVE_EXTENSIONS)):
continue
# skip hidden files
if filename.startswith('.'):
logging.debug('Skipping hidden filename %r in %r', filename, dirpath)
continue
# reconstruct the file path
filepath = os.path.join(dirpath, filename)
# skip links
if os.path.islink(filepath):
logging.debug('Skipping link %r in %r', filename, dirpath)
continue
# skip old files
if age and datetime.utcnow() - datetime.utcfromtimestamp(os.path.getmtime(filepath)) > age:
logging.debug('Skipping old file %r in %r', filename, dirpath)
continue
# scan
if filename.endswith(VIDEO_EXTENSIONS): # video
try:
video = scan_video(filepath)
try:
name_dict[video.series] += 1
except KeyError:
name_dict[video.series] = 0
mediafiles.append(video)
except ValueError: # pragma: no cover
logging.exception('Error scanning video')
continue
elif archives and filename.endswith(ARCHIVE_EXTENSIONS): # archive
print('archive')
pass
# try:
# video = scan_archive(filepath)
# mediafiles.append(video)
# except (NotRarFile, RarCannotExec, ValueError): # pragma: no cover
# logging.exception('Error scanning archive')
# continue
elif filename.endswith(SUBTITLE_EXTENSIONS): # subtitle
try:
subtitle = scan_subtitle(filepath)
mediafiles.append(subtitle)
except ValueError:
logging.exception('Error scanning subtitle')
continue
else: # pragma: no cover
raise ValueError('Unsupported file %r' % filename)
pprint(name_dict)
return mediafiles
def organize_files(path):
hashList = {}
mediafiles = scan_files(path)
# print(mediafiles)
for file in mediafiles:
hashList.setdefault(file.__hash__(),[]).append(file)
# hashList[file.__hash__()] = file
return hashList
def save_subtitles(files, single=False, directory=None, encoding=None):
t = tvdb_api.Tvdb()
if not isinstance(files, list):
files = [files]
for file in files:
# TODO this should not be done in the loop
dirname = "%s S%sE%s" % (file.series, "%02d" % (file.season), "%02d" % (file.episode))
createParentfolder = not dirname in file.parent_path
if createParentfolder:
dirname = os.path.join(file.parent_path, dirname)
print('Created: %s' % dirname)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
# TODO Clean this !
try:
tvdb_episode = t[file.series][file.season][file.episode]
episode_title = tvdb_episode['episodename']
        except Exception:
episode_title = ''
old = os.path.join(file.parent_path, file.name)
if file.name.endswith(SUBTITLE_EXTENSIONS):
lang = file.getLanguage()
sdh = '.sdh' if file.sdh else ''
filename = "%s S%sE%s %s%s.%s.%s" % (file.series, "%02d" % (file.season), "%02d" % (file.episode), episode_title, sdh, lang, file.container)
else:
filename = "%s S%sE%s %s.%s" % (file.series, "%02d" % (file.season), "%02d" % (file.episode), episode_title, file.container)
if createParentfolder:
newname = os.path.join(dirname, filename)
else:
newname = os.path.join(file.parent_path, filename)
print('Moved: %s ---> %s' % (old, newname))
os.rename(old, newname)
print()
# for hash in files:
# hashIndex = [files[hash]]
# for hashItems in hashIndex:
# for file in hashItems:
# print(file.series)
# saved_subtitles = []
# for subtitle in files:
# # check content
# if subtitle.name is None:
# logging.error('Skipping subtitle %r: no content', subtitle)
# continue
# # check language
# if subtitle.language in set(s.language for s in saved_subtitles):
# logging.debug('Skipping subtitle %r: language already saved', subtitle)
# continue
# # create subtitle path
# subtitle_path = get_subtitle_path(video.name, None if single else subtitle.language)
# if directory is not None:
# subtitle_path = os.path.join(directory, os.path.split(subtitle_path)[1])
# # save content as is or in the specified encoding
# logging.info('Saving %r to %r', subtitle, subtitle_path)
# if encoding is None:
# with io.open(subtitle_path, 'wb') as f:
# f.write(subtitle.content)
# else:
# with io.open(subtitle_path, 'w', encoding=encoding) as f:
# f.write(subtitle.text)
# saved_subtitles.append(subtitle)
# # check single
# if single:
# break
# return saved_subtitles
def stringTime():
return str(datetime.now().strftime("%Y-%m-%d %H:%M:%S:%f"))
def main():
# episodePath = '/Volumes/media/tv/Black Mirror/Black Mirror Season 01/'
episodePath = '/Volumes/mainframe/shows/Black Mirror/Black Mirror Season 01/'
t = tvdb_api.Tvdb()
hashList = organize_files(episodePath)
pprint(hashList)
if __name__ == '__main__':
main()
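To illustrate what organize_files() produces: both Episode and Subtitle hash to the md5 of series + season + episode, so a video file and its subtitles for the same episode land in the same bucket. A small sketch of that grouping idea with made-up filenames:

import hashlib

def episode_key(series, season, episode):
    # Same digest Episode.__hash__ and Subtitle.__hash__ compute.
    return hashlib.md5("b'{}'".format(str(series) + str(season) + str(episode)).encode()).hexdigest()

files = [
    ('Black Mirror S01E02.mkv',    ('Black Mirror', 1, 2)),
    ('Black Mirror S01E02.en.srt', ('Black Mirror', 1, 2)),
    ('Black Mirror S01E03.mkv',    ('Black Mirror', 1, 3)),
]

buckets = {}
for name, key in files:
    buckets.setdefault(episode_key(*key), []).append(name)

print(buckets)  # two buckets: the S01E02 video and subtitle together, S01E03 alone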

99
.archive/app/magnet.py Executable file

@@ -0,0 +1,99 @@
#!/usr/bin/env python
'''
Created on Apr 19, 2012
@author: dan, Faless
GNU GENERAL PUBLIC LICENSE - Version 3
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
http://www.gnu.org/licenses/gpl-3.0.txt
'''
import shutil
import tempfile
import os.path as pt
import sys, logging
import libtorrent as lt
from time import sleep
import env_variables as env
logging.basicConfig(filename=pt.dirname(__file__) + '/' + env.logfile)
def magnet2torrent(magnet, output_name=None):
if output_name and \
not pt.isdir(output_name) and \
not pt.isdir(pt.dirname(pt.abspath(output_name))):
logging.info("Invalid output folder: " + pt.dirname(pt.abspath(output_name)))
logging.info("")
sys.exit(0)
tempdir = tempfile.mkdtemp()
ses = lt.session()
params = {
'save_path': tempdir,
'storage_mode': lt.storage_mode_t(2),
'paused': False,
'auto_managed': True,
'duplicate_is_error': True
}
handle = lt.add_magnet_uri(ses, magnet, params)
logging.info("Downloading Metadata (this may take a while)")
while (not handle.has_metadata()):
try:
sleep(1)
except KeyboardInterrupt:
logging.info("Aborting...")
ses.pause()
logging.info("Cleanup dir " + tempdir)
shutil.rmtree(tempdir)
sys.exit(0)
ses.pause()
logging.info("Done")
torinfo = handle.get_torrent_info()
torfile = lt.create_torrent(torinfo)
output = pt.abspath(torinfo.name() + ".torrent")
if output_name:
if pt.isdir(output_name):
output = pt.abspath(pt.join(
output_name, torinfo.name() + ".torrent"))
elif pt.isdir(pt.dirname(pt.abspath(output_name))):
output = pt.abspath(output_name)
logging.info("Saving torrent file here : " + output + " ...")
torcontent = lt.bencode(torfile.generate())
f = open(output, "wb")
f.write(lt.bencode(torfile.generate()))
f.close()
logging.info("Saved! Cleaning up dir: " + tempdir)
ses.remove_torrent(handle)
shutil.rmtree(tempdir)
return output
def main():
magnet = sys.argv[1]
logging.info('INPUT: {}'.format(magnet))
magnet2torrent(magnet, env.torrent_dumpsite)
if __name__ == "__main__":
main()


@@ -0,0 +1,26 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: KevinMidboe
# @Date: 2017-03-04 13:46:28
# @Last Modified by: KevinMidboe
# @Last Modified time: 2017-03-04 14:03:57
from langdetect import detect
from removeUploader import removeUploader
testFiles = ['subs/The.Man.from.U.N.C.L.E.2015.1080p-[eztv].srt',
'subs/The.Man.from.U.N.C.L.E.2015.1080p-[eztv]ENGLUISH.srt']
def detectLanguage(file):
f = open(file, 'r', encoding= 'ISO-8859-15')
language = detect(f.read())
f.close()
return removeUploader(file)[:-3] + language + '.srt'
def addLangExtension():
for file in testFiles:
print(detectLanguage(file))
if __name__ == '__main__':
addLangExtension()


@@ -0,0 +1,16 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: KevinMidboe
# @Date: 2017-02-23 21:41:40
# @Last Modified by: KevinMidboe
# @Last Modified time: 2017-03-05 19:35:10
from pasteee import Paste
def createPasteee():
paste = Paste('Test pastee', views=10)
print(paste)
print(paste['raw'])
if __name__ == '__main__':
createPasteee()

23
.archive/app/modules/dirHash.py Executable file

@@ -0,0 +1,23 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: KevinMidboe
# @Date: 2017-04-05 15:24:17
# @Last Modified by: KevinMidboe
# @Last Modified time: 2017-04-05 18:22:13
import os, hashlib
from functools import reduce
hashDir = '/Volumes/media/tv'
def main():
dirList = os.listdir(hashDir)
concat = reduce(lambda x, y: x + y, dirList, "")
m = hashlib.md5()
m.update(bytes(concat, 'utf-16be'))
return m.digest()
if __name__ == '__main__':
print(main())
# TODO The hash value should be saved in a global manner


@@ -0,0 +1,125 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: KevinMidboe
# @Date: 2017-03-05 13:52:45
# @Last Modified by: KevinMidboe
# @Last Modified time: 2017-03-05 17:14:25
import sqlite3, json, os, tweepy
from re import sub
dbPath = 'shows.db'
consumer_key, consumer_secret = 'yvVTrxNtVsLkoHxKWxh4xvgjg', '39OW6Q8fIKDXvTPPCaEJDULcYaHC5XZ3fe7HHCGtdepBKui2jK'
access_token, access_token_secret = '3214835117-OXVVLYeqUScRAPMqfVw5hS8NI63zPnWOVK63C5I', 'ClcGnF8vW6DbvgRgjwU6YjDc9f2oxMzOvUAN8kzpsmbcL'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
def newnameMediaitems(media_items):
# media_items = [['New.Girl.S06E18.720p.HDTV.x264-EZTV.srt', '-EZTV', 'nl'], ['New.Girl.S06E18.720p.HDTV.x264-FLEET.srt', '-FLEET', 'en']]
media_items = json.loads(media_items)
returnList = []
for item in media_items:
returnList.append([item[0], item[0].replace(item[1], '')])
return returnList
def newnameSubtitles(subtitles):
subtitles = json.loads(subtitles)
returnList = []
for item in subtitles:
returnList.append([item[0], item[0].replace(item[1], '.' + item[2])])
return returnList
def updateMovedStatus(episodeDict):
conn = sqlite3.connect(dbPath)
c = conn.cursor()
c.execute('UPDATE stray_episodes SET moved = 1 WHERE original is "' + episodeDict['original'] + '"')
conn.commit()
conn.close()
def unpackEpisodes():
conn = sqlite3.connect(dbPath)
c = conn.cursor()
cursor = c.execute('SELECT * FROM stray_episodes WHERE verified = 1 AND moved = 0')
episodeList = []
for row in c.fetchall():
columnNames = [description[0] for description in cursor.description]
episodeDict = dict.fromkeys(columnNames)
for i, key in enumerate(episodeDict.keys()):
episodeDict[key] = row[i]
episodeList.append(episodeDict)
conn.close()
return episodeList
def createFolders(episode):
showDir = '/media/hdd1/tv/%s/'% episode['name']
episodeFormat = '%s S%sE%s/'% (episode['name'], episode['season'], episode['episode'])
seasonFormat = '%s Season %s/'% (episode['name'], episode['season'])
if not os.path.isdir(showDir + seasonFormat):
os.makedirs(showDir + seasonFormat)
if not os.path.isdir(showDir + seasonFormat + episodeFormat):
os.makedirs(showDir + seasonFormat + episodeFormat)
def moveFiles(episode):
# TODO All this should be imported from config file
showDir = '/media/hdd1/tv/'
episodeFormat = '%s S%sE%s/'% (episode['name'], episode['season'], episode['episode'])
seasonFormat = '%s/%s Season %s/'% (episode['name'], episode['name'], episode['season'])
# TODO All this is pretty ballsy to do this hard/stict.
newMediaitems = newnameMediaitems(episode['media_items'])
for item in newMediaitems:
old_location = showDir + episode['original'] + '/' + item[0]
new_location = showDir + seasonFormat + episodeFormat + item[1]
os.rename(old_location, new_location)
if episode['subtitles']:
newSubtitles = newnameSubtitles(episode['subtitles'])
for item in newSubtitles:
old_location = showDir + episode['original'] + '/' + item[0]
new_location = showDir + seasonFormat + episodeFormat + item[1]
os.rename(old_location, new_location)
# shutil.rmtree(showDir + episode['original'])
if episode['trash']:
for trash in json.loads(episode['trash']):
os.remove(showDir + episode['original'] + '/'+ trash)
# TODO Maybe move to delete folder instead, than user can dump.
os.rmdir(showDir + episode['original'])
updateMovedStatus(episode)
api.create_favorite(episode['response_id'])
def findVerified():
episodes = unpackEpisodes()
if episodes:
for episode in episodes:
createFolders(episode)
moveFiles(episode)
if __name__ == '__main__':
findVerified()


@@ -0,0 +1,57 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
pasteee module
Allows pasting to https://paste.ee
https://github.com/i-ghost/pasteee
"""
# 2 <-> 3
from urllib.request import urlopen
from urllib.request import Request as urlrequest
from urllib.parse import urlencode
from urllib import error as urlerror
import json
class PasteError(Exception):
"""Exception class for this module"""
pass
class Paste(object):
def __new__(cls, paste,
private=True, lang="plain",
key="public", desc="",
expire=0, views=0, encrypted=False):
if not paste:
raise PasteError("No paste provided")
if expire and views:
# API incorrectly returns success so we raise error locally
raise PasteError("Options 'expire' and 'views' are mutually exclusive")
request = urlrequest(
"http://paste.ee/api",
data=urlencode(
{
'paste': paste,
'private': bool(private),
'language': lang,
'key': key,
'description': desc,
'expire': expire,
'views': views,
'encrypted': bool(encrypted),
'format': "json"
}
).encode("utf-8"),
headers={'User-Agent': 'Mozilla/5.0'}
)
try:
result = json.loads(urlopen(request).read().decode("utf-8"))
return result["paste"]
except urlerror.HTTPError:
print("Couldn't send paste")
raise
except KeyError:
raise PasteError("Invalid paste option: %s" % (result["error"]))

30
.archive/app/modules/readDB.py Executable file

@@ -0,0 +1,30 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: KevinMidboe
# @Date: 2017-03-03 22:35:38
# @Last Modified by: KevinMidboe
# @Last Modified time: 2017-03-04 11:09:09
import sqlite3
from fuzzywuzzy import process
path = "/Users/KevinMidboe/Dropbox/python/seasonedShows/shows.db"
def main():
conn = sqlite3.connect(path)
c = conn.cursor()
c.execute('SELECT show_names, date_added, date_modified FROM shows')
returnList = {}
for name, added, modified in c.fetchall():
returnList[name] = [added, modified]
while True:
query = input('Query: ')
print(process.extractOne(query, returnList.keys()))
conn.close()
main()


@@ -0,0 +1,24 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: KevinMidboe
# @Date: 2017-03-04 13:47:32
# @Last Modified by: KevinMidboe
# @Last Modified time: 2017-03-04 13:53:12
import re
testFile = '/Volumes/media/tv/New Girl/New Girl Season 06/New Girl S06E18/New.Girl.S06E18.Young.Adult.1080p.WEB-DL.DD5.1.H264-[eztv]-horse.mkv'
def removeUploader(file=testFile):
match = re.search('-[a-zA-Z\[\]\-]*.[a-z]{3}', file)
if match and input('Remove uploader:\t' + match.group(0)[:-4] + ' [Y/n] ') != 'n':
uploader, ext = match.group(0).split('.')
# if ext not in subExtensions:
# file = file.replace(uploader, '')
# else:
# file = file.replace(uploader, '.eng')
file = file.replace(uploader, '')
return file
if __name__ == '__main__':
print(removeUploader())


@@ -0,0 +1,22 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: KevinMidboe
# @Date: 2017-03-05 15:55:16
# @Last Modified by: KevinMidboe
# @Last Modified time: 2017-03-05 17:25:50
from time import sleep
from findStray import findStray
from tweetNewEpisodes import tweetNewEpisodes
from folderCreator import findVerified
def main():
while True:
findStray()
tweetNewEpisodes()
findVerified()
sleep(10)
if __name__ == '__main__':
main()


@@ -0,0 +1,12 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from langdetect import detect
def main():
f = open('/Volumes/media/movies/The Man from UNCLE (2015)/The.Man.from.U.N.C.L.E.2015.1080p.nl.srt', 'r', encoding = "ISO-8859-15")
print(detect(f.read()))
f.close()
if __name__ == '__main__':
main()

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,137 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: KevinMidboe
# @Date: 2017-03-04 16:50:09
# @Last Modified by: KevinMidboe
# @Last Modified time: 2017-03-05 16:51:35
import tweepy, sqlite3
from pasteee import Paste
from pprint import pprint
dbPath = "shows.db"
consumer_key, consumer_secret = 'yvVTrxNtVsLkoHxKWxh4xvgjg', '39OW6Q8fIKDXvTPPCaEJDULcYaHC5XZ3fe7HHCGtdepBKui2jK'
access_token, access_token_secret = '3214835117-OXVVLYeqUScRAPMqfVw5hS8NI63zPnWOVK63C5I', 'ClcGnF8vW6DbvgRgjwU6YjDc9f2oxMzOvUAN8kzpsmbcL'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
def unpackEpisode(episode):
conn = sqlite3.connect(dbPath)
c = conn.cursor()
cursor = c.execute('SELECT * FROM stray_episodes')
columnNames = [description[0] for description in cursor.description]
conn.close()
episodeDict = dict.fromkeys(columnNames)
for i, key in enumerate(episodeDict.keys()):
episodeDict[key] = episode[i]
return episodeDict
def prettifyEpisode(episode):
returnString = ''
for key, value in episode.items():
returnString += key + ': ' + str(value) + '\n'
return returnString
def createPasteee(episode):
return Paste(prettifyEpisode(episode), private=False, desc="My first paste", views=10)
def tweetString(episode):
print(type(episode['episode']), episode)
tweetString = '@KevinMidboe\nAdded episode:\n' + episode['name'] + ' S' + str(episode['season'])\
+ 'E' + str(episode['episode']) + '\nDetails: '
return tweetString
# TODO What if error when tweeting, no id_str
def tweet(tweetString):
response = api.update_status(status=tweetString)
return response.id_str
def updateTweetID(episodeDict, id):
conn = sqlite3.connect(dbPath)
c = conn.cursor()
c.execute('UPDATE stray_episodes SET tweet_id = ' + id + ' WHERE original is "' + episodeDict['original'] + '"')
conn.commit()
conn.close()
def tweetEpisode(episode):
pasteee = createPasteee(episode)
tweet_id = tweet(tweetString(episode) + pasteee['raw'])
updateTweetID(episode, tweet_id)
def getLastTweets(user, count=1):
return api.user_timeline(screen_name=user,count=count)
def verifyByID(id, reponse_id):
conn = sqlite3.connect(dbPath)
c = conn.cursor()
c.execute('UPDATE stray_episodes SET verified = 1, response_id = ' + reponse_id + ' WHERE tweet_id is "' + id + '"')
conn.commit()
conn.close()
# TODO Add more parsing than confirm
def parseReply(tweet):
if b'\xf0\x9f\x91\x8d' in tweet.text.encode('utf-8'):
print('Verified!')
verifyByID(tweet.in_reply_to_status_id_str, tweet.id_str)
def getReply(tweet):
conn = sqlite3.connect(dbPath)
c = conn.cursor()
tweetID = tweet.in_reply_to_status_id_str
c.execute('SELECT * FROM stray_episodes WHERE tweet_id is ' + tweetID + ' AND verified is 0')
row = c.fetchone()
if row:
episode = unpackEpisode(row)
conn.close()
parseReply(tweet)
conn.close()
def lookForNewEpisodes():
conn = sqlite3.connect(dbPath)
c = conn.cursor()
c.execute('SELECT * FROM stray_episodes WHERE tweet_id is NULL')
for row in c.fetchall():
episode = unpackEpisode(row)
tweetEpisode(episode)
conn.close()
def checkForReply():
for tweet in getLastTweets('KevinMidboe', 10):
if tweet.in_reply_to_status_id_str != None:
getReply(tweet)
def tweetNewEpisodes():
lookForNewEpisodes()
checkForReply()
if __name__ == '__main__':
tweetNewEpisodes()


@@ -0,0 +1,104 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: KevinMidboe
# @Date: 2017-03-04 14:06:53
# @Last Modified by: KevinMidboe
# @Last Modified time: 2017-03-05 11:48:47
import tweepy, sqlite3
from pasteee import Paste
from pprint import pprint
dbPath = "shows.db"
consumer_key, consumer_secret = 'yvVTrxNtVsLkoHxKWxh4xvgjg', '39OW6Q8fIKDXvTPPCaEJDULcYaHC5XZ3fe7HHCGtdepBKui2jK'
access_token, access_token_secret = '3214835117-OXVVLYeqUScRAPMqfVw5hS8NI63zPnWOVK63C5I', 'ClcGnF8vW6DbvgRgjwU6YjDc9f2oxMzOvUAN8kzpsmbcL'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
apiUser = api.me()
apiUsername, apiUserID = apiUser.screen_name, apiUser.id_str
def tweetNewEpisode(episode):
createPasteee()
def unpackEpisode(episode):
episodeDict = dict.fromkeys(['original', 'full_path', 'name', 'season', 'episode',\
'media_items', 'subtitles', 'trash', 'tweet_id', 'verified'])
for i, key in enumerate(episodeDict.keys()):
episodeDict[key] = episode[i]
return episodeDict
def updateTweetID(episodeDict, id):
conn = sqlite3.connect(dbPath)
c = conn.cursor()
c.execute('UPDATE stray_episodes SET tweet_id = ' + id + ' WHERE original is ' + episodeDict['original'])
conn.commit()
conn.close()
def getUntweetedEpisodes():
conn = sqlite3.connect(dbPath)
c = conn.cursor()
c.execute('SELECT * FROM stray_episodes WHERE tweet_id is NULL')
for episode in c.fetchall():
tweetNewEpisode(episode)
conn.close()
exit(0)
return episode
def lastTweet(user, count=1):
return api.user_timeline(screen_name=user,count=count)
def checkReply():
originalTweet = lastTweet('pi_midboe')[0]
originalID, lastText = originalTweet.id_str, originalTweet.text
tweets = lastTweet('KevinMidboe', 10)
for tweet in tweets:
tweetID = tweet.in_reply_to_status_id_str
if tweetID == originalID:
print(tweet.text)
def unpackEpisodes():
conn = sqlite3.connect(dbPath)
c = conn.cursor()
c.execute('SELECT * FROM stray_episodes WHERE tweet_id IS NULL and verified IS 0')
content = c.fetchall()
conn.close()
return content
def tweet(tweetString):
if not lastTweet('pi_midboe')[0].text.startswith(tweetString):
paste = Paste(unpackEpisodes(), private=False, desc="My first paste", views=2)
tweetString += paste['raw']
response = api.update_status(status=tweetString)
print('\n', response.id_str)
def main():
episode = getUntweetedEpisodes()
print(episode)
tweet('@KevinMidboe\nAdded episode: \nNew Girl S06E16\n\nDetails: ')
# unpackEpisodes()
checkReply()
if __name__ == '__main__':
main()

112
.archive/app/moveSeasoned.py Executable file

@@ -0,0 +1,112 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: KevinMidboe
# @Date: 2017-04-12 23:27:51
# @Last Modified by: KevinMidboe
# @Last Modified time: 2018-05-13 19:17:17
import sys, sqlite3, json, os.path
import logging
import env_variables as env
import shutil
import delugeClient.deluge_cli as delugeCli
class episode(object):
def __init__(self, id):
self.id = id
self.getVarsFromDB()
def getVarsFromDB(self):
c = sqlite3.connect(env.db_path).cursor()
c.execute('SELECT parent, name, season, episode, video_files, subtitles, trash FROM stray_eps WHERE id = ?', (self.id,))
returnMsg = c.fetchone()
self.parent = returnMsg[0]
self.name = returnMsg[1]
self.season = returnMsg[2]
self.episode = returnMsg[3]
self.video_files = json.loads(returnMsg[4])
self.subtitles = json.loads(returnMsg[5])
self.trash = json.loads(returnMsg[6])
c.close()
self.queries = {
'parent_input': [env.input_dir, self.parent],
'season': [env.show_dir, self.name, self.name + ' Season ' + "%02d" % self.season],
'episode': [env.show_dir, self.name, self.name + ' Season ' + "%02d" % self.season, \
self.name + ' S' + "%02d" % self.season + 'E' + "%02d" % self.episode],
}
def typeDir(self, dType, create=False, mergeItem=None):
url = '/'.join(self.queries[dType])
print(url)
if create and not os.path.isdir(url):
os.makedirs(url)
fix_ownership(url)
if mergeItem:
return '/'.join([url, str(mergeItem)])
return url
def fix_ownership(path):
pass
# TODO find this from username from config
# uid = 1000
# gid = 112
# os.chown(path, uid, gid)
def moveStray(strayId):
ep = episode(strayId)
for item in ep.video_files:
try:
old_dir = ep.typeDir('parent_input', mergeItem=item[0])
new_dir = ep.typeDir('episode', mergeItem=item[1], create=True)
shutil.move(old_dir, new_dir)
except FileNotFoundError:
            logging.warning(old_dir + ' does not exist, cannot be moved.')
for item in ep.subtitles:
try:
old_dir = ep.typeDir('parent_input', mergeItem=item[0])
new_dir = ep.typeDir('episode', mergeItem=item[1], create=True)
shutil.move(old_dir, new_dir)
except FileNotFoundError:
            logging.warning(old_dir + ' does not exist, cannot be moved.')
for item in ep.trash:
try:
os.remove(ep.typeDir('parent_input', mergeItem=item))
except FileNotFoundError:
            logging.warning(ep.typeDir('parent_input', mergeItem=item) + ' does not exist, cannot be removed.')
fix_ownership(ep.typeDir('episode'))
for root, dirs, files in os.walk(ep.typeDir('episode')):
for item in files:
fix_ownership(os.path.join(ep.typeDir('episode'), item))
    # TODO because some files may have been skipped above, the dir might not
    # be empty, so it cannot always be removed like this.
try:
os.rmdir(ep.typeDir('parent_input'))
except FileNotFoundError:
logging.warning('Cannot remove ' + ep.typeDir('parent_input') + ', file no longer exists.')
# Remove from deluge client
    logging.info('Removing {} from deluge'.format(ep.parent))
deluge = delugeCli.Deluge()
response = deluge.remove(ep.parent)
logging.info('Deluge response after delete: {}'.format(response))
if __name__ == '__main__':
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
if (os.path.exists(os.path.join(dname, env.logfile))):
logging.basicConfig(filename=env.logfile, level=logging.INFO)
else:
        print('Logfile could not be found at ' + env.logfile + '. Verify presence or disable logging in config.')
moveStray(sys.argv[-1])
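To make the destination layout concrete, typeDir() joins the query lists above into paths of the form show_dir/Name/Name Season NN/Name SNNENN. A small sketch with placeholder values (show_dir, name, season and episode are assumptions, not read from env):

show_dir = '/media/hdd1/tv'   # placeholder for env.show_dir
name, season, episode = 'New Girl', 6, 18
queries = {
    'season':  [show_dir, name, '%s Season %02d' % (name, season)],
    'episode': [show_dir, name, '%s Season %02d' % (name, season),
                '%s S%02dE%02d' % (name, season, episode)],
}
print('/'.join(queries['episode']))
# /media/hdd1/tv/New Girl/New Girl Season 06/New Girl S06E18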

318
.archive/app/pirateSearch.py Executable file

@@ -0,0 +1,318 @@
#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
# @Author: KevinMidboe
# @Date: 2017-10-12 11:55:03
# @Last Modified by: KevinMidboe
# @Last Modified time: 2017-10-22 18:54:18
import sys, logging, re, json
from urllib import parse, request
from urllib.error import URLError
from bs4 import BeautifulSoup
from os import path
import datetime
from pprint import pprint
from core import stringTime
import env_variables as env
logging.basicConfig(filename=path.dirname(__file__) + '/' + env.logfile, level=logging.INFO)
RELEASE_TYPES = ('bdremux', 'brremux', 'remux',
'bdrip', 'brrip', 'blu-ray', 'bluray', 'bdmv', 'bdr', 'bd5',
'web-cap', 'webcap', 'web cap',
'webrip', 'web rip', 'web-rip', 'web',
'webdl', 'web dl', 'web-dl', 'hdrip',
'dsr', 'dsrip', 'satrip', 'dthrip', 'dvbrip', 'hdtv', 'pdtv', 'tvrip', 'hdtvrip',
'dvdr', 'dvd-full', 'full-rip', 'iso',
'ts', 'hdts', 'hdts', 'telesync', 'pdvd', 'predvdrip',
'camrip', 'cam')
def sanitize(string, ignore_characters=None, replace_characters=None):
"""Sanitize a string to strip special characters.
:param str string: the string to sanitize.
:param set ignore_characters: characters to ignore.
:return: the sanitized string.
:rtype: str
"""
# only deal with strings
if string is None:
return
replace_characters = replace_characters or ''
ignore_characters = ignore_characters or set()
characters = ignore_characters
if characters:
string = re.sub(r'[%s]' % re.escape(''.join(characters)), replace_characters, string)
return string
def return_re_match(string, re_statement):
if string is None:
return
m = re.search(re_statement, string)
if 'Y-day' in m.group():
        return (datetime.datetime.now() - datetime.timedelta(days=1)).strftime('%m-%d %Y')
if 'Today' in m.group():
return datetime.datetime.now().strftime('%m-%d %Y')
return sanitize(m.group(), '\xa0', ' ')
# Can maybe be moved away from this class.
# Returns multiple_pages, capped at total_pages when it exceeds it.
def pagesToCount(multiple, total):
if (multiple > total):
return total
return multiple
# Should maybe not be able to set values without checking if they are valid?
class piratebay(object):
def __init__(self, query=None, page=0, sort=None, category=None):
# This should be moved to a config file
self.url = 'https://thepiratebay.org/search'
self.sortTypes = {
'size': 5,
'seed_count': 99
}
self.categoryTypes = {
'movies': 207,
'porn_movies': 505,
}
# - - -
# Req params
self.query = query
self.page = page
self.sort = sort
self.category = category
self.total_pages = 0
self.headers = {'User-Agent': 'Mozilla/5.0'}
# self.headers = {}
def build_URL_request(self):
url = '/'.join([self.url, parse.quote(self.query), str(self.page), str(self.sort), str(self.category)])
return request.Request(url, headers=self.headers)
def next_page(self):
# If page exceeds the max_page, return None
# Can either save the last query/url in the object or have it passed
# again on call to next_page
        # Throw an error if it is not possible (overflow)
self.page += 1
raw_page = self.callPirateBaT()
return self.parse_raw_page_for_torrents(raw_page)
def set_total_pages(self, raw_page):
# body-id:searchResults-id:content-align:center
soup = BeautifulSoup(raw_page, 'html.parser')
content_searchResult = soup.body.find(id='SearchResults')
page_div = content_searchResult.find_next(attrs={"align": "center"})
last_page = 0
for page in page_div.find_all('a'):
last_page += 1
self.total_pages = last_page
def callPirateBaT(self):
req = self.build_URL_request()
raw_page = self.fetchURL(req).read()
logging.info('Finished searching piratebay for query | %s' % stringTime())
if raw_page is None:
raise ValueError('Search result returned no content. Please check log for error reason.')
        if self.total_pages == 0:
self.set_total_pages(raw_page)
return raw_page
# Sets the search
def search(self, query, multiple_pages=1, page=0, sort=None, category=None):
# This should not be logged here, but in loop. Something else here maybe?
logging.info('Searching piratebay with query: %r, sort: %s and category: %s | %s' %
(query, sort, category, stringTime()))
if sort is not None and sort in self.sortTypes:
self.sort = self.sortTypes[sort]
else:
raise ValueError('Invalid sort category for piratebay search')
# Verify input? and reset total_pages
self.query = query
self.total_pages = 0
if str(page).isnumeric() and type(page) == int and page >= 0:
self.page = page
# TODO add category list
if category is not None and category in self.categoryTypes:
self.category = self.categoryTypes[category]
# TODO Pull most of this logic out bc it needs to also be done in next_page
raw_page = self.callPirateBaT()
torrents_found = self.parse_raw_page_for_torrents(raw_page)
# Fetch in parallel
n = pagesToCount(multiple_pages, self.total_pages)
while n > 1:
torrents_found.extend(self.next_page())
n -= 1
return torrents_found
def removeHeader(self, bs4_element):
if ('header' in bs4_element['class']):
return bs4_element.find_next('tr')
return bs4_element
def has_magnet(self, href):
return href and re.compile('magnet').search(href)
def parse_raw_page_for_torrents(self, content):
soup = BeautifulSoup(content, 'html.parser')
content_searchResult = soup.body.find(id='searchResult')
if content_searchResult is None:
logging.info('No torrents found for the search criteria.')
return None
listElements = content_searchResult.tr
torrentWrapper = self.removeHeader(listElements)
torrents_found = []
for torrentElement in torrentWrapper.find_all_next('td'):
if torrentElement.find_all("div", class_='detName'):
name = torrentElement.find('a', class_='detLink').get_text()
url = torrentElement.find('a', class_='detLink')['href']
magnet = torrentElement.find(href=self.has_magnet)
uploader = torrentElement.find('a', class_='detDesc')
if uploader is None:
uploader = torrentElement.find('i')
uploader = uploader.get_text()
info_text = torrentElement.find('font', class_='detDesc').get_text()
date = return_re_match(info_text, r"(\d+\-\d+(\s\d{4})?)|(Y\-day|Today)")
size = return_re_match(info_text, r"(\d+(\.\d+)?\s[a-zA-Z]+)")
# COULD NOT FIND HREF!
if (magnet is None):
continue
seed_and_leech = torrentElement.find_all_next(attrs={"align": "right"})
seed = seed_and_leech[0].get_text()
leech = seed_and_leech[1].get_text()
torrent = Torrent(name, magnet['href'], size, uploader, date, seed, leech, url)
torrents_found.append(torrent)
else:
# print(torrentElement)
continue
logging.info('Found %s torrents for given search criteria.' % len(torrents_found))
return torrents_found
def fetchURL(self, req):
try:
response = request.urlopen(req)
except URLError as e:
if hasattr(e, 'reason'):
logging.error('We failed to reach a server with request: %s' % req.full_url)
logging.error('Reason: %s' % e.reason)
elif hasattr(e, 'code'):
logging.error('The server couldn\'t fulfill the request.')
logging.error('Error code: ', e.code)
else:
return response
class Torrent(object):
def __init__(self, name, magnet=None, size=None, uploader=None, date=None,
seed_count=None, leech_count=None, url=None):
self.name = name
self.magnet = magnet
self.size = size
self.uploader = uploader
self.date = date
self.seed_count = seed_count
self.leech_count = leech_count
self.url = url
def find_release_type(self):
name = self.name.casefold()
return [r_type for r_type in RELEASE_TYPES if r_type in name]
def get_all_attr(self):
return ({'name': self.name, 'magnet': self.magnet,'uploader': self.uploader,'size': self.size,'date': self.date,'seed': self.seed_count,'leech': self.leech_count,'url': self.url})
def __repr__(self):
return '<%s [%r]>' % (self.__class__.__name__, self.name)
# This should be done front_end!
# I.E. filtering like this should be done in another script
# and should be done with the shared standard for types.
# PS: Is it the right move to use a shared standard? What
# happens if it is no longer public?
def chooseCandidate(torrent_list):
interesting_torrents = []
match_release_type = ['bdremux', 'brremux', 'remux', 'bdrip', 'brrip', 'blu-ray', 'bluray', 'bdmv', 'bdr', 'bd5']
for torrent in torrent_list:
intersecting_release_types = set(torrent.find_release_type()) & set(match_release_type)
size, _, size_id = torrent.size.partition(' ')
if intersecting_release_types and int(torrent.seed_count) > 0 and float(size) > 4 and size_id == 'GiB':
# print('{} : {} : {} {}'.format(torrent.name, torrent.size, torrent.seed_count, torrent.magnet))
interesting_torrents.append(torrent.get_all_attr())
# else:
# print('Denied match! %s : %s : %s' % (torrent.name, torrent.size, torrent.seed_count))
return interesting_torrents
def searchTorrentSite(query, site='piratebay'):
pirate = piratebay()
torrents_found = pirate.search(query, page=0, multiple_pages=3, sort='size')
candidates = {}
if (torrents_found):
candidates = chooseCandidate(torrents_found)
print(json.dumps(candidates))
# torrents_found = pirate.next_page()
# pprint(torrents_found)
# candidates = chooseCandidate(torrents_found)
# Can autocall to next_page in a looped way to get more if nothing is found
# and there is more pages to be looked at
def main():
query = sys.argv[1]
searchTorrentSite(query)
if __name__ == '__main__':
main()
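As a quick illustration of the release-type matching used by chooseCandidate(), Torrent.find_release_type() simply scans the lower-cased name for known tokens (the torrent name below is a made-up example):

t = Torrent('Blade.Runner.2049.2017.1080p.BluRay.x264-GRP')
print(t.find_release_type())  # ['bluray']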

57
.archive/app/seasonMover.py Executable file

@@ -0,0 +1,57 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: KevinMidboe
# @Date: 2017-07-11 19:16:23
# @Last Modified by: KevinMidboe
# @Last Modified time: 2017-07-11 19:16:23
import fire, re, os
class seasonMover(object):
''' Moving multiple files to multiple folders with
identifer '''
workingDir = os.getcwd()
def create(self, name, interval):
pass
def move(self, fileSyntax, folderName):
episodeRange = self.findInterval(fileSyntax)
self.motherMover(fileSyntax, folderName, episodeRange)
def findInterval(self, item):
if (re.search(r'\((.*)\)', item) is None):
raise ValueError('Need to declare an identifier e.g. (1..3) in: \n\t' + item)
start = int(re.search('\((\d+)\.\.', item).group(1))
end = int(re.search('\.\.(\d+)\)', item).group(1))
return list(range(start, end+1))
def removeUploadSign(self, file):
match = re.search('-[a-zA-Z\[\]\-]*.[a-z]{3}', file)
if match:
uploader = match.group(0)[:-4]
return re.sub(uploader, '', file)
return file
def motherMover(self, fileSyntax, folderName, episodeRange):
# Call for sub of fileList
# TODO check if range is same as folderContent
for episode in episodeRange:
leadingZeroNumber = "%02d" % episode
fileName = re.sub(r'\((.*)\)', leadingZeroNumber, fileSyntax)
oldPath = os.path.join(self.workingDir,fileName)
newFolder = os.path.join(self.workingDir, folderName + leadingZeroNumber)
newPath = os.path.join(newFolder, self.removeUploadSign(fileName))
os.makedirs(newFolder)
os.rename(oldPath, newPath)
# print(newFolder)
# print(oldPath + ' --> ' + newPath)
if __name__ == '__main__':
fire.Fire(seasonMover)
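Example of the (start..end) syntax move() expects (made-up file pattern): findInterval() pulls the numeric range out of the parentheses, and motherMover() then substitutes each zero-padded number into the pattern.

mover = seasonMover()
print(mover.findInterval('New.Girl.S06E(1..3).mkv'))  # [1, 2, 3]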

111
.archive/app/subtitle.py Normal file

@@ -0,0 +1,111 @@
# -*- coding: utf-8 -*-
import codecs
import logging
import os
import chardet
import hashlib
from video import Episode, Movie
from utils import sanitize
from langdetect import detect
logger = logging.getLogger(__name__)
#: Subtitle extensions
SUBTITLE_EXTENSIONS = ('.srt', '.sub')
class Subtitle(object):
"""Base class for subtitle.
:param language: language of the subtitle.
:type language: :class:`~babelfish.language.Language`
:param bool hearing_impaired: whether or not the subtitle is hearing impaired.
:param page_link: URL of the web page from which the subtitle can be downloaded.
:type page_link: str
:param encoding: Text encoding of the subtitle.
:type encoding: str
"""
#: Name of the provider that returns that class of subtitle
provider_name = ''
def __init__(self, name, parent_path, series, season, episode, language=None, hash=None, container=None, format=None, sdh=False):
#: Language of the subtitle
self.name = name
self.parent_path = parent_path
self.series = series
self.season = season
self.episode = episode
self.language=language
self.hash = hash
self.container = container
self.format = format
self.sdh = sdh
@classmethod
def fromguess(cls, name, parent_path, guess):
if not (guess['type'] == 'movie' or guess['type'] == 'episode'):
            raise ValueError('The guess must be an episode or a movie guess')
if 'title' not in guess:
raise ValueError('Insufficient data to process the guess')
sdh = 'sdh' in name.lower()
        if guess['type'] == 'episode':
            return cls(name, parent_path, guess.get('title', 1), guess.get('season'), guess['episode'],
                       container=guess.get('container'), format=guess.get('format'), sdh=sdh)
        elif guess['type'] == 'movie':
            return cls(name, parent_path, guess.get('title', 1), None, None,
                       container=guess.get('container'), format=guess.get('format'), sdh=sdh)
def getLanguage(self):
f = open(os.path.join(self.parent_path, self.name), 'r', encoding='ISO-8859-15')
language = detect(f.read())
f.close()
return language
def __hash__(self):
return hashlib.md5("b'{}'".format(str(self.series) + str(self.season) + str(self.episode)).encode()).hexdigest()
def __repr__(self):
return '<%s %s [%sx%s]>' % (self.__class__.__name__, self.series, self.season, str(self.episode))
def get_subtitle_path(subtitles_path, language=None, extension='.srt'):
"""Get the subtitle path using the `subtitles_path` and `language`.
:param str subtitles_path: path to the subtitle.
:param language: language of the subtitle to put in the path.
:type language: :class:`~babelfish.language.Language`
:param str extension: extension of the subtitle.
:return: path of the subtitle.
:rtype: str
"""
subtitle_root = os.path.splitext(subtitles_path)[0]
if language:
subtitle_root += '.' + str(language)
return subtitle_root + extension
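Example of get_subtitle_path() with a made-up video path and language code:

print(get_subtitle_path('/tv/Black Mirror S01E02.mkv', 'en'))
# /tv/Black Mirror S01E02.en.srt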

38
.archive/app/utils.py Normal file

@@ -0,0 +1,38 @@
# -*- coding: utf-8 -*-
from datetime import datetime
import hashlib
import os
import re
import struct
def sanitize(string, ignore_characters=None):
"""Sanitize a string to strip special characters.
:param str string: the string to sanitize.
:param set ignore_characters: characters to ignore.
:return: the sanitized string.
:rtype: str
"""
# only deal with strings
if string is None:
return
ignore_characters = ignore_characters or set()
# replace some characters with one space
# characters = {'-', ':', '(', ')', '.'} - ignore_characters
# if characters:
# string = re.sub(r'[%s]' % re.escape(''.join(characters)), ' ', string)
# remove some characters
characters = {'\''} - ignore_characters
if characters:
string = re.sub(r'[%s]' % re.escape(''.join(characters)), '', string)
# replace multiple spaces with one
string = re.sub(r'\s+', ' ', string)
# strip and lower case
return string.strip().lower()
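Example of what sanitize() does in practice (apostrophes removed, whitespace collapsed, lower-cased):

print(sanitize("It's   Always  Sunny"))
# its always sunny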

233
.archive/app/video.py Normal file

@@ -0,0 +1,233 @@
#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
# @Author: KevinMidboe
# @Date: 2017-08-26 08:23:18
# @Last Modified by: KevinMidboe
# @Last Modified time: 2017-09-29 13:56:21
from guessit import guessit
import os
import hashlib, tvdb_api
from datetime import datetime, timedelta
#: Video extensions
VIDEO_EXTENSIONS = ('.3g2', '.3gp', '.3gp2', '.3gpp', '.60d', '.ajp', '.asf', '.asx', '.avchd', '.avi', '.bik',
'.bix', '.box', '.cam', '.dat', '.divx', '.dmf', '.dv', '.dvr-ms', '.evo', '.flc', '.fli',
'.flic', '.flv', '.flx', '.gvi', '.gvp', '.h264', '.m1v', '.m2p', '.m2ts', '.m2v', '.m4e',
'.m4v', '.mjp', '.mjpeg', '.mjpg', '.mkv', '.moov', '.mov', '.movhd', '.movie', '.movx', '.mp4',
                    '.mpe', '.mpeg', '.mpg', '.mpv', '.mpv2', '.mxf', '.nsv', '.nut', '.ogg', '.ogm', '.ogv', '.omf',
'.ps', '.qt', '.ram', '.rm', '.rmvb', '.swf', '.ts', '.vfw', '.vid', '.video', '.viv', '.vivo',
'.vob', '.vro', '.wm', '.wmv', '.wmx', '.wrap', '.wvx', '.wx', '.x264', '.xvid')
class Video(object):
"""Base class for videos.
Represent a video, existing or not.
:param str name: name or path of the video.
:param str format: format of the video (HDTV, WEB-DL, BluRay, ...).
:param str release_group: release group of the video.
:param str resolution: resolution of the video stream (480p, 720p, 1080p or 1080i).
:param str video_codec: codec of the video stream.
:param str audio_codec: codec of the main audio stream.
:param str imdb_id: IMDb id of the video.
:param dict hashes: hashes of the video file by provider names.
:param int size: size of the video file in bytes.
:param set subtitle_languages: existing subtitle languages.
"""
def __init__(self, name, format=None, release_group=None, resolution=None, video_codec=None, audio_codec=None,
imdb_id=None, hashes=None, size=None, subtitle_languages=None):
#: Name or path of the video
self.name = name
#: Format of the video (HDTV, WEB-DL, BluRay, ...)
self.format = format
#: Release group of the video
self.release_group = release_group
#: Resolution of the video stream (480p, 720p, 1080p or 1080i)
self.resolution = resolution
#: Codec of the video stream
self.video_codec = video_codec
#: Codec of the main audio stream
self.audio_codec = audio_codec
#: IMDb id of the video
self.imdb_id = imdb_id
#: Hashes of the video file by provider names
self.hashes = hashes or {}
#: Size of the video file in bytes
self.size = size
#: Existing subtitle languages
self.subtitle_languages = subtitle_languages or set()
@property
def exists(self):
"""Test whether the video exists"""
return os.path.exists(self.name)
@property
def age(self):
"""Age of the video"""
if self.exists:
return datetime.utcnow() - datetime.utcfromtimestamp(os.path.getmtime(self.name))
return timedelta()
@classmethod
def fromguess(cls, name, parent_path, guess):
"""Create an :class:`Episode` or a :class:`Movie` with the given `name` based on the `guess`.
:param str name: name of the video.
:param dict guess: guessed data.
:raise: :class:`ValueError` if the `type` of the `guess` is invalid
"""
if guess['type'] == 'episode':
return Episode.fromguess(name, parent_path, guess)
if guess['type'] == 'movie':
return Movie.fromguess(name, guess)
raise ValueError('The guess must be an episode or a movie guess')
@classmethod
def fromname(cls, name):
"""Shortcut for :meth:`fromguess` with a `guess` guessed from the `name`.
:param str name: name of the video.
"""
return cls.fromguess(name, guessit(name))
def __repr__(self):
return '<%s [%r]>' % (self.__class__.__name__, self.name)
def __hash__(self):
return hash(self.name)
class Episode():
"""Episode :class:`Video`.
:param str series: series of the episode.
:param int season: season number of the episode.
:param int episode: episode number of the episode.
:param str title: title of the episode.
:param int year: year of the series.
:param bool original_series: whether the series is the first with this name.
:param int tvdb_id: TVDB id of the episode.
:param \*\*kwargs: additional parameters for the :class:`Video` constructor.
"""
def __init__(self, name, parent_path, series, season, episode, year=None, original_series=True, tvdb_id=None,
series_tvdb_id=None, series_imdb_id=None, release_group=None, video_codec=None, container=None,
format=None, screen_size=None, **kwargs):
super(Episode, self).__init__()
self.name = name
self.parent_path = parent_path
#: Series of the episode
self.series = series
#: Season number of the episode
self.season = season
#: Episode number of the episode
self.episode = episode
#: Year of series
self.year = year
#: The series is the first with this name
self.original_series = original_series
#: TVDB id of the episode
self.tvdb_id = tvdb_id
#: TVDB id of the series
self.series_tvdb_id = series_tvdb_id
#: IMDb id of the series
self.series_imdb_id = series_imdb_id
# The release group of the episode
self.release_group = release_group
# The video vodec of the series
self.video_codec = video_codec
# The Video container of the episode
self.container = container
# The Video format of the episode
self.format = format
# The Video screen_size of the episode
self.screen_size = screen_size
@classmethod
def fromguess(cls, name, parent_path, guess):
if guess['type'] != 'episode':
raise ValueError('The guess must be an episode guess')
if 'title' not in guess or 'episode' not in guess:
raise ValueError('Insufficient data to process the guess')
return cls(name, parent_path, guess['title'], guess.get('season', 1), guess['episode'],
year=guess.get('year'), original_series='year' not in guess, release_group=guess.get('release_group'),
video_codec=guess.get('video_codec'), audio_codec=guess.get('audio_codec'), container=guess.get('container'),
format=guess.get('format'), screen_size=guess.get('screen_size'))
@classmethod
def fromname(cls, name):
return cls.fromguess(name, guessit(name, {'type': 'episode'}))
def __hash__(self):
return hashlib.md5("b'{}'".format(str(self.series) + str(self.season) + str(self.episode)).encode()).hexdigest()
# THE EP NUMBER IS CONVERTED TO STRING AS A QUICK FIX FOR MULTIPLE NUMBERS IN ONE
def __repr__(self):
if self.year is None:
return '<%s [%r, %sx%s]>' % (self.__class__.__name__, self.series, self.season, str(self.episode))
return '<%s [%r, %d, %sx%s]>' % (self.__class__.__name__, self.series, self.year, self.season, str(self.episode))
class Movie():
"""Movie :class:`Video`.
:param str title: title of the movie.
:param int year: year of the movie.
:param \*\*kwargs: additional parameters for the :class:`Video` constructor.
"""
def __init__(self, name, title, year=None, format=None, **kwargs):
super(Movie, self).__init__()
#: Title of the movie
self.title = title
#: Year of the movie
self.year = year
self.format = format
@classmethod
def fromguess(cls, name, guess):
if guess['type'] != 'movie':
raise ValueError('The guess must be a movie guess')
if 'title' not in guess:
raise ValueError('Insufficient data to process the guess')
return cls(name, guess['title'], format=guess.get('format'), release_group=guess.get('release_group'),
resolution=guess.get('screen_size'), video_codec=guess.get('video_codec'),
audio_codec=guess.get('audio_codec'), year=guess.get('year'))
@classmethod
def fromname(cls, name):
return cls.fromguess(name, guessit(name, {'type': 'movie'}))
def __repr__(self):
if self.year is None:
return '<%s [%r]>' % (self.__class__.__name__, self.title)
return '<%s [%r, %d]>' % (self.__class__.__name__, self.title, self.year)
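Rough usage sketch: guessit (imported at the top of this file) parses a release name into a dict with 'type', 'title', 'season', 'episode' and so on, and Video.fromguess() dispatches to Episode or Movie accordingly. The release name and parent path below are made-up examples:

name = 'New.Girl.S06E18.720p.HDTV.x264-GRP.mkv'
guess = guessit(name)              # expect type='episode', title='New Girl', season=6, episode=18
ep = Video.fromguess(name, '/downloads', guess)
print(ep)                          # <Episode ['New Girl', 6x18]>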