diff --git a/README.md b/README.md
index fdd159d..6af9825 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,10 @@
 # *Seasoned*: an intelligent organizer for your shows
-## Architecture
-The flow of the system will first check for new folders in your tv shows directory, if a new file is found it's contents are analyzed, stored and tweets suggested changes to it's contents to use_admin.
+*Seasoned* is an intelligent organizer for your tv show episodes. It automates and simplifies the process of renaming and moving newly downloaded tv show episodes, following the Plex file naming and placement conventions.
 
-Then there is a script for looking for replies on twitter by user_admin, if caanges are needed, it handles the changes specified and updates dtabbase.
+## Architecture
+The system first checks for new folders in your tv shows directory; if a new item is found, its contents are analyzed, stored, and the suggested changes to its contents are tweeted to user_admin.
+
+Then a second script looks for replies on Twitter from user_admin; if changes are needed, it applies the specified changes and updates the database.
 
 After approval by the user, the files are modified and moved into their respective folders.
 
 If an error occurs, a Paste.ee link to the log is sent to the user.
-
-
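The architecture described above maps onto the scripts touched in this patch. As an illustration only (not part of the patch), here is a minimal sketch of how the stages could be chained into one polling loop, assuming the entry points visible in the diffs below (`findStray.findStray()`, `tweetNewEpisodes.lookForNewEpisodes()` / `checkForReply()`, and `folderCreator.unpackEpisodes()` / `createFolders()` / `moveFiles()`); the project may equally run each script separately, e.g. from cron:

```python
#!/usr/bin/env python3
# Hypothetical orchestration sketch -- not part of this patch.
# It only assumes the functions that are visible in the diffs below.
import time

import findStray         # scans the show directory and records stray episodes
import tweetNewEpisodes  # tweets suggestions and reads replies from user_admin
import folderCreator     # creates Plex folders and moves verified episodes


def run_once():
    # 1. Find new episode folders, analyze them and store them in the database.
    findStray.findStray()

    # 2. Tweet suggested changes for every episode that has no tweet_id yet.
    tweetNewEpisodes.lookForNewEpisodes()

    # 3. Check replies; a confirmation marks the row as verified.
    tweetNewEpisodes.checkForReply()

    # 4. Move every verified-but-not-moved episode into its show/season folder.
    for episode in folderCreator.unpackEpisodes():
        folderCreator.createFolders(episode)
        folderCreator.moveFiles(episode)


if __name__ == '__main__':
    while True:
        run_once()
        time.sleep(15 * 60)  # poll every 15 minutes (interval is arbitrary)
```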
diff --git a/findStray.py b/findStray.py
index 5f8d549..bf034d2 100755
--- a/findStray.py
+++ b/findStray.py
@@ -52,26 +52,11 @@ def XOR(list1, list2):
     return set(list1) ^ set(list2)
 
-
-def getNewFolderContents():
-    showNames = getShowNames().keys()
-    folderContents = filter( lambda f: not f.startswith('.'), os.listdir(showDir))
-
-    return XOR(folderContents, showNames)
-
-
-def checkForSingleEpisodes(folderItem):
-    showName, hit = getFuzzyName(folderItem)
-    episodeMatch = re.findall(re.sub(' ', '.', showName)+'\.S[0-9]{1,2}E[0-9]{1,2}\.', folderItem)
-
-    if episodeMatch:
-        return True
-
-
 def getByIdentifier(folderItem, identifier):
     itemMatch = re.findall(identifier + '[0-9]{1,2}', folderItem)
+    # TODO Should be more precise than first item in list
     item = re.sub(identifier, '', itemMatch[0])
+    # TODO Should be checking for errors
     return item
 
 def getItemChildren(folder):
@@ -90,31 +75,7 @@ def getItemChildren(folder):
     return media_items, subtitles, trash
 
-def getEpisodeInfo(folderItem):
-    showName, hit = getFuzzyName(folderItem)
-    season = getByIdentifier(folderItem, 'S')
-    episode = getByIdentifier(folderItem, 'E')
-    media_items, subtitles, trash = getItemChildren(folderItem)
-    episodeInfo = []
-    episodeInfo = {'original': folderItem,
-        'full_path': showDir + folderItem,
-        'name': showName,
-        'season': season,
-        'episode': episode,
-        'media_items': media_items,
-        'subtitles': subtitles,
-        'trash': trash,
-        'tweet_id': None,
-        'reponse_id': None,
-        'verified': '0',
-        'moved': '0'}
-
-
-    addToDB(episodeInfo)
-    return episodeInfo
-
-
 def addToDB(episodeInfo):
     conn = sqlite3.connect(dbPath)
     c = conn.cursor()
@@ -143,7 +104,56 @@
     conn.commit()
     conn.close()
+
+
+
+def getNewFolderContents():
+    # TODO Should not do on keys, if empty.
+    showNames = getShowNames().keys()
+    # TODO Better way to filter non dotfiles, dirread in filter?
+    # Should maybe all dirs be checked at start?
+    folderContents = filter( lambda f: not f.startswith('.'), os.listdir(showDir))
+
+    return XOR(folderContents, showNames) # OK
+
+
+def checkForSingleEpisodes(folderItem):
+    # TODO also if empty, should be checked earlier
+    showName, hit = getFuzzyName(folderItem)
+
+    episodeMatch = re.findall(re.sub(' ', '.', showName)+'\.S[0-9]{1,2}E[0-9]{1,2}\.', folderItem)
+    if episodeMatch:
+        return True # OK
+
+
+def getEpisodeInfo(folderItem):
+    showName, hit = getFuzzyName(folderItem)
+    season = getByIdentifier(folderItem, 'S')
+    episode = getByIdentifier(folderItem, 'E')
+    media_items, subtitles, trash = getItemChildren(folderItem)
+
+    episodeInfo = []
+    episodeInfo = {'original': folderItem,
+        'full_path': showDir + folderItem,
+        'name': showName,
+        'season': season,
+        'episode': episode,
+        'media_items': media_items,
+        'subtitles': subtitles,
+        'trash': trash,
+        'tweet_id': None,
+        'reponse_id': None,
+        'verified': '0',
+        'moved': '0'}
+
+
+    addToDB(episodeInfo)
+    return episodeInfo
+
+
+
 def findStray():
+    # TODO What if null or tries to pass down error
     for item in getNewFolderContents():
         if checkForSingleEpisodes(item):
             pprint(getEpisodeInfo(item))
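The two TODOs added to `getByIdentifier` above point at the same weakness: the function grabs the first regex hit and raises an `IndexError` when nothing matches. A hedged sketch of a more defensive variant (an illustration only, not the project's implementation; the snake_case name and the `None` return convention are my assumptions):

```python
import re

def get_by_identifier(folder_item, identifier):
    """Return the one- or two-digit number following `identifier` ('S' or 'E')
    in a folder name, or None when the pattern is absent."""
    match = re.search(re.escape(identifier) + r'([0-9]{1,2})', folder_item)
    if match is None:
        return None  # caller can skip or log the folder instead of crashing
    return match.group(1)

# Example:
#   get_by_identifier('New.Girl.S06E18.720p.HDTV.x264', 'S')  -> '06'
#   get_by_identifier('New.Girl.S06E18.720p.HDTV.x264', 'X')  -> None
```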
diff --git a/folderCreator.py b/folderCreator.py
index 0f197e8..bfd5ac9 100755
--- a/folderCreator.py
+++ b/folderCreator.py
@@ -17,40 +17,6 @@ auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
 auth.set_access_token(access_token, access_token_secret)
 api = tweepy.API(auth)
 
-def unpackEpisodes():
-    conn = sqlite3.connect(dbPath)
-    c = conn.cursor()
-
-    cursor = c.execute('SELECT * FROM stray_episodes WHERE verified = 1 AND moved = 0')
-    episodeList = []
-    for row in c.fetchall():
-        columnNames = [description[0] for description in cursor.description]
-
-        episodeDict = dict.fromkeys(columnNames)
-
-        for i, key in enumerate(episodeDict.keys()):
-            episodeDict[key] = row[i]
-
-        episodeList.append(episodeDict)
-
-    conn.close()
-
-    return episodeList
-
-
-def createFolders(episode):
-    showDir = '/media/hdd1/tv/%s/'% episode['name']
-    episodeFormat = '%s S%sE%s/'% (episode['name'], episode['season'], episode['episode'])
-    seasonFormat = '%s Season %s/'% (episode['name'], episode['season'])
-
-    if not os.path.isdir(showDir + seasonFormat):
-        os.makedirs(showDir + seasonFormat)
-
-    if not os.path.isdir(showDir + seasonFormat + episodeFormat):
-        os.makedirs(showDir + seasonFormat + episodeFormat)
-
-
 def newnameMediaitems(media_items):
     # media_items = [['New.Girl.S06E18.720p.HDTV.x264-EZTV.srt', '-EZTV', 'nl'], ['New.Girl.S06E18.720p.HDTV.x264-FLEET.srt', '-FLEET', 'en']]
     media_items = json.loads(media_items)
@@ -80,11 +46,46 @@ def updateMovedStatus(episodeDict):
     conn.commit()
     conn.close()
+
+
+
+def unpackEpisodes():
+    conn = sqlite3.connect(dbPath)
+    c = conn.cursor()
+
+    cursor = c.execute('SELECT * FROM stray_episodes WHERE verified = 1 AND moved = 0')
+    episodeList = []
+    for row in c.fetchall():
+        columnNames = [description[0] for description in cursor.description]
+
+        episodeDict = dict.fromkeys(columnNames)
+
+        for i, key in enumerate(episodeDict.keys()):
+            episodeDict[key] = row[i]
+
+        episodeList.append(episodeDict)
+
+    conn.close()
+
+    return episodeList
+
+def createFolders(episode):
+    showDir = '/media/hdd1/tv/%s/'% episode['name']
+    episodeFormat = '%s S%sE%s/'% (episode['name'], episode['season'], episode['episode'])
+    seasonFormat = '%s Season %s/'% (episode['name'], episode['season'])
+
+    if not os.path.isdir(showDir + seasonFormat):
+        os.makedirs(showDir + seasonFormat)
+
+    if not os.path.isdir(showDir + seasonFormat + episodeFormat):
+        os.makedirs(showDir + seasonFormat + episodeFormat)
+
 
 def moveFiles(episode):
+    # TODO All this should be imported from config file
     showDir = '/media/hdd1/tv/'
     episodeFormat = '%s S%sE%s/'% (episode['name'], episode['season'], episode['episode'])
     seasonFormat = '%s/%s Season %s/'% (episode['name'], episode['name'], episode['season'])
 
+    # TODO All this is pretty ballsy to do this hard/strict.
     newMediaitems = newnameMediaitems(episode['media_items'])
     for item in newMediaitems:
         old_location = showDir + episode['original'] + '/' + item[0]
@@ -103,6 +104,7 @@ def moveFiles(episode):
     for trash in json.loads(episode['trash']):
         os.remove(showDir + episode['original'] + '/'+ trash)
 
+    # TODO Maybe move to a delete folder instead, then the user can dump it.
     os.rmdir(showDir + episode['original'])
 
     updateMovedStatus(episode)
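`createFolders()` and `moveFiles()` above still hard-code `/media/hdd1/tv/`, and the new TODO says these values should come from a config file. A sketch of what that could look like with the standard-library `configparser`; the `seasoned.ini` file name, the `[paths]` section and the default database filename are assumptions, not something this patch defines:

```python
import configparser

# Fallbacks mirror the values currently hard-coded in folderCreator.py;
# the database filename below is a placeholder, not the project's real path.
DEFAULTS = {
    'show_dir': '/media/hdd1/tv/',
    'db_path': 'stray_episodes.db',
}

def load_paths(config_file='seasoned.ini'):
    """Read show directory and database path from an ini file, falling back
    to the hard-coded defaults when the file or a key is missing."""
    config = configparser.ConfigParser()
    config.read(config_file)  # a missing file is silently skipped
    if not config.has_section('paths'):
        return dict(DEFAULTS)
    return {key: config.get('paths', key, fallback=default)
            for key, default in DEFAULTS.items()}

# seasoned.ini would then look like:
#   [paths]
#   show_dir = /media/hdd1/tv/
#   db_path = /home/seasoned/episodes.db
```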
diff --git a/modules/classedStray.py b/modules/classedStray.py
new file mode 100755
index 0000000..2bcbda7
--- /dev/null
+++ b/modules/classedStray.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# @Author: KevinMidboe
+# @Date:   2017-04-05 18:40:11
+# @Last Modified by:   KevinMidboe
+# @Last Modified time: 2017-04-05 18:51:32
+import os, hashlib
+from functools import reduce
+import time, glob
+
+dirHash = None
+
+def directoryChecksum():
+    dirList = os.listdir('/Volumes/media/tv')
+    concat = reduce(lambda x, y: x + y, dirList, "")
+
+    m = hashlib.md5()
+    m.update(bytes(concat, 'utf-16be'))
+    return m.digest()
+
+def blober():
+    for filename in glob.iglob('/Volumes/media/tv/*'):
+        pass
+
+def main():
+    start_time = time.time()
+    if dirHash is None:
+        blober()
+    print("--- %s seconds ---" % (time.time() - start_time))
+
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/modules/createPasteee.py b/modules/createPasteee.py
new file mode 100755
index 0000000..be5341a
--- /dev/null
+++ b/modules/createPasteee.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# @Author: KevinMidboe
+# @Date:   2017-02-23 21:41:40
+# @Last Modified by:   KevinMidboe
+# @Last Modified time: 2017-03-05 19:35:10
+
+from pasteee import Paste
+
+def createPasteee():
+    paste = Paste('Test pastee', views=10)
+    print(paste)
+    print(paste['raw'])
+
+if __name__ == '__main__':
+    createPasteee()
\ No newline at end of file
diff --git a/modules/dirHash.py b/modules/dirHash.py
new file mode 100755
index 0000000..98a69c9
--- /dev/null
+++ b/modules/dirHash.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# @Author: KevinMidboe
+# @Date:   2017-04-05 15:24:17
+# @Last Modified by:   KevinMidboe
+# @Last Modified time: 2017-04-05 18:22:13
+import os, hashlib
+from functools import reduce
+
+hashDir = '/Volumes/media/tv'
+
+def main():
+    dirList = os.listdir(hashDir)
+    concat = reduce(lambda x, y: x + y, dirList, "")
+
+    m = hashlib.md5()
+    m.update(bytes(concat, 'utf-16be'))
+    return m.digest()
+
+if __name__ == '__main__':
+    print(main())
+
+# TODO The hash value should be saved in a global manner
\ No newline at end of file
diff --git a/modules/subLangfinder.py b/modules/subLangfinder.py
new file mode 100755
index 0000000..df3bf15
--- /dev/null
+++ b/modules/subLangfinder.py
@@ -0,0 +1,12 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+from langdetect import detect
+
+def main():
+    f = open('/Volumes/media/movies/The Man from UNCLE (2015)/The.Man.from.U.N.C.L.E.2015.1080p.nl.srt', 'r', encoding = "ISO-8859-15")
+    print(detect(f.read()))
+    f.close()
+    print(f.close())
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/tweetNewEpisodes.py b/tweetNewEpisodes.py
index 26f6d46..790eddf 100755
--- a/tweetNewEpisodes.py
+++ b/tweetNewEpisodes.py
@@ -73,24 +73,11 @@ def tweetEpisode(episode):
     updateTweetID(episode, tweet_id)
 
-def lookForNewEpisodes():
-    conn = sqlite3.connect(dbPath)
-    c = conn.cursor()
-
-    c.execute('SELECT * FROM stray_episodes WHERE tweet_id is NULL')
-
-    for row in c.fetchall():
-        episode = unpackEpisode(row)
-        tweetEpisode(episode)
-
-    conn.close()
-
-
 def getLastTweets(user, count=1):
     return api.user_timeline(screen_name=user,count=count)
 
 
 def verifyByID(id, reponse_id):
     conn = sqlite3.connect(dbPath)
     c = conn.cursor()
@@ -99,6 +86,7 @@ def verifyByID(id, reponse_id):
     conn.commit()
     conn.close()
 
+# TODO Add more parsing than confirm
 def parseReply(tweet):
     if b'\xf0\x9f\x91\x8d' in tweet.text.encode('utf-8'):
         print('Verified!')
@@ -119,7 +107,21 @@ def getReply(tweet):
         parseReply(tweet)
 
     conn.close()
-
+
+
+
+def lookForNewEpisodes():
+    conn = sqlite3.connect(dbPath)
+    c = conn.cursor()
+
+    c.execute('SELECT * FROM stray_episodes WHERE tweet_id is NULL')
+
+    for row in c.fetchall():
+        episode = unpackEpisode(row)
+        tweetEpisode(episode)
+
+    conn.close()
+
 def checkForReply():
     for tweet in getLastTweets('KevinMidboe', 10):
         if tweet.in_reply_to_status_id_str != None:
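The `# TODO Add more parsing than confirm` comment above marks that `parseReply` only understands the 👍 emoji. As a sketch of where that could go, here is one possible reply grammar (👎 to reject, `season=<n>` / `episode=<n>` to correct a field); the grammar itself is an assumption, nothing in this diff defines it:

```python
import re

THUMBS_UP = '\U0001F44D'    # the same character parseReply already checks for
THUMBS_DOWN = '\U0001F44E'  # assumed rejection marker

def parse_reply_text(text):
    """Classify a reply as ('verified', {}), ('rejected', {}),
    ('update', corrections) or ('ignored', {})."""
    if THUMBS_UP in text:
        return 'verified', {}
    if THUMBS_DOWN in text:
        return 'rejected', {}

    corrections = {}
    for field in ('season', 'episode'):
        match = re.search(field + r'=([0-9]{1,2})', text)
        if match:
            corrections[field] = match.group(1)
    return ('update', corrections) if corrections else ('ignored', {})

# Example:
#   parse_reply_text('season=2 episode=5') -> ('update', {'season': '2', 'episode': '5'})
#   parse_reply_text('\U0001F44D')         -> ('verified', {})
```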