the test didn't load our Gzip opener
this was blocking the Vimeo test + some more gentest fixes

pull/580/head^2
parent 59ce201915
commit 1ca63e3ae3
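Note on the fix: the "Gzip opener" is youtube-dl's YoutubeDLHandler, which asks for gzip-compressed responses and decompresses them transparently; this commit installs it (together with the cookie and proxy handlers) into urllib's global opener inside the test setup, mirroring what __init__ already does. Below is a minimal standalone sketch of that pattern using only the Python 3 standard library rather than the project's compat_* wrappers; GzipHandler and everything inside it is illustrative, not youtube-dl's actual class.

# Hedged sketch, not youtube-dl's YoutubeDLHandler: a gzip-aware handler
# installed into urllib's global opener, so later urlopen() calls get
# transparent decompression -- the behaviour the tests were missing.
import gzip
import io
import urllib.request


class GzipHandler(urllib.request.BaseHandler):
    """Request gzip and decompress gzip-encoded HTTP responses."""

    def http_request(self, req):
        req.add_header('Accept-Encoding', 'gzip')
        return req

    def http_response(self, req, resp):
        if resp.headers.get('Content-Encoding') == 'gzip':
            data = gzip.decompress(resp.read())
            new_resp = urllib.request.addinfourl(
                io.BytesIO(data), resp.headers, req.full_url, resp.status)
            new_resp.msg = resp.msg
            return new_resp
        return resp

    https_request = http_request
    https_response = http_response


opener = urllib.request.build_opener(GzipHandler())
urllib.request.install_opener(opener)  # every later urlopen() call now uses it

install_opener() swaps the process-wide default opener, which is why a test module that skips this step silently falls back to a plain, non-gzip-aware urlopen().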
@@ -8,4 +8,4 @@ notifications:
 #command to install the setup
 install:
 # command to run tests
-script: nosetests test --nocapture
+script: nosetests test --verbose

@@ -23,13 +23,28 @@ import os
 import json
 import unittest
 import sys
+import socket
 
 # Allow direct execution
 import os
 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-from youtube_dl.FileDownloader import FileDownloader
+import youtube_dl.FileDownloader
 import youtube_dl.InfoExtractors
+from youtube_dl.utils import *
 
+# General configuration (from __init__, not very elegant...)
+jar = compat_cookiejar.CookieJar()
+cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
+proxy_handler = compat_urllib_request.ProxyHandler()
+opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
+compat_urllib_request.install_opener(opener)
+socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
+
+class FileDownloader(youtube_dl.FileDownloader):
+    def __init__(self, *args, **kwargs):
+        youtube_dl.FileDownloader.__init__(self, *args, **kwargs)
+        self.to_stderr = self.to_screen
+
 def _file_md5(fn):
     with open(fn, 'rb') as f:
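The hunk above copies the opener setup from __init__ (as its own comment says) and shadows youtube_dl.FileDownloader with a small test subclass that routes to_stderr through to_screen, so error output stays visible under nose. Roughly how a generated test drives it, as an illustrative sketch only: the params dict, the output template and the placeholder URL are assumptions, not taken from this diff.

# Illustrative sketch, not code generated by gentests.py verbatim.
import youtube_dl.InfoExtractors

VIDEO_URL = 'http://vimeo.com/...'  # placeholder; the real tests carry a concrete URL

fd = FileDownloader({'outtmpl': '%(id)s.%(ext)s'})  # the test subclass defined above
fd.add_info_extractor(youtube_dl.InfoExtractors.VimeoIE())
fd.download([VIDEO_URL])  # goes through the gzip-aware opener installed above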
@@ -76,12 +91,12 @@ def gentests():
     with io.open(TEST_FILE, 'w', encoding='utf-8') as testf:
         testf.write(HEADER)
         spaces = ' ' * 4
-        write = lambda l: testf.write(spaces + l + '\n')
+        write = lambda l: testf.write(spaces + l + u'\n')
 
         for d in defs:
             name = d['name']
             ie = getattr(youtube_dl.InfoExtractors, name + 'IE')
-            testf.write('\n')
+            testf.write(u'\n')
             write('@_skip_unless(youtube_dl.InfoExtractors.' + name + 'IE._WORKING, "IE marked as not _WORKING")')
             if not d['file']:
                 write('@_skip("No output file specified")')

@@ -101,7 +116,7 @@ def gentests():
             write('    md5_for_file = _file_md5(filename)')
             write('    self.assertEqual(md5_for_file, ' + repr(d['md5']) + ')')
 
-        testf.write('\n\n')
+        testf.write(u'\n\n')
         write('def tearDown(self):')
         for d in defs:
             if d['file']:

@@ -109,7 +124,7 @@ def gentests():
                 write('    os.remove(' + repr(d['file']) + ')')
             else:
                 write('    # No file specified for ' + d['name'])
-        testf.write('\n')
+        testf.write(u'\n')
         testf.write(FOOTER)
 
 if __name__ == '__main__':
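The '\n' → u'\n' changes in the three hunks above are a Python 2 detail: testf comes from io.open(..., encoding='utf-8'), whose write() accepts only unicode, so appending u'\n' keeps the concatenated line unicode instead of a byte str. A minimal illustration, Python 2 semantics assumed (the file name is made up):

# -*- coding: utf-8 -*-
# Python 2 behaviour assumed: a file opened via io.open() with an encoding
# accepts unicode text only; writing a plain str raises TypeError.
import io

with io.open('generated_tests.py', 'w', encoding='utf-8') as testf:
    testf.write(u'# generated test\n')    # ok: unicode literal
    # testf.write('# generated test\n')   # Python 2: TypeError: must be unicode, not str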
@@ -1072,8 +1072,8 @@ class VimeoIE(InfoExtractor):
         self.report_extraction(video_id)
 
         # Extract the config JSON
-        config = webpage.split(' = {config:')[1].split(',assets:')[0]
         try:
+            config = webpage.split(' = {config:')[1].split(',assets:')[0]
             config = json.loads(config)
         except:
             self._downloader.trouble(u'ERROR: unable to extract info section')
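In the Vimeo hunk, the brittle string split moves inside the try block, so that when Vimeo changes its page layout the resulting IndexError is caught and reported via self._downloader.trouble() instead of crashing the extractor. The same pattern in isolation, as a simplified sketch rather than VimeoIE's real method (it narrows the bare except to the exceptions the split and parse actually raise):

# Simplified sketch of the error-handling pattern above, not VimeoIE itself.
import json

def extract_vimeo_config(webpage):
    try:
        # Both the split and the JSON parse can fail on a changed page layout,
        # so both live inside the same try block.
        config = webpage.split(' = {config:')[1].split(',assets:')[0]
        return json.loads(config)
    except (IndexError, ValueError):
        return None  # caller reports: unable to extract info section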