Compare commits
770 Commits
feature/re
...
redesign
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
d4ed4791bf | ||
|
|
adb744a526 | ||
|
|
0f82cda811 | ||
|
|
0d6c3c8ecb | ||
|
|
6598f53fd4 | ||
|
|
6b8458d87f | ||
|
|
99a0621238 | ||
|
|
c52666309a | ||
|
|
84a458d40b | ||
|
|
f8631c6d53 | ||
|
|
b19b0775c7 | ||
|
|
2dc1c1dd38 | ||
|
|
7db8b233c8 | ||
|
|
427c77a9ef | ||
|
|
94c3969f10 | ||
|
|
debd1855dd | ||
|
|
9f77597c11 | ||
|
|
afc9039625 | ||
|
|
920d3cb44e | ||
|
|
b1fc8ad862 | ||
|
|
11b9bc39ab | ||
|
|
6dcb3f3bf2 | ||
|
|
ce768f45c5 | ||
|
|
9b91d1d6c0 | ||
|
|
d9c7a97604 | ||
|
|
0fd01aa697 | ||
|
|
58615e6f9b | ||
|
|
2277322e57 | ||
|
|
18020e609e | ||
|
|
6a31b920ac | ||
|
|
c1266a36e4 | ||
|
|
578effc538 | ||
|
|
d881120013 | ||
|
|
da5318033a | ||
|
|
31df5bce01 | ||
|
|
d5622b7cba | ||
|
|
26ad1b354f | ||
|
|
7a616a81f7 | ||
|
|
275aefc3cc | ||
|
|
2b32490f72 | ||
|
|
7b9043c16b | ||
|
|
cf83f99be0 | ||
|
|
fb8a66d207 | ||
|
|
e8a3645bc6 | ||
|
|
592e40993c | ||
|
|
b00e69e222 | ||
|
|
c9b4c8167f | ||
|
|
cdb9cfe756 | ||
|
|
e52f50b204 | ||
|
|
770c2be14c | ||
|
|
ab61961a64 | ||
|
|
6aca799bbb | ||
|
|
89836be1d1 | ||
|
|
20e1283627 | ||
|
|
ee8406e026 | ||
|
|
514941b785 | ||
|
|
1510e37652 | ||
|
|
e1e39cd3f4 | ||
|
|
e1bb8c5419 | ||
|
|
17fa33a496 | ||
|
|
601f0b54cf | ||
|
|
51d44bfc3e | ||
|
|
12148217a2 | ||
|
|
132fa12ef4 | ||
|
|
1827c2e4cd | ||
|
|
f423bca06b | ||
|
|
e7b089edf5 | ||
|
|
b8b7d94a6a | ||
|
|
2c080fec3d | ||
|
|
4c68566c77 | ||
|
|
a3af784c18 | ||
|
|
ac6f295c93 | ||
|
|
2c72cd7d9f | ||
|
|
d012dc5c85 | ||
|
|
038b4c63ee | ||
|
|
17e37996c4 | ||
|
|
9318e19347 | ||
|
|
045c8f4dc8 | ||
|
|
02e25a9e25 | ||
|
|
819f619297 | ||
|
|
c303789817 | ||
|
|
8f4e03d04b | ||
|
|
229d67c086 | ||
|
|
d84897ff33 | ||
|
|
387a711538 | ||
|
|
7a1b914824 | ||
|
|
5e62801666 | ||
|
|
00d887153f | ||
|
|
1a2d79f719 | ||
|
|
6d5882001a | ||
|
|
4a6b45c65c | ||
|
|
b0d1fe5c33 | ||
|
|
a6e49098c8 | ||
|
|
ffcd36cbf4 | ||
|
|
3bf2d844a0 | ||
|
|
dd24eb8893 | ||
|
|
ac382d5131 | ||
|
|
abc9e78027 | ||
|
|
538f51dd5b | ||
|
|
c94d79cc6c | ||
|
|
9883a7a85a | ||
|
|
eea9f40501 | ||
|
|
576bcb9f4b | ||
|
|
f4a486c47b | ||
|
|
80cf144e8b | ||
|
|
cf5a774313 | ||
|
|
b9b77042dc | ||
|
|
9e96aa14b7 | ||
|
|
6a0220b496 | ||
|
|
02ff0acc64 | ||
|
|
ae6affdb52 | ||
|
|
a08df704be | ||
|
|
af9a47d528 | ||
|
|
62c5365329 | ||
|
|
ddf575a86e | ||
|
|
0155c8de2d | ||
|
|
6b9383ce92 | ||
|
|
cb8d24ef1f | ||
|
|
5bfdb121df | ||
|
|
814ddfb79f | ||
|
|
766f819c0b | ||
|
|
b8b6024592 | ||
|
|
d77cfb3e69 | ||
|
|
858d8b4291 | ||
|
|
3852fc720d | ||
|
|
5145618c39 | ||
|
|
d6cfcae45b | ||
|
|
5609536f46 | ||
|
|
f992c00eb7 | ||
|
|
87086a0336 | ||
|
|
62cb57f217 | ||
|
|
2a0e46fe00 | ||
|
|
1f7555e8fd | ||
|
|
ff43df9ef1 | ||
|
|
2e907e93e7 | ||
|
|
4d329d6a36 | ||
|
|
752191bc23 | ||
|
|
1d73fd9d7e | ||
|
|
79688c412a | ||
|
|
fc1c95fefb | ||
|
|
6a174716af | ||
|
|
defe256f1b | ||
|
|
8a5f154d9e | ||
|
|
fe56a69e8f | ||
|
|
c6d326f973 | ||
|
|
9e5f670feb | ||
|
|
9ebacf8816 | ||
|
|
df2d7ec9c2 | ||
|
|
ddab74582b | ||
|
|
2801079bc8 | ||
|
|
1deb49b524 | ||
|
|
49d550f652 | ||
|
|
1a43ce6ecc | ||
|
|
15a0131587 | ||
|
|
0dca34958c | ||
|
|
4b231e36ea | ||
|
|
52478a00db | ||
|
|
e177766270 | ||
|
|
ff8da7c8f8 | ||
|
|
89c8c5a0c7 | ||
|
|
38c6266f9c | ||
|
|
16f8e7e123 | ||
|
|
7110c7a11f | ||
|
|
6d79f316a6 | ||
|
|
c1b6811b8a | ||
|
|
7d7b76b2e9 | ||
|
|
657aa52fa7 | ||
|
|
8e9ef8db39 | ||
|
|
92a0096b54 | ||
|
|
87338760ad | ||
|
|
28019b0a09 | ||
|
|
248b007f4a | ||
|
|
9e31c59de8 | ||
|
|
269e785888 | ||
|
|
3669aef42d | ||
|
|
1087eb3a06 | ||
|
|
43af80a137 | ||
|
|
0766a27a71 | ||
|
|
a12f049d14 | ||
|
|
6afe2fd9cf | ||
|
|
61f634a21e | ||
|
|
02b6659235 | ||
|
|
dacc3d8f47 | ||
|
|
4f140bb1ac | ||
|
|
3dffaa7075 | ||
|
|
d626fda710 | ||
|
|
51c8de0fc3 | ||
|
|
4f23ccc284 | ||
|
|
a6ff34a47f | ||
|
|
b40d1f3463 | ||
|
|
f1a2d960bc | ||
|
|
4e7069e0c6 | ||
|
|
477a47e45e | ||
|
|
a3264240ab | ||
|
|
1030d0d748 | ||
|
|
f9d9fffedb | ||
|
|
6b4e9a3fac | ||
|
|
6787289846 | ||
|
|
d31a2e2768 | ||
|
|
c992680209 | ||
|
|
65f0dc25d2 | ||
|
|
b616af3a83 | ||
|
|
ca13107330 | ||
|
|
c7ce18f8c2 | ||
|
|
b6f288a522 | ||
|
|
cb48ca03df | ||
|
|
7b6641d709 | ||
|
|
3c12a2c4bf | ||
|
|
259e2bc61c | ||
|
|
9f6e4cc2fa | ||
|
|
a763957334 | ||
|
|
06293dc0a2 | ||
|
|
38a5d967dd | ||
|
|
4cdb9bc81d | ||
|
|
2104cb2839 | ||
|
|
d4a4bd40a8 | ||
|
|
ba47d7eea7 | ||
|
|
c9638ec3fa | ||
|
|
14d636d098 | ||
|
|
e1d4df7937 | ||
|
|
e08d06ba31 | ||
|
|
984ee7580d | ||
|
|
ab118ea580 | ||
|
|
f897eebb41 | ||
|
|
755873c5e7 | ||
|
|
dbc254efbe | ||
|
|
9de8ed2dee | ||
|
|
230b7f47cc | ||
|
|
58878d8a0f | ||
|
|
d9bb1bfbfb | ||
|
|
f8674f9baa | ||
|
|
20f1076037 | ||
|
|
e84f2aa04c | ||
|
|
01f70051f8 | ||
|
|
492f69b149 | ||
|
|
2270b2a28b | ||
|
|
b5a0418a36 | ||
|
|
e595722139 | ||
|
|
78ba855c68 | ||
|
|
158f638fb9 | ||
|
|
2e52c8124a | ||
|
|
5bea9dd04f | ||
|
|
910393d00e | ||
|
|
4b66b0ea07 | ||
|
|
543226450c | ||
|
|
b9dbadda0b | ||
|
|
7cb214d8a2 | ||
|
|
f6d4ddbe80 | ||
|
|
faefd7a5b5 | ||
|
|
8f02b0eea0 | ||
|
|
39d0f91de2 | ||
|
|
b3d75cb485 | ||
|
|
17b940a271 | ||
|
|
3338b72d1f | ||
|
|
70ca31a265 | ||
|
|
d7f43c2cf8 | ||
|
|
b1f88c1c48 | ||
|
|
6fa6d530ec | ||
|
|
11e7fb23ca | ||
|
|
da9d2b5ed8 | ||
|
|
2599bac1a4 | ||
|
|
0bae509311 | ||
|
|
2fa7834e6e | ||
|
|
2deb6ee6a7 | ||
|
|
0d166025d0 | ||
|
|
7861416dc5 | ||
|
|
2639c5e9ad | ||
|
|
8de5fcdac6 | ||
|
|
4aa9801be4 | ||
|
|
c4db4ace13 | ||
|
|
db367a80d1 | ||
|
|
3093b21555 | ||
|
|
3e58378490 | ||
|
|
2c40db3074 | ||
|
|
fba228fd9d | ||
|
|
ef2b8e88b4 | ||
|
|
9b62e32da8 | ||
|
|
a0b3ee8186 | ||
|
|
d70da1edce | ||
|
|
7c674b3aab | ||
|
|
98540f2fcd | ||
|
|
2f0e197320 | ||
|
|
db49585818 | ||
|
|
160bc1a5c4 | ||
|
|
8e23b02653 | ||
|
|
41e69aeac3 | ||
|
|
be30200a18 | ||
|
|
387650d040 | ||
|
|
052d64eb39 | ||
|
|
a3a8a820fe | ||
|
|
1b724b5606 | ||
|
|
5fc9d7182c | ||
|
|
c948216e33 | ||
|
|
c77b270fa8 | ||
|
|
035b99bc8a | ||
|
|
f74b837faa | ||
|
|
4c198f7116 | ||
|
|
76322c0145 | ||
|
|
12150c5efc | ||
|
|
4a9452672a | ||
|
|
f7eeaf3eda | ||
|
|
002ce4d4e1 | ||
|
|
80df57f2b6 | ||
|
|
0358378cae | ||
|
|
fa054b6b34 | ||
|
|
4b9e226cc6 | ||
|
|
ca24bf031c | ||
|
|
872a4f4650 | ||
|
|
af8806e292 | ||
|
|
4f646094b5 | ||
|
|
6e8503cfc5 | ||
|
|
4879bc6251 | ||
|
|
ab253f9030 | ||
|
|
bc6d197004 | ||
|
|
1de457fa8d | ||
|
|
9e564c49b3 | ||
|
|
50a150f570 | ||
|
|
8d55b0c92a | ||
|
|
5a2df62462 | ||
|
|
9d21dd9196 | ||
|
|
3b34196901 | ||
|
|
bad26026ae | ||
|
|
6e455e62d5 | ||
|
|
c97bd38c83 | ||
|
|
356322c5b1 | ||
|
|
9dbb477dd8 | ||
|
|
089609d5d2 | ||
|
|
487ddf1c25 | ||
|
|
83b4c17969 | ||
|
|
08c381cf0d | ||
|
|
286f14a6d2 | ||
|
|
0b14fe5454 | ||
|
|
c5a0d521d1 | ||
|
|
4a1f70da09 | ||
|
|
87e97cd8a5 | ||
|
|
e0dffe20a4 | ||
|
|
73d37584ad | ||
|
|
5fd3e86624 | ||
|
|
d0f1e7c6a3 | ||
|
|
53e7e383a3 | ||
|
|
c06e1f3135 | ||
|
|
b0ff526c95 | ||
|
|
3cfe90d581 | ||
|
|
1d60d9caf1 | ||
|
|
8e0d1520e8 | ||
|
|
b07f91d6a5 | ||
|
|
43af091b02 | ||
|
|
5f0543ba42 | ||
|
|
ef8cd1aa40 | ||
|
|
e01fe51b9e | ||
|
|
afa782194d | ||
|
|
77e602f359 | ||
|
|
a6063b0665 | ||
|
|
9a7e4ea500 | ||
|
|
1daedb7259 | ||
|
|
8e82e976f1 | ||
|
|
8b445ac9f9 | ||
|
|
91c24105cc | ||
|
|
13df26851e | ||
|
|
ca58d25785 | ||
|
|
42d728f71e | ||
|
|
659960899e | ||
|
|
d40b052cbc | ||
|
|
282f6fb73a | ||
|
|
416c9eabde | ||
|
|
b4a15f344d | ||
|
|
c545c9aab1 | ||
|
|
bb73cb8eec | ||
|
|
c0492a41d9 | ||
|
|
cfd92b8268 | ||
|
|
436883a96d | ||
|
|
c381b719b1 | ||
|
|
81d4d9a4e2 | ||
|
|
e2df3a4dfd | ||
|
|
7df92f2882 | ||
|
|
072b6d09fa | ||
|
|
3869e350bf | ||
|
|
058846f54f | ||
|
|
cd836f3660 | ||
|
|
d75f58f5ec | ||
|
|
f2b0d3f80b | ||
|
|
a366d57278 | ||
|
|
a821d85bf2 | ||
|
|
a1ce3e0d6b | ||
|
|
e7be5c7809 | ||
|
|
5acab98025 | ||
|
|
ed6a46e9c0 | ||
|
|
89f3b6624e | ||
|
|
3546f29caf | ||
|
|
e3414fe91f | ||
|
|
bdadd00d93 | ||
|
|
dd7de31e9f | ||
|
|
6897dab647 | ||
|
|
accf19bb26 | ||
|
|
4126007cac | ||
|
|
9f12fe2636 | ||
|
|
9fb348f3a4 | ||
|
|
e749d132cd | ||
|
|
bed9458604 | ||
|
|
7984ee9fcf | ||
|
|
69e3e36fae | ||
|
|
456563eab0 | ||
|
|
7b6fa4f0e5 | ||
|
|
cd1dc39ef2 | ||
|
|
0771aeac3b | ||
|
|
cd0afd20e5 | ||
|
|
324920cd8c | ||
|
|
12cda35494 | ||
|
|
4291e2233d | ||
|
|
6ccbad031f | ||
|
|
d1dfed2833 | ||
|
|
1c1af9f90c | ||
|
|
687221f035 | ||
|
|
a99d52392f | ||
|
|
bd6690b159 | ||
|
|
b13df16b53 | ||
|
|
06f49be090 | ||
|
|
0b48ad5084 | ||
|
|
32ce93d2e9 | ||
|
|
e0479e79bd | ||
|
|
04e22b3966 | ||
|
|
3986de4ebc | ||
|
|
40a5ce087b | ||
|
|
330e15bbcb | ||
|
|
d201d9fff9 | ||
|
|
f765794c99 | ||
|
|
34320e617d | ||
|
|
169ddeef5d | ||
|
|
33ad4c22c7 | ||
|
|
265f90fe69 | ||
|
|
099b72ed27 | ||
|
|
d20c0ee37e | ||
|
|
f6030a333a | ||
|
|
4cbc089de2 | ||
|
|
c45c04659f | ||
|
|
61a9037835 | ||
|
|
ad33c0bcca | ||
|
|
7afc524a9f | ||
|
|
c5a4bc9a1b | ||
|
|
1c0178dbaf | ||
|
|
dbf7feca3e | ||
|
|
d92de8ec4e | ||
|
|
8347da5a58 | ||
|
|
59e248d7de | ||
|
|
12e556e1d1 | ||
|
|
14d3ab93da | ||
|
|
e27ece512f | ||
|
|
b88d8efc8d | ||
|
|
9ec4c2837e | ||
|
|
ffc3fc9ec4 | ||
|
|
a566b4f428 | ||
|
|
69819460f3 | ||
|
|
24a8cb41fe | ||
|
|
1de0443492 | ||
|
|
bb19b380b4 | ||
|
|
b6b936ddf3 | ||
|
|
b00b6acba8 | ||
|
|
3941076c06 | ||
|
|
7401201af2 | ||
|
|
5c586fbf30 | ||
|
|
5c891b7e8e | ||
|
|
5425fcae9e | ||
|
|
4008cce12f | ||
|
|
d227105527 | ||
|
|
508649e6b6 | ||
|
|
b4e25d4345 | ||
|
|
733f925c75 | ||
|
|
40e910192e | ||
|
|
424a3cd892 | ||
|
|
9f6036c8d6 | ||
|
|
5af5749d4a | ||
|
|
f01449f14c | ||
|
|
03dff14ee9 | ||
|
|
e55302592a | ||
|
|
dbeaab052d | ||
|
|
9f07dd5a21 | ||
|
|
b933cd8718 | ||
|
|
8d85dde2c6 | ||
|
|
eaaa8dc834 | ||
|
|
5350dbf0ce | ||
|
|
28ffad10ab | ||
|
|
a37517bf6a | ||
|
|
fab9b96c8e | ||
|
|
50d6882a98 | ||
|
|
94064ac7da | ||
|
|
1c5f19a68a | ||
|
|
a26abd0dbb | ||
|
|
fb9080c18a | ||
|
|
15980471b0 | ||
|
|
b11bb9cdac | ||
|
|
474cd45fc5 | ||
|
|
0b6843a1b9 | ||
|
|
fdcdf07fa6 | ||
|
|
5617953d39 | ||
|
|
964144996f | ||
|
|
37214dd413 | ||
|
|
5a08fed0b6 | ||
|
|
443866ef04 | ||
|
|
96275adaff | ||
|
|
33884deb6c | ||
|
|
7db291fc93 | ||
|
|
9df14bd55a | ||
|
|
1e183625c9 | ||
|
|
643be19711 | ||
|
|
21a1770f3f | ||
|
|
07063d855a | ||
|
|
cf95e417f1 | ||
|
|
3f92ed0ea0 | ||
|
|
578b74f2c0 | ||
|
|
8e17b9aea5 | ||
|
|
6f766aae8c | ||
|
|
5797348bb3 | ||
|
|
57ca5067ff | ||
|
|
e8ff8a41de | ||
|
|
0b5dfe826a | ||
|
|
67fbcc8238 | ||
|
|
dd61c7dc21 | ||
|
|
3786b5435f | ||
|
|
1857e047b0 | ||
|
|
648ac7793f | ||
|
|
664ce6421f | ||
|
|
cfb77a1076 | ||
|
|
f65ddbbb9e | ||
|
|
76126271fc | ||
|
|
3faece0b4c | ||
|
|
530d3cd91e | ||
|
|
e659aba176 | ||
|
|
a196a499ae | ||
|
|
58bd9cd7a1 | ||
|
|
9dd9f850c6 | ||
|
|
cbecb74307 | ||
|
|
8ae1e58614 | ||
|
|
83e8ae392d | ||
|
|
c0297f10cb | ||
|
|
41052ae508 | ||
|
|
2d243d51e4 | ||
|
|
fdec80f676 | ||
|
|
5d3b0deb4d | ||
|
|
f68c356944 | ||
|
|
553f8d6ccd | ||
|
|
60fb3e33ae | ||
|
|
9b7c1db509 | ||
|
|
963ce356fb | ||
|
|
dcd0364ecc | ||
|
|
a2da428777 | ||
|
|
876c602710 | ||
|
|
79cb716ced | ||
|
|
ba9c975335 | ||
|
|
ef407bcb3c | ||
|
|
2898a066fe | ||
|
|
7950c4bdb4 | ||
|
|
2499012d88 | ||
|
|
7788669de1 | ||
|
|
d7f6fad3dd | ||
|
|
699c562d34 | ||
|
|
36d8225389 | ||
|
|
17ba9ee96b | ||
|
|
2769fc28d3 | ||
|
|
f5f3cfba50 | ||
|
|
1b1c77d225 | ||
|
|
cfc49e286b | ||
|
|
a2b3677c59 | ||
|
|
e5cfafdb00 | ||
|
|
bff05925e8 | ||
|
|
05f4b2b8ce | ||
|
|
2eac294643 | ||
|
|
f6789f79ea | ||
|
|
0b5976bdb1 | ||
|
|
7d2b2b9809 | ||
|
|
cce92dc1f8 | ||
|
|
fa7e59e842 | ||
|
|
8635f0ddb2 | ||
|
|
c90a423012 | ||
|
|
f0daee669b | ||
|
|
d252b660f5 | ||
|
|
e717a49c0c | ||
|
|
426155e65c | ||
|
|
6b9b446e3d | ||
|
|
ab2b2cfe6e | ||
|
|
4b236c6ed6 | ||
|
|
2396fadf04 | ||
|
|
a3bffb5867 | ||
|
|
1b44fc40af | ||
|
|
b894139ca1 | ||
|
|
daa0662869 | ||
|
|
81de9529c3 | ||
|
|
6b06caf00d | ||
|
|
9370366112 | ||
|
|
32bcf6e615 | ||
|
|
aa804471a7 | ||
|
|
681d8b1ddc | ||
|
|
c82b1f51e3 | ||
|
|
6d048e0003 | ||
|
|
0314910bbe | ||
|
|
3bd831782c | ||
|
|
40f01dca6f | ||
|
|
8dead66b58 | ||
|
|
18807191c0 | ||
|
|
9d9630a27a | ||
|
|
8ac851555d | ||
|
|
27f331a1fc | ||
|
|
e6b4d32506 | ||
|
|
a28ee58a1f | ||
|
|
47749c2d73 | ||
|
|
d6d0ff724a | ||
|
|
ba65700aad | ||
|
|
84a7cfe07d | ||
|
|
9ccd4a5e84 | ||
|
|
616434a00f | ||
|
|
4cf62f73da | ||
|
|
0145aecab4 | ||
|
|
6c4184d1f5 | ||
|
|
9d011b42a9 | ||
|
|
bf81b5cacc | ||
|
|
8d2b6e4097 | ||
|
|
50d8399f09 | ||
|
|
bc99b77dbe | ||
|
|
1c7edc9487 | ||
|
|
90c06fb3c9 | ||
|
|
10a04c16ba | ||
|
|
90a618bd7e | ||
|
|
b630b84ab0 | ||
|
|
a5ee362fc0 | ||
|
|
7c0870b6b8 | ||
|
|
a42264b280 | ||
|
|
e714604ec0 | ||
|
|
c094120f04 | ||
|
|
6691c8ddd7 | ||
|
|
013705c318 | ||
|
|
bda6f92a4d | ||
|
|
7ceb8dc79c | ||
|
|
7f48210c97 | ||
|
|
23c440cd58 | ||
|
|
0097167dec | ||
|
|
21e5f156bb | ||
|
|
08f55314d5 | ||
|
|
577bf09859 | ||
|
|
c446cd2fb0 | ||
|
|
d80fe99609 | ||
|
|
06a8414f12 | ||
|
|
43b6e3ac07 | ||
|
|
58acd53a9a | ||
|
|
1ac01456a9 | ||
|
|
b86853f06f | ||
|
|
311a2798dd | ||
|
|
fe9998fb9d | ||
|
|
ce648c5d35 | ||
|
|
5a2a9bbf9a | ||
|
|
0f8ab05fd4 | ||
|
|
b87c00c041 | ||
|
|
8999f51dc9 | ||
|
|
d5e19db5e6 | ||
|
|
675bee83ca | ||
|
|
33e5dd1fdb | ||
|
|
4ff2794c83 | ||
|
|
81f9302da1 | ||
|
|
93f4b8b537 | ||
|
|
0587d2f8db | ||
|
|
6ba25b5468 | ||
|
|
cc10969506 | ||
|
|
c2eb50a7ee | ||
|
|
33d24068fd | ||
|
|
3a4c191b11 | ||
|
|
e06b4ccb3f | ||
|
|
3c6b86ea28 | ||
|
|
c4a9a13d6c | ||
|
|
c0f1a3c603 | ||
|
|
9d3425061a | ||
|
|
c2dcd2f67d | ||
|
|
24b822aecd | ||
|
|
a7d3de766f | ||
|
|
b56c897e4b | ||
|
|
df14032107 | ||
|
|
66b4821f7f | ||
|
|
d301cde266 | ||
|
|
0590a0d722 | ||
|
|
fc71a03a12 | ||
|
|
923c794e39 | ||
|
|
e7fbff5b3f | ||
|
|
1bd556fbb3 | ||
|
|
18a870f8c3 | ||
|
|
3e2a2c3bee | ||
|
|
73e74881a6 | ||
|
|
b37112600e | ||
|
|
6172ce4960 | ||
|
|
3d277e1c01 | ||
|
|
b3b13899f1 | ||
|
|
7c4a59539a | ||
|
|
e6dfb3da16 | ||
|
|
8e220ededa | ||
|
|
11126f8083 | ||
|
|
ac8a13db22 | ||
|
|
5ab10ff97a | ||
|
|
f3b0346ba2 | ||
|
|
96c94f97f4 | ||
|
|
192c0200e5 | ||
|
|
03ae8f459c | ||
|
|
377fdd9e5e | ||
|
|
daec7d20fe | ||
|
|
66a149590b | ||
|
|
1b6f010df2 | ||
|
|
7e4bc29b59 | ||
|
|
0284fa9b0a | ||
|
|
e5bcea59b5 | ||
|
|
16f603ced2 | ||
|
|
bdcb3b7e33 | ||
|
|
0def6fcfe3 | ||
|
|
75a352fef3 | ||
|
|
07eb1f7f4c | ||
|
|
8e35c02763 | ||
|
|
c1f6d9a858 | ||
|
|
3e20a3bac7 | ||
|
|
818570fd2d | ||
|
|
05a97a19ab | ||
|
|
db23f5cdef | ||
|
|
bcd2d22fbf | ||
|
|
ffc99cd4f4 | ||
|
|
bb56750c1a | ||
|
|
b08d587a22 | ||
|
|
47f4132b39 | ||
|
|
faefab5554 | ||
|
|
243a033055 | ||
|
|
db1eeaae38 | ||
|
|
8c2960e891 | ||
|
|
d6a86e8616 | ||
|
|
5260f42378 | ||
|
|
84f28f3c54 | ||
|
|
860b6793fb | ||
|
|
df03409d7a | ||
|
|
6a81f2241d | ||
|
|
5ce817cee6 | ||
|
|
7cdf124f9d | ||
|
|
ff46aa0226 | ||
|
|
669e331f6c | ||
|
|
4179ba642b | ||
|
|
00954d98f7 | ||
|
|
037e77860b | ||
|
|
47e187449d | ||
|
|
06e9afbe69 | ||
|
|
bfe8aa5f5f | ||
|
|
e51ddd7a50 | ||
|
|
442552c024 | ||
|
|
ce4806df64 | ||
|
|
0c2e65c92b | ||
|
|
b01aa2b385 | ||
|
|
2e04890756 | ||
|
|
1657857b4a | ||
|
|
72383592ba | ||
|
|
d093f935f9 | ||
|
|
8cc7d101aa | ||
|
|
f39eebbd22 | ||
|
|
3ac8bc738a | ||
|
|
0eac041a26 | ||
|
|
ab0f5daaf3 | ||
|
|
b59a0f82ab | ||
|
|
9b75e6af5c | ||
|
|
aa37f2b0ef | ||
|
|
d22237a5cc | ||
|
|
26f5e8aa4b | ||
|
|
9072c6cae0 | ||
|
|
8739c1197f | ||
|
|
a477973862 | ||
|
|
95ce26d261 | ||
|
|
85163443e3 | ||
|
|
6ea49405f4 | ||
|
|
4776cef473 | ||
|
|
e8fe9da602 | ||
|
|
8c934c1ca8 | ||
|
|
349d7d4866 | ||
|
|
f1ea8fa693 |
2
.gitignore
vendored
2
.gitignore
vendored
@@ -3,3 +3,5 @@
|
||||
/_source/
|
||||
.project
|
||||
.pydevproject
|
||||
node_modules
|
||||
.tmp
|
||||
@@ -10,7 +10,6 @@ import socket
|
||||
import subprocess
|
||||
import sys
|
||||
import traceback
|
||||
import time
|
||||
|
||||
# Root path
|
||||
base_path = dirname(os.path.abspath(__file__))
|
||||
@@ -19,7 +18,12 @@ base_path = dirname(os.path.abspath(__file__))
|
||||
sys.path.insert(0, os.path.join(base_path, 'libs'))
|
||||
|
||||
from couchpotato.environment import Env
|
||||
from couchpotato.core.helpers.variable import getDataDir
|
||||
from couchpotato.core.helpers.variable import getDataDir, removePyc
|
||||
|
||||
|
||||
# Remove pyc files before dynamic load (sees .pyc files regular .py modules)
|
||||
removePyc(base_path)
|
||||
|
||||
|
||||
class Loader(object):
|
||||
|
||||
@@ -29,7 +33,7 @@ class Loader(object):
|
||||
|
||||
# Get options via arg
|
||||
from couchpotato.runner import getOptions
|
||||
self.options = getOptions(base_path, sys.argv[1:])
|
||||
self.options = getOptions(sys.argv[1:])
|
||||
|
||||
# Load settings
|
||||
settings = Env.get('settings')
|
||||
@@ -50,7 +54,7 @@ class Loader(object):
|
||||
# Create logging dir
|
||||
self.log_dir = os.path.join(self.data_dir, 'logs');
|
||||
if not os.path.isdir(self.log_dir):
|
||||
os.mkdir(self.log_dir)
|
||||
os.makedirs(self.log_dir)
|
||||
|
||||
# Logging
|
||||
from couchpotato.core.logger import CPLog
|
||||
@@ -67,10 +71,11 @@ class Loader(object):
|
||||
signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
|
||||
|
||||
from couchpotato.core.event import addEvent
|
||||
addEvent('app.after_shutdown', self.afterShutdown)
|
||||
addEvent('app.do_shutdown', self.setRestart)
|
||||
|
||||
def afterShutdown(self, restart):
|
||||
def setRestart(self, restart):
|
||||
self.do_restart = restart
|
||||
return True
|
||||
|
||||
def onExit(self, signal, frame):
|
||||
from couchpotato.core.event import fireEvent
|
||||
@@ -98,7 +103,6 @@ class Loader(object):
|
||||
|
||||
# Release log files and shutdown logger
|
||||
logging.shutdown()
|
||||
time.sleep(3)
|
||||
|
||||
args = [sys.executable] + [os.path.join(base_path, os.path.basename(__file__))] + sys.argv[1:]
|
||||
subprocess.Popen(args)
|
||||
|
||||
121
Gruntfile.js
Normal file
121
Gruntfile.js
Normal file
@@ -0,0 +1,121 @@
|
||||
'use strict';
|
||||
|
||||
module.exports = function(grunt){
|
||||
|
||||
require('time-grunt')(grunt);
|
||||
|
||||
// Configurable paths
|
||||
var config = {
|
||||
tmp: '.tmp',
|
||||
base: 'couchpotato',
|
||||
css_dest: 'couchpotato/static/style/combined.min.css'
|
||||
};
|
||||
|
||||
grunt.initConfig({
|
||||
|
||||
// Project settings
|
||||
config: config,
|
||||
|
||||
// Make sure code styles are up to par and there are no obvious mistakes
|
||||
jshint: {
|
||||
options: {
|
||||
reporter: require('jshint-stylish'),
|
||||
unused: false,
|
||||
camelcase: false,
|
||||
devel: true
|
||||
},
|
||||
all: [
|
||||
'<%= config.base %>/{,**/}*.js',
|
||||
'!<%= config.base %>/static/scripts/vendor/{,**/}*.js'
|
||||
]
|
||||
},
|
||||
|
||||
// Compiles Sass to CSS and generates necessary files if requested
|
||||
sass: {
|
||||
options: {
|
||||
compass: true,
|
||||
update: true
|
||||
},
|
||||
server: {
|
||||
files: [{
|
||||
expand: true,
|
||||
cwd: '<%= config.base %>/',
|
||||
src: ['**/*.scss'],
|
||||
dest: '<%= config.tmp %>/styles/',
|
||||
ext: '.css'
|
||||
}]
|
||||
}
|
||||
},
|
||||
|
||||
// Add vendor prefixed styles
|
||||
autoprefixer: {
|
||||
options: {
|
||||
browsers: ['> 1%', 'Android >= 2.1', 'Chrome >= 21', 'Explorer >= 7', 'Firefox >= 17', 'Opera >= 12.1', 'Safari >= 6.0']
|
||||
},
|
||||
dist: {
|
||||
files: [{
|
||||
expand: true,
|
||||
cwd: '<%= config.tmp %>/styles/',
|
||||
src: '{,**/}*.css',
|
||||
dest: '<%= config.tmp %>/styles/'
|
||||
}]
|
||||
}
|
||||
},
|
||||
|
||||
cssmin: {
|
||||
dist: {
|
||||
files: {
|
||||
'<%= config.css_dest %>': ['<%= config.tmp %>/styles/**/*.css']
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
shell: {
|
||||
runCouchPotato: {
|
||||
command: 'python CouchPotato.py'
|
||||
}
|
||||
},
|
||||
|
||||
// COOL TASKS ==============================================================
|
||||
watch: {
|
||||
scss: {
|
||||
files: ['<%= config.base %>/**/*.{scss,sass}'],
|
||||
tasks: ['sass:server', 'autoprefixer', 'cssmin']
|
||||
},
|
||||
js: {
|
||||
files: [
|
||||
'<%= config.base %>/**/*.js'
|
||||
],
|
||||
tasks: ['jshint']
|
||||
},
|
||||
livereload: {
|
||||
options: {
|
||||
livereload: 35729
|
||||
},
|
||||
files: [
|
||||
'<%= config.css_dest %>'
|
||||
]
|
||||
}
|
||||
},
|
||||
|
||||
concurrent: {
|
||||
options: {
|
||||
logConcurrentOutput: true
|
||||
},
|
||||
tasks: ['shell:runCouchPotato', 'sass:server', 'autoprefixer', 'cssmin', 'watch']
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
grunt.loadNpmTasks('grunt-contrib-jshint');
|
||||
//grunt.loadNpmTasks('grunt-contrib-uglify');
|
||||
grunt.loadNpmTasks('grunt-contrib-sass');
|
||||
grunt.loadNpmTasks('grunt-contrib-cssmin');
|
||||
grunt.loadNpmTasks('grunt-contrib-watch');
|
||||
grunt.loadNpmTasks('grunt-autoprefixer');
|
||||
grunt.loadNpmTasks('grunt-concurrent');
|
||||
grunt.loadNpmTasks('grunt-shell');
|
||||
|
||||
grunt.registerTask('default', ['concurrent']);
|
||||
|
||||
};
|
||||
27
README.md
27
README.md
@@ -17,9 +17,9 @@ Windows, see [the CP forum](http://couchpota.to/forum/showthread.php?tid=14) for
|
||||
* Open up `Git Bash` (or CMD) and go to the folder you want to install CP. Something like Program Files.
|
||||
* Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git`.
|
||||
* You can now start CP via `CouchPotatoServer\CouchPotato.py` to start
|
||||
* Your browser should open up, but if it doesn't go to: `http://localhost:5050/`
|
||||
* Your browser should open up, but if it doesn't go to `http://localhost:5050/`
|
||||
|
||||
OSx:
|
||||
OS X:
|
||||
|
||||
* If you're on Leopard (10.5) install Python 2.6+: [Python 2.6.5](http://www.python.org/download/releases/2.6.5/)
|
||||
* Install [GIT](http://git-scm.com/)
|
||||
@@ -27,20 +27,27 @@ OSx:
|
||||
* Go to your App folder `cd /Applications`
|
||||
* Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git`
|
||||
* Then do `python CouchPotatoServer/CouchPotato.py`
|
||||
* Your browser should open up, but if it doesn't go to: `http://localhost:5050/`
|
||||
* Your browser should open up, but if it doesn't go to `http://localhost:5050/`
|
||||
|
||||
Linux (ubuntu / debian):
|
||||
Linux:
|
||||
|
||||
* Install [GIT](http://git-scm.com/) with `apt-get install git-core`
|
||||
* (Ubuntu / Debian) Install [GIT](http://git-scm.com/) with `apt-get install git-core`
|
||||
* (Fedora / CentOS) Install [GIT](http://git-scm.com/) with `yum install git`
|
||||
* 'cd' to the folder of your choosing.
|
||||
* Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git`
|
||||
* Then do `python CouchPotatoServer/CouchPotato.py` to start
|
||||
* To run on boot copy the init script. `sudo cp CouchPotatoServer/init/ubuntu /etc/init.d/couchpotato`
|
||||
* Change the paths inside the init script. `sudo nano /etc/init.d/couchpotato`
|
||||
* Make it executable. `sudo chmod +x /etc/init.d/couchpotato`
|
||||
* Add it to defaults. `sudo update-rc.d couchpotato defaults`
|
||||
* Open your browser and go to: `http://localhost:5050/`
|
||||
* (Ubuntu / Debian) To run on boot copy the init script `sudo cp CouchPotatoServer/init/ubuntu /etc/init.d/couchpotato`
|
||||
* (Ubuntu / Debian) Copy the default paths file `sudo cp CouchPotatoServer/init/ubuntu.default /etc/default/couchpotato`
|
||||
* (Ubuntu / Debian) Change the paths inside the default file `sudo nano /etc/default/couchpotato`
|
||||
* (Ubuntu / Debian) Make it executable `sudo chmod +x /etc/init.d/couchpotato`
|
||||
* (Ubuntu / Debian) Add it to defaults `sudo update-rc.d couchpotato defaults`
|
||||
* (systemd) To run on boot copy the systemd config `sudo cp CouchPotatoServer/init/couchpotato.fedora.service /etc/systemd/system/couchpotato.service`
|
||||
* (systemd) Update the systemd config file with your user and path to CouchPotato.py
|
||||
* (systemd) Enable it at boot with `sudo systemctl enable couchpotato`
|
||||
* Open your browser and go to `http://localhost:5050/`
|
||||
|
||||
Docker:
|
||||
* You can use [razorgirl's Dockerfile](https://github.com/razorgirl/docker-couchpotato) to quickly build your own isolated app container. It's based on the Linux instructions above. For more info about Docker check out the [official website](https://www.docker.com).
|
||||
|
||||
FreeBSD :
|
||||
|
||||
|
||||
45
config.rb
Normal file
45
config.rb
Normal file
@@ -0,0 +1,45 @@
|
||||
# First, require any additional compass plugins installed on your system.
|
||||
# require 'zen-grids'
|
||||
require 'susy'
|
||||
# require 'breakpoint'
|
||||
|
||||
|
||||
# Toggle this between :development and :production when deploying the CSS to the
|
||||
# live server. Development mode will retain comments and spacing from the
|
||||
# original Sass source and adds line numbering comments for easier debugging.
|
||||
environment = :development
|
||||
# environment = :development
|
||||
|
||||
# In development, we can turn on the FireSass-compatible debug_info.
|
||||
firesass = false
|
||||
# firesass = true
|
||||
|
||||
|
||||
# Location of the your project's resources.
|
||||
|
||||
|
||||
# Set this to the root of your project. All resource locations above are
|
||||
# considered to be relative to this path.
|
||||
http_path = "/"
|
||||
|
||||
# To use relative paths to assets in your compiled CSS files, set this to true.
|
||||
# relative_assets = true
|
||||
|
||||
|
||||
##
|
||||
## You probably don't need to edit anything below this.
|
||||
##
|
||||
|
||||
sass_dir = "./"
|
||||
css_dir = "./static/style_compiled"
|
||||
|
||||
# You can select your preferred output style here (can be overridden via the command line):
|
||||
# output_style = :expanded or :nested or :compact or :compressed
|
||||
output_style = (environment == :development) ? :expanded : :compressed
|
||||
|
||||
# To disable debugging comments that display the original location of your selectors. Uncomment:
|
||||
# line_comments = false
|
||||
|
||||
# Pass options to sass. For development, we turn on the FireSass-compatible
|
||||
# debug_info if the firesass config variable above is true.
|
||||
sass_options = (environment == :development && firesass == true) ? {:debug_info => true} : {}
|
||||
@@ -13,6 +13,8 @@ Lastly, for anything related to CouchPotato, feel free to stop by the [forum](ht
|
||||
## Issues
|
||||
Issues are intended for reporting bugs and weird behaviour or suggesting improvements to CouchPotatoServer.
|
||||
Before you submit an issue, please go through the following checklist:
|
||||
* **FILL IN ALL THE FIELDS ASKED FOR**
|
||||
* **POST MORE THAN A SINGLE LINE LOG**, if you do, you'd better have a easy reproducable bug
|
||||
* Search through existing issues (*including closed issues!*) first: you might be able to get your answer there.
|
||||
* Double check your issue manually, because it could be an external issue.
|
||||
* Post logs with your issue: Without seeing what is going on, the developers can't reproduce the error.
|
||||
@@ -22,15 +24,17 @@ Before you submit an issue, please go through the following checklist:
|
||||
* What providers are you using? (While your logs include these, scanning through hundreds of lines of logs isn't our hobby)
|
||||
* Post the logs from the *config* directory, please do not copy paste the UI. Use pastebin to store these logs!
|
||||
* Give a short step by step of how to reproduce the error.
|
||||
* What hardware / OS are you using and what are its limitations? For example: NAS can be slow and maybe have a different version of python installed then when you use CP on OSX or Windows.
|
||||
* What hardware / OS are you using and what are its limitations? For example: NAS can be slow and maybe have a different version of python installed than when you use CP on OS X or Windows.
|
||||
* Your issue might be marked with the "can't reproduce" tag. Don't ask why your issue was closed if it says so in the tag.
|
||||
* If you're running on a NAS (QNAP, Austor etc..) with pre-made packages, make sure these are set up to use our source repository (RuudBurger/CouchPotatoServer) and nothing else!!
|
||||
|
||||
The more relevant information you can provide, the more likely it is the issue will be resolved rather than closed.
|
||||
* If you're running on a NAS (QNAP, Austor, Synology etc.) with pre-made packages, make sure these are set up to use our source repository (RuudBurger/CouchPotatoServer) and nothing else!
|
||||
* Do not "bump" issues with "Any updates on this" or whatever. Yes I've seen it, you don't have to remind me of it. There will be an update when the code is done or I need information. If you feel the need to do so, you'd better have more info on the issue.
|
||||
|
||||
The more relevant information you provide, the more likely that your issue will be resolved.
|
||||
If you don't follow any of the checks above, I'll close the issue. If you are wondering why (and ask) I'll block you from posting new issues and the repo.
|
||||
|
||||
## Pull Requests
|
||||
Pull requests are intended for contributing code or documentation to the project. Before you submit a pull request, consider the following:
|
||||
* Make sure your pull request is made for the *develop* branch (or relevant feature branch).
|
||||
* Have you tested your PR? If not, why?
|
||||
* Does your PR have any limitations we should know of?
|
||||
* Does your PR have any limitations I should know of?
|
||||
* Is your PR up-to-date with the branch you're trying to push into?
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
import os
|
||||
import time
|
||||
import traceback
|
||||
|
||||
from couchpotato.api import api_docs, api_docs_missing, api
|
||||
from couchpotato.core.event import fireEvent
|
||||
from couchpotato.core.helpers.variable import md5, tryInt
|
||||
@@ -5,9 +9,6 @@ from couchpotato.core.logger import CPLog
|
||||
from couchpotato.environment import Env
|
||||
from tornado import template
|
||||
from tornado.web import RequestHandler, authenticated
|
||||
import os
|
||||
import time
|
||||
import traceback
|
||||
|
||||
|
||||
log = CPLog(__name__)
|
||||
@@ -39,13 +40,15 @@ class WebHandler(BaseHandler):
|
||||
return
|
||||
|
||||
try:
|
||||
if route == 'robots.txt':
|
||||
self.set_header('Content-Type', 'text/plain')
|
||||
self.write(views[route]())
|
||||
except:
|
||||
log.error("Failed doing web request '%s': %s", (route, traceback.format_exc()))
|
||||
self.write({'success': False, 'error': 'Failed returning results'})
|
||||
|
||||
|
||||
def addView(route, func, static = False):
|
||||
def addView(route, func):
|
||||
views[route] = func
|
||||
|
||||
|
||||
@@ -59,6 +62,13 @@ def index():
|
||||
addView('', index)
|
||||
|
||||
|
||||
# Web view
|
||||
def robots():
|
||||
return 'User-agent: * \n' \
|
||||
'Disallow: /'
|
||||
addView('robots.txt', robots)
|
||||
|
||||
|
||||
# API docs
|
||||
def apiDocs():
|
||||
routes = list(api.keys())
|
||||
|
||||
@@ -7,9 +7,8 @@ import urllib
|
||||
|
||||
from couchpotato.core.helpers.request import getParams
|
||||
from couchpotato.core.logger import CPLog
|
||||
from tornado.gen import coroutine
|
||||
from tornado.ioloop import IOLoop
|
||||
from tornado.web import RequestHandler, asynchronous
|
||||
import tornado
|
||||
|
||||
|
||||
log = CPLog(__name__)
|
||||
@@ -28,10 +27,18 @@ def run_async(func):
|
||||
def async_func(*args, **kwargs):
|
||||
func_hl = Thread(target = func, args = args, kwargs = kwargs)
|
||||
func_hl.start()
|
||||
return func_hl
|
||||
|
||||
return async_func
|
||||
|
||||
@run_async
|
||||
def run_handler(route, kwargs, callback = None):
|
||||
try:
|
||||
res = api[route](**kwargs)
|
||||
callback(res, route)
|
||||
except:
|
||||
log.error('Failed doing api request "%s": %s', (route, traceback.format_exc()))
|
||||
callback({'success': False, 'error': 'Failed returning results'}, route)
|
||||
|
||||
|
||||
# NonBlock API handler
|
||||
class NonBlockHandler(RequestHandler):
|
||||
@@ -44,24 +51,22 @@ class NonBlockHandler(RequestHandler):
|
||||
start, stop = api_nonblock[route]
|
||||
self.stopper = stop
|
||||
|
||||
start(self.onNewMessage, last_id = self.get_argument('last_id', None))
|
||||
start(self.sendData, last_id = self.get_argument('last_id', None))
|
||||
|
||||
def onNewMessage(self, response):
|
||||
if self.request.connection.stream.closed():
|
||||
self.on_connection_close()
|
||||
return
|
||||
def sendData(self, response):
|
||||
if not self.request.connection.stream.closed():
|
||||
try:
|
||||
self.finish(response)
|
||||
except:
|
||||
log.debug('Failed doing nonblock request, probably already closed: %s', (traceback.format_exc()))
|
||||
try: self.finish({'success': False, 'error': 'Failed returning results'})
|
||||
except: pass
|
||||
|
||||
try:
|
||||
self.finish(response)
|
||||
except:
|
||||
log.debug('Failed doing nonblock request, probably already closed: %s', (traceback.format_exc()))
|
||||
try: self.finish({'success': False, 'error': 'Failed returning results'})
|
||||
except: pass
|
||||
|
||||
def on_connection_close(self):
|
||||
self.removeStopper()
|
||||
|
||||
def removeStopper(self):
|
||||
if self.stopper:
|
||||
self.stopper(self.onNewMessage)
|
||||
self.stopper(self.sendData)
|
||||
|
||||
self.stopper = None
|
||||
|
||||
@@ -77,14 +82,20 @@ def addNonBlockApiView(route, func_tuple, docs = None, **kwargs):
|
||||
|
||||
# Blocking API handler
|
||||
class ApiHandler(RequestHandler):
|
||||
route = None
|
||||
|
||||
@coroutine
|
||||
@asynchronous
|
||||
def get(self, route, *args, **kwargs):
|
||||
route = route.strip('/')
|
||||
self.route = route = route.strip('/')
|
||||
if not api.get(route):
|
||||
self.write('API call doesn\'t seem to exist')
|
||||
self.finish()
|
||||
return
|
||||
|
||||
# Create lock if it doesn't exist
|
||||
if route in api_locks and not api_locks.get(route):
|
||||
api_locks[route] = threading.Lock()
|
||||
|
||||
api_locks[route].acquire()
|
||||
|
||||
try:
|
||||
@@ -102,36 +113,49 @@ class ApiHandler(RequestHandler):
|
||||
except: pass
|
||||
|
||||
# Add async callback handler
|
||||
@run_async
|
||||
def run_handler(callback):
|
||||
try:
|
||||
res = api[route](**kwargs)
|
||||
callback(res)
|
||||
except:
|
||||
log.error('Failed doing api request "%s": %s', (route, traceback.format_exc()))
|
||||
callback({'success': False, 'error': 'Failed returning results'})
|
||||
|
||||
result = yield tornado.gen.Task(run_handler)
|
||||
|
||||
# Check JSONP callback
|
||||
jsonp_callback = self.get_argument('callback_func', default = None)
|
||||
|
||||
if jsonp_callback:
|
||||
self.write(str(jsonp_callback) + '(' + json.dumps(result) + ')')
|
||||
self.set_header("Content-Type", "text/javascript")
|
||||
elif isinstance(result, tuple) and result[0] == 'redirect':
|
||||
self.redirect(result[1])
|
||||
else:
|
||||
self.write(result)
|
||||
run_handler(route, kwargs, callback = self.taskFinished)
|
||||
|
||||
except:
|
||||
log.error('Failed doing api request "%s": %s', (route, traceback.format_exc()))
|
||||
self.write({'success': False, 'error': 'Failed returning results'})
|
||||
try:
|
||||
self.write({'success': False, 'error': 'Failed returning results'})
|
||||
self.finish()
|
||||
except:
|
||||
log.error('Failed write error "%s": %s', (route, traceback.format_exc()))
|
||||
|
||||
api_locks[route].release()
|
||||
self.unlock()
|
||||
|
||||
post = get
|
||||
|
||||
def taskFinished(self, result, route):
|
||||
IOLoop.current().add_callback(self.sendData, result, route)
|
||||
self.unlock()
|
||||
|
||||
def sendData(self, result, route):
|
||||
|
||||
if not self.request.connection.stream.closed():
|
||||
try:
|
||||
# Check JSONP callback
|
||||
jsonp_callback = self.get_argument('callback_func', default = None)
|
||||
|
||||
if jsonp_callback:
|
||||
self.set_header('Content-Type', 'text/javascript')
|
||||
self.finish(str(jsonp_callback) + '(' + json.dumps(result) + ')')
|
||||
elif isinstance(result, tuple) and result[0] == 'redirect':
|
||||
self.redirect(result[1])
|
||||
else:
|
||||
self.finish(result)
|
||||
except UnicodeDecodeError:
|
||||
log.error('Failed proper encode: %s', traceback.format_exc())
|
||||
except:
|
||||
log.debug('Failed doing request, probably already closed: %s', (traceback.format_exc()))
|
||||
try: self.finish({'success': False, 'error': 'Failed returning results'})
|
||||
except: pass
|
||||
|
||||
def unlock(self):
|
||||
try: api_locks[self.route].release()
|
||||
except: pass
|
||||
|
||||
|
||||
def addApiView(route, func, static = False, docs = None, **kwargs):
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ import webbrowser
|
||||
|
||||
from couchpotato.api import addApiView
|
||||
from couchpotato.core.event import fireEvent, addEvent
|
||||
from couchpotato.core.helpers.variable import cleanHost, md5
|
||||
from couchpotato.core.helpers.variable import cleanHost, md5, isSubFolder
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.plugins.base import Plugin
|
||||
from couchpotato.environment import Env
|
||||
@@ -71,13 +71,14 @@ class Core(Plugin):
|
||||
return value if value and len(value) > 3 else uuid4().hex
|
||||
|
||||
def checkDataDir(self):
|
||||
if Env.get('app_dir') in Env.get('data_dir'):
|
||||
if isSubFolder(Env.get('data_dir'), Env.get('app_dir')):
|
||||
log.error('You should NOT use your CouchPotato directory to save your settings in. Files will get overwritten or be deleted.')
|
||||
|
||||
return True
|
||||
|
||||
def cleanUpFolders(self):
|
||||
self.deleteEmptyFolder(Env.get('app_dir'), show_error = False)
|
||||
only_clean = ['couchpotato', 'libs', 'init']
|
||||
self.deleteEmptyFolder(Env.get('app_dir'), show_error = False, only_clean = only_clean)
|
||||
|
||||
def available(self, **kwargs):
|
||||
return {
|
||||
@@ -90,7 +91,11 @@ class Core(Plugin):
|
||||
|
||||
def shutdown():
|
||||
self.initShutdown()
|
||||
IOLoop.current().add_callback(shutdown)
|
||||
|
||||
if IOLoop.current()._closing:
|
||||
shutdown()
|
||||
else:
|
||||
IOLoop.current().add_callback(shutdown)
|
||||
|
||||
return 'shutdown'
|
||||
|
||||
@@ -113,7 +118,7 @@ class Core(Plugin):
|
||||
|
||||
self.shutdown_started = True
|
||||
|
||||
fireEvent('app.do_shutdown')
|
||||
fireEvent('app.do_shutdown', restart = restart)
|
||||
log.debug('Every plugin got shutdown event')
|
||||
|
||||
loop = True
|
||||
@@ -138,8 +143,11 @@ class Core(Plugin):
|
||||
|
||||
log.debug('Safe to shutdown/restart')
|
||||
|
||||
loop = IOLoop.current()
|
||||
|
||||
try:
|
||||
IOLoop.current().stop()
|
||||
if not loop._closing:
|
||||
loop.stop()
|
||||
except RuntimeError:
|
||||
pass
|
||||
except:
|
||||
@@ -173,13 +181,13 @@ class Core(Plugin):
|
||||
return '%sapi/%s' % (self.createBaseUrl(), Env.setting('api_key'))
|
||||
|
||||
def version(self):
|
||||
ver = fireEvent('updater.info', single = True)
|
||||
ver = fireEvent('updater.info', single = True) or {'version': {}}
|
||||
|
||||
if os.name == 'nt': platf = 'windows'
|
||||
elif 'Darwin' in platform.platform(): platf = 'osx'
|
||||
else: platf = 'linux'
|
||||
|
||||
return '%s - %s-%s - v2' % (platf, ver.get('version')['type'], ver.get('version')['hash'])
|
||||
return '%s - %s-%s - v2' % (platf, ver.get('version').get('type') or 'unknown', ver.get('version').get('hash') or 'unknown')
|
||||
|
||||
def versionView(self, **kwargs):
|
||||
return {
|
||||
@@ -278,13 +286,13 @@ config = [{
|
||||
'name': 'permission_folder',
|
||||
'default': '0755',
|
||||
'label': 'Folder CHMOD',
|
||||
'description': 'Can be either decimal (493) or octal (leading zero: 0755)',
|
||||
'description': 'Can be either decimal (493) or octal (leading zero: 0755). <a target="_blank" href="http://permissions-calculator.org/">Calculate the correct value</a>',
|
||||
},
|
||||
{
|
||||
'name': 'permission_file',
|
||||
'default': '0755',
|
||||
'default': '0644',
|
||||
'label': 'File CHMOD',
|
||||
'description': 'Same as Folder CHMOD but for files',
|
||||
'description': 'See Folder CHMOD description, but for files',
|
||||
},
|
||||
],
|
||||
},
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
import os
|
||||
import re
|
||||
import traceback
|
||||
|
||||
from couchpotato.core.event import addEvent
|
||||
from couchpotato.core.helpers.encoding import ss
|
||||
@@ -8,8 +7,6 @@ from couchpotato.core.helpers.variable import tryInt
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.plugins.base import Plugin
|
||||
from couchpotato.environment import Env
|
||||
from minify.cssmin import cssmin
|
||||
from minify.jsmin import jsmin
|
||||
from tornado.web import StaticFileHandler
|
||||
|
||||
|
||||
@@ -22,30 +19,26 @@ class ClientScript(Plugin):
|
||||
|
||||
core_static = {
|
||||
'style': [
|
||||
'style/main.css',
|
||||
'style/uniform.generic.css',
|
||||
'style/uniform.css',
|
||||
'style/settings.css',
|
||||
'style/combined.min.css',
|
||||
],
|
||||
'script': [
|
||||
'scripts/library/mootools.js',
|
||||
'scripts/library/mootools_more.js',
|
||||
'scripts/vendor/mootools.js',
|
||||
'scripts/vendor/mootools_more.js',
|
||||
'scripts/vendor/form_replacement/form_check.js',
|
||||
'scripts/vendor/form_replacement/form_radio.js',
|
||||
'scripts/vendor/form_replacement/form_dropdown.js',
|
||||
'scripts/vendor/form_replacement/form_selectoption.js',
|
||||
'scripts/vendor/Array.stableSort.js',
|
||||
'scripts/vendor/history.js',
|
||||
'scripts/library/uniform.js',
|
||||
'scripts/library/form_replacement/form_check.js',
|
||||
'scripts/library/form_replacement/form_radio.js',
|
||||
'scripts/library/form_replacement/form_dropdown.js',
|
||||
'scripts/library/form_replacement/form_selectoption.js',
|
||||
'scripts/library/question.js',
|
||||
'scripts/library/scrollspy.js',
|
||||
'scripts/library/spin.js',
|
||||
'scripts/library/Array.stableSort.js',
|
||||
'scripts/library/async.js',
|
||||
'scripts/couchpotato.js',
|
||||
'scripts/api.js',
|
||||
'scripts/library/history.js',
|
||||
'scripts/page.js',
|
||||
'scripts/block.js',
|
||||
'scripts/block/navigation.js',
|
||||
'scripts/block/header.js',
|
||||
'scripts/block/footer.js',
|
||||
'scripts/block/menu.js',
|
||||
'scripts/page/home.js',
|
||||
@@ -54,8 +47,9 @@ class ClientScript(Plugin):
|
||||
],
|
||||
}
|
||||
|
||||
urls = {'style': {}, 'script': {}}
|
||||
minified = {'style': {}, 'script': {}}
|
||||
watches = {}
|
||||
|
||||
original_paths = {'style': {}, 'script': {}}
|
||||
paths = {'style': {}, 'script': {}}
|
||||
comment = {
|
||||
'style': '/*** %s:%d ***/\n',
|
||||
@@ -74,8 +68,7 @@ class ClientScript(Plugin):
|
||||
addEvent('clientscript.get_styles', self.getStyles)
|
||||
addEvent('clientscript.get_scripts', self.getScripts)
|
||||
|
||||
if not Env.get('dev'):
|
||||
addEvent('app.load', self.minify)
|
||||
addEvent('app.load', self.compile)
|
||||
|
||||
self.addCore()
|
||||
|
||||
@@ -91,7 +84,7 @@ class ClientScript(Plugin):
|
||||
else:
|
||||
self.registerStyle(core_url, file_path, position = 'front')
|
||||
|
||||
def minify(self):
|
||||
def compile(self):
|
||||
|
||||
# Create cache dir
|
||||
cache = Env.get('cache_dir')
|
||||
@@ -102,47 +95,43 @@ class ClientScript(Plugin):
|
||||
|
||||
for file_type in ['style', 'script']:
|
||||
ext = 'js' if file_type is 'script' else 'css'
|
||||
positions = self.paths.get(file_type, {})
|
||||
positions = self.original_paths.get(file_type, {})
|
||||
for position in positions:
|
||||
files = positions.get(position)
|
||||
self._minify(file_type, files, position, position + '.' + ext)
|
||||
self._compile(file_type, files, position, position + '.' + ext)
|
||||
|
||||
def _minify(self, file_type, files, position, out):
|
||||
def _compile(self, file_type, paths, position, out):
|
||||
|
||||
cache = Env.get('cache_dir')
|
||||
out_name = out
|
||||
out = os.path.join(cache, 'minified', out_name)
|
||||
minified_dir = os.path.join(cache, 'minified')
|
||||
|
||||
data_combined = ''
|
||||
|
||||
new_paths = []
|
||||
for x in paths:
|
||||
file_path, url_path = x
|
||||
|
||||
raw = []
|
||||
for file_path in files:
|
||||
f = open(file_path, 'r').read()
|
||||
|
||||
if file_type == 'script':
|
||||
data = jsmin(f)
|
||||
else:
|
||||
data = self.prefix(f)
|
||||
data = cssmin(data)
|
||||
data = data.replace('../images/', '../static/images/')
|
||||
data = data.replace('../fonts/', '../static/fonts/')
|
||||
data = data.replace('../../static/', '../static/') # Replace inside plugins
|
||||
if not Env.get('dev'):
|
||||
data = f
|
||||
|
||||
raw.append({'file': file_path, 'date': int(os.path.getmtime(file_path)), 'data': data})
|
||||
data_combined += self.comment.get(file_type) % (ss(file_path), int(os.path.getmtime(file_path)))
|
||||
data_combined += data + '\n\n'
|
||||
else:
|
||||
new_paths.append(x)
|
||||
|
||||
# Combine all files together with some comments
|
||||
data = ''
|
||||
for r in raw:
|
||||
data += self.comment.get(file_type) % (ss(r.get('file')), r.get('date'))
|
||||
data += r.get('data') + '\n\n'
|
||||
if not Env.get('dev'):
|
||||
|
||||
self.createFile(out, data.strip())
|
||||
out_path = os.path.join(minified_dir, out_name)
|
||||
self.createFile(out_path, data_combined.strip())
|
||||
|
||||
if not self.minified.get(file_type):
|
||||
self.minified[file_type] = {}
|
||||
if not self.minified[file_type].get(position):
|
||||
self.minified[file_type][position] = []
|
||||
minified_url = 'minified/%s?%s' % (out_name, tryInt(os.path.getmtime(out)))
|
||||
new_paths.append((out_path, {'url': minified_url}))
|
||||
|
||||
minified_url = 'minified/%s?%s' % (out_name, tryInt(os.path.getmtime(out)))
|
||||
self.minified[file_type][position].append(minified_url)
|
||||
self.paths[file_type][position] = new_paths
|
||||
|
||||
def getStyles(self, *args, **kwargs):
|
||||
return self.get('style', *args, **kwargs)
|
||||
@@ -150,22 +139,12 @@ class ClientScript(Plugin):
|
||||
def getScripts(self, *args, **kwargs):
|
||||
return self.get('script', *args, **kwargs)
|
||||
|
||||
def get(self, type, as_html = False, location = 'head'):
|
||||
def get(self, type, location = 'head'):
|
||||
if type in self.paths and location in self.paths[type]:
|
||||
paths = self.paths[type][location]
|
||||
return [x[1] for x in paths]
|
||||
|
||||
data = '' if as_html else []
|
||||
|
||||
try:
|
||||
try:
|
||||
if not Env.get('dev'):
|
||||
return self.minified[type][location]
|
||||
except:
|
||||
pass
|
||||
|
||||
return self.urls[type][location]
|
||||
except:
|
||||
log.error('Error getting minified %s, %s: %s', (type, location, traceback.format_exc()))
|
||||
|
||||
return data
|
||||
return []
|
||||
|
||||
def registerStyle(self, api_path, file_path, position = 'head'):
|
||||
self.register(api_path, file_path, 'style', position)
|
||||
@@ -177,36 +156,10 @@ class ClientScript(Plugin):
|
||||
|
||||
api_path = '%s?%s' % (api_path, tryInt(os.path.getmtime(file_path)))
|
||||
|
||||
if not self.urls[type].get(location):
|
||||
self.urls[type][location] = []
|
||||
self.urls[type][location].append(api_path)
|
||||
if not self.original_paths[type].get(location):
|
||||
self.original_paths[type][location] = []
|
||||
self.original_paths[type][location].append((file_path, api_path))
|
||||
|
||||
if not self.paths[type].get(location):
|
||||
self.paths[type][location] = []
|
||||
self.paths[type][location].append(file_path)
|
||||
|
||||
prefix_properties = ['border-radius', 'transform', 'transition', 'box-shadow']
|
||||
prefix_tags = ['ms', 'moz', 'webkit']
|
||||
|
||||
def prefix(self, data):
|
||||
|
||||
trimmed_data = re.sub('(\t|\n|\r)+', '', data)
|
||||
|
||||
new_data = ''
|
||||
colon_split = trimmed_data.split(';')
|
||||
for splt in colon_split:
|
||||
curl_split = splt.strip().split('{')
|
||||
for curly in curl_split:
|
||||
curly = curly.strip()
|
||||
for prop in self.prefix_properties:
|
||||
if curly[:len(prop) + 1] == prop + ':':
|
||||
for tag in self.prefix_tags:
|
||||
new_data += ' -%s-%s; ' % (tag, curly)
|
||||
|
||||
new_data += curly + (' { ' if len(curl_split) > 1 else ' ')
|
||||
|
||||
new_data += '; '
|
||||
|
||||
new_data = new_data.replace('{ ;', '; ').replace('} ;', '} ')
|
||||
|
||||
return new_data
|
||||
self.paths[type][location].append((file_path, api_path))
|
||||
|
||||
@@ -25,6 +25,7 @@ class DownloaderBase(Provider):
|
||||
status_support = True
|
||||
|
||||
torrent_sources = [
|
||||
'https://zoink.it/torrent/%s.torrent',
|
||||
'http://torrage.com/torrent/%s.torrent',
|
||||
'https://torcache.net/torrent/%s.torrent',
|
||||
]
|
||||
@@ -72,6 +73,9 @@ class DownloaderBase(Provider):
|
||||
return
|
||||
return self.download(data = data, media = media, filedata = filedata)
|
||||
|
||||
def download(self, *args, **kwargs):
|
||||
return False
|
||||
|
||||
def _getAllDownloadStatus(self, download_ids):
|
||||
if self.isDisabled(manual = True, data = {}):
|
||||
return
|
||||
|
||||
@@ -16,8 +16,8 @@ var DownloadersBase = new Class({
|
||||
|
||||
var setting_page = App.getPage('Settings');
|
||||
setting_page.addEvent('create', function(){
|
||||
Object.each(setting_page.tabs.downloaders.groups, self.addTestButton.bind(self))
|
||||
})
|
||||
Object.each(setting_page.tabs.downloaders.groups, self.addTestButton.bind(self));
|
||||
});
|
||||
|
||||
},
|
||||
|
||||
@@ -40,22 +40,23 @@ var DownloadersBase = new Class({
|
||||
|
||||
button.set('text', button_name);
|
||||
|
||||
var message;
|
||||
if(json.success){
|
||||
var message = new Element('span.success', {
|
||||
message = new Element('span.success', {
|
||||
'text': 'Connection successful'
|
||||
}).inject(button, 'after')
|
||||
}).inject(button, 'after');
|
||||
}
|
||||
else {
|
||||
var msg_text = 'Connection failed. Check logs for details.';
|
||||
if(json.hasOwnProperty('msg')) msg_text = json.msg;
|
||||
var message = new Element('span.failed', {
|
||||
message = new Element('span.failed', {
|
||||
'text': msg_text
|
||||
}).inject(button, 'after')
|
||||
}).inject(button, 'after');
|
||||
}
|
||||
|
||||
(function(){
|
||||
message.destroy();
|
||||
}).delay(3000)
|
||||
}).delay(3000);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@@ -33,9 +33,9 @@ class Scheduler(Plugin):
|
||||
except:
|
||||
pass
|
||||
|
||||
def doShutdown(self):
|
||||
def doShutdown(self, *args, **kwargs):
|
||||
self.stop()
|
||||
return super(Scheduler, self).doShutdown()
|
||||
return super(Scheduler, self).doShutdown(*args, **kwargs)
|
||||
|
||||
def stop(self):
|
||||
if self.started:
|
||||
|
||||
@@ -10,13 +10,13 @@ from threading import RLock
|
||||
|
||||
from couchpotato.api import addApiView
|
||||
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
|
||||
from couchpotato.core.helpers.encoding import ss
|
||||
from couchpotato.core.helpers.encoding import sp
|
||||
from couchpotato.core.helpers.variable import removePyc
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.plugins.base import Plugin
|
||||
from couchpotato.environment import Env
|
||||
from dateutil.parser import parse
|
||||
from git.repository import LocalRepository
|
||||
from scandir import scandir
|
||||
import version
|
||||
from six.moves import filter
|
||||
|
||||
@@ -142,9 +142,11 @@ class Updater(Plugin):
|
||||
'success': success
|
||||
}
|
||||
|
||||
def doShutdown(self):
|
||||
self.updater.deletePyc(show_logs = False)
|
||||
return super(Updater, self).doShutdown()
|
||||
def doShutdown(self, *args, **kwargs):
|
||||
if not Env.get('dev') and not Env.get('desktop'):
|
||||
removePyc(Env.get('app_dir'), show_logs = False)
|
||||
|
||||
return super(Updater, self).doShutdown(*args, **kwargs)
|
||||
|
||||
|
||||
class BaseUpdater(Plugin):
|
||||
@@ -180,30 +182,6 @@ class BaseUpdater(Plugin):
|
||||
def check(self):
|
||||
pass
|
||||
|
||||
def deletePyc(self, only_excess = True, show_logs = True):
|
||||
|
||||
for root, dirs, files in scandir.walk(ss(Env.get('app_dir'))):
|
||||
|
||||
pyc_files = filter(lambda filename: filename.endswith('.pyc'), files)
|
||||
py_files = set(filter(lambda filename: filename.endswith('.py'), files))
|
||||
excess_pyc_files = filter(lambda pyc_filename: pyc_filename[:-1] not in py_files, pyc_files) if only_excess else pyc_files
|
||||
|
||||
for excess_pyc_file in excess_pyc_files:
|
||||
full_path = os.path.join(root, excess_pyc_file)
|
||||
if show_logs: log.debug('Removing old PYC file: %s', full_path)
|
||||
try:
|
||||
os.remove(full_path)
|
||||
except:
|
||||
log.error('Couldn\'t remove %s: %s', (full_path, traceback.format_exc()))
|
||||
|
||||
for dir_name in dirs:
|
||||
full_path = os.path.join(root, dir_name)
|
||||
if len(os.listdir(full_path)) == 0:
|
||||
try:
|
||||
os.rmdir(full_path)
|
||||
except:
|
||||
log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc()))
|
||||
|
||||
|
||||
class GitUpdater(BaseUpdater):
|
||||
|
||||
@@ -227,19 +205,28 @@ class GitUpdater(BaseUpdater):
|
||||
def getVersion(self):
|
||||
|
||||
if not self.version:
|
||||
|
||||
hash = None
|
||||
date = None
|
||||
branch = self.branch
|
||||
|
||||
try:
|
||||
output = self.repo.getHead() # Yes, please
|
||||
log.debug('Git version output: %s', output.hash)
|
||||
self.version = {
|
||||
'repr': 'git:(%s:%s % s) %s (%s)' % (self.repo_user, self.repo_name, self.repo.getCurrentBranch().name or self.branch, output.hash[:8], datetime.fromtimestamp(output.getDate())),
|
||||
'hash': output.hash[:8],
|
||||
'date': output.getDate(),
|
||||
'type': 'git',
|
||||
'branch': self.repo.getCurrentBranch().name
|
||||
}
|
||||
|
||||
hash = output.hash[:8]
|
||||
date = output.getDate()
|
||||
branch = self.repo.getCurrentBranch().name
|
||||
except Exception as e:
|
||||
log.error('Failed using GIT updater, running from source, you need to have GIT installed. %s', e)
|
||||
return 'No GIT'
|
||||
|
||||
self.version = {
|
||||
'repr': 'git:(%s:%s % s) %s (%s)' % (self.repo_user, self.repo_name, branch, hash or 'unknown_hash', datetime.fromtimestamp(date) if date else 'unknown_date'),
|
||||
'hash': hash,
|
||||
'date': date,
|
||||
'type': 'git',
|
||||
'branch': branch
|
||||
}
|
||||
|
||||
return self.version
|
||||
|
||||
@@ -322,17 +309,18 @@ class SourceUpdater(BaseUpdater):
|
||||
return False
|
||||
|
||||
def replaceWith(self, path):
|
||||
app_dir = ss(Env.get('app_dir'))
|
||||
data_dir = ss(Env.get('data_dir'))
|
||||
path = sp(path)
|
||||
app_dir = Env.get('app_dir')
|
||||
data_dir = Env.get('data_dir')
|
||||
|
||||
# Get list of files we want to overwrite
|
||||
self.deletePyc()
|
||||
removePyc(app_dir)
|
||||
existing_files = []
|
||||
for root, subfiles, filenames in scandir.walk(app_dir):
|
||||
for root, subfiles, filenames in os.walk(app_dir):
|
||||
for filename in filenames:
|
||||
existing_files.append(os.path.join(root, filename))
|
||||
|
||||
for root, subfiles, filenames in scandir.walk(path):
|
||||
for root, subfiles, filenames in os.walk(path):
|
||||
for filename in filenames:
|
||||
fromfile = os.path.join(root, filename)
|
||||
tofile = os.path.join(app_dir, fromfile.replace(path + os.path.sep, ''))
|
||||
|
||||
@@ -27,7 +27,7 @@ var UpdaterBase = new Class({
|
||||
App.trigger('message', ['No updates available']);
|
||||
}
|
||||
}
|
||||
})
|
||||
});
|
||||
|
||||
},
|
||||
|
||||
@@ -50,8 +50,8 @@ var UpdaterBase = new Class({
|
||||
self.message.destroy();
|
||||
}
|
||||
}
|
||||
})
|
||||
}, (timeout || 0))
|
||||
});
|
||||
}, (timeout || 0));
|
||||
|
||||
},
|
||||
|
||||
@@ -84,7 +84,7 @@ var UpdaterBase = new Class({
|
||||
'click': self.doUpdate.bind(self)
|
||||
}
|
||||
})
|
||||
).inject(document.body)
|
||||
).inject(document.body);
|
||||
},
|
||||
|
||||
doUpdate: function(){
|
||||
@@ -96,7 +96,7 @@ var UpdaterBase = new Class({
|
||||
if(json.success)
|
||||
self.updating();
|
||||
else
|
||||
App.unBlockPage()
|
||||
App.unBlockPage();
|
||||
}
|
||||
});
|
||||
},
|
||||
|
||||
@@ -2,12 +2,15 @@ import json
|
||||
import os
|
||||
import time
|
||||
import traceback
|
||||
from sqlite3 import OperationalError
|
||||
|
||||
from CodernityDB.database import RecordNotFound
|
||||
from CodernityDB.index import IndexException, IndexNotFoundException, IndexConflict
|
||||
from couchpotato import CPLog
|
||||
from couchpotato.api import addApiView
|
||||
from couchpotato.core.event import addEvent, fireEvent
|
||||
from couchpotato.core.helpers.encoding import toUnicode
|
||||
from couchpotato.core.helpers.variable import getImdb, tryInt
|
||||
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
|
||||
from couchpotato.core.helpers.encoding import toUnicode, sp
|
||||
from couchpotato.core.helpers.variable import getImdb, tryInt, randomString
|
||||
|
||||
|
||||
log = CPLog(__name__)
|
||||
@@ -15,19 +18,25 @@ log = CPLog(__name__)
|
||||
|
||||
class Database(object):
|
||||
|
||||
indexes = []
|
||||
indexes = None
|
||||
db = None
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.indexes = {}
|
||||
|
||||
addApiView('database.list_documents', self.listDocuments)
|
||||
addApiView('database.reindex', self.reindex)
|
||||
addApiView('database.compact', self.compact)
|
||||
addApiView('database.document.update', self.updateDocument)
|
||||
addApiView('database.document.delete', self.deleteDocument)
|
||||
|
||||
addEvent('database.setup.after', self.startup_compact)
|
||||
addEvent('database.setup_index', self.setupIndex)
|
||||
addEvent('database.delete_corrupted', self.deleteCorrupted)
|
||||
|
||||
addEvent('app.migrate', self.migrate)
|
||||
addEvent('app.after_shutdown', self.close)
|
||||
|
||||
def getDB(self):
|
||||
|
||||
@@ -37,28 +46,50 @@ class Database(object):
|
||||
|
||||
return self.db
|
||||
|
||||
def close(self, **kwargs):
|
||||
self.getDB().close()
|
||||
|
||||
def setupIndex(self, index_name, klass):
|
||||
|
||||
self.indexes.append(index_name)
|
||||
self.indexes[index_name] = klass
|
||||
|
||||
db = self.getDB()
|
||||
|
||||
# Category index
|
||||
index_instance = klass(db.path, index_name)
|
||||
try:
|
||||
db.add_index(index_instance)
|
||||
db.reindex_index(index_name)
|
||||
except:
|
||||
previous = db.indexes_names[index_name]
|
||||
previous_version = previous._version
|
||||
current_version = klass._version
|
||||
|
||||
# Only edit index if versions are different
|
||||
if previous_version < current_version:
|
||||
log.debug('Index "%s" already exists, updating and reindexing', index_name)
|
||||
db.destroy_index(previous)
|
||||
# Make sure store and bucket don't exist
|
||||
exists = []
|
||||
for x in ['buck', 'stor']:
|
||||
full_path = os.path.join(db.path, '%s_%s' % (index_name, x))
|
||||
if os.path.exists(full_path):
|
||||
exists.append(full_path)
|
||||
|
||||
if index_name not in db.indexes_names:
|
||||
|
||||
# Remove existing buckets if index isn't there
|
||||
for x in exists:
|
||||
os.unlink(x)
|
||||
|
||||
# Add index (will restore buckets)
|
||||
db.add_index(index_instance)
|
||||
db.reindex_index(index_name)
|
||||
else:
|
||||
# Previous info
|
||||
previous = db.indexes_names[index_name]
|
||||
previous_version = previous._version
|
||||
current_version = klass._version
|
||||
|
||||
# Only edit index if versions are different
|
||||
if previous_version < current_version:
|
||||
log.debug('Index "%s" already exists, updating and reindexing', index_name)
|
||||
db.destroy_index(previous)
|
||||
db.add_index(index_instance)
|
||||
db.reindex_index(index_name)
|
||||
|
||||
except:
|
||||
log.error('Failed adding index %s: %s', (index_name, traceback.format_exc()))
|
||||
|
||||
def deleteDocument(self, **kwargs):
|
||||
|
||||
@@ -118,6 +149,17 @@ class Database(object):
|
||||
|
||||
return results
|
||||
|
||||
def deleteCorrupted(self, _id, traceback_error = ''):
|
||||
|
||||
db = self.getDB()
|
||||
|
||||
try:
|
||||
log.debug('Deleted corrupted document "%s": %s', (_id, traceback_error))
|
||||
corrupted = db.get('id', _id, with_storage = False)
|
||||
db._delete_id_index(corrupted.get('_id'), corrupted.get('_rev'), None)
|
||||
except:
|
||||
log.debug('Failed deleting corrupted: %s', traceback.format_exc())
|
||||
|
||||
def reindex(self, **kwargs):
|
||||
|
||||
success = True
|
||||
@@ -132,20 +174,108 @@ class Database(object):
|
||||
'success': success
|
||||
}
|
||||
|
||||
def compact(self, **kwargs):
|
||||
def compact(self, try_repair = True, **kwargs):
|
||||
|
||||
success = False
|
||||
db = self.getDB()
|
||||
|
||||
# Removing left over compact files
|
||||
db_path = sp(db.path)
|
||||
for f in os.listdir(sp(db.path)):
|
||||
for x in ['_compact_buck', '_compact_stor']:
|
||||
if f[-len(x):] == x:
|
||||
os.unlink(os.path.join(db_path, f))
|
||||
|
||||
success = True
|
||||
try:
|
||||
db = self.getDB()
|
||||
start = time.time()
|
||||
size = float(db.get_db_details().get('size', 0))
|
||||
log.debug('Compacting database, current size: %sMB', round(size/1048576, 2))
|
||||
|
||||
db.compact()
|
||||
new_size = float(db.get_db_details().get('size', 0))
|
||||
log.debug('Done compacting database in %ss, new size: %sMB, saved: %sMB', (round(time.time()-start, 2), round(new_size/1048576, 2), round((size-new_size)/1048576, 2)))
|
||||
success = True
|
||||
except (IndexException, AttributeError):
|
||||
if try_repair:
|
||||
log.error('Something wrong with indexes, trying repair')
|
||||
|
||||
# Remove all indexes
|
||||
old_indexes = self.indexes.keys()
|
||||
for index_name in old_indexes:
|
||||
try:
|
||||
db.destroy_index(index_name)
|
||||
except IndexNotFoundException:
|
||||
pass
|
||||
except:
|
||||
log.error('Failed removing old index %s', index_name)
|
||||
|
||||
# Add them again
|
||||
for index_name in self.indexes:
|
||||
klass = self.indexes[index_name]
|
||||
|
||||
# Category index
|
||||
index_instance = klass(db.path, index_name)
|
||||
try:
|
||||
db.add_index(index_instance)
|
||||
db.reindex_index(index_name)
|
||||
except IndexConflict:
|
||||
pass
|
||||
except:
|
||||
log.error('Failed adding index %s', index_name)
|
||||
raise
|
||||
|
||||
self.compact(try_repair = False)
|
||||
else:
|
||||
log.error('Failed compact: %s', traceback.format_exc())
|
||||
|
||||
except:
|
||||
log.error('Failed compact: %s', traceback.format_exc())
|
||||
success = False
|
||||
|
||||
return {
|
||||
'success': success
|
||||
}
|
||||
|
||||
# Compact on start
|
||||
def startup_compact(self):
|
||||
from couchpotato import Env
|
||||
|
||||
db = self.getDB()
|
||||
|
||||
# Try fix for migration failures on desktop
|
||||
if Env.get('desktop'):
|
||||
try:
|
||||
list(db.all('profile', with_doc = True))
|
||||
except RecordNotFound:
|
||||
|
||||
failed_location = '%s_failed' % db.path
|
||||
old_db = os.path.join(Env.get('data_dir'), 'couchpotato.db.old')
|
||||
|
||||
if not os.path.isdir(failed_location) and os.path.isfile(old_db):
|
||||
log.error('Corrupt database, trying migrate again')
|
||||
db.close()
|
||||
|
||||
# Rename database folder
|
||||
os.rename(db.path, '%s_failed' % db.path)
|
||||
|
||||
# Rename .old database to try another migrate
|
||||
os.rename(old_db, old_db[:-4])
|
||||
|
||||
fireEventAsync('app.restart')
|
||||
else:
|
||||
log.error('Migration failed and couldn\'t recover database. Please report on GitHub, with this message.')
|
||||
db.reindex()
|
||||
|
||||
return
|
||||
|
||||
# Check size and compact if needed
|
||||
size = db.get_db_details().get('size')
|
||||
prop_name = 'last_db_compact'
|
||||
last_check = int(Env.prop(prop_name, default = 0))
|
||||
|
||||
if size > 26214400 and last_check < time.time()-604800: # 25MB / 7 days
|
||||
self.compact()
|
||||
Env.prop(prop_name, value = int(time.time()))
|
||||
|
||||
def migrate(self):
|
||||
|
||||
from couchpotato import Env
|
||||
@@ -182,301 +312,328 @@ class Database(object):
|
||||
}
|
||||
|
||||
migrate_data = {}
|
||||
rename_old = False
|
||||
|
||||
c = conn.cursor()
|
||||
try:
|
||||
|
||||
for ml in migrate_list:
|
||||
migrate_data[ml] = {}
|
||||
rows = migrate_list[ml]
|
||||
c = conn.cursor()
|
||||
|
||||
try:
|
||||
c.execute('SELECT %s FROM `%s`' % ('`' + '`,`'.join(rows) + '`', ml))
|
||||
except:
|
||||
# ignore faulty destination_id database
|
||||
if ml == 'category':
|
||||
migrate_data[ml] = {}
|
||||
for ml in migrate_list:
|
||||
migrate_data[ml] = {}
|
||||
rows = migrate_list[ml]
|
||||
|
||||
try:
|
||||
c.execute('SELECT %s FROM `%s`' % ('`' + '`,`'.join(rows) + '`', ml))
|
||||
except:
|
||||
# ignore faulty destination_id database
|
||||
if ml == 'category':
|
||||
migrate_data[ml] = {}
|
||||
else:
|
||||
rename_old = True
|
||||
raise
|
||||
|
||||
for p in c.fetchall():
|
||||
columns = {}
|
||||
for row in migrate_list[ml]:
|
||||
columns[row] = p[rows.index(row)]
|
||||
|
||||
if not migrate_data[ml].get(p[0]):
|
||||
migrate_data[ml][p[0]] = columns
|
||||
else:
|
||||
if not isinstance(migrate_data[ml][p[0]], list):
|
||||
migrate_data[ml][p[0]] = [migrate_data[ml][p[0]]]
|
||||
migrate_data[ml][p[0]].append(columns)
|
||||
|
||||
conn.close()
|
||||
|
||||
log.info('Getting data took %s', time.time() - migrate_start)
|
||||
|
||||
db = self.getDB()
|
||||
if not db.opened:
|
||||
return
|
||||
|
||||
# Use properties
|
||||
properties = migrate_data['properties']
|
||||
log.info('Importing %s properties', len(properties))
|
||||
for x in properties:
|
||||
property = properties[x]
|
||||
Env.prop(property.get('identifier'), property.get('value'))
|
||||
|
||||
# Categories
|
||||
categories = migrate_data.get('category', [])
|
||||
log.info('Importing %s categories', len(categories))
|
||||
category_link = {}
|
||||
for x in categories:
|
||||
c = categories[x]
|
||||
|
||||
new_c = db.insert({
|
||||
'_t': 'category',
|
||||
'order': c.get('order', 999),
|
||||
'label': toUnicode(c.get('label', '')),
|
||||
'ignored': toUnicode(c.get('ignored', '')),
|
||||
'preferred': toUnicode(c.get('preferred', '')),
|
||||
'required': toUnicode(c.get('required', '')),
|
||||
'destination': toUnicode(c.get('destination', '')),
|
||||
})
|
||||
|
||||
category_link[x] = new_c.get('_id')
|
||||
|
||||
# Profiles
|
||||
log.info('Importing profiles')
|
||||
new_profiles = db.all('profile', with_doc = True)
|
||||
new_profiles_by_label = {}
|
||||
for x in new_profiles:
|
||||
|
||||
# Remove default non core profiles
|
||||
if not x['doc'].get('core'):
|
||||
db.delete(x['doc'])
|
||||
else:
|
||||
raise
|
||||
new_profiles_by_label[x['doc']['label']] = x['_id']
|
||||
|
||||
for p in c.fetchall():
|
||||
columns = {}
|
||||
for row in migrate_list[ml]:
|
||||
columns[row] = p[rows.index(row)]
|
||||
profiles = migrate_data['profile']
|
||||
profile_link = {}
|
||||
for x in profiles:
|
||||
p = profiles[x]
|
||||
|
||||
if not migrate_data[ml].get(p[0]):
|
||||
migrate_data[ml][p[0]] = columns
|
||||
exists = new_profiles_by_label.get(p.get('label'))
|
||||
|
||||
# Update existing with order only
|
||||
if exists and p.get('core'):
|
||||
profile = db.get('id', exists)
|
||||
profile['order'] = tryInt(p.get('order'))
|
||||
profile['hide'] = p.get('hide') in [1, True, 'true', 'True']
|
||||
db.update(profile)
|
||||
|
||||
profile_link[x] = profile.get('_id')
|
||||
else:
|
||||
if not isinstance(migrate_data[ml][p[0]], list):
|
||||
migrate_data[ml][p[0]] = [migrate_data[ml][p[0]]]
|
||||
migrate_data[ml][p[0]].append(columns)
|
||||
|
||||
conn.close()
|
||||
|
||||
log.info('Getting data took %s', time.time() - migrate_start)
|
||||
|
||||
db = self.getDB()
|
||||
|
||||
# Use properties
|
||||
properties = migrate_data['properties']
|
||||
log.info('Importing %s properties', len(properties))
|
||||
for x in properties:
|
||||
property = properties[x]
|
||||
Env.prop(property.get('identifier'), property.get('value'))
|
||||
|
||||
# Categories
|
||||
categories = migrate_data.get('category', [])
|
||||
log.info('Importing %s categories', len(categories))
|
||||
category_link = {}
|
||||
for x in categories:
|
||||
c = categories[x]
|
||||
|
||||
new_c = db.insert({
|
||||
'_t': 'category',
|
||||
'order': c.get('order', 999),
|
||||
'label': toUnicode(c.get('label', '')),
|
||||
'ignored': toUnicode(c.get('ignored', '')),
|
||||
'preferred': toUnicode(c.get('preferred', '')),
|
||||
'required': toUnicode(c.get('required', '')),
|
||||
'destination': toUnicode(c.get('destination', '')),
|
||||
})
|
||||
|
||||
category_link[x] = new_c.get('_id')
|
||||
|
||||
# Profiles
|
||||
log.info('Importing profiles')
|
||||
new_profiles = db.all('profile', with_doc = True)
|
||||
new_profiles_by_label = {}
|
||||
for x in new_profiles:
|
||||
|
||||
# Remove default non core profiles
|
||||
if not x['doc'].get('core'):
|
||||
db.delete(x['doc'])
|
||||
else:
|
||||
new_profiles_by_label[x['doc']['label']] = x['_id']
|
||||
|
||||
profiles = migrate_data['profile']
|
||||
profile_link = {}
|
||||
for x in profiles:
|
||||
p = profiles[x]
|
||||
|
||||
exists = new_profiles_by_label.get(p.get('label'))
|
||||
|
||||
# Update existing with order only
|
||||
if exists and p.get('core'):
|
||||
profile = db.get('id', exists)
|
||||
profile['order'] = tryInt(p.get('order'))
|
||||
profile['hide'] = p.get('hide') in [1, True, 'true', 'True']
|
||||
db.update(profile)
|
||||
|
||||
profile_link[x] = profile.get('_id')
|
||||
else:
|
||||
|
||||
new_profile = {
|
||||
'_t': 'profile',
|
||||
'label': p.get('label'),
|
||||
'order': int(p.get('order', 999)),
|
||||
'core': p.get('core', False),
|
||||
'qualities': [],
|
||||
'wait_for': [],
|
||||
'finish': []
|
||||
}
|
||||
|
||||
types = migrate_data['profiletype']
|
||||
for profile_type in types:
|
||||
p_type = types[profile_type]
|
||||
if types[profile_type]['profile_id'] == p['id']:
|
||||
new_profile['finish'].append(p_type['finish'])
|
||||
new_profile['wait_for'].append(p_type['wait_for'])
|
||||
new_profile['qualities'].append(migrate_data['quality'][p_type['quality_id']]['identifier'])
|
||||
|
||||
new_profile.update(db.insert(new_profile))
|
||||
|
||||
profile_link[x] = new_profile.get('_id')
|
||||
|
||||
# Qualities
|
||||
log.info('Importing quality sizes')
|
||||
new_qualities = db.all('quality', with_doc = True)
|
||||
new_qualities_by_identifier = {}
|
||||
for x in new_qualities:
|
||||
new_qualities_by_identifier[x['doc']['identifier']] = x['_id']
|
||||
|
||||
qualities = migrate_data['quality']
|
||||
quality_link = {}
|
||||
for x in qualities:
|
||||
q = qualities[x]
|
||||
q_id = new_qualities_by_identifier[q.get('identifier')]
|
||||
|
||||
quality = db.get('id', q_id)
|
||||
quality['order'] = q.get('order')
|
||||
quality['size_min'] = tryInt(q.get('size_min'))
|
||||
quality['size_max'] = tryInt(q.get('size_max'))
|
||||
db.update(quality)
|
||||
|
||||
quality_link[x] = quality
|
||||
|
||||
# Titles
|
||||
titles = migrate_data['librarytitle']
|
||||
titles_by_library = {}
|
||||
for x in titles:
|
||||
title = titles[x]
|
||||
if title.get('default'):
|
||||
titles_by_library[title.get('libraries_id')] = title.get('title')
|
||||
|
||||
# Releases
|
||||
releaseinfos = migrate_data['releaseinfo']
|
||||
for x in releaseinfos:
|
||||
info = releaseinfos[x]
|
||||
|
||||
# Skip if release doesn't exist for this info
|
||||
if not migrate_data['release'].get(info.get('release_id')):
|
||||
continue
|
||||
|
||||
if not migrate_data['release'][info.get('release_id')].get('info'):
|
||||
migrate_data['release'][info.get('release_id')]['info'] = {}
|
||||
|
||||
migrate_data['release'][info.get('release_id')]['info'][info.get('identifier')] = info.get('value')
|
||||
|
||||
releases = migrate_data['release']
|
||||
releases_by_media = {}
|
||||
for x in releases:
|
||||
release = releases[x]
|
||||
if not releases_by_media.get(release.get('movie_id')):
|
||||
releases_by_media[release.get('movie_id')] = []
|
||||
|
||||
releases_by_media[release.get('movie_id')].append(release)
|
||||
|
||||
# Type ids
|
||||
types = migrate_data['filetype']
|
||||
type_by_id = {}
|
||||
for t in types:
|
||||
type = types[t]
|
||||
type_by_id[type.get('id')] = type
|
||||
|
||||
# Media
|
||||
log.info('Importing %s media items', len(migrate_data['movie']))
|
||||
statuses = migrate_data['status']
|
||||
libraries = migrate_data['library']
|
||||
library_files = migrate_data['library_files__file_library']
|
||||
releases_files = migrate_data['release_files__file_release']
|
||||
all_files = migrate_data['file']
|
||||
poster_type = migrate_data['filetype']['poster']
|
||||
medias = migrate_data['movie']
|
||||
for x in medias:
|
||||
m = medias[x]
|
||||
|
||||
status = statuses.get(m['status_id']).get('identifier')
|
||||
l = libraries[m['library_id']]
|
||||
|
||||
# Only migrate wanted movies, Skip if no identifier present
|
||||
if not getImdb(l.get('identifier')): continue
|
||||
|
||||
profile_id = profile_link.get(m['profile_id'])
|
||||
category_id = category_link.get(m['category_id'])
|
||||
title = titles_by_library.get(m['library_id'])
|
||||
releases = releases_by_media.get(x, [])
|
||||
info = json.loads(l.get('info', ''))
|
||||
|
||||
files = library_files.get(m['library_id'], [])
|
||||
if not isinstance(files, list):
|
||||
files = [files]
|
||||
|
||||
added_media = fireEvent('movie.add', {
|
||||
'info': info,
|
||||
'identifier': l.get('identifier'),
|
||||
'profile_id': profile_id,
|
||||
'category_id': category_id,
|
||||
'title': title
|
||||
}, force_readd = False, search_after = False, update_after = False, notify_after = False, status = status, single = True)
|
||||
|
||||
if not added_media:
|
||||
log.error('Failed adding media %s: %s', (l.get('identifier'), info))
|
||||
continue
|
||||
|
||||
added_media['files'] = added_media.get('files', {})
|
||||
for f in files:
|
||||
ffile = all_files[f.get('file_id')]
|
||||
|
||||
# Only migrate posters
|
||||
if ffile.get('type_id') == poster_type.get('id'):
|
||||
if ffile.get('path') not in added_media['files'].get('image_poster', []) and os.path.isfile(ffile.get('path')):
|
||||
added_media['files']['image_poster'] = [ffile.get('path')]
|
||||
break
|
||||
|
||||
if 'image_poster' in added_media['files']:
|
||||
db.update(added_media)
|
||||
|
||||
for rel in releases:
|
||||
|
||||
empty_info = False
|
||||
if not rel.get('info'):
|
||||
empty_info = True
|
||||
rel['info'] = {}
|
||||
|
||||
quality = quality_link[rel.get('quality_id')]
|
||||
release_status = statuses.get(rel.get('status_id')).get('identifier')
|
||||
|
||||
if rel['info'].get('download_id'):
|
||||
status_support = rel['info'].get('download_status_support', False) in [True, 'true', 'True']
|
||||
rel['info']['download_info'] = {
|
||||
'id': rel['info'].get('download_id'),
|
||||
'downloader': rel['info'].get('download_downloader'),
|
||||
'status_support': status_support,
|
||||
new_profile = {
|
||||
'_t': 'profile',
|
||||
'label': p.get('label'),
|
||||
'order': int(p.get('order', 999)),
|
||||
'core': p.get('core', False),
|
||||
'qualities': [],
|
||||
'wait_for': [],
|
||||
'finish': []
|
||||
}
|
||||
|
||||
# Add status to keys
|
||||
rel['info']['status'] = release_status
|
||||
if not empty_info:
|
||||
fireEvent('release.create_from_search', [rel['info']], added_media, quality, single = True)
|
||||
else:
|
||||
release = {
|
||||
'_t': 'release',
|
||||
'identifier': rel.get('identifier'),
|
||||
'media_id': added_media.get('_id'),
|
||||
'quality': quality.get('identifier'),
|
||||
'status': release_status,
|
||||
'last_edit': int(time.time()),
|
||||
'files': {}
|
||||
}
|
||||
types = migrate_data['profiletype']
|
||||
for profile_type in types:
|
||||
p_type = types[profile_type]
|
||||
if types[profile_type]['profile_id'] == p['id']:
|
||||
if p_type['quality_id']:
|
||||
new_profile['finish'].append(p_type['finish'])
|
||||
new_profile['wait_for'].append(p_type['wait_for'])
|
||||
new_profile['qualities'].append(migrate_data['quality'][p_type['quality_id']]['identifier'])
|
||||
|
||||
# Add downloader info if provided
|
||||
try:
|
||||
release['download_info'] = rel['info']['download_info']
|
||||
del rel['download_info']
|
||||
except:
|
||||
pass
|
||||
if len(new_profile['qualities']) > 0:
|
||||
new_profile.update(db.insert(new_profile))
|
||||
profile_link[x] = new_profile.get('_id')
|
||||
else:
|
||||
log.error('Corrupt profile list for "%s", using default.', p.get('label'))
|
||||
|
||||
# Add files
|
||||
release_files = releases_files.get(rel.get('id'), [])
|
||||
if not isinstance(release_files, list):
|
||||
release_files = [release_files]
|
||||
# Qualities
|
||||
log.info('Importing quality sizes')
|
||||
new_qualities = db.all('quality', with_doc = True)
|
||||
new_qualities_by_identifier = {}
|
||||
for x in new_qualities:
|
||||
new_qualities_by_identifier[x['doc']['identifier']] = x['_id']
|
||||
|
||||
if len(release_files) == 0:
|
||||
qualities = migrate_data['quality']
|
||||
quality_link = {}
|
||||
for x in qualities:
|
||||
q = qualities[x]
|
||||
q_id = new_qualities_by_identifier[q.get('identifier')]
|
||||
|
||||
quality = db.get('id', q_id)
|
||||
quality['order'] = q.get('order')
|
||||
quality['size_min'] = tryInt(q.get('size_min'))
|
||||
quality['size_max'] = tryInt(q.get('size_max'))
|
||||
db.update(quality)
|
||||
|
||||
quality_link[x] = quality
|
||||
|
||||
# Titles
|
||||
titles = migrate_data['librarytitle']
|
||||
titles_by_library = {}
|
||||
for x in titles:
|
||||
title = titles[x]
|
||||
if title.get('default'):
|
||||
titles_by_library[title.get('libraries_id')] = title.get('title')
|
||||
|
||||
# Releases
|
||||
releaseinfos = migrate_data['releaseinfo']
|
||||
for x in releaseinfos:
|
||||
info = releaseinfos[x]
|
||||
|
||||
# Skip if release doesn't exist for this info
|
||||
if not migrate_data['release'].get(info.get('release_id')):
|
||||
continue
|
||||
|
||||
if not migrate_data['release'][info.get('release_id')].get('info'):
|
||||
migrate_data['release'][info.get('release_id')]['info'] = {}
|
||||
|
||||
migrate_data['release'][info.get('release_id')]['info'][info.get('identifier')] = info.get('value')
|
||||
|
||||
releases = migrate_data['release']
|
||||
releases_by_media = {}
|
||||
for x in releases:
|
||||
release = releases[x]
|
||||
if not releases_by_media.get(release.get('movie_id')):
|
||||
releases_by_media[release.get('movie_id')] = []
|
||||
|
||||
releases_by_media[release.get('movie_id')].append(release)
|
||||
|
||||
# Type ids
|
||||
types = migrate_data['filetype']
|
||||
type_by_id = {}
|
||||
for t in types:
|
||||
type = types[t]
|
||||
type_by_id[type.get('id')] = type
|
||||
|
||||
# Media
|
||||
log.info('Importing %s media items', len(migrate_data['movie']))
|
||||
statuses = migrate_data['status']
|
||||
libraries = migrate_data['library']
|
||||
library_files = migrate_data['library_files__file_library']
|
||||
releases_files = migrate_data['release_files__file_release']
|
||||
all_files = migrate_data['file']
|
||||
poster_type = migrate_data['filetype']['poster']
|
||||
medias = migrate_data['movie']
|
||||
for x in medias:
|
||||
m = medias[x]
|
||||
|
||||
status = statuses.get(m['status_id']).get('identifier')
|
||||
l = libraries.get(m['library_id'])
|
||||
|
||||
# Only migrate wanted movies, Skip if no identifier present
|
||||
if not l or not getImdb(l.get('identifier')): continue
|
||||
|
||||
profile_id = profile_link.get(m['profile_id'])
|
||||
category_id = category_link.get(m['category_id'])
|
||||
title = titles_by_library.get(m['library_id'])
|
||||
releases = releases_by_media.get(x, [])
|
||||
info = json.loads(l.get('info', ''))
|
||||
|
||||
files = library_files.get(m['library_id'], [])
|
||||
if not isinstance(files, list):
|
||||
files = [files]
|
||||
|
||||
added_media = fireEvent('movie.add', {
|
||||
'info': info,
|
||||
'identifier': l.get('identifier'),
|
||||
'profile_id': profile_id,
|
||||
'category_id': category_id,
|
||||
'title': title
|
||||
}, force_readd = False, search_after = False, update_after = False, notify_after = False, status = status, single = True)
|
||||
|
||||
if not added_media:
|
||||
log.error('Failed adding media %s: %s', (l.get('identifier'), info))
|
||||
continue
|
||||
|
||||
added_media['files'] = added_media.get('files', {})
|
||||
for f in files:
|
||||
ffile = all_files[f.get('file_id')]
|
||||
|
||||
# Only migrate posters
|
||||
if ffile.get('type_id') == poster_type.get('id'):
|
||||
if ffile.get('path') not in added_media['files'].get('image_poster', []) and os.path.isfile(ffile.get('path')):
|
||||
added_media['files']['image_poster'] = [ffile.get('path')]
|
||||
break
|
||||
|
||||
if 'image_poster' in added_media['files']:
|
||||
db.update(added_media)
|
||||
|
||||
for rel in releases:
|
||||
|
||||
empty_info = False
|
||||
if not rel.get('info'):
|
||||
empty_info = True
|
||||
rel['info'] = {}
|
||||
|
||||
quality = quality_link.get(rel.get('quality_id'))
|
||||
if not quality:
|
||||
continue
|
||||
|
||||
for f in release_files:
|
||||
rfile = all_files[f.get('file_id')]
|
||||
file_type = type_by_id.get(rfile.get('type_id')).get('identifier')
|
||||
release_status = statuses.get(rel.get('status_id')).get('identifier')
|
||||
|
||||
if not release['files'].get(file_type):
|
||||
release['files'][file_type] = []
|
||||
if rel['info'].get('download_id'):
|
||||
status_support = rel['info'].get('download_status_support', False) in [True, 'true', 'True']
|
||||
rel['info']['download_info'] = {
|
||||
'id': rel['info'].get('download_id'),
|
||||
'downloader': rel['info'].get('download_downloader'),
|
||||
'status_support': status_support,
|
||||
}
|
||||
|
||||
release['files'][file_type].append(rfile.get('path'))
|
||||
# Add status to keys
|
||||
rel['info']['status'] = release_status
|
||||
if not empty_info:
|
||||
fireEvent('release.create_from_search', [rel['info']], added_media, quality, single = True)
|
||||
else:
|
||||
release = {
|
||||
'_t': 'release',
|
||||
'identifier': rel.get('identifier'),
|
||||
'media_id': added_media.get('_id'),
|
||||
'quality': quality.get('identifier'),
|
||||
'status': release_status,
|
||||
'last_edit': int(time.time()),
|
||||
'files': {}
|
||||
}
|
||||
|
||||
try:
|
||||
rls = db.get('release_identifier', rel.get('identifier'), with_doc = True)['doc']
|
||||
rls.update(release)
|
||||
db.update(rls)
|
||||
except:
|
||||
db.insert(release)
|
||||
# Add downloader info if provided
|
||||
try:
|
||||
release['download_info'] = rel['info']['download_info']
|
||||
del rel['download_info']
|
||||
except:
|
||||
pass
|
||||
|
||||
# Add files
|
||||
release_files = releases_files.get(rel.get('id'), [])
|
||||
if not isinstance(release_files, list):
|
||||
release_files = [release_files]
|
||||
|
||||
if len(release_files) == 0:
|
||||
continue
|
||||
|
||||
for f in release_files:
|
||||
rfile = all_files.get(f.get('file_id'))
|
||||
if not rfile:
|
||||
continue
|
||||
|
||||
file_type = type_by_id.get(rfile.get('type_id')).get('identifier')
|
||||
|
||||
if not release['files'].get(file_type):
|
||||
release['files'][file_type] = []
|
||||
|
||||
release['files'][file_type].append(rfile.get('path'))
|
||||
|
||||
try:
|
||||
rls = db.get('release_identifier', rel.get('identifier'), with_doc = True)['doc']
|
||||
rls.update(release)
|
||||
db.update(rls)
|
||||
except:
|
||||
db.insert(release)
|
||||
|
||||
log.info('Total migration took %s', time.time() - migrate_start)
|
||||
log.info('=' * 30)
|
||||
|
||||
rename_old = True
|
||||
|
||||
except OperationalError:
|
||||
log.error('Migrating from faulty database, probably a (too) old version: %s', traceback.format_exc())
|
||||
|
||||
rename_old = True
|
||||
except:
|
||||
log.error('Migration failed: %s', traceback.format_exc())
|
||||
|
||||
log.info('Total migration took %s', time.time() - migrate_start)
|
||||
log.info('=' * 30)
|
||||
|
||||
# rename old database
|
||||
log.info('Renaming old database to %s ', old_db + '.old')
|
||||
os.rename(old_db, old_db + '.old')
|
||||
if rename_old:
|
||||
random = randomString()
|
||||
log.info('Renaming old database to %s ', '%s.%s_old' % (old_db, random))
|
||||
os.rename(old_db, '%s.%s_old' % (old_db, random))
|
||||
|
||||
if os.path.isfile(old_db + '-wal'):
|
||||
os.rename(old_db + '-wal', old_db + '-wal.old')
|
||||
if os.path.isfile(old_db + '-shm'):
|
||||
os.rename(old_db + '-shm', old_db + '-shm.old')
|
||||
if os.path.isfile(old_db + '-wal'):
|
||||
os.rename(old_db + '-wal', '%s-wal.%s_old' % (old_db, random))
|
||||
if os.path.isfile(old_db + '-shm'):
|
||||
os.rename(old_db + '-shm', '%s-shm.%s_old' % (old_db, random))
|
||||
|
||||
@@ -20,14 +20,31 @@ class Blackhole(DownloaderBase):
|
||||
status_support = False
|
||||
|
||||
def download(self, data = None, media = None, filedata = None):
|
||||
""" Send a torrent/nzb file to the downloader
|
||||
|
||||
:param data: dict returned from provider
|
||||
Contains the release information
|
||||
:param media: media dict with information
|
||||
Used for creating the filename when possible
|
||||
:param filedata: downloaded torrent/nzb filedata
|
||||
The file gets downloaded in the searcher and send to this function
|
||||
This is done to have failed checking before using the downloader, so the downloader
|
||||
doesn't need to worry about that
|
||||
:return: boolean
|
||||
One faile returns false, but the downloaded should log his own errors
|
||||
"""
|
||||
|
||||
if not media: media = {}
|
||||
if not data: data = {}
|
||||
|
||||
directory = self.conf('directory')
|
||||
|
||||
# The folder needs to exist
|
||||
if not directory or not os.path.isdir(directory):
|
||||
log.error('No directory set for blackhole %s download.', data.get('protocol'))
|
||||
else:
|
||||
try:
|
||||
# Filedata can be empty, which probably means it a magnet link
|
||||
if not filedata or len(filedata) < 50:
|
||||
try:
|
||||
if data.get('protocol') == 'torrent_magnet':
|
||||
@@ -36,13 +53,16 @@ class Blackhole(DownloaderBase):
|
||||
except:
|
||||
log.error('Failed download torrent via magnet url: %s', traceback.format_exc())
|
||||
|
||||
# If it's still empty, don't know what to do!
|
||||
if not filedata or len(filedata) < 50:
|
||||
log.error('No nzb/torrent available: %s', data.get('url'))
|
||||
return False
|
||||
|
||||
# Create filename with imdb id and other nice stuff
|
||||
file_name = self.createFileName(data, filedata, media)
|
||||
full_path = os.path.join(directory, file_name)
|
||||
|
||||
# People want thinks nice and tidy, create a subdir
|
||||
if self.conf('create_subdir'):
|
||||
try:
|
||||
new_path = os.path.splitext(full_path)[0]
|
||||
@@ -53,6 +73,8 @@ class Blackhole(DownloaderBase):
|
||||
log.error('Couldnt create sub dir, reverting to old one: %s', full_path)
|
||||
|
||||
try:
|
||||
|
||||
# Make sure the file doesn't exist yet, no need in overwriting it
|
||||
if not os.path.isfile(full_path):
|
||||
log.info('Downloading %s to %s.', (data.get('protocol'), full_path))
|
||||
with open(full_path, 'wb') as f:
|
||||
@@ -74,6 +96,10 @@ class Blackhole(DownloaderBase):
|
||||
return False
|
||||
|
||||
def test(self):
|
||||
""" Test and see if the directory is writable
|
||||
:return: boolean
|
||||
"""
|
||||
|
||||
directory = self.conf('directory')
|
||||
if directory and os.path.isdir(directory):
|
||||
|
||||
@@ -88,6 +114,10 @@ class Blackhole(DownloaderBase):
|
||||
return False
|
||||
|
||||
def getEnabledProtocol(self):
|
||||
""" What protocols is this downloaded used for
|
||||
:return: list with protocols
|
||||
"""
|
||||
|
||||
if self.conf('use_for') == 'both':
|
||||
return super(Blackhole, self).getEnabledProtocol()
|
||||
elif self.conf('use_for') == 'torrent':
|
||||
@@ -96,6 +126,12 @@ class Blackhole(DownloaderBase):
|
||||
return ['nzb']
|
||||
|
||||
def isEnabled(self, manual = False, data = None):
|
||||
""" Check if protocol is used (and enabled)
|
||||
:param manual: The user has clicked to download a link through the webUI
|
||||
:param data: dict returned from provider
|
||||
Contains the release information
|
||||
:return: boolean
|
||||
"""
|
||||
if not data: data = {}
|
||||
for_protocol = ['both']
|
||||
if data and 'torrent' in data.get('protocol'):
|
||||
|
||||
@@ -25,8 +25,18 @@ class Deluge(DownloaderBase):
|
||||
drpc = None
|
||||
|
||||
def connect(self, reconnect = False):
|
||||
""" Connect to the delugeRPC, re-use connection when already available
|
||||
:param reconnect: force reconnect
|
||||
:return: DelugeRPC instance
|
||||
"""
|
||||
|
||||
# Load host from config and split out port.
|
||||
host = cleanHost(self.conf('host'), protocol = False).split(':')
|
||||
|
||||
# Force host assignment
|
||||
if len(host) == 1:
|
||||
host.append(80)
|
||||
|
||||
if not isInt(host[1]):
|
||||
log.error('Config properties are not filled in correctly, port is missing.')
|
||||
return False
|
||||
@@ -37,6 +47,20 @@ class Deluge(DownloaderBase):
|
||||
return self.drpc
|
||||
|
||||
def download(self, data = None, media = None, filedata = None):
|
||||
""" Send a torrent/nzb file to the downloader
|
||||
|
||||
:param data: dict returned from provider
|
||||
Contains the release information
|
||||
:param media: media dict with information
|
||||
Used for creating the filename when possible
|
||||
:param filedata: downloaded torrent/nzb filedata
|
||||
The file gets downloaded in the searcher and send to this function
|
||||
This is done to have failed checking before using the downloader, so the downloader
|
||||
doesn't need to worry about that
|
||||
:return: boolean
|
||||
One faile returns false, but the downloaded should log his own errors
|
||||
"""
|
||||
|
||||
if not media: media = {}
|
||||
if not data: data = {}
|
||||
|
||||
@@ -91,11 +115,21 @@ class Deluge(DownloaderBase):
|
||||
return self.downloadReturnId(remote_torrent)
|
||||
|
||||
def test(self):
|
||||
""" Check if connection works
|
||||
:return: bool
|
||||
"""
|
||||
if self.connect(True) and self.drpc.test():
|
||||
return True
|
||||
return False
|
||||
|
||||
def getAllDownloadStatus(self, ids):
|
||||
""" Get status of all active downloads
|
||||
|
||||
:param ids: list of (mixed) downloader ids
|
||||
Used to match the releases for this downloader as there could be
|
||||
other downloaders active that it should ignore
|
||||
:return: list of releases
|
||||
"""
|
||||
|
||||
log.debug('Checking Deluge download status.')
|
||||
|
||||
|
||||
427
couchpotato/core/downloaders/hadouken.py
Normal file
427
couchpotato/core/downloaders/hadouken.py
Normal file
@@ -0,0 +1,427 @@
|
||||
from base64 import b16encode, b32decode, b64encode
|
||||
from distutils.version import LooseVersion
|
||||
from hashlib import sha1
|
||||
import httplib
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import urllib2
|
||||
|
||||
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
|
||||
from couchpotato.core.helpers.encoding import isInt, sp
|
||||
from couchpotato.core.helpers.variable import cleanHost
|
||||
from couchpotato.core.logger import CPLog
|
||||
from bencode import bencode as benc, bdecode
|
||||
|
||||
|
||||
log = CPLog(__name__)
|
||||
|
||||
autoload = 'Hadouken'
|
||||
|
||||
|
||||
class Hadouken(DownloaderBase):
|
||||
protocol = ['torrent', 'torrent_magnet']
|
||||
hadouken_api = None
|
||||
|
||||
def connect(self):
|
||||
# Load host from config and split out port.
|
||||
host = cleanHost(self.conf('host'), protocol = False).split(':')
|
||||
|
||||
if not isInt(host[1]):
|
||||
log.error('Config properties are not filled in correctly, port is missing.')
|
||||
return False
|
||||
|
||||
if not self.conf('api_key'):
|
||||
log.error('Config properties are not filled in correctly, API key is missing.')
|
||||
return False
|
||||
|
||||
self.hadouken_api = HadoukenAPI(host[0], port = host[1], api_key = self.conf('api_key'))
|
||||
|
||||
return True
|
||||
|
||||
def download(self, data = None, media = None, filedata = None):
|
||||
""" Send a torrent/nzb file to the downloader
|
||||
|
||||
:param data: dict returned from provider
|
||||
Contains the release information
|
||||
:param media: media dict with information
|
||||
Used for creating the filename when possible
|
||||
:param filedata: downloaded torrent/nzb filedata
|
||||
The file gets downloaded in the searcher and send to this function
|
||||
This is done to have failed checking before using the downloader, so the downloader
|
||||
doesn't need to worry about that
|
||||
:return: boolean
|
||||
One faile returns false, but the downloaded should log his own errors
|
||||
"""
|
||||
|
||||
if not media: media = {}
|
||||
if not data: data = {}
|
||||
|
||||
log.debug("Sending '%s' (%s) to Hadouken.", (data.get('name'), data.get('protocol')))
|
||||
|
||||
if not self.connect():
|
||||
return False
|
||||
|
||||
torrent_params = {}
|
||||
|
||||
if self.conf('label'):
|
||||
torrent_params['label'] = self.conf('label')
|
||||
|
||||
torrent_filename = self.createFileName(data, filedata, media)
|
||||
|
||||
if data.get('protocol') == 'torrent_magnet':
|
||||
torrent_hash = re.findall('urn:btih:([\w]{32,40})', data.get('url'))[0].upper()
|
||||
torrent_params['trackers'] = self.torrent_trackers
|
||||
torrent_params['name'] = torrent_filename
|
||||
else:
|
||||
info = bdecode(filedata)['info']
|
||||
torrent_hash = sha1(benc(info)).hexdigest().upper()
|
||||
|
||||
# Convert base 32 to hex
|
||||
if len(torrent_hash) == 32:
|
||||
torrent_hash = b16encode(b32decode(torrent_hash))
|
||||
|
||||
# Send request to Hadouken
|
||||
if data.get('protocol') == 'torrent_magnet':
|
||||
self.hadouken_api.add_magnet_link(data.get('url'), torrent_params)
|
||||
else:
|
||||
self.hadouken_api.add_file(filedata, torrent_params)
|
||||
|
||||
return self.downloadReturnId(torrent_hash)
|
||||
|
||||
def test(self):
|
||||
""" Tests the given host:port and API key """
|
||||
|
||||
if not self.connect():
|
||||
return False
|
||||
|
||||
version = self.hadouken_api.get_version()
|
||||
|
||||
if not version:
|
||||
log.error('Could not get Hadouken version.')
|
||||
return False
|
||||
|
||||
# The minimum required version of Hadouken is 4.5.6.
|
||||
if LooseVersion(version) >= LooseVersion('4.5.6'):
|
||||
return True
|
||||
|
||||
log.error('Hadouken v4.5.6 (or newer) required. Found v%s', version)
|
||||
return False
|
||||
|
||||
def getAllDownloadStatus(self, ids):
|
||||
""" Get status of all active downloads
|
||||
|
||||
:param ids: list of (mixed) downloader ids
|
||||
Used to match the releases for this downloader as there could be
|
||||
other downloaders active that it should ignore
|
||||
:return: list of releases
|
||||
"""
|
||||
|
||||
log.debug('Checking Hadouken download status.')
|
||||
|
||||
if not self.connect():
|
||||
return []
|
||||
|
||||
release_downloads = ReleaseDownloadList(self)
|
||||
queue = self.hadouken_api.get_by_hash_list(ids)
|
||||
|
||||
if not queue:
|
||||
return []
|
||||
|
||||
for torrent in queue:
|
||||
if torrent is None:
|
||||
continue
|
||||
|
||||
torrent_filelist = self.hadouken_api.get_files_by_hash(torrent['InfoHash'])
|
||||
torrent_files = []
|
||||
|
||||
save_path = torrent['SavePath']
|
||||
|
||||
# The 'Path' key for each file_item contains
|
||||
# the full path to the single file relative to the
|
||||
# torrents save path.
|
||||
|
||||
# For a single file torrent the result would be,
|
||||
# - Save path: "C:\Downloads"
|
||||
# - file_item['Path'] = "file1.iso"
|
||||
# Resulting path: "C:\Downloads\file1.iso"
|
||||
|
||||
# For a multi file torrent the result would be,
|
||||
# - Save path: "C:\Downloads"
|
||||
# - file_item['Path'] = "dirname/file1.iso"
|
||||
# Resulting path: "C:\Downloads\dirname/file1.iso"
|
||||
|
||||
for file_item in torrent_filelist:
|
||||
torrent_files.append(sp(os.path.join(save_path, file_item['Path'])))
|
||||
|
||||
release_downloads.append({
|
||||
'id': torrent['InfoHash'].upper(),
|
||||
'name': torrent['Name'],
|
||||
'status': self.get_torrent_status(torrent),
|
||||
'seed_ratio': self.get_seed_ratio(torrent),
|
||||
'original_status': torrent['State'],
|
||||
'timeleft': -1,
|
||||
'folder': sp(save_path if len(torrent_files == 1) else os.path.join(save_path, torrent['Name'])),
|
||||
'files': torrent_files
|
||||
})
|
||||
|
||||
return release_downloads
|
||||
|
||||
def get_seed_ratio(self, torrent):
|
||||
""" Returns the seed ratio for a given torrent.
|
||||
|
||||
Keyword arguments:
|
||||
torrent -- The torrent to calculate seed ratio for.
|
||||
"""
|
||||
|
||||
up = torrent['TotalUploadedBytes']
|
||||
down = torrent['TotalDownloadedBytes']
|
||||
|
||||
if up > 0 and down > 0:
|
||||
return up / down
|
||||
|
||||
return 0
|
||||
|
||||
def get_torrent_status(self, torrent):
|
||||
""" Returns the CouchPotato status for a given torrent.
|
||||
|
||||
Keyword arguments:
|
||||
torrent -- The torrent to translate status for.
|
||||
"""
|
||||
|
||||
if torrent['IsSeeding'] and torrent['IsFinished'] and torrent['Paused']:
|
||||
return 'completed'
|
||||
|
||||
if torrent['IsSeeding']:
|
||||
return 'seeding'
|
||||
|
||||
return 'busy'
|
||||
|
||||
def pause(self, release_download, pause = True):
|
||||
""" Pauses or resumes the torrent specified by the ID field
|
||||
in release_download.
|
||||
|
||||
Keyword arguments:
|
||||
release_download -- The CouchPotato release_download to pause/resume.
|
||||
pause -- Boolean indicating whether to pause or resume.
|
||||
"""
|
||||
|
||||
if not self.connect():
|
||||
return False
|
||||
|
||||
return self.hadouken_api.pause(release_download['id'], pause)
|
||||
|
||||
def removeFailed(self, release_download):
|
||||
""" Removes a failed torrent and also remove the data associated with it.
|
||||
|
||||
Keyword arguments:
|
||||
release_download -- The CouchPotato release_download to remove.
|
||||
"""
|
||||
|
||||
log.info('%s failed downloading, deleting...', release_download['name'])
|
||||
|
||||
if not self.connect():
|
||||
return False
|
||||
|
||||
return self.hadouken_api.remove(release_download['id'], remove_data = True)
|
||||
|
||||
def processComplete(self, release_download, delete_files = False):
|
||||
""" Removes the completed torrent from Hadouken and optionally removes the data
|
||||
associated with it.
|
||||
|
||||
Keyword arguments:
|
||||
release_download -- The CouchPotato release_download to remove.
|
||||
delete_files: Boolean indicating whether to remove the associated data.
|
||||
"""
|
||||
|
||||
log.debug('Requesting Hadouken to remove the torrent %s%s.',
|
||||
(release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))
|
||||
|
||||
if not self.connect():
|
||||
return False
|
||||
|
||||
return self.hadouken_api.remove(release_download['id'], remove_data = delete_files)
|
||||
|
||||
|
||||
class HadoukenAPI(object):
    """ Minimal JSONRPC 2.0 client for the Hadouken torrent daemon.

    All public methods return the JSONRPC 'result' payload on success and
    False when the request failed (connection error, auth error or a
    JSONRPC-level error).
    """

    def __init__(self, host = 'localhost', port = 7890, api_key = None):
        self.url = 'http://' + str(host) + ':' + str(port)
        self.api_key = api_key
        # Incremented per call so every JSONRPC request gets a unique id.
        self.requestId = 0

        self.opener = urllib2.build_opener()
        self.opener.addheaders = [('User-agent', 'couchpotato-hadouken-client/1.0'), ('Accept', 'application/json')]

        if not api_key:
            log.error('API key missing.')

    def add_file(self, filedata, torrent_params):
        """ Add a file to Hadouken with the specified parameters.

        Keyword arguments:
        filedata -- The binary torrent data.
        torrent_params -- Additional parameters for the file.
        """
        data = {
            'method': 'torrents.addFile',
            # Torrent data must be base64-encoded for the JSON transport.
            'params': [b64encode(filedata), torrent_params]
        }

        return self._request(data)

    def add_magnet_link(self, magnetLink, torrent_params):
        """ Add a magnet link to Hadouken with the specified parameters.

        Keyword arguments:
        magnetLink -- The magnet link to send.
        torrent_params -- Additional parameters for the magnet link.
        """
        data = {
            'method': 'torrents.addUrl',
            'params': [magnetLink, torrent_params]
        }

        return self._request(data)

    def get_by_hash_list(self, infoHashList):
        """ Gets a list of torrents filtered by the given info hash list.

        Keyword arguments:
        infoHashList -- A list of info hashes.
        """
        data = {
            'method': 'torrents.getByInfoHashList',
            'params': [infoHashList]
        }

        return self._request(data)

    def get_files_by_hash(self, infoHash):
        """ Gets a list of files for the torrent identified by the
        given info hash.

        Keyword arguments:
        infoHash -- The info hash of the torrent to return files for.
        """
        data = {
            'method': 'torrents.getFiles',
            'params': [infoHash]
        }

        return self._request(data)

    def get_version(self):
        """ Gets the version of Hadouken, or False on failure. """
        data = {
            'method': 'core.getVersion',
            'params': None
        }

        result = self._request(data)

        if not result:
            return False

        return result['Version']

    def pause(self, infoHash, pause):
        """ Pauses/unpauses the torrent identified by the given info hash.

        Keyword arguments:
        infoHash -- The info hash of the torrent to operate on.
        pause -- If true, pauses the torrent. Otherwise resumes.
        """
        data = {
            'method': 'torrents.pause',
            'params': [infoHash]
        }

        if not pause:
            data['method'] = 'torrents.resume'

        return self._request(data)

    def remove(self, infoHash, remove_data = False):
        """ Removes the torrent identified by the given info hash and
        optionally removes the data as well.

        Keyword arguments:
        infoHash -- The info hash of the torrent to remove.
        remove_data -- If true, removes the data associated with the torrent.
        """
        data = {
            'method': 'torrents.remove',
            'params': [infoHash, remove_data]
        }

        return self._request(data)

    def _request(self, data):
        """ Send one JSONRPC request; returns the 'result' payload or False. """
        self.requestId += 1

        data['jsonrpc'] = '2.0'
        data['id'] = self.requestId

        request = urllib2.Request(self.url + '/jsonrpc', data = json.dumps(data))
        request.add_header('Authorization', 'Token ' + self.api_key)
        request.add_header('Content-Type', 'application/json')

        try:
            f = self.opener.open(request)
            response = f.read()
            f.close()

            obj = json.loads(response)

            if 'error' not in obj:
                return obj['result']

            log.error('JSONRPC error, %s: %s', obj['error']['code'], obj['error']['message'])
        except httplib.InvalidURL as err:
            log.error('Invalid Hadouken host, check your config %s', err)
        except urllib2.HTTPError as err:
            # 401 is the daemon rejecting our token; anything else is generic.
            if err.code == 401:
                log.error('Invalid Hadouken API key, check your config')
            else:
                log.error('Hadouken HTTPError: %s', err)
        except urllib2.URLError as err:
            log.error('Unable to connect to Hadouken %s', err)

        return False
|
||||
|
||||
|
||||
# Settings definition rendered on the CouchPotato "Downloaders" settings tab.
config = [{
    'name': 'hadouken',
    'groups': [
        {
            'tab': 'downloaders',
            'list': 'download_providers',
            'name': 'hadouken',
            'label': 'Hadouken',
            'description': 'Use <a href="http://www.hdkn.net">Hadouken</a> (>= v4.5.6) to download torrents.',
            'wizard': True,
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                    'radio_group': 'torrent'
                },
                {
                    'name': 'host',
                    'default': 'localhost:7890'
                },
                {
                    'name': 'api_key',
                    'label': 'API key',
                    'type': 'password'
                },
                {
                    'name': 'label',
                    'description': 'Label to add torrent as.'
                }
            ]
        }
    ]
}]
|
||||
@@ -23,6 +23,20 @@ class NZBGet(DownloaderBase):
|
||||
rpc = 'xmlrpc'
|
||||
|
||||
def download(self, data = None, media = None, filedata = None):
|
||||
""" Send a torrent/nzb file to the downloader
|
||||
|
||||
:param data: dict returned from provider
|
||||
Contains the release information
|
||||
:param media: media dict with information
|
||||
Used for creating the filename when possible
|
||||
:param filedata: downloaded torrent/nzb filedata
|
||||
The file gets downloaded in the searcher and send to this function
|
||||
This is done to have failed checking before using the downloader, so the downloader
|
||||
doesn't need to worry about that
|
||||
:return: boolean
|
||||
One failure returns false, but the downloader should log its own errors
|
||||
"""
|
||||
|
||||
if not media: media = {}
|
||||
if not data: data = {}
|
||||
|
||||
@@ -71,6 +85,10 @@ class NZBGet(DownloaderBase):
|
||||
return False
|
||||
|
||||
def test(self):
|
||||
""" Check if connection works
|
||||
:return: bool
|
||||
"""
|
||||
|
||||
rpc = self.getRPC()
|
||||
|
||||
try:
|
||||
@@ -91,6 +109,13 @@ class NZBGet(DownloaderBase):
|
||||
return True
|
||||
|
||||
def getAllDownloadStatus(self, ids):
|
||||
""" Get status of all active downloads
|
||||
|
||||
:param ids: list of (mixed) downloader ids
|
||||
Used to match the releases for this downloader as there could be
|
||||
other downloaders active that it should ignore
|
||||
:return: list of releases
|
||||
"""
|
||||
|
||||
log.debug('Checking NZBGet download status.')
|
||||
|
||||
@@ -163,12 +188,12 @@ class NZBGet(DownloaderBase):
|
||||
nzb_id = nzb['NZBID']
|
||||
|
||||
if nzb_id in ids:
|
||||
log.debug('Found %s in NZBGet history. ParStatus: %s, ScriptStatus: %s, Log: %s', (nzb['NZBFilename'] , nzb['ParStatus'], nzb['ScriptStatus'] , nzb['Log']))
|
||||
log.debug('Found %s in NZBGet history. TotalStatus: %s, ParStatus: %s, ScriptStatus: %s, Log: %s', (nzb['NZBFilename'] , nzb['Status'], nzb['ParStatus'], nzb['ScriptStatus'] , nzb['Log']))
|
||||
release_downloads.append({
|
||||
'id': nzb_id,
|
||||
'name': nzb['NZBFilename'],
|
||||
'status': 'completed' if nzb['ParStatus'] in ['SUCCESS', 'NONE'] and nzb['ScriptStatus'] in ['SUCCESS', 'NONE'] else 'failed',
|
||||
'original_status': nzb['ParStatus'] + ', ' + nzb['ScriptStatus'],
|
||||
'status': 'completed' if 'SUCCESS' in nzb['Status'] else 'failed',
|
||||
'original_status': nzb['Status'],
|
||||
'timeleft': str(timedelta(seconds = 0)),
|
||||
'folder': sp(nzb['DestDir'])
|
||||
})
|
||||
|
||||
@@ -1,16 +1,10 @@
|
||||
from base64 import b64encode
|
||||
from urllib2 import URLError
|
||||
import os
|
||||
from uuid import uuid4
|
||||
import hashlib
|
||||
import httplib
|
||||
import json
|
||||
import os
|
||||
import socket
|
||||
import ssl
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
import urllib2
|
||||
|
||||
from requests import HTTPError
|
||||
|
||||
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
|
||||
from couchpotato.core.helpers.encoding import tryUrlencode, sp
|
||||
@@ -30,23 +24,45 @@ class NZBVortex(DownloaderBase):
|
||||
session_id = None
|
||||
|
||||
def download(self, data = None, media = None, filedata = None):
|
||||
""" Send a torrent/nzb file to the downloader
|
||||
|
||||
:param data: dict returned from provider
|
||||
Contains the release information
|
||||
:param media: media dict with information
|
||||
Used for creating the filename when possible
|
||||
:param filedata: downloaded torrent/nzb filedata
|
||||
The file gets downloaded in the searcher and send to this function
|
||||
This is done to have failed checking before using the downloader, so the downloader
|
||||
doesn't need to worry about that
|
||||
:return: boolean
|
||||
One faile returns false, but the downloaded should log his own errors
|
||||
"""
|
||||
|
||||
if not media: media = {}
|
||||
if not data: data = {}
|
||||
|
||||
# Send the nzb
|
||||
try:
|
||||
nzb_filename = self.createFileName(data, filedata, media)
|
||||
self.call('nzb/add', files = {'file': (nzb_filename, filedata)})
|
||||
nzb_filename = self.createFileName(data, filedata, media, unique_tag = True)
|
||||
response = self.call('nzb/add', files = {'file': (nzb_filename, filedata, 'application/octet-stream')}, parameters = {
|
||||
'name': nzb_filename,
|
||||
'groupname': self.conf('group')
|
||||
})
|
||||
|
||||
time.sleep(10)
|
||||
raw_statuses = self.call('nzb')
|
||||
nzb_id = [nzb['id'] for nzb in raw_statuses.get('nzbs', []) if os.path.basename(nzb['nzbFileName']) == nzb_filename][0]
|
||||
return self.downloadReturnId(nzb_id)
|
||||
if response and response.get('result', '').lower() == 'ok':
|
||||
return self.downloadReturnId(nzb_filename)
|
||||
|
||||
log.error('Something went wrong sending the NZB file. Response: %s', response)
|
||||
return False
|
||||
except:
|
||||
log.error('Something went wrong sending the NZB file: %s', traceback.format_exc())
|
||||
return False
|
||||
|
||||
def test(self):
|
||||
""" Check if connection works
|
||||
:return: bool
|
||||
"""
|
||||
|
||||
try:
|
||||
login_result = self.login()
|
||||
except:
|
||||
@@ -55,12 +71,20 @@ class NZBVortex(DownloaderBase):
|
||||
return login_result
|
||||
|
||||
def getAllDownloadStatus(self, ids):
|
||||
""" Get status of all active downloads
|
||||
|
||||
:param ids: list of (mixed) downloader ids
|
||||
Used to match the releases for this downloader as there could be
|
||||
other downloaders active that it should ignore
|
||||
:return: list of releases
|
||||
"""
|
||||
|
||||
raw_statuses = self.call('nzb')
|
||||
|
||||
release_downloads = ReleaseDownloadList(self)
|
||||
for nzb in raw_statuses.get('nzbs', []):
|
||||
if nzb['id'] in ids:
|
||||
nzb_id = os.path.basename(nzb['nzbFileName'])
|
||||
if nzb_id in ids:
|
||||
|
||||
# Check status
|
||||
status = 'busy'
|
||||
@@ -70,7 +94,8 @@ class NZBVortex(DownloaderBase):
|
||||
status = 'failed'
|
||||
|
||||
release_downloads.append({
|
||||
'id': nzb['id'],
|
||||
'temp_id': nzb['id'],
|
||||
'id': nzb_id,
|
||||
'name': nzb['uiTitle'],
|
||||
'status': status,
|
||||
'original_status': nzb['state'],
|
||||
@@ -85,7 +110,7 @@ class NZBVortex(DownloaderBase):
|
||||
log.info('%s failed downloading, deleting...', release_download['name'])
|
||||
|
||||
try:
|
||||
self.call('nzb/%s/cancel' % release_download['id'])
|
||||
self.call('nzb/%s/cancel' % release_download['temp_id'])
|
||||
except:
|
||||
log.error('Failed deleting: %s', traceback.format_exc(0))
|
||||
return False
|
||||
@@ -114,7 +139,7 @@ class NZBVortex(DownloaderBase):
|
||||
log.error('Login failed, please check you api-key')
|
||||
return False
|
||||
|
||||
def call(self, call, parameters = None, repeat = False, auth = True, *args, **kwargs):
|
||||
def call(self, call, parameters = None, is_repeat = False, auth = True, *args, **kwargs):
|
||||
|
||||
# Login first
|
||||
if not parameters: parameters = {}
|
||||
@@ -127,19 +152,20 @@ class NZBVortex(DownloaderBase):
|
||||
|
||||
params = tryUrlencode(parameters)
|
||||
|
||||
url = cleanHost(self.conf('host'), ssl = self.conf('ssl')) + 'api/' + call
|
||||
url = cleanHost(self.conf('host')) + 'api/' + call
|
||||
|
||||
try:
|
||||
data = self.urlopen('%s?%s' % (url, params), *args, **kwargs)
|
||||
data = self.getJsonData('%s%s' % (url, '?' + params if params else ''), *args, cache_timeout = 0, show_error = False, **kwargs)
|
||||
|
||||
if data:
|
||||
return json.loads(data)
|
||||
except URLError as e:
|
||||
if hasattr(e, 'code') and e.code == 403:
|
||||
return data
|
||||
except HTTPError as e:
|
||||
sc = e.response.status_code
|
||||
if sc == 403:
|
||||
# Try login and do again
|
||||
if not repeat:
|
||||
if not is_repeat:
|
||||
self.login()
|
||||
return self.call(call, parameters = parameters, repeat = True, **kwargs)
|
||||
return self.call(call, parameters = parameters, is_repeat = True, **kwargs)
|
||||
|
||||
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
|
||||
except:
|
||||
@@ -151,13 +177,12 @@ class NZBVortex(DownloaderBase):
|
||||
|
||||
if not self.api_level:
|
||||
|
||||
url = cleanHost(self.conf('host')) + 'api/app/apilevel'
|
||||
|
||||
try:
|
||||
data = self.urlopen(url, show_error = False)
|
||||
self.api_level = float(json.loads(data).get('apilevel'))
|
||||
except URLError as e:
|
||||
if hasattr(e, 'code') and e.code == 403:
|
||||
data = self.call('app/apilevel', auth = False)
|
||||
self.api_level = float(data.get('apilevel'))
|
||||
except HTTPError as e:
|
||||
sc = e.response.status_code
|
||||
if sc == 403:
|
||||
log.error('This version of NZBVortex isn\'t supported. Please update to 2.8.6 or higher')
|
||||
else:
|
||||
log.error('NZBVortex doesn\'t seem to be running or maybe the remote option isn\'t enabled yet: %s', traceback.format_exc(1))
|
||||
@@ -169,29 +194,6 @@ class NZBVortex(DownloaderBase):
|
||||
return super(NZBVortex, self).isEnabled(manual, data) and self.getApiLevel()
|
||||
|
||||
|
||||
class HTTPSConnection(httplib.HTTPSConnection):
|
||||
def __init__(self, *args, **kwargs):
|
||||
httplib.HTTPSConnection.__init__(self, *args, **kwargs)
|
||||
|
||||
def connect(self):
|
||||
sock = socket.create_connection((self.host, self.port), self.timeout)
|
||||
if sys.version_info < (2, 6, 7):
|
||||
if hasattr(self, '_tunnel_host'):
|
||||
self.sock = sock
|
||||
self._tunnel()
|
||||
else:
|
||||
if self._tunnel_host:
|
||||
self.sock = sock
|
||||
self._tunnel()
|
||||
|
||||
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version = ssl.PROTOCOL_TLSv1)
|
||||
|
||||
|
||||
class HTTPSHandler(urllib2.HTTPSHandler):
|
||||
def https_open(self, req):
|
||||
return self.do_open(HTTPSConnection, req)
|
||||
|
||||
|
||||
config = [{
|
||||
'name': 'nzbvortex',
|
||||
'groups': [
|
||||
@@ -211,20 +213,18 @@ config = [{
|
||||
},
|
||||
{
|
||||
'name': 'host',
|
||||
'default': 'localhost:4321',
|
||||
'description': 'Hostname with port. Usually <strong>localhost:4321</strong>',
|
||||
},
|
||||
{
|
||||
'name': 'ssl',
|
||||
'default': 1,
|
||||
'type': 'bool',
|
||||
'advanced': True,
|
||||
'description': 'Use HyperText Transfer Protocol Secure, or <strong>https</strong>',
|
||||
'default': 'https://localhost:4321',
|
||||
'description': 'Hostname with port. Usually <strong>https://localhost:4321</strong>',
|
||||
},
|
||||
{
|
||||
'name': 'api_key',
|
||||
'label': 'Api Key',
|
||||
},
|
||||
{
|
||||
'name': 'group',
|
||||
'label': 'Group',
|
||||
'description': 'The group CP places the nzb in. Make sure to create it in NZBVortex.',
|
||||
},
|
||||
{
|
||||
'name': 'manual',
|
||||
'default': False,
|
||||
|
||||
@@ -19,6 +19,20 @@ class Pneumatic(DownloaderBase):
|
||||
status_support = False
|
||||
|
||||
def download(self, data = None, media = None, filedata = None):
|
||||
""" Send a torrent/nzb file to the downloader
|
||||
|
||||
:param data: dict returned from provider
|
||||
Contains the release information
|
||||
:param media: media dict with information
|
||||
Used for creating the filename when possible
|
||||
:param filedata: downloaded torrent/nzb filedata
|
||||
The file gets downloaded in the searcher and send to this function
|
||||
This is done to have failed checking before using the downloader, so the downloader
|
||||
doesn't need to worry about that
|
||||
:return: boolean
|
||||
One faile returns false, but the downloaded should log his own errors
|
||||
"""
|
||||
|
||||
if not media: media = {}
|
||||
if not data: data = {}
|
||||
|
||||
@@ -63,6 +77,10 @@ class Pneumatic(DownloaderBase):
|
||||
return False
|
||||
|
||||
def test(self):
|
||||
""" Check if connection works
|
||||
:return: bool
|
||||
"""
|
||||
|
||||
directory = self.conf('directory')
|
||||
if directory and os.path.isdir(directory):
|
||||
|
||||
|
||||
68
couchpotato/core/downloaders/putio/__init__.py
Normal file
68
couchpotato/core/downloaders/putio/__init__.py
Normal file
@@ -0,0 +1,68 @@
|
||||
from .main import PutIO
|
||||
|
||||
|
||||
def autoload():
|
||||
return PutIO()
|
||||
|
||||
|
||||
config = [{
|
||||
'name': 'putio',
|
||||
'groups': [
|
||||
{
|
||||
'tab': 'downloaders',
|
||||
'list': 'download_providers',
|
||||
'name': 'putio',
|
||||
'label': 'put.io',
|
||||
'description': 'This will start a torrent download on <a href="http://put.io">Put.io</a>.',
|
||||
'wizard': True,
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
'default': 0,
|
||||
'type': 'enabler',
|
||||
'radio_group': 'torrent',
|
||||
},
|
||||
{
|
||||
'name': 'oauth_token',
|
||||
'label': 'oauth_token',
|
||||
'description': 'This is the OAUTH_TOKEN from your putio API',
|
||||
'advanced': True,
|
||||
},
|
||||
{
|
||||
'name': 'folder',
|
||||
'description': ('The folder on putio where you want the upload to go','Will find the first folder that matches this name'),
|
||||
'default': 0,
|
||||
},
|
||||
{
|
||||
'name': 'callback_host',
|
||||
'description': 'External reachable url to CP so put.io can do it\'s thing',
|
||||
},
|
||||
{
|
||||
'name': 'download',
|
||||
'description': 'Set this to have CouchPotato download the file from Put.io',
|
||||
'type': 'bool',
|
||||
'default': 0,
|
||||
},
|
||||
{
|
||||
'name': 'delete_file',
|
||||
'description': ('Set this to remove the file from putio after sucessful download','Does nothing if you don\'t select download'),
|
||||
'type': 'bool',
|
||||
'default': 0,
|
||||
},
|
||||
{
|
||||
'name': 'download_dir',
|
||||
'type': 'directory',
|
||||
'label': 'Download Directory',
|
||||
'description': 'The Directory to download files to, does nothing if you don\'t select download',
|
||||
},
|
||||
{
|
||||
'name': 'manual',
|
||||
'default': 0,
|
||||
'type': 'bool',
|
||||
'advanced': True,
|
||||
'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
|
||||
},
|
||||
],
|
||||
}
|
||||
],
|
||||
}]
|
||||
181
couchpotato/core/downloaders/putio/main.py
Normal file
181
couchpotato/core/downloaders/putio/main.py
Normal file
@@ -0,0 +1,181 @@
|
||||
from couchpotato.api import addApiView
|
||||
from couchpotato.core.event import addEvent, fireEventAsync
|
||||
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
|
||||
from couchpotato.core.helpers.variable import cleanHost
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.environment import Env
|
||||
from pio import api as pio
|
||||
import datetime
|
||||
|
||||
log = CPLog(__name__)
|
||||
|
||||
autoload = 'Putiodownload'
|
||||
|
||||
|
||||
class PutIO(DownloaderBase):
|
||||
|
||||
protocol = ['torrent', 'torrent_magnet']
|
||||
downloading_list = []
|
||||
oauth_authenticate = 'https://api.couchpota.to/authorize/putio/'
|
||||
|
||||
def __init__(self):
|
||||
addApiView('downloader.putio.getfrom', self.getFromPutio, docs = {
|
||||
'desc': 'Allows you to download file from prom Put.io',
|
||||
})
|
||||
addApiView('downloader.putio.auth_url', self.getAuthorizationUrl)
|
||||
addApiView('downloader.putio.credentials', self.getCredentials)
|
||||
addEvent('putio.download', self.putioDownloader)
|
||||
|
||||
return super(PutIO, self).__init__()
|
||||
|
||||
# This is a recusive function to check for the folders
|
||||
def recursionFolder(self, client, folder = 0, tfolder = ''):
|
||||
files = client.File.list(folder)
|
||||
for f in files:
|
||||
if f.content_type == 'application/x-directory':
|
||||
if f.name == tfolder:
|
||||
return f.id
|
||||
else:
|
||||
result = self.recursionFolder(client, f.id, tfolder)
|
||||
if result != 0:
|
||||
return result
|
||||
return 0
|
||||
|
||||
# This will check the root for the folder, and kick of recusively checking sub folder
|
||||
def convertFolder(self, client, folder):
|
||||
if folder == 0:
|
||||
return 0
|
||||
else:
|
||||
return self.recursionFolder(client, 0, folder)
|
||||
|
||||
def download(self, data = None, media = None, filedata = None):
|
||||
if not media: media = {}
|
||||
if not data: data = {}
|
||||
|
||||
log.info('Sending "%s" to put.io', data.get('name'))
|
||||
url = data.get('url')
|
||||
client = pio.Client(self.conf('oauth_token'))
|
||||
putioFolder = self.convertFolder(client, self.conf('folder'))
|
||||
log.debug('putioFolder ID is %s', putioFolder)
|
||||
# It might be possible to call getFromPutio from the renamer if we can then we don't need to do this.
|
||||
# Note callback_host is NOT our address, it's the internet host that putio can call too
|
||||
callbackurl = None
|
||||
if self.conf('download'):
|
||||
callbackurl = 'http://' + self.conf('callback_host') + '%sdownloader.putio.getfrom/' %Env.get('api_base'.strip('/'))
|
||||
resp = client.Transfer.add_url(url, callback_url = callbackurl, parent_id = putioFolder)
|
||||
log.debug('resp is %s', resp.id);
|
||||
return self.downloadReturnId(resp.id)
|
||||
|
||||
def test(self):
|
||||
try:
|
||||
client = pio.Client(self.conf('oauth_token'))
|
||||
if client.File.list():
|
||||
return True
|
||||
except:
|
||||
log.info('Failed to get file listing, check OAUTH_TOKEN')
|
||||
return False
|
||||
|
||||
def getAuthorizationUrl(self, host = None, **kwargs):
|
||||
|
||||
callback_url = cleanHost(host) + '%sdownloader.putio.credentials/' % (Env.get('api_base').lstrip('/'))
|
||||
log.debug('callback_url is %s', callback_url)
|
||||
|
||||
target_url = self.oauth_authenticate + "?target=" + callback_url
|
||||
log.debug('target_url is %s', target_url)
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
'url': target_url,
|
||||
}
|
||||
|
||||
def getCredentials(self, **kwargs):
|
||||
try:
|
||||
oauth_token = kwargs.get('oauth')
|
||||
except:
|
||||
return 'redirect', Env.get('web_base') + 'settings/downloaders/'
|
||||
log.debug('oauth_token is: %s', oauth_token)
|
||||
self.conf('oauth_token', value = oauth_token);
|
||||
return 'redirect', Env.get('web_base') + 'settings/downloaders/'
|
||||
|
||||
def getAllDownloadStatus(self, ids):
|
||||
|
||||
log.debug('Checking putio download status.')
|
||||
client = pio.Client(self.conf('oauth_token'))
|
||||
|
||||
transfers = client.Transfer.list()
|
||||
|
||||
log.debug(transfers);
|
||||
release_downloads = ReleaseDownloadList(self)
|
||||
for t in transfers:
|
||||
if t.id in ids:
|
||||
|
||||
log.debug('downloading list is %s', self.downloading_list)
|
||||
if t.status == "COMPLETED" and self.conf('download') == False :
|
||||
status = 'completed'
|
||||
|
||||
# So check if we are trying to download something
|
||||
elif t.status == "COMPLETED" and self.conf('download') == True:
|
||||
# Assume we are done
|
||||
status = 'completed'
|
||||
if not self.downloading_list:
|
||||
now = datetime.datetime.utcnow()
|
||||
date_time = datetime.datetime.strptime(t.finished_at,"%Y-%m-%dT%H:%M:%S")
|
||||
# We need to make sure a race condition didn't happen
|
||||
if (now - date_time) < datetime.timedelta(minutes=5):
|
||||
# 5 minutes haven't passed so we wait
|
||||
status = 'busy'
|
||||
else:
|
||||
# If we have the file_id in the downloading_list mark it as busy
|
||||
if str(t.file_id) in self.downloading_list:
|
||||
status = 'busy'
|
||||
else:
|
||||
status = 'busy'
|
||||
release_downloads.append({
|
||||
'id' : t.id,
|
||||
'name': t.name,
|
||||
'status': status,
|
||||
'timeleft': t.estimated_time,
|
||||
})
|
||||
|
||||
return release_downloads
|
||||
|
||||
def putioDownloader(self, fid):
|
||||
|
||||
log.info('Put.io Real downloader called with file_id: %s',fid)
|
||||
client = pio.Client(self.conf('oauth_token'))
|
||||
|
||||
log.debug('About to get file List')
|
||||
putioFolder = self.convertFolder(client, self.conf('folder'))
|
||||
log.debug('PutioFolderID is %s', putioFolder)
|
||||
files = client.File.list(parent_id=putioFolder)
|
||||
downloaddir = self.conf('download_dir')
|
||||
|
||||
for f in files:
|
||||
if str(f.id) == str(fid):
|
||||
client.File.download(f, dest = downloaddir, delete_after_download = self.conf('delete_file'))
|
||||
# Once the download is complete we need to remove it from the running list.
|
||||
self.downloading_list.remove(fid)
|
||||
|
||||
return True
|
||||
|
||||
def getFromPutio(self, **kwargs):
|
||||
|
||||
try:
|
||||
file_id = str(kwargs.get('file_id'))
|
||||
except:
|
||||
return {
|
||||
'success' : False,
|
||||
}
|
||||
|
||||
log.info('Put.io Download has been called file_id is %s', file_id)
|
||||
if file_id not in self.downloading_list:
|
||||
self.downloading_list.append(file_id)
|
||||
fireEventAsync('putio.download',fid = file_id)
|
||||
return {
|
||||
'success': True,
|
||||
}
|
||||
|
||||
return {
|
||||
'success': False,
|
||||
}
|
||||
|
||||
68
couchpotato/core/downloaders/putio/static/putio.js
Normal file
68
couchpotato/core/downloaders/putio/static/putio.js
Normal file
@@ -0,0 +1,68 @@
|
||||
var PutIODownloader = new Class({
|
||||
|
||||
initialize: function(){
|
||||
var self = this;
|
||||
|
||||
App.addEvent('loadSettings', self.addRegisterButton.bind(self));
|
||||
},
|
||||
|
||||
addRegisterButton: function(){
|
||||
var self = this;
|
||||
|
||||
var setting_page = App.getPage('Settings');
|
||||
setting_page.addEvent('create', function(){
|
||||
|
||||
var fieldset = setting_page.tabs.downloaders.groups.putio,
|
||||
l = window.location;
|
||||
|
||||
var putio_set = 0;
|
||||
fieldset.getElements('input[type=text]').each(function(el){
|
||||
putio_set += +(el.get('value') !== '');
|
||||
});
|
||||
|
||||
new Element('.ctrlHolder').adopt(
|
||||
|
||||
// Unregister button
|
||||
(putio_set > 0) ?
|
||||
[
|
||||
self.unregister = new Element('a.button.red', {
|
||||
'text': 'Unregister "'+fieldset.getElement('input[name*=oauth_token]').get('value')+'"',
|
||||
'events': {
|
||||
'click': function(){
|
||||
fieldset.getElements('input[name*=oauth_token]').set('value', '').fireEvent('change');
|
||||
|
||||
self.unregister.destroy();
|
||||
self.unregister_or.destroy();
|
||||
}
|
||||
}
|
||||
}),
|
||||
self.unregister_or = new Element('span[text=or]')
|
||||
]
|
||||
: null,
|
||||
|
||||
// Register button
|
||||
new Element('a.button', {
|
||||
'text': putio_set > 0 ? 'Register a different account' : 'Register your put.io account',
|
||||
'events': {
|
||||
'click': function(){
|
||||
Api.request('downloader.putio.auth_url', {
|
||||
'data': {
|
||||
'host': l.protocol + '//' + l.hostname + (l.port ? ':' + l.port : '')
|
||||
},
|
||||
'onComplete': function(json){
|
||||
window.location = json.url;
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
})
|
||||
).inject(fieldset.getElement('.test_button'), 'before');
|
||||
});
|
||||
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
window.addEvent('domready', function(){
|
||||
new PutIODownloader();
|
||||
});
|
||||
@@ -41,12 +41,30 @@ class qBittorrent(DownloaderBase):
|
||||
return self.qb
|
||||
|
||||
def test(self):
|
||||
""" Check if connection works
|
||||
:return: bool
|
||||
"""
|
||||
|
||||
if self.connect():
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def download(self, data = None, media = None, filedata = None):
|
||||
""" Send a torrent/nzb file to the downloader
|
||||
|
||||
:param data: dict returned from provider
|
||||
Contains the release information
|
||||
:param media: media dict with information
|
||||
Used for creating the filename when possible
|
||||
:param filedata: downloaded torrent/nzb filedata
|
||||
The file gets downloaded in the searcher and send to this function
|
||||
This is done to have failed checking before using the downloader, so the downloader
|
||||
doesn't need to worry about that
|
||||
:return: boolean
|
||||
One faile returns false, but the downloaded should log his own errors
|
||||
"""
|
||||
|
||||
if not media: media = {}
|
||||
if not data: data = {}
|
||||
|
||||
@@ -95,6 +113,14 @@ class qBittorrent(DownloaderBase):
|
||||
return 'busy'
|
||||
|
||||
def getAllDownloadStatus(self, ids):
|
||||
""" Get status of all active downloads
|
||||
|
||||
:param ids: list of (mixed) downloader ids
|
||||
Used to match the releases for this downloader as there could be
|
||||
other downloaders active that it should ignore
|
||||
:return: list of releases
|
||||
"""
|
||||
|
||||
log.debug('Checking qBittorrent download status.')
|
||||
|
||||
if not self.connect():
|
||||
|
||||
@@ -5,14 +5,12 @@ from urlparse import urlparse
|
||||
import os
|
||||
|
||||
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
|
||||
|
||||
from couchpotato.core.event import addEvent
|
||||
from couchpotato.core.helpers.encoding import sp
|
||||
from couchpotato.core.helpers.variable import cleanHost, splitString
|
||||
from couchpotato.core.logger import CPLog
|
||||
from bencode import bencode, bdecode
|
||||
from rtorrent import RTorrent
|
||||
from scandir import scandir
|
||||
|
||||
|
||||
log = CPLog(__name__)
|
||||
@@ -86,6 +84,10 @@ class rTorrent(DownloaderBase):
|
||||
return self.rt
|
||||
|
||||
def test(self):
|
||||
""" Check if connection works
|
||||
:return: bool
|
||||
"""
|
||||
|
||||
if self.connect(True):
|
||||
return True
|
||||
|
||||
@@ -96,6 +98,20 @@ class rTorrent(DownloaderBase):
|
||||
|
||||
|
||||
def download(self, data = None, media = None, filedata = None):
|
||||
""" Send a torrent/nzb file to the downloader
|
||||
|
||||
:param data: dict returned from provider
|
||||
Contains the release information
|
||||
:param media: media dict with information
|
||||
Used for creating the filename when possible
|
||||
:param filedata: downloaded torrent/nzb filedata
|
||||
The file gets downloaded in the searcher and send to this function
|
||||
This is done to have failed checking before using the downloader, so the downloader
|
||||
doesn't need to worry about that
|
||||
:return: boolean
|
||||
One faile returns false, but the downloaded should log his own errors
|
||||
"""
|
||||
|
||||
if not media: media = {}
|
||||
if not data: data = {}
|
||||
|
||||
@@ -154,21 +170,23 @@ class rTorrent(DownloaderBase):
|
||||
return False
|
||||
|
||||
def getTorrentStatus(self, torrent):
|
||||
if torrent.hashing or torrent.hash_checking or torrent.message:
|
||||
return 'busy'
|
||||
|
||||
if not torrent.complete:
|
||||
return 'busy'
|
||||
|
||||
if not torrent.open:
|
||||
return 'completed'
|
||||
|
||||
if torrent.state and torrent.active:
|
||||
if torrent.open:
|
||||
return 'seeding'
|
||||
|
||||
return 'busy'
|
||||
return 'completed'
|
||||
|
||||
def getAllDownloadStatus(self, ids):
|
||||
""" Get status of all active downloads
|
||||
|
||||
:param ids: list of (mixed) downloader ids
|
||||
Used to match the releases for this downloader as there could be
|
||||
other downloaders active that it should ignore
|
||||
:return: list of releases
|
||||
"""
|
||||
|
||||
log.debug('Checking rTorrent download status.')
|
||||
|
||||
if not self.connect():
|
||||
@@ -244,7 +262,7 @@ class rTorrent(DownloaderBase):
|
||||
if torrent.is_multi_file() and torrent.directory.endswith(torrent.name):
|
||||
# Remove empty directories bottom up
|
||||
try:
|
||||
for path, _, _ in scandir.walk(torrent.directory, topdown = False):
|
||||
for path, _, _ in os.walk(sp(torrent.directory), topdown = False):
|
||||
os.rmdir(path)
|
||||
except OSError:
|
||||
log.info('Directory "%s" contains extra files, unable to remove', torrent.directory)
|
||||
|
||||
@@ -21,6 +21,21 @@ class Sabnzbd(DownloaderBase):
|
||||
protocol = ['nzb']
|
||||
|
||||
def download(self, data = None, media = None, filedata = None):
|
||||
"""
|
||||
Send a torrent/nzb file to the downloader
|
||||
|
||||
:param data: dict returned from provider
|
||||
Contains the release information
|
||||
:param media: media dict with information
|
||||
Used for creating the filename when possible
|
||||
:param filedata: downloaded torrent/nzb filedata
|
||||
The file gets downloaded in the searcher and send to this function
|
||||
This is done to have failed checking before using the downloader, so the downloader
|
||||
doesn't need to worry about that
|
||||
:return: boolean
|
||||
One faile returns false, but the downloaded should log his own errors
|
||||
"""
|
||||
|
||||
if not media: media = {}
|
||||
if not data: data = {}
|
||||
|
||||
@@ -69,6 +84,11 @@ class Sabnzbd(DownloaderBase):
|
||||
return False
|
||||
|
||||
def test(self):
|
||||
""" Check if connection works
|
||||
Return message if an old version of SAB is used
|
||||
:return: bool
|
||||
"""
|
||||
|
||||
try:
|
||||
sab_data = self.call({
|
||||
'mode': 'version',
|
||||
@@ -89,6 +109,13 @@ class Sabnzbd(DownloaderBase):
|
||||
return True
|
||||
|
||||
def getAllDownloadStatus(self, ids):
|
||||
""" Get status of all active downloads
|
||||
|
||||
:param ids: list of (mixed) downloader ids
|
||||
Used to match the releases for this downloader as there could be
|
||||
other downloaders active that it should ignore
|
||||
:return: list of releases
|
||||
"""
|
||||
|
||||
log.debug('Checking SABnzbd download status.')
|
||||
|
||||
|
||||
@@ -19,6 +19,21 @@ class Synology(DownloaderBase):
|
||||
status_support = False
|
||||
|
||||
def download(self, data = None, media = None, filedata = None):
|
||||
"""
|
||||
Send a torrent/nzb file to the downloader
|
||||
|
||||
:param data: dict returned from provider
|
||||
Contains the release information
|
||||
:param media: media dict with information
|
||||
Used for creating the filename when possible
|
||||
:param filedata: downloaded torrent/nzb filedata
|
||||
The file gets downloaded in the searcher and send to this function
|
||||
This is done to have failed checking before using the downloader, so the downloader
|
||||
doesn't need to worry about that
|
||||
:return: boolean
|
||||
One faile returns false, but the downloaded should log his own errors
|
||||
"""
|
||||
|
||||
if not media: media = {}
|
||||
if not data: data = {}
|
||||
|
||||
@@ -50,6 +65,10 @@ class Synology(DownloaderBase):
|
||||
return self.downloadReturnId('') if response else False
|
||||
|
||||
def test(self):
|
||||
""" Check if connection works
|
||||
:return: bool
|
||||
"""
|
||||
|
||||
host = cleanHost(self.conf('host'), protocol = False).split(':')
|
||||
try:
|
||||
srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password'))
|
||||
@@ -90,6 +109,7 @@ class SynologyRPC(object):
|
||||
|
||||
self.download_url = 'http://%s:%s/webapi/DownloadStation/task.cgi' % (host, port)
|
||||
self.auth_url = 'http://%s:%s/webapi/auth.cgi' % (host, port)
|
||||
self.sid = None
|
||||
self.username = username
|
||||
self.password = password
|
||||
self.destination = destination
|
||||
@@ -117,7 +137,7 @@ class SynologyRPC(object):
|
||||
def _req(self, url, args, files = None):
|
||||
response = {'success': False}
|
||||
try:
|
||||
req = requests.post(url, data = args, files = files)
|
||||
req = requests.post(url, data = args, files = files, verify = False)
|
||||
req.raise_for_status()
|
||||
response = json.loads(req.text)
|
||||
if response['success']:
|
||||
|
||||
@@ -23,19 +23,32 @@ class Transmission(DownloaderBase):
|
||||
log = CPLog(__name__)
|
||||
trpc = None
|
||||
|
||||
def connect(self, reconnect = False):
|
||||
def connect(self):
|
||||
# Load host from config and split out port.
|
||||
host = cleanHost(self.conf('host'), protocol = False).split(':')
|
||||
host = cleanHost(self.conf('host')).rstrip('/').rsplit(':', 1)
|
||||
if not isInt(host[1]):
|
||||
log.error('Config properties are not filled in correctly, port is missing.')
|
||||
return False
|
||||
|
||||
if not self.trpc or reconnect:
|
||||
self.trpc = TransmissionRPC(host[0], port = host[1], rpc_url = self.conf('rpc_url').strip('/ '), username = self.conf('username'), password = self.conf('password'))
|
||||
|
||||
self.trpc = TransmissionRPC(host[0], port = host[1], rpc_url = self.conf('rpc_url').strip('/ '), username = self.conf('username'), password = self.conf('password'))
|
||||
return self.trpc
|
||||
|
||||
def download(self, data = None, media = None, filedata = None):
|
||||
"""
|
||||
Send a torrent/nzb file to the downloader
|
||||
|
||||
:param data: dict returned from provider
|
||||
Contains the release information
|
||||
:param media: media dict with information
|
||||
Used for creating the filename when possible
|
||||
:param filedata: downloaded torrent/nzb filedata
|
||||
The file gets downloaded in the searcher and send to this function
|
||||
This is done to have failed checking before using the downloader, so the downloader
|
||||
doesn't need to worry about that
|
||||
:return: boolean
|
||||
One faile returns false, but the downloaded should log his own errors
|
||||
"""
|
||||
|
||||
if not media: media = {}
|
||||
if not data: data = {}
|
||||
|
||||
@@ -80,19 +93,32 @@ class Transmission(DownloaderBase):
|
||||
log.error('Failed sending torrent to Transmission')
|
||||
return False
|
||||
|
||||
data = remote_torrent.get('torrent-added') or remote_torrent.get('torrent-duplicate')
|
||||
|
||||
# Change settings of added torrents
|
||||
if torrent_params:
|
||||
self.trpc.set_torrent(remote_torrent['torrent-added']['hashString'], torrent_params)
|
||||
self.trpc.set_torrent(data['hashString'], torrent_params)
|
||||
|
||||
log.info('Torrent sent to Transmission successfully.')
|
||||
return self.downloadReturnId(remote_torrent['torrent-added']['hashString'])
|
||||
return self.downloadReturnId(data['hashString'])
|
||||
|
||||
def test(self):
|
||||
if self.connect(True) and self.trpc.get_session():
|
||||
""" Check if connection works
|
||||
:return: bool
|
||||
"""
|
||||
|
||||
if self.connect() and self.trpc.get_session():
|
||||
return True
|
||||
return False
|
||||
|
||||
def getAllDownloadStatus(self, ids):
|
||||
""" Get status of all active downloads
|
||||
|
||||
:param ids: list of (mixed) downloader ids
|
||||
Used to match the releases for this downloader as there could be
|
||||
other downloaders active that it should ignore
|
||||
:return: list of releases
|
||||
"""
|
||||
|
||||
log.debug('Checking Transmission download status.')
|
||||
|
||||
@@ -121,6 +147,8 @@ class Transmission(DownloaderBase):
|
||||
status = 'failed'
|
||||
elif torrent['status'] == 0 and torrent['percentDone'] == 1:
|
||||
status = 'completed'
|
||||
elif torrent['status'] == 16 and torrent['percentDone'] == 1:
|
||||
status = 'completed'
|
||||
elif torrent['status'] in [5, 6]:
|
||||
status = 'seeding'
|
||||
|
||||
@@ -164,18 +192,18 @@ class Transmission(DownloaderBase):
|
||||
class TransmissionRPC(object):
|
||||
|
||||
"""TransmissionRPC lite library"""
|
||||
def __init__(self, host = 'localhost', port = 9091, rpc_url = 'transmission', username = None, password = None):
|
||||
def __init__(self, host = 'http://localhost', port = 9091, rpc_url = 'transmission', username = None, password = None):
|
||||
|
||||
super(TransmissionRPC, self).__init__()
|
||||
|
||||
self.url = 'http://' + host + ':' + str(port) + '/' + rpc_url + '/rpc'
|
||||
self.url = host + ':' + str(port) + '/' + rpc_url + '/rpc'
|
||||
self.tag = 0
|
||||
self.session_id = 0
|
||||
self.session = {}
|
||||
if username and password:
|
||||
password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
|
||||
password_manager.add_password(realm = None, uri = self.url, user = username, passwd = password)
|
||||
opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(password_manager), urllib2.HTTPDigestAuthHandler(password_manager))
|
||||
password_manager.add_password(realm = 'Transmission', uri = self.url, user = username, passwd = password)
|
||||
opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(password_manager))
|
||||
opener.addheaders = [('User-agent', 'couchpotato-transmission-client/1.0')]
|
||||
urllib2.install_opener(opener)
|
||||
elif username or password:
|
||||
@@ -276,8 +304,8 @@ config = [{
|
||||
},
|
||||
{
|
||||
'name': 'host',
|
||||
'default': 'localhost:9091',
|
||||
'description': 'Hostname with port. Usually <strong>localhost:9091</strong>',
|
||||
'default': 'http://localhost:9091',
|
||||
'description': 'Hostname with port. Usually <strong>http://localhost:9091</strong>',
|
||||
},
|
||||
{
|
||||
'name': 'rpc_url',
|
||||
|
||||
@@ -51,6 +51,21 @@ class uTorrent(DownloaderBase):
|
||||
return self.utorrent_api
|
||||
|
||||
def download(self, data = None, media = None, filedata = None):
|
||||
"""
|
||||
Send a torrent/nzb file to the downloader
|
||||
|
||||
:param data: dict returned from provider
|
||||
Contains the release information
|
||||
:param media: media dict with information
|
||||
Used for creating the filename when possible
|
||||
:param filedata: downloaded torrent/nzb filedata
|
||||
The file gets downloaded in the searcher and send to this function
|
||||
This is done to have failed checking before using the downloader, so the downloader
|
||||
doesn't need to worry about that
|
||||
:return: boolean
|
||||
One faile returns false, but the downloaded should log his own errors
|
||||
"""
|
||||
|
||||
if not media: media = {}
|
||||
if not data: data = {}
|
||||
|
||||
@@ -120,6 +135,10 @@ class uTorrent(DownloaderBase):
|
||||
return self.downloadReturnId(torrent_hash)
|
||||
|
||||
def test(self):
|
||||
""" Check if connection works
|
||||
:return: bool
|
||||
"""
|
||||
|
||||
if self.connect():
|
||||
build_version = self.utorrent_api.get_build()
|
||||
if not build_version:
|
||||
@@ -131,6 +150,13 @@ class uTorrent(DownloaderBase):
|
||||
return False
|
||||
|
||||
def getAllDownloadStatus(self, ids):
|
||||
""" Get status of all active downloads
|
||||
|
||||
:param ids: list of (mixed) downloader ids
|
||||
Used to match the releases for this downloader as there could be
|
||||
other downloaders active that it should ignore
|
||||
:return: list of releases
|
||||
"""
|
||||
|
||||
log.debug('Checking uTorrent download status.')
|
||||
|
||||
@@ -168,7 +194,7 @@ class uTorrent(DownloaderBase):
|
||||
status = 'busy'
|
||||
if (torrent[1] & self.status_flags['STARTED'] or torrent[1] & self.status_flags['QUEUED']) and torrent[4] == 1000:
|
||||
status = 'seeding'
|
||||
elif (torrent[1] & self.status_flags['ERROR']):
|
||||
elif torrent[1] & self.status_flags['ERROR']:
|
||||
status = 'failed'
|
||||
elif torrent[4] == 1000:
|
||||
status = 'completed'
|
||||
@@ -229,7 +255,6 @@ class uTorrentAPI(object):
|
||||
password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
|
||||
password_manager.add_password(realm = None, uri = self.url, user = username, passwd = password)
|
||||
self.opener.add_handler(urllib2.HTTPBasicAuthHandler(password_manager))
|
||||
self.opener.add_handler(urllib2.HTTPDigestAuthHandler(password_manager))
|
||||
elif username or password:
|
||||
log.debug('User or password missing, not using authentication.')
|
||||
self.token = self.get_token()
|
||||
|
||||
@@ -90,7 +90,7 @@ def fireEvent(name, *args, **kwargs):
|
||||
|
||||
else:
|
||||
|
||||
e = Event(name = name, threads = 10, exc_info = True, traceback = True, lock = threading.RLock())
|
||||
e = Event(name = name, threads = 10, exc_info = True, traceback = True)
|
||||
|
||||
for event in events[name]:
|
||||
e.handle(event['handler'], priority = event['priority'])
|
||||
|
||||
@@ -5,6 +5,7 @@ import re
|
||||
import traceback
|
||||
import unicodedata
|
||||
|
||||
from chardet import detect
|
||||
from couchpotato.core.logger import CPLog
|
||||
import six
|
||||
|
||||
@@ -35,13 +36,19 @@ def toUnicode(original, *args):
|
||||
return six.text_type(original, *args)
|
||||
except:
|
||||
try:
|
||||
detected = detect(original)
|
||||
try:
|
||||
if detected.get('confidence') > 0.8:
|
||||
return original.decode(detected.get('encoding'))
|
||||
except:
|
||||
pass
|
||||
|
||||
return ek(original, *args)
|
||||
except:
|
||||
raise
|
||||
except:
|
||||
log.error('Unable to decode value "%s..." : %s ', (repr(original)[:20], traceback.format_exc()))
|
||||
ascii_text = str(original).encode('string_escape')
|
||||
return toUnicode(ascii_text)
|
||||
return 'ERROR DECODING STRING'
|
||||
|
||||
|
||||
def ss(original, *args):
|
||||
@@ -52,7 +59,10 @@ def ss(original, *args):
|
||||
return u_original.encode(Env.get('encoding'))
|
||||
except Exception as e:
|
||||
log.debug('Failed ss encoding char, force UTF8: %s', e)
|
||||
return u_original.encode('UTF-8')
|
||||
try:
|
||||
return u_original.encode(Env.get('encoding'), 'replace')
|
||||
except:
|
||||
return u_original.encode('utf-8', 'replace')
|
||||
|
||||
|
||||
def sp(path, *args):
|
||||
@@ -78,14 +88,14 @@ def sp(path, *args):
|
||||
# Replace *NIX ambiguous '//' at the beginning of a path with '/' (crashes guessit)
|
||||
path = re.sub('^//', '/', path)
|
||||
|
||||
return toUnicode(path)
|
||||
return path
|
||||
|
||||
|
||||
def ek(original, *args):
|
||||
if isinstance(original, (str, unicode)):
|
||||
try:
|
||||
from couchpotato.environment import Env
|
||||
return original.decode(Env.get('encoding'))
|
||||
return original.decode(Env.get('encoding'), 'ignore')
|
||||
except UnicodeDecodeError:
|
||||
raise
|
||||
|
||||
|
||||
113
couchpotato/core/helpers/variable.py
Normal file → Executable file
113
couchpotato/core/helpers/variable.py
Normal file → Executable file
@@ -1,4 +1,5 @@
|
||||
import collections
|
||||
import ctypes
|
||||
import hashlib
|
||||
import os
|
||||
import platform
|
||||
@@ -6,8 +7,9 @@ import random
|
||||
import re
|
||||
import string
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss
|
||||
from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss, sp
|
||||
from couchpotato.core.logger import CPLog
|
||||
import six
|
||||
from six.moves import map, zip, filter
|
||||
@@ -39,11 +41,11 @@ def symlink(src, dst):
|
||||
def getUserDir():
|
||||
try:
|
||||
import pwd
|
||||
os.environ['HOME'] = pwd.getpwuid(os.geteuid()).pw_dir
|
||||
os.environ['HOME'] = sp(pwd.getpwuid(os.geteuid()).pw_dir)
|
||||
except:
|
||||
pass
|
||||
|
||||
return os.path.expanduser('~')
|
||||
return sp(os.path.expanduser('~'))
|
||||
|
||||
|
||||
def getDownloadDir():
|
||||
@@ -290,9 +292,14 @@ def dictIsSubset(a, b):
|
||||
return all([k in b and b[k] == v for k, v in a.items()])
|
||||
|
||||
|
||||
# Returns True if sub_folder is the same as or inside base_folder
|
||||
def isSubFolder(sub_folder, base_folder):
|
||||
# Returns True if sub_folder is the same as or inside base_folder
|
||||
return base_folder and sub_folder and ss(os.path.normpath(base_folder).rstrip(os.path.sep) + os.path.sep) in ss(os.path.normpath(sub_folder).rstrip(os.path.sep) + os.path.sep)
|
||||
if base_folder and sub_folder:
|
||||
base = sp(os.path.realpath(base_folder)) + os.path.sep
|
||||
subfolder = sp(os.path.realpath(sub_folder)) + os.path.sep
|
||||
return os.path.commonprefix([subfolder, base]) == base
|
||||
|
||||
return False
|
||||
|
||||
|
||||
# From SABNZBD
|
||||
@@ -307,3 +314,99 @@ def scanForPassword(name):
|
||||
|
||||
if m:
|
||||
return m.group(1).strip('. '), m.group(2).strip()
|
||||
|
||||
|
||||
under_pat = re.compile(r'_([a-z])')
|
||||
|
||||
def underscoreToCamel(name):
|
||||
return under_pat.sub(lambda x: x.group(1).upper(), name)
|
||||
|
||||
|
||||
def removePyc(folder, only_excess = True, show_logs = True):
|
||||
|
||||
folder = sp(folder)
|
||||
|
||||
for root, dirs, files in os.walk(folder):
|
||||
|
||||
pyc_files = filter(lambda filename: filename.endswith('.pyc'), files)
|
||||
py_files = set(filter(lambda filename: filename.endswith('.py'), files))
|
||||
excess_pyc_files = filter(lambda pyc_filename: pyc_filename[:-1] not in py_files, pyc_files) if only_excess else pyc_files
|
||||
|
||||
for excess_pyc_file in excess_pyc_files:
|
||||
full_path = os.path.join(root, excess_pyc_file)
|
||||
if show_logs: log.debug('Removing old PYC file: %s', full_path)
|
||||
try:
|
||||
os.remove(full_path)
|
||||
except:
|
||||
log.error('Couldn\'t remove %s: %s', (full_path, traceback.format_exc()))
|
||||
|
||||
for dir_name in dirs:
|
||||
full_path = os.path.join(root, dir_name)
|
||||
if len(os.listdir(full_path)) == 0:
|
||||
try:
|
||||
os.rmdir(full_path)
|
||||
except:
|
||||
log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc()))
|
||||
|
||||
|
||||
def getFreeSpace(directories):
|
||||
|
||||
single = not isinstance(directories, (tuple, list))
|
||||
if single:
|
||||
directories = [directories]
|
||||
|
||||
free_space = {}
|
||||
for folder in directories:
|
||||
|
||||
size = None
|
||||
if os.path.isdir(folder):
|
||||
if os.name == 'nt':
|
||||
_, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), \
|
||||
ctypes.c_ulonglong()
|
||||
if sys.version_info >= (3,) or isinstance(folder, unicode):
|
||||
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW #@UndefinedVariable
|
||||
else:
|
||||
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA #@UndefinedVariable
|
||||
ret = fun(folder, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
|
||||
if ret == 0:
|
||||
raise ctypes.WinError()
|
||||
return [total.value, free.value]
|
||||
else:
|
||||
s = os.statvfs(folder)
|
||||
size = [s.f_blocks * s.f_frsize / (1024 * 1024), (s.f_bavail * s.f_frsize) / (1024 * 1024)]
|
||||
|
||||
if single: return size
|
||||
|
||||
free_space[folder] = size
|
||||
|
||||
return free_space
|
||||
|
||||
|
||||
def getSize(paths):
|
||||
|
||||
single = not isinstance(paths, (tuple, list))
|
||||
if single:
|
||||
paths = [paths]
|
||||
|
||||
total_size = 0
|
||||
for path in paths:
|
||||
path = sp(path)
|
||||
|
||||
if os.path.isdir(path):
|
||||
total_size = 0
|
||||
for dirpath, _, filenames in os.walk(path):
|
||||
for f in filenames:
|
||||
total_size += os.path.getsize(sp(os.path.join(dirpath, f)))
|
||||
|
||||
elif os.path.isfile(path):
|
||||
total_size += os.path.getsize(path)
|
||||
|
||||
return total_size / 1048576 # MB
|
||||
|
||||
|
||||
def find(func, iterable):
|
||||
for item in iterable:
|
||||
if func(item):
|
||||
return item
|
||||
|
||||
return None
|
||||
|
||||
@@ -25,6 +25,12 @@ class CPLog(object):
|
||||
self.Env = Env
|
||||
self.is_develop = Env.get('dev')
|
||||
|
||||
from couchpotato.core.event import addEvent
|
||||
addEvent('app.after_shutdown', self.close)
|
||||
|
||||
def close(self, *args, **kwargs):
|
||||
logging.shutdown()
|
||||
|
||||
def info(self, msg, replace_tuple = ()):
|
||||
self.logger.info(self.addContext(msg, replace_tuple))
|
||||
|
||||
@@ -53,15 +59,14 @@ class CPLog(object):
|
||||
msg = ss(msg)
|
||||
|
||||
try:
|
||||
msg = msg % replace_tuple
|
||||
except:
|
||||
try:
|
||||
if isinstance(replace_tuple, tuple):
|
||||
msg = msg % tuple([ss(x) for x in list(replace_tuple)])
|
||||
else:
|
||||
msg = msg % ss(replace_tuple)
|
||||
except Exception as e:
|
||||
self.logger.error('Failed encoding stuff to log "%s": %s' % (msg, e))
|
||||
if isinstance(replace_tuple, tuple):
|
||||
msg = msg % tuple([ss(x) if not isinstance(x, (int, float)) else x for x in list(replace_tuple)])
|
||||
elif isinstance(replace_tuple, dict):
|
||||
msg = msg % dict((k, ss(v) if not isinstance(v, (int, float)) else v) for k, v in replace_tuple.iteritems())
|
||||
else:
|
||||
msg = msg % ss(replace_tuple)
|
||||
except Exception as e:
|
||||
self.logger.error('Failed encoding stuff to log "%s": %s' % (msg, e))
|
||||
|
||||
self.setup()
|
||||
if not self.is_develop:
|
||||
|
||||
37
couchpotato/core/media/__init__.py
Normal file → Executable file
37
couchpotato/core/media/__init__.py
Normal file → Executable file
@@ -1,9 +1,10 @@
|
||||
import os
|
||||
import traceback
|
||||
|
||||
from couchpotato import get_db, CPLog
|
||||
from couchpotato import CPLog, md5
|
||||
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
|
||||
from couchpotato.core.helpers.encoding import toUnicode
|
||||
from couchpotato.core.helpers.variable import getExt
|
||||
from couchpotato.core.plugins.base import Plugin
|
||||
import six
|
||||
|
||||
@@ -25,11 +26,10 @@ class MediaBase(Plugin):
|
||||
|
||||
def onComplete():
|
||||
try:
|
||||
db = get_db()
|
||||
media = fireEvent('media.get', media_id, single = True)
|
||||
event_name = '%s.searcher.single' % media.get('type')
|
||||
|
||||
fireEventAsync(event_name, media, on_complete = self.createNotifyFront(media_id))
|
||||
if media:
|
||||
event_name = '%s.searcher.single' % media.get('type')
|
||||
fireEventAsync(event_name, media, on_complete = self.createNotifyFront(media_id), manual = True)
|
||||
except:
|
||||
log.error('Failed creating onComplete: %s', traceback.format_exc())
|
||||
|
||||
@@ -40,9 +40,9 @@ class MediaBase(Plugin):
|
||||
def notifyFront():
|
||||
try:
|
||||
media = fireEvent('media.get', media_id, single = True)
|
||||
event_name = '%s.update' % media.get('type')
|
||||
|
||||
fireEvent('notify.frontend', type = event_name, data = media)
|
||||
if media:
|
||||
event_name = '%s.update' % media.get('type')
|
||||
fireEvent('notify.frontend', type = event_name, data = media)
|
||||
except:
|
||||
log.error('Failed creating onComplete: %s', traceback.format_exc())
|
||||
|
||||
@@ -66,10 +66,13 @@ class MediaBase(Plugin):
|
||||
|
||||
return def_title or 'UNKNOWN'
|
||||
|
||||
def getPoster(self, image_urls, existing_files):
|
||||
image_type = 'poster'
|
||||
def getPoster(self, media, image_urls):
|
||||
if 'files' not in media:
|
||||
media['files'] = {}
|
||||
|
||||
# Remove non-existing files
|
||||
existing_files = media['files']
|
||||
|
||||
image_type = 'poster'
|
||||
file_type = 'image_%s' % image_type
|
||||
|
||||
# Make existing unique
|
||||
@@ -90,10 +93,18 @@ class MediaBase(Plugin):
|
||||
if not isinstance(image, (str, unicode)):
|
||||
continue
|
||||
|
||||
if file_type not in existing_files or len(existing_files.get(file_type, [])) == 0:
|
||||
# Check if it has top image
|
||||
filename = '%s.%s' % (md5(image), getExt(image))
|
||||
existing = existing_files.get(file_type, [])
|
||||
has_latest = False
|
||||
for x in existing:
|
||||
if filename in x:
|
||||
has_latest = True
|
||||
|
||||
if not has_latest or file_type not in existing_files or len(existing_files.get(file_type, [])) == 0:
|
||||
file_path = fireEvent('file.download', url = image, single = True)
|
||||
if file_path:
|
||||
existing_files[file_type] = [file_path]
|
||||
existing_files[file_type] = [toUnicode(file_path)]
|
||||
break
|
||||
else:
|
||||
break
|
||||
|
||||
110
couchpotato/core/media/_base/library/main.py
Normal file → Executable file
110
couchpotato/core/media/_base/library/main.py
Normal file → Executable file
@@ -1,10 +1,47 @@
|
||||
from couchpotato import get_db
|
||||
from couchpotato.api import addApiView
|
||||
from couchpotato.core.event import addEvent, fireEvent
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.media._base.library.base import LibraryBase
|
||||
|
||||
log = CPLog(__name__)
|
||||
|
||||
|
||||
class Library(LibraryBase):
|
||||
def __init__(self):
|
||||
addEvent('library.title', self.title)
|
||||
addEvent('library.related', self.related)
|
||||
addEvent('library.tree', self.tree)
|
||||
|
||||
addEvent('library.root', self.root)
|
||||
|
||||
addApiView('library.query', self.queryView)
|
||||
addApiView('library.related', self.relatedView)
|
||||
addApiView('library.tree', self.treeView)
|
||||
|
||||
def queryView(self, media_id, **kwargs):
|
||||
db = get_db()
|
||||
media = db.get('id', media_id)
|
||||
|
||||
return {
|
||||
'result': fireEvent('library.query', media, single = True)
|
||||
}
|
||||
|
||||
def relatedView(self, media_id, **kwargs):
|
||||
db = get_db()
|
||||
media = db.get('id', media_id)
|
||||
|
||||
return {
|
||||
'result': fireEvent('library.related', media, single = True)
|
||||
}
|
||||
|
||||
def treeView(self, media_id, **kwargs):
|
||||
db = get_db()
|
||||
media = db.get('id', media_id)
|
||||
|
||||
return {
|
||||
'result': fireEvent('library.tree', media, single = True)
|
||||
}
|
||||
|
||||
def title(self, library):
|
||||
return fireEvent(
|
||||
@@ -16,3 +53,76 @@ class Library(LibraryBase):
|
||||
include_identifier = False,
|
||||
single = True
|
||||
)
|
||||
|
||||
def related(self, media):
|
||||
result = {self.key(media['type']): media}
|
||||
|
||||
db = get_db()
|
||||
cur = media
|
||||
|
||||
while cur and cur.get('parent_id'):
|
||||
cur = db.get('id', cur['parent_id'])
|
||||
|
||||
result[self.key(cur['type'])] = cur
|
||||
|
||||
children = db.get_many('media_children', media['_id'], with_doc = True)
|
||||
|
||||
for item in children:
|
||||
key = self.key(item['doc']['type']) + 's'
|
||||
|
||||
if key not in result:
|
||||
result[key] = []
|
||||
|
||||
result[key].append(item['doc'])
|
||||
|
||||
return result
|
||||
|
||||
def root(self, media):
|
||||
db = get_db()
|
||||
cur = media
|
||||
|
||||
while cur and cur.get('parent_id'):
|
||||
cur = db.get('id', cur['parent_id'])
|
||||
|
||||
return cur
|
||||
|
||||
def tree(self, media = None, media_id = None):
|
||||
db = get_db()
|
||||
|
||||
if media:
|
||||
result = media
|
||||
elif media_id:
|
||||
result = db.get('id', media_id, with_doc = True)
|
||||
else:
|
||||
return None
|
||||
|
||||
# Find children
|
||||
items = db.get_many('media_children', result['_id'], with_doc = True)
|
||||
keys = []
|
||||
|
||||
# Build children arrays
|
||||
for item in items:
|
||||
key = self.key(item['doc']['type']) + 's'
|
||||
|
||||
if key not in result:
|
||||
result[key] = {}
|
||||
elif type(result[key]) is not dict:
|
||||
result[key] = {}
|
||||
|
||||
if key not in keys:
|
||||
keys.append(key)
|
||||
|
||||
result[key][item['_id']] = fireEvent('library.tree', item['doc'], single = True)
|
||||
|
||||
# Unique children
|
||||
for key in keys:
|
||||
result[key] = result[key].values()
|
||||
|
||||
# Include releases
|
||||
result['releases'] = fireEvent('release.for_media', result['_id'], single = True)
|
||||
|
||||
return result
|
||||
|
||||
def key(self, media_type):
|
||||
parts = media_type.split('.')
|
||||
return parts[-1]
|
||||
|
||||
@@ -40,7 +40,7 @@ class Matcher(MatcherBase):
|
||||
return False
|
||||
|
||||
def correctTitle(self, chain, media):
|
||||
root_library = media['library']['root_library']
|
||||
root = fireEvent('library.root', media, single = True)
|
||||
|
||||
if 'show_name' not in chain.info or not len(chain.info['show_name']):
|
||||
log.info('Wrong: missing show name in parsed result')
|
||||
@@ -50,10 +50,10 @@ class Matcher(MatcherBase):
|
||||
chain_words = [x.lower() for x in chain.info['show_name']]
|
||||
|
||||
# Build a list of possible titles of the media we are searching for
|
||||
titles = root_library['info']['titles']
|
||||
titles = root['info']['titles']
|
||||
|
||||
# Add year suffix titles (will result in ['<name_one>', '<name_one> <suffix_one>', '<name_two>', ...])
|
||||
suffixes = [None, root_library['info']['year']]
|
||||
suffixes = [None, root['info']['year']]
|
||||
|
||||
titles = [
|
||||
title + ((' %s' % suffix) if suffix else '')
|
||||
|
||||
@@ -99,7 +99,7 @@ from couchpotato.core.helpers.encoding import simplifyString"""
|
||||
|
||||
|
||||
class TitleIndex(TreeBasedIndex):
|
||||
_version = 2
|
||||
_version = 4
|
||||
|
||||
custom_header = """from CodernityDB.tree_index import TreeBasedIndex
|
||||
from string import ascii_letters
|
||||
@@ -123,16 +123,16 @@ from couchpotato.core.helpers.encoding import toUnicode, simplifyString"""
|
||||
nr_prefix = '' if title and len(title) > 0 and title[0] in ascii_letters else '#'
|
||||
title = simplifyString(title)
|
||||
|
||||
for prefix in ['the ']:
|
||||
for prefix in ['the ', 'an ', 'a ']:
|
||||
if prefix == title[:len(prefix)]:
|
||||
title = title[len(prefix):]
|
||||
break
|
||||
|
||||
return str(nr_prefix + title).ljust(32, '_')[:32]
|
||||
return str(nr_prefix + title).ljust(32, ' ')[:32]
|
||||
|
||||
|
||||
class StartsWithIndex(TreeBasedIndex):
|
||||
_version = 2
|
||||
_version = 3
|
||||
|
||||
custom_header = """from CodernityDB.tree_index import TreeBasedIndex
|
||||
from string import ascii_letters
|
||||
@@ -153,7 +153,7 @@ from couchpotato.core.helpers.encoding import toUnicode, simplifyString"""
|
||||
title = toUnicode(title)
|
||||
title = simplifyString(title)
|
||||
|
||||
for prefix in ['the ']:
|
||||
for prefix in ['the ', 'an ', 'a ']:
|
||||
if prefix == title[:len(prefix)]:
|
||||
title = title[len(prefix):]
|
||||
break
|
||||
@@ -176,3 +176,24 @@ class MediaChildrenIndex(TreeBasedIndex):
|
||||
if data.get('_t') == 'media' and data.get('parent_id'):
|
||||
return data.get('parent_id'), None
|
||||
|
||||
|
||||
class MediaTagIndex(MultiTreeBasedIndex):
|
||||
_version = 2
|
||||
|
||||
custom_header = """from CodernityDB.tree_index import MultiTreeBasedIndex"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
kwargs['key_format'] = '32s'
|
||||
super(MediaTagIndex, self).__init__(*args, **kwargs)
|
||||
|
||||
def make_key_value(self, data):
|
||||
if data.get('_t') == 'media' and data.get('tags') and len(data.get('tags', [])) > 0:
|
||||
|
||||
tags = set()
|
||||
for tag in data.get('tags', []):
|
||||
tags.add(self.make_key(tag))
|
||||
|
||||
return list(tags), None
|
||||
|
||||
def make_key(self, key):
|
||||
return md5(key).hexdigest()
|
||||
|
||||
245
couchpotato/core/media/_base/media/main.py
Normal file → Executable file
245
couchpotato/core/media/_base/media/main.py
Normal file → Executable file
@@ -1,6 +1,9 @@
|
||||
from datetime import timedelta
|
||||
import time
|
||||
import traceback
|
||||
from string import ascii_lowercase
|
||||
|
||||
from CodernityDB.database import RecordNotFound, RecordDeleted
|
||||
from couchpotato import tryInt, get_db
|
||||
from couchpotato.api import addApiView
|
||||
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
|
||||
@@ -8,7 +11,7 @@ from couchpotato.core.helpers.encoding import toUnicode
|
||||
from couchpotato.core.helpers.variable import splitString, getImdb, getTitle
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.media import MediaBase
|
||||
from .index import MediaIndex, MediaStatusIndex, MediaTypeIndex, TitleSearchIndex, TitleIndex, StartsWithIndex, MediaChildrenIndex
|
||||
from .index import MediaIndex, MediaStatusIndex, MediaTypeIndex, TitleSearchIndex, TitleIndex, StartsWithIndex, MediaChildrenIndex, MediaTagIndex
|
||||
|
||||
|
||||
log = CPLog(__name__)
|
||||
@@ -20,6 +23,7 @@ class MediaPlugin(MediaBase):
|
||||
'media': MediaIndex,
|
||||
'media_search_title': TitleSearchIndex,
|
||||
'media_status': MediaStatusIndex,
|
||||
'media_tag': MediaTagIndex,
|
||||
'media_by_type': MediaTypeIndex,
|
||||
'media_title': TitleIndex,
|
||||
'media_startswith': StartsWithIndex,
|
||||
@@ -39,15 +43,15 @@ class MediaPlugin(MediaBase):
|
||||
'desc': 'List media',
|
||||
'params': {
|
||||
'type': {'type': 'string', 'desc': 'Media type to filter on.'},
|
||||
'status': {'type': 'array or csv', 'desc': 'Filter movie by status. Example:"active,done"'},
|
||||
'release_status': {'type': 'array or csv', 'desc': 'Filter movie by status of its releases. Example:"snatched,available"'},
|
||||
'limit_offset': {'desc': 'Limit and offset the movie list. Examples: "50" or "50,30"'},
|
||||
'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all movies starting with the letter "a"'},
|
||||
'search': {'desc': 'Search movie title'},
|
||||
'status': {'type': 'array or csv', 'desc': 'Filter media by status. Example:"active,done"'},
|
||||
'release_status': {'type': 'array or csv', 'desc': 'Filter media by status of its releases. Example:"snatched,available"'},
|
||||
'limit_offset': {'desc': 'Limit and offset the media list. Examples: "50" or "50,30"'},
|
||||
'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all media starting with the letter "a"'},
|
||||
'search': {'desc': 'Search media title'},
|
||||
},
|
||||
'return': {'type': 'object', 'example': """{
|
||||
'success': True,
|
||||
'empty': bool, any movies returned or not,
|
||||
'empty': bool, any media returned or not,
|
||||
'media': array, media found,
|
||||
}"""}
|
||||
})
|
||||
@@ -73,6 +77,7 @@ class MediaPlugin(MediaBase):
|
||||
addEvent('app.load', self.addSingleListView, priority = 100)
|
||||
addEvent('app.load', self.addSingleCharView, priority = 100)
|
||||
addEvent('app.load', self.addSingleDeleteView, priority = 100)
|
||||
addEvent('app.load', self.cleanupFaults)
|
||||
|
||||
addEvent('media.get', self.get)
|
||||
addEvent('media.with_status', self.withStatus)
|
||||
@@ -80,6 +85,20 @@ class MediaPlugin(MediaBase):
|
||||
addEvent('media.list', self.list)
|
||||
addEvent('media.delete', self.delete)
|
||||
addEvent('media.restatus', self.restatus)
|
||||
addEvent('media.tag', self.tag)
|
||||
addEvent('media.untag', self.unTag)
|
||||
|
||||
# Wrongly tagged media files
|
||||
def cleanupFaults(self):
|
||||
medias = fireEvent('media.with_status', 'ignored', single = True) or []
|
||||
|
||||
db = get_db()
|
||||
for media in medias:
|
||||
try:
|
||||
media['status'] = 'done'
|
||||
db.update(media)
|
||||
except:
|
||||
pass
|
||||
|
||||
def refresh(self, id = '', **kwargs):
|
||||
handlers = []
|
||||
@@ -102,13 +121,12 @@ class MediaPlugin(MediaBase):
|
||||
|
||||
try:
|
||||
media = get_db().get('id', media_id)
|
||||
event = '%s.update_info' % media.get('type')
|
||||
event = '%s.update' % media.get('type')
|
||||
|
||||
def handler():
|
||||
fireEvent(event, media_id = media_id, on_complete = self.createOnComplete(media_id))
|
||||
|
||||
if handler:
|
||||
return handler
|
||||
return handler
|
||||
|
||||
except:
|
||||
log.error('Refresh handler for non existing media: %s', traceback.format_exc())
|
||||
@@ -120,25 +138,30 @@ class MediaPlugin(MediaBase):
|
||||
|
||||
def get(self, media_id):
|
||||
|
||||
db = get_db()
|
||||
try:
|
||||
db = get_db()
|
||||
|
||||
imdb_id = getImdb(str(media_id))
|
||||
imdb_id = getImdb(str(media_id))
|
||||
|
||||
media = None
|
||||
if imdb_id:
|
||||
media = db.get('media', 'imdb-%s' % imdb_id, with_doc = True)['doc']
|
||||
else:
|
||||
media = db.get('id', media_id)
|
||||
if imdb_id:
|
||||
media = db.get('media', 'imdb-%s' % imdb_id, with_doc = True)['doc']
|
||||
else:
|
||||
media = db.get('id', media_id)
|
||||
|
||||
if media:
|
||||
if media:
|
||||
|
||||
# Attach category
|
||||
try: media['category'] = db.get('id', media.get('category_id'))
|
||||
except: pass
|
||||
# Attach category
|
||||
try: media['category'] = db.get('id', media.get('category_id'))
|
||||
except: pass
|
||||
|
||||
media['releases'] = fireEvent('release.for_media', media['_id'], single = True)
|
||||
media['releases'] = fireEvent('release.for_media', media['_id'], single = True)
|
||||
|
||||
return media
|
||||
return media
|
||||
|
||||
except (RecordNotFound, RecordDeleted):
|
||||
log.error('Media with id "%s" not found', media_id)
|
||||
except:
|
||||
raise
|
||||
|
||||
def getView(self, id = None, **kwargs):
|
||||
|
||||
@@ -149,30 +172,45 @@ class MediaPlugin(MediaBase):
|
||||
'media': media,
|
||||
}
|
||||
|
||||
def withStatus(self, status, with_doc = True):
|
||||
def withStatus(self, status, types = None, with_doc = True):
|
||||
|
||||
db = get_db()
|
||||
|
||||
if types and not isinstance(types, (list, tuple)):
|
||||
types = [types]
|
||||
|
||||
status = list(status if isinstance(status, (list, tuple)) else [status])
|
||||
|
||||
for s in status:
|
||||
for ms in db.get_many('media_status', s, with_doc = with_doc):
|
||||
yield ms['doc'] if with_doc else ms
|
||||
for ms in db.get_many('media_status', s):
|
||||
if with_doc:
|
||||
try:
|
||||
doc = db.get('id', ms['_id'])
|
||||
|
||||
if types and doc.get('type') not in types:
|
||||
continue
|
||||
|
||||
yield doc
|
||||
except (RecordDeleted, RecordNotFound):
|
||||
log.debug('Record not found, skipping: %s', ms['_id'])
|
||||
except (ValueError, EOFError):
|
||||
fireEvent('database.delete_corrupted', ms.get('_id'), traceback_error = traceback.format_exc(0))
|
||||
else:
|
||||
yield ms
|
||||
|
||||
def withIdentifiers(self, identifiers, with_doc = False):
|
||||
|
||||
db = get_db()
|
||||
|
||||
for x in identifiers:
|
||||
try:
|
||||
media = db.get('media', '%s-%s' % (x, identifiers[x]), with_doc = with_doc)
|
||||
return media
|
||||
return db.get('media', '%s-%s' % (x, identifiers[x]), with_doc = with_doc)
|
||||
except:
|
||||
pass
|
||||
|
||||
log.debug('No media found with identifiers: %s', identifiers)
|
||||
return False
|
||||
|
||||
def list(self, types = None, status = None, release_status = None, status_or = False, limit_offset = None, starts_with = None, search = None):
|
||||
def list(self, types = None, status = None, release_status = None, status_or = False, limit_offset = None, with_tags = None, starts_with = None, search = None):
|
||||
|
||||
db = get_db()
|
||||
|
||||
@@ -183,6 +221,8 @@ class MediaPlugin(MediaBase):
|
||||
release_status = [release_status]
|
||||
if types and not isinstance(types, (list, tuple)):
|
||||
types = [types]
|
||||
if with_tags and not isinstance(with_tags, (list, tuple)):
|
||||
with_tags = [with_tags]
|
||||
|
||||
# query media ids
|
||||
if types:
|
||||
@@ -209,11 +249,17 @@ class MediaPlugin(MediaBase):
|
||||
|
||||
# Add search filters
|
||||
if starts_with:
|
||||
filter_by['starts_with'] = set()
|
||||
starts_with = toUnicode(starts_with.lower())[0]
|
||||
starts_with = starts_with if starts_with in ascii_lowercase else '#'
|
||||
filter_by['starts_with'] = [x['_id'] for x in db.get_many('media_startswith', starts_with)]
|
||||
|
||||
# Add tag filter
|
||||
if with_tags:
|
||||
filter_by['with_tags'] = set()
|
||||
for tag in with_tags:
|
||||
for x in db.get_many('media_tag', tag):
|
||||
filter_by['with_tags'].add(x['_id'])
|
||||
|
||||
# Filter with search query
|
||||
if search:
|
||||
filter_by['search'] = [x['_id'] for x in db.get_many('media_search_title', search)]
|
||||
@@ -249,6 +295,10 @@ class MediaPlugin(MediaBase):
|
||||
|
||||
media = fireEvent('media.get', media_id, single = True)
|
||||
|
||||
# Skip if no media has been found
|
||||
if not media:
|
||||
continue
|
||||
|
||||
# Merge releases with movie dict
|
||||
medias.append(media)
|
||||
|
||||
@@ -266,6 +316,7 @@ class MediaPlugin(MediaBase):
|
||||
release_status = splitString(kwargs.get('release_status')),
|
||||
status_or = kwargs.get('status_or') is not None,
|
||||
limit_offset = kwargs.get('limit_offset'),
|
||||
with_tags = splitString(kwargs.get('with_tags')),
|
||||
starts_with = kwargs.get('starts_with'),
|
||||
search = kwargs.get('search')
|
||||
)
|
||||
@@ -280,9 +331,22 @@ class MediaPlugin(MediaBase):
|
||||
def addSingleListView(self):
|
||||
|
||||
for media_type in fireEvent('media.types', merge = True):
|
||||
def tempList(*args, **kwargs):
|
||||
return self.listView(types = media_type, **kwargs)
|
||||
addApiView('%s.list' % media_type, tempList)
|
||||
tempList = lambda *args, **kwargs : self.listView(type = media_type, **kwargs)
|
||||
addApiView('%s.list' % media_type, tempList, docs = {
|
||||
'desc': 'List media',
|
||||
'params': {
|
||||
'status': {'type': 'array or csv', 'desc': 'Filter ' + media_type + ' by status. Example:"active,done"'},
|
||||
'release_status': {'type': 'array or csv', 'desc': 'Filter ' + media_type + ' by status of its releases. Example:"snatched,available"'},
|
||||
'limit_offset': {'desc': 'Limit and offset the ' + media_type + ' list. Examples: "50" or "50,30"'},
|
||||
'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all ' + media_type + 's starting with the letter "a"'},
|
||||
'search': {'desc': 'Search ' + media_type + ' title'},
|
||||
},
|
||||
'return': {'type': 'object', 'example': """{
|
||||
'success': True,
|
||||
'empty': bool, any """ + media_type + """s returned or not,
|
||||
'media': array, media found,
|
||||
}"""}
|
||||
})
|
||||
|
||||
def availableChars(self, types = None, status = None, release_status = None):
|
||||
|
||||
@@ -328,7 +392,7 @@ class MediaPlugin(MediaBase):
|
||||
if x['_id'] in media_ids:
|
||||
chars.add(x['key'])
|
||||
|
||||
if len(chars) == 25:
|
||||
if len(chars) == 27:
|
||||
break
|
||||
|
||||
return list(chars)
|
||||
@@ -349,8 +413,7 @@ class MediaPlugin(MediaBase):
|
||||
def addSingleCharView(self):
|
||||
|
||||
for media_type in fireEvent('media.types', merge = True):
|
||||
def tempChar(*args, **kwargs):
|
||||
return self.charView(types = media_type, **kwargs)
|
||||
tempChar = lambda *args, **kwargs : self.charView(type = media_type, **kwargs)
|
||||
addApiView('%s.available_chars' % media_type, tempChar)
|
||||
|
||||
def delete(self, media_id, delete_from = None):
|
||||
@@ -361,13 +424,18 @@ class MediaPlugin(MediaBase):
|
||||
media = db.get('id', media_id)
|
||||
if media:
|
||||
deleted = False
|
||||
|
||||
media_releases = fireEvent('release.for_media', media['_id'], single = True)
|
||||
|
||||
if delete_from == 'all':
|
||||
# Delete connected releases
|
||||
for release in media_releases:
|
||||
db.delete(release)
|
||||
|
||||
db.delete(media)
|
||||
deleted = True
|
||||
else:
|
||||
|
||||
media_releases = fireEvent('release.for_media', media['_id'], single = True)
|
||||
|
||||
total_releases = len(media_releases)
|
||||
total_deleted = 0
|
||||
new_media_status = None
|
||||
@@ -379,16 +447,23 @@ class MediaPlugin(MediaBase):
|
||||
total_deleted += 1
|
||||
new_media_status = 'done'
|
||||
elif delete_from == 'manage':
|
||||
if release.get('status') == 'done':
|
||||
if release.get('status') == 'done' or media.get('status') == 'done':
|
||||
db.delete(release)
|
||||
total_deleted += 1
|
||||
|
||||
if (total_releases == total_deleted and media['status'] != 'active') or (delete_from == 'wanted' and media['status'] == 'active'):
|
||||
if (total_releases == total_deleted) or (total_releases == 0 and not new_media_status) or (not new_media_status and delete_from == 'late'):
|
||||
db.delete(media)
|
||||
deleted = True
|
||||
elif new_media_status:
|
||||
media['status'] = new_media_status
|
||||
|
||||
# Remove profile (no use for in manage)
|
||||
if new_media_status == 'done':
|
||||
media['profile_id'] = None
|
||||
|
||||
db.update(media)
|
||||
|
||||
fireEvent('media.untag', media['_id'], 'recent', single = True)
|
||||
else:
|
||||
fireEvent('media.restatus', media.get('_id'), single = True)
|
||||
|
||||
@@ -412,11 +487,16 @@ class MediaPlugin(MediaBase):
|
||||
def addSingleDeleteView(self):
|
||||
|
||||
for media_type in fireEvent('media.types', merge = True):
|
||||
def tempDelete(*args, **kwargs):
|
||||
return self.deleteView(types = media_type, *args, **kwargs)
|
||||
addApiView('%s.delete' % media_type, tempDelete)
|
||||
tempDelete = lambda *args, **kwargs : self.deleteView(type = media_type, **kwargs)
|
||||
addApiView('%s.delete' % media_type, tempDelete, docs = {
|
||||
'desc': 'Delete a ' + media_type + ' from the wanted list',
|
||||
'params': {
|
||||
'id': {'desc': 'Media ID(s) you want to delete.', 'type': 'int (comma separated)'},
|
||||
'delete_from': {'desc': 'Delete ' + media_type + ' from this page', 'type': 'string: all (default), wanted, manage'},
|
||||
}
|
||||
})
|
||||
|
||||
def restatus(self, media_id):
|
||||
def restatus(self, media_id, tag_recent = True, allowed_restatus = None):
|
||||
|
||||
try:
|
||||
db = get_db()
|
||||
@@ -428,24 +508,77 @@ class MediaPlugin(MediaBase):
|
||||
if not m['profile_id']:
|
||||
m['status'] = 'done'
|
||||
else:
|
||||
move_to_wanted = True
|
||||
m['status'] = 'active'
|
||||
|
||||
profile = db.get('id', m['profile_id'])
|
||||
media_releases = fireEvent('release.for_media', m['_id'], single = True)
|
||||
try:
|
||||
profile = db.get('id', m['profile_id'])
|
||||
media_releases = fireEvent('release.for_media', m['_id'], single = True)
|
||||
done_releases = [release for release in media_releases if release.get('status') == 'done']
|
||||
|
||||
for q_identifier in profile['qualities']:
|
||||
index = profile['qualities'].index(q_identifier)
|
||||
if done_releases:
|
||||
|
||||
for release in media_releases:
|
||||
if q_identifier == release['quality'] and (release.get('status') == 'done' and profile['finish'][index]):
|
||||
move_to_wanted = False
|
||||
# Check if we are finished with the media
|
||||
for release in done_releases:
|
||||
if fireEvent('quality.isfinish', {'identifier': release['quality'], 'is_3d': release.get('is_3d', False)}, profile, timedelta(seconds = time.time() - release['last_edit']).days, single = True):
|
||||
m['status'] = 'done'
|
||||
break
|
||||
|
||||
m['status'] = 'active' if move_to_wanted else 'done'
|
||||
elif previous_status == 'done':
|
||||
m['status'] = 'done'
|
||||
|
||||
except RecordNotFound:
|
||||
log.debug('Failed restatus, keeping previous: %s', traceback.format_exc())
|
||||
m['status'] = previous_status
|
||||
|
||||
# Only update when status has changed
|
||||
if previous_status != m['status']:
|
||||
if previous_status != m['status'] and (not allowed_restatus or m['status'] in allowed_restatus):
|
||||
db.update(m)
|
||||
|
||||
# Tag media as recent
|
||||
if tag_recent:
|
||||
self.tag(media_id, 'recent', update_edited = True)
|
||||
|
||||
return m['status']
|
||||
except:
|
||||
log.error('Failed restatus: %s', traceback.format_exc())
|
||||
|
||||
def tag(self, media_id, tag, update_edited = False):
|
||||
|
||||
try:
|
||||
db = get_db()
|
||||
m = db.get('id', media_id)
|
||||
|
||||
if update_edited:
|
||||
m['last_edit'] = int(time.time())
|
||||
|
||||
tags = m.get('tags') or []
|
||||
if tag not in tags:
|
||||
tags.append(tag)
|
||||
m['tags'] = tags
|
||||
db.update(m)
|
||||
|
||||
return True
|
||||
except:
|
||||
log.error('Failed restatus: %s', traceback.format_exc())
|
||||
log.error('Failed tagging: %s', traceback.format_exc())
|
||||
|
||||
return False
|
||||
|
||||
def unTag(self, media_id, tag):
|
||||
|
||||
try:
|
||||
db = get_db()
|
||||
m = db.get('id', media_id)
|
||||
|
||||
tags = m.get('tags') or []
|
||||
if tag in tags:
|
||||
new_tags = list(set(tags))
|
||||
new_tags.remove(tag)
|
||||
|
||||
m['tags'] = new_tags
|
||||
db.update(m)
|
||||
|
||||
return True
|
||||
except:
|
||||
log.error('Failed untagging: %s', traceback.format_exc())
|
||||
|
||||
return False
|
||||
|
||||
@@ -88,10 +88,16 @@ class Provider(Plugin):
|
||||
|
||||
if data and len(data) > 0:
|
||||
try:
|
||||
data = XMLTree.fromstring(ss(data))
|
||||
data = XMLTree.fromstring(data)
|
||||
return self.getElements(data, item_path)
|
||||
except:
|
||||
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
|
||||
try:
|
||||
data = XMLTree.fromstring(ss(data))
|
||||
return self.getElements(data, item_path)
|
||||
except XMLTree.ParseError:
|
||||
log.error('Invalid XML returned, check "%s" manually for issues', url)
|
||||
except:
|
||||
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
|
||||
|
||||
return []
|
||||
|
||||
@@ -125,6 +131,9 @@ class YarrProvider(Provider):
|
||||
else:
|
||||
return []
|
||||
|
||||
def buildUrl(self, *args, **kwargs):
|
||||
pass
|
||||
|
||||
def login(self):
|
||||
|
||||
# Check if we are still logged in every hour
|
||||
@@ -177,7 +186,7 @@ class YarrProvider(Provider):
|
||||
try:
|
||||
return self.urlopen(url, headers = {'User-Agent': Env.getIdentifier()}, show_error = False)
|
||||
except:
|
||||
log.error('Failed getting nzb from %s: %s', (self.getName(), traceback.format_exc()))
|
||||
log.error('Failed getting release from %s: %s', (self.getName(), traceback.format_exc()))
|
||||
|
||||
return 'try_next'
|
||||
|
||||
@@ -200,7 +209,7 @@ class YarrProvider(Provider):
|
||||
self._search(media, quality, results)
|
||||
# Search possible titles
|
||||
else:
|
||||
media_title = fireEvent('library.query', media, single = True)
|
||||
media_title = fireEvent('library.query', media, include_year = False, single = True)
|
||||
|
||||
for title in possibleTitles(media_title):
|
||||
self._searchOnTitle(title, media, quality, results)
|
||||
@@ -298,7 +307,7 @@ class ResultList(list):
|
||||
old_score = new_result['score']
|
||||
new_result['score'] = int(old_score * is_correct_weight)
|
||||
|
||||
log.info('Found correct release with weight %.02f, old_score(%d) now scaled to score(%d)', (
|
||||
log.info2('Found correct release with weight %.02f, old_score(%d) now scaled to score(%d)', (
|
||||
is_correct_weight,
|
||||
old_score,
|
||||
new_result['score']
|
||||
|
||||
@@ -2,7 +2,7 @@ import re
|
||||
import traceback
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
from couchpotato.core.helpers.variable import tryInt
|
||||
from couchpotato.core.helpers.variable import tryInt, simplifyString
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.media._base.providers.nzb.base import NZBProvider
|
||||
|
||||
@@ -50,8 +50,8 @@ class Base(NZBProvider):
|
||||
|
||||
def extra_check(item):
|
||||
parts = re.search('available:.(?P<parts>\d+)./.(?P<total>\d+)', info.text)
|
||||
total = tryInt(parts.group('total'))
|
||||
parts = tryInt(parts.group('parts'))
|
||||
total = float(tryInt(parts.group('total')))
|
||||
parts = float(tryInt(parts.group('parts')))
|
||||
|
||||
if (total / parts) < 1 and ((total / parts) < 0.95 or ((total / parts) >= 0.95 and not ('par2' in info.text.lower() or 'pa3' in info.text.lower()))):
|
||||
log.info2('Wrong: \'%s\', not complete: %s out of %s', (item['name'], parts, total))
|
||||
@@ -65,7 +65,7 @@ class Base(NZBProvider):
|
||||
|
||||
results.append({
|
||||
'id': nzb_id,
|
||||
'name': title.text,
|
||||
'name': simplifyString(title.text),
|
||||
'age': tryInt(age),
|
||||
'size': self.parseSize(size_match.group('size')),
|
||||
'url': self.urls['download'] % nzb_id,
|
||||
@@ -100,6 +100,7 @@ config = [{
|
||||
'name': 'binsearch',
|
||||
'description': 'Free provider, less accurate. See <a href="https://www.binsearch.info/">BinSearch</a>',
|
||||
'wizard': True,
|
||||
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAAAAAA6mKC9AAAATklEQVQY02NwQAMMWAXOnz+PKvD//3/CAvM//z+fgiwAAs+RBab4PP//vwbFjPlAffgEChzOo2r5fBuIfRAC5w8D+QUofkkp8MHjOWQAAM3Sbogztg2wAAAAAElFTkSuQmCC',
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
from urllib2 import HTTPError
|
||||
from urlparse import urlparse
|
||||
import time
|
||||
import traceback
|
||||
import urllib2
|
||||
import re
|
||||
|
||||
from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
|
||||
from couchpotato.core.helpers.rss import RSS
|
||||
@@ -12,6 +11,7 @@ from couchpotato.core.media._base.providers.base import ResultList
|
||||
from couchpotato.core.media._base.providers.nzb.base import NZBProvider
|
||||
from couchpotato.environment import Env
|
||||
from dateutil.parser import parse
|
||||
from requests import HTTPError
|
||||
|
||||
|
||||
log = CPLog(__name__)
|
||||
@@ -20,10 +20,11 @@ log = CPLog(__name__)
|
||||
class Base(NZBProvider, RSS):
|
||||
|
||||
urls = {
|
||||
'detail': 'details&id=%s',
|
||||
'detail': 'details/%s',
|
||||
'download': 't=get&id=%s'
|
||||
}
|
||||
|
||||
passwords_regex = 'password|wachtwoord'
|
||||
limits_reached = {}
|
||||
|
||||
http_time_between_calls = 1 # Seconds
|
||||
@@ -43,10 +44,8 @@ class Base(NZBProvider, RSS):
|
||||
|
||||
def _searchOnHost(self, host, media, quality, results):
|
||||
|
||||
query = self.buildUrl(media, host['api_key'])
|
||||
|
||||
url = '%s&%s' % (self.getUrl(host['host']), query)
|
||||
|
||||
query = self.buildUrl(media, host)
|
||||
url = '%s%s' % (self.getUrl(host['host']), query)
|
||||
nzbs = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})
|
||||
|
||||
for nzb in nzbs:
|
||||
@@ -69,8 +68,12 @@ class Base(NZBProvider, RSS):
|
||||
if not date:
|
||||
date = self.getTextElement(nzb, 'pubDate')
|
||||
|
||||
nzb_id = self.getTextElement(nzb, 'guid').split('/')[-1:].pop()
|
||||
name = self.getTextElement(nzb, 'title')
|
||||
detail_url = self.getTextElement(nzb, 'guid')
|
||||
nzb_id = detail_url.split('/')[-1:].pop()
|
||||
|
||||
if '://' not in detail_url:
|
||||
detail_url = (cleanHost(host['host']) + self.urls['detail']) % tryUrlencode(nzb_id)
|
||||
|
||||
if not name:
|
||||
continue
|
||||
@@ -79,6 +82,23 @@ class Base(NZBProvider, RSS):
|
||||
if spotter:
|
||||
name_extra = spotter
|
||||
|
||||
description = ''
|
||||
if "@spot.net" in nzb_id:
|
||||
try:
|
||||
# Get details for extended description to retrieve passwords
|
||||
query = self.buildDetailsUrl(nzb_id, host['api_key'])
|
||||
url = '%s%s' % (self.getUrl(host['host']), query)
|
||||
nzb_details = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})[0]
|
||||
|
||||
description = self.getTextElement(nzb_details, 'description')
|
||||
|
||||
# Extract a password from the description
|
||||
password = re.search('(?:' + self.passwords_regex + ')(?: *)(?:\:|\=)(?: *)(.*?)\<br\>|\n|$', description, flags = re.I).group(1)
|
||||
if password:
|
||||
name += ' {{%s}}' % password.strip()
|
||||
except:
|
||||
log.debug('Error getting details of "%s": %s', (name, traceback.format_exc()))
|
||||
|
||||
results.append({
|
||||
'id': nzb_id,
|
||||
'provider_extra': urlparse(host['host']).hostname or host['host'],
|
||||
@@ -87,8 +107,9 @@ class Base(NZBProvider, RSS):
|
||||
'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
|
||||
'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,
|
||||
'url': ((self.getUrl(host['host']) + self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host),
|
||||
'detail_url': '%sdetails/%s' % (cleanHost(host['host']), tryUrlencode(nzb_id)),
|
||||
'detail_url': detail_url,
|
||||
'content': self.getTextElement(nzb, 'description'),
|
||||
'description': description,
|
||||
'score': host['extra_score'],
|
||||
})
|
||||
|
||||
@@ -166,24 +187,16 @@ class Base(NZBProvider, RSS):
|
||||
return 'try_next'
|
||||
|
||||
try:
|
||||
# Get final redirected url
|
||||
log.debug('Checking %s for redirects.', url)
|
||||
req = urllib2.Request(url)
|
||||
req.add_header('User-Agent', self.user_agent)
|
||||
res = urllib2.urlopen(req)
|
||||
finalurl = res.geturl()
|
||||
if finalurl != url:
|
||||
log.debug('Redirect url used: %s', finalurl)
|
||||
|
||||
data = self.urlopen(finalurl, show_error = False)
|
||||
data = self.urlopen(url, show_error = False, headers = {'User-Agent': Env.getIdentifier()})
|
||||
self.limits_reached[host] = False
|
||||
return data
|
||||
except HTTPError as e:
|
||||
if e.code == 503:
|
||||
sc = e.response.status_code
|
||||
if sc in [503, 429]:
|
||||
response = e.read().lower()
|
||||
if 'maximum api' in response or 'download limit' in response:
|
||||
if sc == 429 or 'maximum api' in response or 'download limit' in response:
|
||||
if not self.limits_reached.get(host):
|
||||
log.error('Limit reached for newznab provider: %s', host)
|
||||
log.error('Limit reached / to many requests for newznab provider: %s', host)
|
||||
self.limits_reached[host] = time.time()
|
||||
return 'try_next'
|
||||
|
||||
@@ -191,6 +204,15 @@ class Base(NZBProvider, RSS):
|
||||
|
||||
return 'try_next'
|
||||
|
||||
def buildDetailsUrl(self, nzb_id, api_key):
|
||||
query = tryUrlencode({
|
||||
't': 'details',
|
||||
'id': nzb_id,
|
||||
'apikey': api_key,
|
||||
})
|
||||
return query
|
||||
|
||||
|
||||
|
||||
config = [{
|
||||
'name': 'newznab',
|
||||
@@ -203,8 +225,9 @@ config = [{
|
||||
'description': 'Enable <a href="http://newznab.com/" target="_blank">NewzNab</a> such as <a href="https://nzb.su" target="_blank">NZB.su</a>, \
|
||||
<a href="https://nzbs.org" target="_blank">NZBs.org</a>, <a href="http://dognzb.cr/" target="_blank">DOGnzb.cr</a>, \
|
||||
<a href="https://github.com/spotweb/spotweb" target="_blank">Spotweb</a>, <a href="https://nzbgeek.info/" target="_blank">NZBGeek</a>, \
|
||||
<a href="https://smackdownonyou.com" target="_blank">SmackDown</a>, <a href="https://www.nzbfinder.ws" target="_blank">NZBFinder</a>',
|
||||
<a href="https://www.nzbfinder.ws" target="_blank">NZBFinder</a>',
|
||||
'wizard': True,
|
||||
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQAgMAAABinRfyAAAACVBMVEVjhwD///86aRovd/sBAAAAMklEQVQI12NgAIPQUCCRmQkjssDEShiRuRIqwZqZGcDAGBrqANUhGgIkWAOABKMDxCAA24UK50b26SAAAAAASUVORK5CYII=',
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
@@ -213,30 +236,30 @@ config = [{
|
||||
},
|
||||
{
|
||||
'name': 'use',
|
||||
'default': '0,0,0,0,0,0'
|
||||
'default': '0,0,0,0,0'
|
||||
},
|
||||
{
|
||||
'name': 'host',
|
||||
'default': 'api.nzb.su,dognzb.cr,nzbs.org,https://index.nzbgeek.info, https://smackdownonyou.com, https://www.nzbfinder.ws',
|
||||
'default': 'api.nzb.su,api.dognzb.cr,nzbs.org,https://api.nzbgeek.info,https://www.nzbfinder.ws',
|
||||
'description': 'The hostname of your newznab provider',
|
||||
},
|
||||
{
|
||||
'name': 'extra_score',
|
||||
'advanced': True,
|
||||
'label': 'Extra Score',
|
||||
'default': '0,0,0,0,0,0',
|
||||
'default': '0,0,0,0,0',
|
||||
'description': 'Starting score for each release found via this provider.',
|
||||
},
|
||||
{
|
||||
'name': 'custom_tag',
|
||||
'advanced': True,
|
||||
'label': 'Custom tag',
|
||||
'default': ',,,,,',
|
||||
'default': ',,,,',
|
||||
'description': 'Add custom tags, for example add rls=1 to get only scene releases from nzbs.org',
|
||||
},
|
||||
{
|
||||
'name': 'api_key',
|
||||
'default': ',,,,,',
|
||||
'default': ',,,,',
|
||||
'label': 'Api Key',
|
||||
'description': 'Can be found on your profile page',
|
||||
'type': 'combined',
|
||||
|
||||
@@ -80,6 +80,7 @@ config = [{
|
||||
'name': 'NZBClub',
|
||||
'description': 'Free provider, less accurate. See <a href="https://www.nzbclub.com/">NZBClub</a>',
|
||||
'wizard': True,
|
||||
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACEUlEQVQ4y3VSMWgUQRR9/8/s7OzeJSdnTsVGghLEYBNQjBpQiRBFhIB2EcHG1kbs0murhZAmVocExEZQ0c7CxkLINYcJJpoYj9wZcnu72fF21uJSXMzuhyne58/j/fcf4b+KokgBIOSU53lxP5b9oNVqDT36dH+5UjoiKvIwPFEEgWBshGZ3E7/NOupL9fMjx0e+ZhKsrq+c/FPZKJi0w4FsQXMBDEJsd7BNW9h2tuyP9vfTALIJkMIu1hYRtINM+dpzcWc0sbkreK4fUEogyraAmKGF3+7vcT/wtR9QwkCabSAzQQuvk0uglAo5YaQ5DASGYjfMXcHVOqKu6NmR7iehlKAdHWUqWPv1c3i+9uwVdRlEBGaGEAJCCrDo9ShhvF6qPq8tL57bp+DbRn2sHtUuCY9YphLMu5921VhrwYJ5tbt0tt6sjQP4vEfB2Ikz7/ytwbeR6ljHkXCUA6UcOLtPOg4MYhtH8ZcLw5er+xQMDAwEURRNl96X596Y6oxFwsw9fmtTOAr2Ik19nL365FZpsLSdnQPPM8aYewc+lDcX4rkHqbQMAGTJXulOLzycmr1bKBTi3DOGYagajcahiaOT89fbM0/dxEsUu3aidfPljWO3HzebzYNBELi5Z5RSJlrrHd/3w8lT114MrVTWOn875fHRiYVisRhorWMpZXdvNnLKGCOstb0AMlulVJI19w/+nceU4D0aCwAAAABJRU5ErkJggg==',
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
|
||||
@@ -1,125 +0,0 @@
|
||||
import re
|
||||
import time
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
from couchpotato.core.helpers.encoding import toUnicode
|
||||
from couchpotato.core.helpers.rss import RSS
|
||||
from couchpotato.core.helpers.variable import tryInt
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.event import fireEvent
|
||||
from couchpotato.core.media._base.providers.nzb.base import NZBProvider
|
||||
from dateutil.parser import parse
|
||||
|
||||
|
||||
log = CPLog(__name__)
|
||||
|
||||
|
||||
class Base(NZBProvider, RSS):
|
||||
|
||||
urls = {
|
||||
'download': 'https://www.nzbindex.com/download/',
|
||||
'search': 'https://www.nzbindex.com/rss/?%s',
|
||||
}
|
||||
|
||||
http_time_between_calls = 1 # Seconds
|
||||
|
||||
def _search(self, media, quality, results):
|
||||
|
||||
nzbs = self.getRSSData(self.urls['search'] % self.buildUrl(media, quality))
|
||||
|
||||
for nzb in nzbs:
|
||||
|
||||
enclosure = self.getElement(nzb, 'enclosure').attrib
|
||||
nzbindex_id = int(self.getTextElement(nzb, "link").split('/')[4])
|
||||
|
||||
title = self.getTextElement(nzb, "title")
|
||||
|
||||
match = fireEvent('matcher.parse', title, parser='usenet', single = True)
|
||||
if not match.chains:
|
||||
log.info('Unable to parse release with title "%s"', title)
|
||||
continue
|
||||
|
||||
# TODO should we consider other lower-weight chains here?
|
||||
info = fireEvent('matcher.flatten_info', match.chains[0].info, single = True)
|
||||
|
||||
release_name = fireEvent('matcher.construct_from_raw', info.get('release_name'), single = True)
|
||||
|
||||
file_name = info.get('detail', {}).get('file_name')
|
||||
file_name = file_name[0] if file_name else None
|
||||
|
||||
title = release_name or file_name
|
||||
|
||||
# Strip extension from parsed title (if one exists)
|
||||
ext_pos = title.rfind('.')
|
||||
|
||||
# Assume extension if smaller than 4 characters
|
||||
# TODO this should probably be done a better way
|
||||
if len(title[ext_pos + 1:]) <= 4:
|
||||
title = title[:ext_pos]
|
||||
|
||||
if not title:
|
||||
log.info('Unable to find release name from match')
|
||||
continue
|
||||
|
||||
try:
|
||||
description = self.getTextElement(nzb, "description")
|
||||
except:
|
||||
description = ''
|
||||
|
||||
def extra_check(item):
|
||||
if '#c20000' in item['description'].lower():
|
||||
log.info('Wrong: Seems to be passworded: %s', item['name'])
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
results.append({
|
||||
'id': nzbindex_id,
|
||||
'name': title,
|
||||
'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, "pubDate")).timetuple()))),
|
||||
'size': tryInt(enclosure['length']) / 1024 / 1024,
|
||||
'url': enclosure['url'],
|
||||
'detail_url': enclosure['url'].replace('/download/', '/release/'),
|
||||
'description': description,
|
||||
'get_more_info': self.getMoreInfo,
|
||||
'extra_check': extra_check,
|
||||
})
|
||||
|
||||
def getMoreInfo(self, item):
|
||||
try:
|
||||
if '/nfo/' in item['description'].lower():
|
||||
nfo_url = re.search('href=\"(?P<nfo>.+)\" ', item['description']).group('nfo')
|
||||
full_description = self.getCache('nzbindex.%s' % item['id'], url = nfo_url, cache_timeout = 25920000)
|
||||
html = BeautifulSoup(full_description)
|
||||
item['description'] = toUnicode(html.find('pre', attrs = {'id': 'nfo0'}).text)
|
||||
except:
|
||||
pass
|
||||
|
||||
|
||||
config = [{
|
||||
'name': 'nzbindex',
|
||||
'groups': [
|
||||
{
|
||||
'tab': 'searcher',
|
||||
'list': 'nzb_providers',
|
||||
'name': 'nzbindex',
|
||||
'description': 'Free provider, less accurate. See <a href="https://www.nzbindex.com/">NZBIndex</a>',
|
||||
'wizard': True,
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
'type': 'enabler',
|
||||
'default': True,
|
||||
},
|
||||
{
|
||||
'name': 'extra_score',
|
||||
'advanced': True,
|
||||
'label': 'Extra Score',
|
||||
'type': 'int',
|
||||
'default': 0,
|
||||
'description': 'Starting score for each release found via this provider.',
|
||||
}
|
||||
],
|
||||
},
|
||||
],
|
||||
}]
|
||||
@@ -1,13 +1,9 @@
|
||||
from urlparse import urlparse, parse_qs
|
||||
import time
|
||||
|
||||
from couchpotato.core.event import fireEvent
|
||||
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
|
||||
from couchpotato.core.helpers.rss import RSS
|
||||
from couchpotato.core.helpers.variable import tryInt
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.media._base.providers.nzb.base import NZBProvider
|
||||
from dateutil.parser import parse
|
||||
|
||||
|
||||
log = CPLog(__name__)
|
||||
@@ -16,27 +12,19 @@ log = CPLog(__name__)
|
||||
class Base(NZBProvider, RSS):
|
||||
|
||||
urls = {
|
||||
'search': 'https://rss.omgwtfnzbs.org/rss-search.php?%s',
|
||||
'detail_url': 'https://omgwtfnzbs.org/details.php?id=%s',
|
||||
'search': 'https://api.omgwtfnzbs.org/json/?%s',
|
||||
}
|
||||
|
||||
http_time_between_calls = 1 # Seconds
|
||||
|
||||
cat_ids = [
|
||||
([15], ['dvdrip']),
|
||||
([15], ['dvdrip', 'scr', 'r5', 'tc', 'ts', 'cam']),
|
||||
([15, 16], ['brrip']),
|
||||
([16], ['720p', '1080p', 'bd50']),
|
||||
([17], ['dvdr']),
|
||||
]
|
||||
cat_backup_id = 'movie'
|
||||
|
||||
def search(self, movie, quality):
|
||||
|
||||
if quality['identifier'] in fireEvent('quality.pre_releases', single = True):
|
||||
return []
|
||||
|
||||
return super(Base, self).search(movie, quality)
|
||||
|
||||
def _searchOnTitle(self, title, movie, quality, results):
|
||||
|
||||
q = '%s %s' % (title, movie['info']['year'])
|
||||
@@ -47,22 +35,20 @@ class Base(NZBProvider, RSS):
|
||||
'api': self.conf('api_key', default = ''),
|
||||
})
|
||||
|
||||
nzbs = self.getRSSData(self.urls['search'] % params)
|
||||
nzbs = self.getJsonData(self.urls['search'] % params)
|
||||
|
||||
for nzb in nzbs:
|
||||
if isinstance(nzbs, list):
|
||||
for nzb in nzbs:
|
||||
|
||||
enclosure = self.getElement(nzb, 'enclosure').attrib
|
||||
nzb_id = parse_qs(urlparse(self.getTextElement(nzb, 'link')).query).get('id')[0]
|
||||
|
||||
results.append({
|
||||
'id': nzb_id,
|
||||
'name': toUnicode(self.getTextElement(nzb, 'title')),
|
||||
'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, 'pubDate')).timetuple()))),
|
||||
'size': tryInt(enclosure['length']) / 1024 / 1024,
|
||||
'url': enclosure['url'],
|
||||
'detail_url': self.urls['detail_url'] % nzb_id,
|
||||
'description': self.getTextElement(nzb, 'description')
|
||||
})
|
||||
results.append({
|
||||
'id': nzb.get('nzbid'),
|
||||
'name': toUnicode(nzb.get('release')),
|
||||
'age': self.calculateAge(tryInt(nzb.get('usenetage'))),
|
||||
'size': tryInt(nzb.get('sizebytes')) / 1024 / 1024,
|
||||
'url': nzb.get('getnzb'),
|
||||
'detail_url': nzb.get('details'),
|
||||
'description': nzb.get('weblink')
|
||||
})
|
||||
|
||||
|
||||
config = [{
|
||||
@@ -74,6 +60,7 @@ config = [{
|
||||
'name': 'OMGWTFNZBs',
|
||||
'description': 'See <a href="http://omgwtfnzbs.org/">OMGWTFNZBs</a>',
|
||||
'wizard': True,
|
||||
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQEAIAAADAAbR1AAADbElEQVR4AZ2UW0ybZRiAy/OvdHaLYvB0YTRIFi7GkM44zRLmIfNixkWdiRMyYoxRE8/TC7MYvXCGEBmr3mxLwVMwY0wYA7e6Wso4lB6h/U9taSlMGIfBXLYlJMyo0S///2dJI5lxN8/F2/f9nu9737e/jYmXr6KTbN9BGG9HE/NotQ76UWziNzrXFiETk/5ARUNH+7+0kW7fSgTl0VKGOLZzidOkmuuIo7q2oTArNLPIzhdIkqXkerFOm2CaD/5bcKrjIL2c3fkhPxOq93Kcb91v46fV9TQKF4TgV/TbUsQtzfCaK6jMOd5DJrguSIIhexmqqVxN0FXbRR8/ND/LYTTj6J7nl2gnL47OkDW4KJhnQHCa6JpKVNJGA3OC58nwBJoZ//ebbIyKpBxjrr0o1q1FMRkrKXZnHWF85VvxMrJxibwhGyd0f5bLnKzqJs1k0Sfo+EU8hdAUvkbcwKEgs2D0OiV4jmmD1zb+Tp6er0JMMvDxPo5xev9zTBF683NS+N56n1YiB95B5crr93KRuKhKI0tb0Kw2mgLLqTjLEWO8424i9IvURaYeOckwf3+/yCC9e3bQQ/MuD+Monk0k+XFXMUfx7z5EEP+XlXi5tLlMxH8zLppw7idJrugcus30kC86gc7UrQqjLIukM8zWHOACeU+TiMxXN6ExVOkgz4lvPEzice1GIVhxhG4CrZvpl6TH55giKWqXGLy9hZh5aUtgDSew/msSyCKpl+DDNfxJc8NBIsxUxUnz14O/oONu+IIIvso9TLBQ1SY5rUhuSzUhAqJ2mRXBLDOCeUtgUZXsaObT8BffhUJPqWgiV+3zKKzYH0ClvTRLhD77HIqVkyh5jThnivehoG+qJctIRSPn6bxvO4FCgTl9c1DmbpjLajbQFE8aW5SU3rg+zOPGUjTUF9NFpLEbH2c/KmGYlY69/GQJVtGMSUcEp9eCbB1nctbxHTLRdTUkGDf+B02uGWRG3OvpJ/zSMwzif+oxVBID3cQKBavLCiPmB2PM2UuSCUPgrX4VDb97AwEG67bh4+KTOlncvu3M31BwA5rLHbCfEjwkNDky9e/SSbSxnD46Pg0RJtpXRvhmBSZHpRjWtKwFybjuQeXaKxto4WjLZZZvVmC17pZLJFkwxm5++PS2Mrwc7nyIMYZe/IzoP5d6QgEybqTXAAAAAElFTkSuQmCC',
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
|
||||
@@ -61,7 +61,7 @@ class Base(TorrentProvider):
|
||||
'name': re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) %s' % (name, year, torrent_desc)),
|
||||
'url': self.urls['download'] % (torrent_id, authkey, self.conf('passkey')),
|
||||
'detail_url': self.urls['detail'] % torrent_id,
|
||||
'size': self.parseSize(entry.find('size').get_text()),
|
||||
'size': tryInt(entry.find('size').get_text()) / 1048576,
|
||||
'seeders': tryInt(entry.find('seeders').get_text()),
|
||||
'leechers': tryInt(entry.find('leechers').get_text()),
|
||||
'score': torrentscore
|
||||
@@ -78,8 +78,9 @@ config = [{
|
||||
'tab': 'searcher',
|
||||
'list': 'torrent_providers',
|
||||
'name': 'Awesome-HD',
|
||||
'description': 'See <a href="https://awesome-hd.net">AHD</a>',
|
||||
'description': '<a href="https://awesome-hd.net">AHD</a>',
|
||||
'wizard': True,
|
||||
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAC+UlEQVR4AV1SO0y6dxQ9H4g8CoIoohZ5NA0aR2UgkYpNB5uocTSaLlrDblMH09Gt8d90r3YpJkanxjA4GGkbO7RNxSABq8jDGnkpD+UD5NV7Bxvbk9wvv+/3uPece66A/yEWi42FQqHVfD7/cbPZtIEglUpjOp3uZHR0dBvAn3gDIRqNgjE4OKj0+Xzf3NzcfD4wMCCjf5TLZbTbbajVatzf3+Pu7q5uNpt35ufnvwBQAScQRREEldfr9RWLxan+/n5YrVa+jFarhVfQQyQSCU4EhULhX15engEgSrjC0dHRVqlUmjQYDBgaGgKtuTqz4mTgIoVCASaTCX19fajVapOHh4dbFJBks9mxcDi8qtFoJEajkfVyJWi1WkxMTMDhcIAT8x6D7/Dd6+vr1fHx8TGp2+3+iqo5+YCzBwIBToK5ubl/mQwPDyMSibAs2Gw2UHNRrValz8/PDUk8Hv9EqVRCr9fj4uICTNflcqFer+Pg4AB7e3uoVCq8x9Rxfn6O7u5uqFQq8FspZXxHTekggByA3W4Hr9PpNDeRL3I1cMhkMrBrnZ2dyGQyvNYIs7OzVbJNPjIyAraLwYdcjR8wXl5eIJfLwRIFQQDLYkm3t7c1CdGPPT4+cpOImp4PODMeaK+n10As2jBbrHifHOjS6qAguVFimkqlwAMmIQnHV1dX4NDQhVwuhyZTV6pgIktzDzkkk0lEwhEEzs7ASQr5Ai4vL1nuccfCwsLO/v6+p9FoyJhF6ekJro/cPCzIZLNQa7rQoK77/SdgWWpKkCaJ5EB9aWnpe6nH40nRMBnJV4f5gw+FX3/5GX/8/htXRZdOzzqhJWn6nl6YbTZqqhrhULD16fT0d8FgcFtYW1vD5uamfGVl5cd4IjldKhZACdkJvKfWUANrxEaJV4hiGVaL1b+7653hXzwRZQr2X76xsfG1xWIRaZzbNPv/CdrjEL9cX/+WXFBSgEPgzxuwG3Yans9OT0+naBZMIJDNfzudzp8WFxd/APAX3uAf9WOTxOPLdosAAAAASUVORK5CYII=',
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
|
||||
@@ -44,7 +44,8 @@ class TorrentProvider(YarrProvider):
|
||||
|
||||
prop_name = 'proxy.%s' % proxy
|
||||
last_check = float(Env.prop(prop_name, default = 0))
|
||||
if last_check > time.time() - 1209600:
|
||||
|
||||
if last_check > time.time() - 86400:
|
||||
continue
|
||||
|
||||
data = ''
|
||||
|
||||
@@ -13,11 +13,11 @@ log = CPLog(__name__)
|
||||
class Base(TorrentProvider):
|
||||
|
||||
urls = {
|
||||
'test': 'http://www.bit-hdtv.com/',
|
||||
'login': 'http://www.bit-hdtv.com/takelogin.php',
|
||||
'login_check': 'http://www.bit-hdtv.com/messages.php',
|
||||
'detail': 'http://www.bit-hdtv.com/details.php?id=%s',
|
||||
'search': 'http://www.bit-hdtv.com/torrents.php?',
|
||||
'test': 'https://www.bit-hdtv.com/',
|
||||
'login': 'https://www.bit-hdtv.com/takelogin.php',
|
||||
'login_check': 'https://www.bit-hdtv.com/messages.php',
|
||||
'detail': 'https://www.bit-hdtv.com/details.php?id=%s',
|
||||
'search': 'https://www.bit-hdtv.com/torrents.php?',
|
||||
}
|
||||
|
||||
# Searches for movies only - BiT-HDTV's subcategory and resolution search filters appear to be broken
|
||||
@@ -25,7 +25,7 @@ class Base(TorrentProvider):
|
||||
|
||||
def _search(self, media, quality, results):
|
||||
|
||||
query = self.buildUrl(media)
|
||||
query = self.buildUrl(media, quality)
|
||||
|
||||
url = "%s&%s" % (self.urls['search'], query)
|
||||
|
||||
@@ -93,8 +93,9 @@ config = [{
|
||||
'tab': 'searcher',
|
||||
'list': 'torrent_providers',
|
||||
'name': 'BiT-HDTV',
|
||||
'description': 'See <a href="http://bit-hdtv.com">BiT-HDTV</a>',
|
||||
'description': '<a href="https://bit-hdtv.com">BiT-HDTV</a>',
|
||||
'wizard': True,
|
||||
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAABMklEQVR4AZ3Qu0ojcQCF8W9MJcQbJNgEEQUbQVIqWgnaWfkIvoCgggixEAmIhRtY2GV3w7KwU61B0EYIxmiw0YCik84ipaCuc0nmP5dcjIUgOjqDvxf4OAdf9mnMLcUJyPyGSCP+YRdC+Kp8iagJKhuS+InYRhTGgDbeV2uEMand4ZRxizjXHQEimxhraAnUr73BNqQxMiNeV2SwcjTLEVtb4Zl10mXutvOWm2otw5Sxz6TGTbdd6ncuYvVLXAXrvM+ruyBpy1S3JLGDfUQ1O6jn5vTsrJXvqSt4UNfj6vxTRPxBHER5QeSirhLGk/5rWN+ffB1XZuxjnDy1q87m7TS+xOGA+Iv4gfkbaw+nOMXHDHnITGEk0VfRFnn4Po4vNYm6RGukmggR0L08+l+e4HMeASo/i6AJUjLgAAAAAElFTkSuQmCC',
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import traceback
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode
|
||||
from bs4 import BeautifulSoup, SoupStrainer
|
||||
from couchpotato.core.helpers.variable import tryInt
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
|
||||
@@ -16,25 +15,23 @@ class Base(TorrentProvider):
|
||||
'test': 'https://www.bitsoup.me/',
|
||||
'login': 'https://www.bitsoup.me/takelogin.php',
|
||||
'login_check': 'https://www.bitsoup.me/my.php',
|
||||
'search': 'https://www.bitsoup.me/browse.php?',
|
||||
'search': 'https://www.bitsoup.me/browse.php?%s',
|
||||
'baseurl': 'https://www.bitsoup.me/%s',
|
||||
}
|
||||
|
||||
http_time_between_calls = 1 # Seconds
|
||||
only_tables_tags = SoupStrainer('table')
|
||||
|
||||
torrent_name_cell = 1
|
||||
torrent_download_cell = 2
|
||||
|
||||
def _searchOnTitle(self, title, movie, quality, results):
|
||||
|
||||
q = '"%s" %s' % (simplifyString(title), movie['info']['year'])
|
||||
arguments = tryUrlencode({
|
||||
'search': q,
|
||||
})
|
||||
url = "%s&%s" % (self.urls['search'], arguments)
|
||||
|
||||
url = self.urls['search'] % self.buildUrl(movie, quality)
|
||||
url = self.urls['search'] % self.buildUrl(title, movie, quality)
|
||||
data = self.getHTMLData(url)
|
||||
|
||||
if data:
|
||||
html = BeautifulSoup(data)
|
||||
html = BeautifulSoup(data, 'html.parser', parse_only = self.only_tables_tags)
|
||||
|
||||
try:
|
||||
result_table = html.find('table', attrs = {'class': 'koptekst'})
|
||||
@@ -46,8 +43,8 @@ class Base(TorrentProvider):
|
||||
|
||||
all_cells = result.find_all('td')
|
||||
|
||||
torrent = all_cells[1].find('a')
|
||||
download = all_cells[3].find('a')
|
||||
torrent = all_cells[self.torrent_name_cell].find('a')
|
||||
download = all_cells[self.torrent_download_cell].find('a')
|
||||
|
||||
torrent_id = torrent['href']
|
||||
torrent_id = torrent_id.replace('details.php?id=', '')
|
||||
@@ -55,9 +52,9 @@ class Base(TorrentProvider):
|
||||
|
||||
torrent_name = torrent.getText()
|
||||
|
||||
torrent_size = self.parseSize(all_cells[7].getText())
|
||||
torrent_seeders = tryInt(all_cells[9].getText())
|
||||
torrent_leechers = tryInt(all_cells[10].getText())
|
||||
torrent_size = self.parseSize(all_cells[8].getText())
|
||||
torrent_seeders = tryInt(all_cells[10].getText())
|
||||
torrent_leechers = tryInt(all_cells[11].getText())
|
||||
torrent_url = self.urls['baseurl'] % download['href']
|
||||
torrent_detail_url = self.urls['baseurl'] % torrent['href']
|
||||
|
||||
@@ -94,8 +91,9 @@ config = [{
|
||||
'tab': 'searcher',
|
||||
'list': 'torrent_providers',
|
||||
'name': 'Bitsoup',
|
||||
'description': 'See <a href="https://bitsoup.me">Bitsoup</a>',
|
||||
'description': '<a href="https://bitsoup.me">Bitsoup</a>',
|
||||
'wizard': True,
|
||||
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAB8ElEQVR4AbWSS2sTURiGz3euk0mswaE37HhNhIrajQheFgF3rgR/lAt/gOBCXNZlo6AbqfUWRVCxi04wqUnTRibpJLaJzdzOOZ6WUumyC5/VHOb9eN/FA91uFx0FjI4IPfgiGLTWH73tn348GKmN7ijD0d2b41fO5qJEaX24AWNIUrVQCTTJ3Llx6vbV6Vtzk7Gi9+ebi996guFDDYAQAVj4FExP5qdOZB49W62t/zH3hECcwsPnbWeMXz6Xi2K1f0ApeK3hMCHHbP5gvvoriBgFAAQJEAxhjJ4u+YWTNsVI6b1JgtPWZkoIefKy4fcii2OTw2BABs7wj3bYDlLL4rvjGWOdTser1j5Xf7c3Q/MbHQYApxItvnm31mhQQ71eX2vUB76/vsWB2hg0QuogrMwLIG8P3InM2/eVGXeDViqVwWB79vRU2lgJYmdHcgXCTAXQFJTN5HguvDCR2Hxsxe8EvT54nlcul5vNpqDIEgwRQanAhAAABgRIyiQcjpIkkTOuWyqVoN/vSylX67XXH74uV1vHRUyxxFqbLBCSmBpiXSq6xcL5QrGYzWZ3XQIAwdlOJB+/aL764ucdmncYs0WsCI7kvTnn+qyDMEnTVCn1Tz5KsBFg6fvWcmsUAcnYNC/g2hnromvvqbHvxv+39S+MX+bWkFXwAgAAAABJRU5ErkJggg==',
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
|
||||
130
couchpotato/core/media/_base/providers/torrent/hdaccess.py
Normal file
130
couchpotato/core/media/_base/providers/torrent/hdaccess.py
Normal file
@@ -0,0 +1,130 @@
|
||||
import re
|
||||
import traceback
|
||||
|
||||
from couchpotato.core.helpers.variable import tryInt, getIdentifier
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
|
||||
|
||||
|
||||
log = CPLog(__name__)
|
||||
|
||||
|
||||
class Base(TorrentProvider):
|
||||
|
||||
urls = {
|
||||
'test': 'https://hdaccess.net/',
|
||||
'detail': 'https://hdaccess.net/details.php?id=%s',
|
||||
'search': 'https://hdaccess.net/searchapi.php?apikey=%s&username=%s&imdbid=%s&internal=%s',
|
||||
'download': 'https://hdaccess.net/grab.php?torrent=%s&apikey=%s',
|
||||
}
|
||||
|
||||
http_time_between_calls = 1 # Seconds
|
||||
|
||||
def _search(self, movie, quality, results):
|
||||
data = self.getJsonData(self.urls['search'] % (self.conf('apikey'), self.conf('username'), getIdentifier(movie), self.conf('internal_only')))
|
||||
|
||||
if data:
|
||||
try:
|
||||
#for result in data[]:
|
||||
for key, result in data.iteritems():
|
||||
if tryInt(result['total_results']) == 0:
|
||||
return
|
||||
torrentscore = self.conf('extra_score')
|
||||
releasegroup = result['releasegroup']
|
||||
resolution = result['resolution']
|
||||
encoding = result['encoding']
|
||||
freeleech = tryInt(result['freeleech'])
|
||||
seeders = tryInt(result['seeders'])
|
||||
torrent_desc = '/ %s / %s / %s / %s seeders' % (releasegroup, resolution, encoding, seeders)
|
||||
|
||||
if freeleech > 0 and self.conf('prefer_internal'):
|
||||
torrent_desc += '/ Internal'
|
||||
torrentscore += 200
|
||||
|
||||
if seeders == 0:
|
||||
torrentscore = 0
|
||||
|
||||
name = result['release_name']
|
||||
year = tryInt(result['year'])
|
||||
|
||||
results.append({
|
||||
'id': tryInt(result['torrentid']),
|
||||
'name': re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) %s' % (name, year, torrent_desc)),
|
||||
'url': self.urls['download'] % (result['torrentid'], self.conf('apikey')),
|
||||
'detail_url': self.urls['detail'] % result['torrentid'],
|
||||
'size': tryInt(result['size']),
|
||||
'seeders': tryInt(result['seeders']),
|
||||
'leechers': tryInt(result['leechers']),
|
||||
'age': tryInt(result['age']),
|
||||
'score': torrentscore
|
||||
})
|
||||
except:
|
||||
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
|
||||
config = [{
|
||||
'name': 'hdaccess',
|
||||
'groups': [
|
||||
{
|
||||
'tab': 'searcher',
|
||||
'list': 'torrent_providers',
|
||||
'name': 'HDAccess',
|
||||
'wizard': True,
|
||||
'description': '<a href="https://hdaccess.net">HDAccess</a>',
|
||||
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABmJLR0QA/wD/AP+gvaeTAAADuUlEQVQ4yz3T209bdQAH8O/vnNNzWno5FIpAKZdSLi23gWMDtumWuSXOyzJj9M1kyIOPS1xiYuKe9GUPezZZnGIiMTqTxS1bdIuYkG2MWKBAKYVszOgKFkrbA+259HfO+fli/PwPHzI+Pg5CCEAI2VcUlEsl1tHdU7P5bGOkWChEaaUCwvHpmkD93POn6bwgCMQGAMYYYwyCruuQnE7SPzjIstvb8l+bm5fXkokJSmlQEkUQAIpSRH5vd0tyum7I/sA1Z5VH2ctmiGWZjHw4McE1NAZtQ9fD25kXt1VN7es7dNjuGRjiJFeVpWo6slsZPhF/Ys/PPeIs2056ff7zIOS5rpU5/viJEwwEnu3Mi18dojjw0aWP6amz57h9RSE/35zinq2nuGjvIQwOj7K2SKeZWkk0auXSSZ+/ZopSy+CbW1pQKpWu6Jr2/qVPPqWRjm6HWi6Tm999g3RyGbndLCqGgVBrO3F7fHykK0YX47NNtGLYlBq/c+H2iD+3k704dHQUDcFmQVXLyP6zhfTqCl45fQYjx17FemoJunoAk1bQFGoVhkdPwNC0ix2dMT+3llodM02rKdo7gN3dHAEhuH/vNgDg3Pl3cPaNt2GZJpYX5lBbFwClBukfGobL5WrayW6NccVCISY4HIQxYts2Q3J5CXOPHuLlo6NoCoXQ2hbG0JFRpJYWcVDIQ5ZlyL5qW5b9hNlWjKsYBgzDgKppMCoGHty7A0orOHbyNNweL+obGnDm9TdhWSYS8Vn4a2shOZ0QJRGSKIHjeGGtWNhjqqpyG+k04k8eozPai9ZwByavf4kfpyZxZGwMfYOHsbwQx34hB5dL4syKweRq/xpXHwzNapqWSSYWMDszzYqFPEaOn4KiKJiZfoCZ6d8Am+GtC++iXCpjaf4P9vefT8HzfKarp3eWRKMxCILwuWXSz977YIK2RTodDoGH1+OG1+tDlbsKkuiAJEngeWBjNUUnv7rucIiOLyzTvMKJTgnVtbVXLctK3L31g+NAUajL5bEptaDpOnTdgGkzVHl9drms0ju3fnJIkphoaQtfbQiFwAcCAY5wnCE5Xff3i8XX4o9nGksH+8zl9hAGZlWMCivkc9z0L3fZ999+LTCGZKi55YJTFHfye3sc6e/vB88LpK6+iWlqSS4WcpcNXZtwOp3B6mo/REmCSSkEgd+qq3vpRkt75Fp9Y1BZWZwnhq4zEovF/u/MATAti4U7umvyu9kR27aikihC9vvTnV2xufVUMu/2uIksy/9tZvgX49fLmAMx3bsAAAAASUVORK5CYII=',
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
'type': 'enabler',
|
||||
'default': False,
|
||||
},
|
||||
{
|
||||
'name': 'username',
|
||||
'default': '',
|
||||
'description': 'Enter your site username.',
|
||||
},
|
||||
{
|
||||
'name': 'apikey',
|
||||
'default': '',
|
||||
'label': 'API Key',
|
||||
'description': 'Enter your site api key. This can be find on <a href="https://hdaccess.net/usercp.php?action=security">Profile Security</a>',
|
||||
},
|
||||
{
|
||||
'name': 'seed_ratio',
|
||||
'label': 'Seed ratio',
|
||||
'type': 'float',
|
||||
'default': 0,
|
||||
'description': 'Will not be (re)moved until this seed ratio is met. HDAccess minimum is 1:1.',
|
||||
},
|
||||
{
|
||||
'name': 'seed_time',
|
||||
'label': 'Seed time',
|
||||
'type': 'int',
|
||||
'default': 0,
|
||||
'description': 'Will not be (re)moved until this seed time (in hours) is met. HDAccess minimum is 48 hours.',
|
||||
},
|
||||
{
|
||||
'name': 'prefer_internal',
|
||||
'advanced': True,
|
||||
'type': 'bool',
|
||||
'default': 1,
|
||||
'description': 'Favors internal releases over non-internal releases.',
|
||||
},
|
||||
{
|
||||
'name': 'internal_only',
|
||||
'advanced': True,
|
||||
'label': 'Internal Only',
|
||||
'type': 'bool',
|
||||
'default': False,
|
||||
'description': 'Only download releases marked as HDAccess internal',
|
||||
},
|
||||
{
|
||||
'name': 'extra_score',
|
||||
'advanced': True,
|
||||
'label': 'Extra Score',
|
||||
'type': 'int',
|
||||
'default': 0,
|
||||
'description': 'Starting score for each release found via this provider.',
|
||||
}
|
||||
],
|
||||
},
|
||||
],
|
||||
}]
|
||||
@@ -29,6 +29,9 @@ class Base(TorrentProvider):
|
||||
}
|
||||
post_data.update(params)
|
||||
|
||||
if self.conf('internal_only'):
|
||||
post_data.update({'origin': [1]})
|
||||
|
||||
try:
|
||||
result = self.getJsonData(self.urls['api'], data = json.dumps(post_data))
|
||||
|
||||
@@ -71,7 +74,9 @@ config = [{
|
||||
'tab': 'searcher',
|
||||
'list': 'torrent_providers',
|
||||
'name': 'HDBits',
|
||||
'description': 'See <a href="http://hdbits.org">HDBits</a>',
|
||||
'wizard': True,
|
||||
'description': '<a href="http://hdbits.org">HDBits</a>',
|
||||
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAABi0lEQVR4AZWSzUsbQRjGdyabTcvSNPTSHlpQQeMHJApC8CJRvHgQQU969+LJP8G7f4N3DwpeFRQvRr0EKaUl0ATSpkigUNFsMl/r9NmZLCEHA/nNO5PfvMPDm0DI6fV3ZxiolEICe1oZCBVCCmBPKwOh2ErKBHGE4KYEXBpSLkUlqO4LcM7f+6nVhRnOhSkOz/hexk+tL+YL0yPF2YmN4tynD++4gTLGkNNac9YFLoREBR1+cnF3dFY6v/m6PD+FaXiNJtgA4xYbABxiGrz6+6HWaI5/+Qh37YS0/3Znc8UxwNGBIIBX22z+/ZdJ+4wzyjpR4PEpODg8tgUXBv2iWUzSpa12B0IR6n6lvt8Aek2lZHb084+fdRNgrwY8z81PjhVy2d2ttUrtV/lbBa+JXGEpDMPnoF2tN1QYRqVUtf6nFbThb7wk7le395elcqhASLb39okDiHY00VCtCTEHwSiH4AI0lkOiT1dwMeSfT3SRxiQWNO7Zwj1egkoVIQFMKvSiC3bcjXq9Jf8DcDIRT3hh10kAAAAASUVORK5CYII=',
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
@@ -108,6 +113,14 @@ config = [{
|
||||
'default': 0,
|
||||
'description': 'Starting score for each release found via this provider.',
|
||||
},
|
||||
{
|
||||
'name': 'internal_only',
|
||||
'advanced': True,
|
||||
'label': 'Internal Only',
|
||||
'type': 'bool',
|
||||
'default': False,
|
||||
'description': 'Only download releases marked as HDBits internal'
|
||||
}
|
||||
],
|
||||
},
|
||||
],
|
||||
|
||||
@@ -3,7 +3,7 @@ import traceback
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
|
||||
from couchpotato.core.helpers.variable import tryInt
|
||||
from couchpotato.core.helpers.variable import tryInt, splitString
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
|
||||
|
||||
@@ -15,7 +15,7 @@ class Base(TorrentProvider):
|
||||
|
||||
urls = {
|
||||
'download': 'https://www.ilovetorrents.me/%s',
|
||||
'detail': 'https//www.ilovetorrents.me/%s',
|
||||
'detail': 'https://www.ilovetorrents.me/%s',
|
||||
'search': 'https://www.ilovetorrents.me/browse.php?search=%s&page=%s&cat=%s',
|
||||
'test': 'https://www.ilovetorrents.me/',
|
||||
'login': 'https://www.ilovetorrents.me/takelogin.php',
|
||||
@@ -47,17 +47,24 @@ class Base(TorrentProvider):
|
||||
data = self.getHTMLData(search_url)
|
||||
if data:
|
||||
try:
|
||||
soup = BeautifulSoup(data)
|
||||
|
||||
results_table = soup.find('table', attrs = {'class': 'koptekst'})
|
||||
results_table = None
|
||||
|
||||
data_split = splitString(data, '<table')
|
||||
soup = None
|
||||
for x in data_split:
|
||||
soup = BeautifulSoup(x)
|
||||
results_table = soup.find('table', attrs = {'class': 'koptekst'})
|
||||
if results_table:
|
||||
break
|
||||
|
||||
if not results_table:
|
||||
return
|
||||
|
||||
try:
|
||||
pagelinks = soup.findAll(href = re.compile('page'))
|
||||
pageNumbers = [int(re.search('page=(?P<pageNumber>.+'')', i['href']).group('pageNumber')) for i in pagelinks]
|
||||
total_pages = max(pageNumbers)
|
||||
|
||||
page_numbers = [int(re.search('page=(?P<page_number>.+'')', i['href']).group('page_number')) for i in pagelinks]
|
||||
total_pages = max(page_numbers)
|
||||
except:
|
||||
pass
|
||||
|
||||
@@ -139,6 +146,7 @@ config = [{
|
||||
'name': 'ILoveTorrents',
|
||||
'description': 'Where the Love of Torrents is Born',
|
||||
'wizard': True,
|
||||
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACPUlEQVR4AYWM0U9SbxjH3+v266I/oNvWZTfd2J1d0ZqbZEFwWrUImOKs4YwtumFKZvvlJJADR2TCQQlMPKg5NmpREgaekAPnBATKgmK1LqQlx6awHnZWF1Tr2Xfvvs+7z+dB0mlO7StpAh+M4S/2jbo3w8+xvJvlnSneEt+10zwer5ujNUOoChjALWFw5XOwdCAk/P57cGvPl+Oht0W7VJHN5NC1uW1BON4hGjXbwpVWMZhsy9v7sEIXAsDNYBXgdkEoIKyWD2CF8ut/aOXTZc/fBSgLWw1BgA4BDHOV0GkT90cBQpXahU5TFomsb38XhJC5/Tbh1P8c6rJlBeGfAeyMhUFwNVcs9lxV9Ot0dwmyd+mrNvRtbJ2fSPC6Z3Vsvub2z3sDFACAAYzk0+kUyxEkyfN7PopqNBro55A+P6yPKIrL5zF1HwjdeBJJCObIsZO79bo3sHhWhglo5WMV3mazuVPb4fLvSL8/FAkB1hK6rXQPwYhMyROK8VK5LAiH/jsMt0HQjxiN4/ePdoilllcqDyt3Mkg8mRBNbIhMb8RERkowQA/p76g0/UDDdCoNmDminM0qSK5vlpE5kugCHhNPxntwWmJPYTMZtYcFR6ABHQsVRlYLukVORaaULvqKI46keFSCv77kSPS6kxrPptLNDHgz16fWBtyxe6v5h08LUy+KI8ushqTPWWIX8Sg6b45IrGtyW6zXFb/hpQf9m3oqfWuB0fpSw0uZ4WB69En69uOk2rmO2V52PXj+A/mI4ESKpb2HAAAAAElFTkSuQmCC',
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
|
||||
@@ -14,11 +14,11 @@ log = CPLog(__name__)
|
||||
class Base(TorrentProvider):
|
||||
|
||||
urls = {
|
||||
'test': 'https://www.iptorrents.com/',
|
||||
'base_url': 'https://www.iptorrents.com',
|
||||
'login': 'https://www.iptorrents.com/torrents/',
|
||||
'login_check': 'https://www.iptorrents.com/inbox.php',
|
||||
'search': 'https://www.iptorrents.com/torrents/?%s%%s&q=%s&qf=ti&p=%%d',
|
||||
'test': 'https://iptorrents.eu/',
|
||||
'base_url': 'https://iptorrents.eu',
|
||||
'login': 'https://iptorrents.eu/torrents/',
|
||||
'login_check': 'https://iptorrents.eu/inbox.php',
|
||||
'search': 'https://iptorrents.eu/torrents/?%s%%s&q=%s&qf=ti&p=%%d',
|
||||
}
|
||||
|
||||
http_time_between_calls = 1 # Seconds
|
||||
@@ -120,8 +120,9 @@ config = [{
|
||||
'tab': 'searcher',
|
||||
'list': 'torrent_providers',
|
||||
'name': 'IPTorrents',
|
||||
'description': 'See <a href="http://www.iptorrents.com">IPTorrents</a>',
|
||||
'description': '<a href="https://iptorrents.eu">IPTorrents</a>',
|
||||
'wizard': True,
|
||||
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABRklEQVR42qWQO0vDUBiG8zeKY3EqQUtNO7g0J6ZJ1+ifKIIFQXAqDYKCyaaYxM3udrZLHdRFhXrZ6liCW6mubfk874EESgqaeOCF7/Y8hEh41aq6yZi2nyZgBGya9XKtZs4No05pAkZV2YbEmyMMsoSxLQeC46wCTdPPY4HruPQyGIhF97qLWsS78Miydn4XdK46NJ9OsQAYBzMIMf8MQ9wtCnTdWCaIDx/u7uljOIQEe0hiIWPamSTLay3+RxOCSPI9+RJAo7Er9r2bnqjBFAqyK+VyK4f5/Cr5ni8OFKVCz49PFI5GdNvvU7ttE1M1zMU+8AMqFksEhrMnQsBDzqmDAwzx2ehRLwT7yyCI+vSC99c3mozH1NxrJgWWtR1BOECfEJSVCm6WCzJGCA7+IWhBsM4zywDPwEp4vCjx2DzBH2ODAfsDb33Ps6dQwJgAAAAASUVORK5CYII=',
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
|
||||
@@ -32,8 +32,11 @@ class Base(TorrentMagnetProvider):
|
||||
proxy_list = [
|
||||
'https://kickass.to',
|
||||
'http://kickass.pw',
|
||||
'http://www.kickassunblock.info',
|
||||
'http://www.kickassproxy.info',
|
||||
'http://kickassto.come.in',
|
||||
'http://katproxy.ws',
|
||||
'http://kickass.bitproxy.eu',
|
||||
'http://katph.eu',
|
||||
'http://kickassto.come.in',
|
||||
]
|
||||
|
||||
def _search(self, media, quality, results):
|
||||
@@ -65,12 +68,13 @@ class Base(TorrentMagnetProvider):
|
||||
if column_name:
|
||||
|
||||
if column_name == 'name':
|
||||
link = td.find('div', {'class': 'torrentname'}).find_all('a')[1]
|
||||
new['id'] = temp.get('id')[-8:]
|
||||
link = td.find('div', {'class': 'torrentname'}).find_all('a')[2]
|
||||
new['id'] = temp.get('id')[-7:]
|
||||
new['name'] = link.text
|
||||
new['url'] = td.find('a', 'imagnet')['href']
|
||||
new['detail_url'] = self.urls['detail'] % (self.getDomain(), link['href'][1:])
|
||||
new['score'] = 20 if td.find('a', 'iverif') else 0
|
||||
new['verified'] = True if td.find('a', 'iverify') else False
|
||||
new['score'] = 100 if new['verified'] else 0
|
||||
elif column_name is 'size':
|
||||
new['size'] = self.parseSize(td.text)
|
||||
elif column_name is 'age':
|
||||
@@ -82,6 +86,10 @@ class Base(TorrentMagnetProvider):
|
||||
|
||||
nr += 1
|
||||
|
||||
# Only store verified torrents
|
||||
if self.conf('only_verified') and not new['verified']:
|
||||
continue
|
||||
|
||||
results.append(new)
|
||||
except:
|
||||
log.error('Failed parsing KickAssTorrents: %s', traceback.format_exc())
|
||||
@@ -123,8 +131,9 @@ config = [{
|
||||
'tab': 'searcher',
|
||||
'list': 'torrent_providers',
|
||||
'name': 'KickAssTorrents',
|
||||
'description': 'See <a href="https://kat.ph/">KickAssTorrents</a>',
|
||||
'description': '<a href="https://kat.ph/">KickAssTorrents</a>',
|
||||
'wizard': True,
|
||||
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACD0lEQVR42pXK20uTcRjA8d/fsJsuap0orBuFlm3hir3JJvQOVmuwllN20Lb2isI2nVHKjBqrCWYaNnNuBrkSWxglhDVJOkBdSWUOq5FgoiOrMdRJ2xPPxW+8OUf1ge/FcyCUSVe2qedK5U/OxNTTXRNXEQ52Glb4O6dNEfK1auJkvRY7+/zxnQbA/D596laXcY3OWOiaIX2393SGznUmxkUo/YkDgqHemuzobQ7+NV+reo5Q1mqp68GABdY3+/EloO+JeN4tEqiFU8f3CwhyWo9E7wfMgI0ELTDx0AvjIxcgvZoC9P7NMN7yMmrFeoKa68rfDfmrARsNN0Ihr55cx59ctZWSiwS5bLKpwW4dYJH+M/B6/CYszE0BFZ+egG+Ln+HRoBN/cpl1pV6COIMkOnBVA/w+fXgGKJVM4LxhumMleoL06hJ3wKcCfl+/TAKKx17gnFePRwkqxR4BQSpFkbCrrQJueI7mWpyfATQ9OQY43+uv/+PutBycJ3y2qn2x7jY50GJvnwLKZjOwspyE5I8F4N+1yr1uwqcs3ym63Hwo29EiAyzUWQVr6WVAS4lZCPutQG/2GtES2YiW3d3XflYKtL72kzAcdEDHeSa3czeIMyyz/TApRKvcFfE0isHbJMnrHCf6xTLb1ORvWNlWo91cvHrJUQo0o6ZoRi7dIiT/g2WEDi27Iyov21xMCvgNfXvtwIACfHwAAAAASUVORK5CYII=',
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
@@ -151,6 +160,13 @@ config = [{
|
||||
'default': 40,
|
||||
'description': 'Will not be (re)moved until this seed time (in hours) is met.',
|
||||
},
|
||||
{
|
||||
'name': 'only_verified',
|
||||
'advanced': True,
|
||||
'type': 'bool',
|
||||
'default': False,
|
||||
'description': 'Only search for verified releases.'
|
||||
},
|
||||
{
|
||||
'name': 'extra_score',
|
||||
'advanced': True,
|
||||
|
||||
@@ -64,6 +64,10 @@ class Base(TorrentProvider):
|
||||
torrentdesc += ' HQ'
|
||||
if self.conf('prefer_golden'):
|
||||
torrentscore += 5000
|
||||
if 'FreeleechType' in torrent:
|
||||
torrentdesc += ' Freeleech'
|
||||
if self.conf('prefer_freeleech'):
|
||||
torrentscore += 7000
|
||||
if 'Scene' in torrent and torrent['Scene']:
|
||||
torrentdesc += ' Scene'
|
||||
if self.conf('prefer_scene'):
|
||||
@@ -187,8 +191,9 @@ config = [{
|
||||
'tab': 'searcher',
|
||||
'list': 'torrent_providers',
|
||||
'name': 'PassThePopcorn',
|
||||
'description': 'See <a href="https://passthepopcorn.me">PassThePopcorn.me</a>',
|
||||
'description': '<a href="https://passthepopcorn.me">PassThePopcorn.me</a>',
|
||||
'wizard': True,
|
||||
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAARklEQVQoz2NgIAP8BwMiGWRpIN1JNWn/t6T9f532+W8GkNt7vzz9UkfarZVpb68BuWlbnqW1nU7L2DMx7eCoBlpqGOppCQB83zIgIg+wWQAAAABJRU5ErkJggg==',
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
@@ -222,6 +227,14 @@ config = [{
|
||||
'default': 1,
|
||||
'description': 'Favors Golden Popcorn-releases over all other releases.'
|
||||
},
|
||||
{
|
||||
'name': 'prefer_freeleech',
|
||||
'advanced': True,
|
||||
'type': 'bool',
|
||||
'label': 'Prefer Freeleech',
|
||||
'default': 1,
|
||||
'description': 'Favors torrents marked as freeleech over all other releases.'
|
||||
},
|
||||
{
|
||||
'name': 'prefer_scene',
|
||||
'advanced': True,
|
||||
|
||||
@@ -1,136 +0,0 @@
|
||||
from urlparse import parse_qs
|
||||
import re
|
||||
import traceback
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
|
||||
from couchpotato.core.helpers.variable import tryInt
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider
|
||||
import six
|
||||
|
||||
|
||||
log = CPLog(__name__)
|
||||
|
||||
|
||||
class Base(TorrentMagnetProvider):
|
||||
|
||||
urls = {
|
||||
'test': 'https://publichd.se',
|
||||
'detail': 'https://publichd.se/index.php?page=torrent-details&id=%s',
|
||||
'search': 'https://publichd.se/index.php',
|
||||
}
|
||||
http_time_between_calls = 0
|
||||
|
||||
def search(self, movie, quality):
|
||||
|
||||
if not quality.get('hd', False):
|
||||
return []
|
||||
|
||||
return super(Base, self).search(movie, quality)
|
||||
|
||||
def _search(self, media, quality, results):
|
||||
|
||||
query = self.buildUrl(media)
|
||||
|
||||
params = tryUrlencode({
|
||||
'page': 'torrents',
|
||||
'search': query,
|
||||
'active': 1,
|
||||
})
|
||||
|
||||
data = self.getHTMLData('%s?%s' % (self.urls['search'], params))
|
||||
|
||||
if data:
|
||||
|
||||
try:
|
||||
soup = BeautifulSoup(data)
|
||||
|
||||
results_table = soup.find('table', attrs = {'id': 'bgtorrlist2'})
|
||||
entries = results_table.find_all('tr')
|
||||
|
||||
for result in entries[2:len(entries) - 1]:
|
||||
info_url = result.find(href = re.compile('torrent-details'))
|
||||
download = result.find(href = re.compile('magnet:'))
|
||||
|
||||
if info_url and download:
|
||||
|
||||
url = parse_qs(info_url['href'])
|
||||
|
||||
results.append({
|
||||
'id': url['id'][0],
|
||||
'name': six.text_type(info_url.string),
|
||||
'url': download['href'],
|
||||
'detail_url': self.urls['detail'] % url['id'][0],
|
||||
'size': self.parseSize(result.find_all('td')[7].string),
|
||||
'seeders': tryInt(result.find_all('td')[4].string),
|
||||
'leechers': tryInt(result.find_all('td')[5].string),
|
||||
'get_more_info': self.getMoreInfo
|
||||
})
|
||||
|
||||
except:
|
||||
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
|
||||
|
||||
def getMoreInfo(self, item):
|
||||
|
||||
cache_key = 'publichd.%s' % item['id']
|
||||
description = self.getCache(cache_key)
|
||||
|
||||
if not description:
|
||||
|
||||
try:
|
||||
full_description = self.urlopen(item['detail_url'])
|
||||
html = BeautifulSoup(full_description)
|
||||
nfo_pre = html.find('div', attrs = {'id': 'torrmain'})
|
||||
description = toUnicode(nfo_pre.text) if nfo_pre else ''
|
||||
except:
|
||||
log.error('Failed getting more info for %s', item['name'])
|
||||
description = ''
|
||||
|
||||
self.setCache(cache_key, description, timeout = 25920000)
|
||||
|
||||
item['description'] = description
|
||||
return item
|
||||
|
||||
|
||||
config = [{
|
||||
'name': 'publichd',
|
||||
'groups': [
|
||||
{
|
||||
'tab': 'searcher',
|
||||
'list': 'torrent_providers',
|
||||
'name': 'PublicHD',
|
||||
'description': 'Public Torrent site with only HD content. See <a href="https://publichd.se/">PublicHD</a>',
|
||||
'wizard': True,
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
'type': 'enabler',
|
||||
'default': True,
|
||||
},
|
||||
{
|
||||
'name': 'seed_ratio',
|
||||
'label': 'Seed ratio',
|
||||
'type': 'float',
|
||||
'default': 1,
|
||||
'description': 'Will not be (re)moved until this seed ratio is met.',
|
||||
},
|
||||
{
|
||||
'name': 'seed_time',
|
||||
'label': 'Seed time',
|
||||
'type': 'int',
|
||||
'default': 40,
|
||||
'description': 'Will not be (re)moved until this seed time (in hours) is met.',
|
||||
},
|
||||
{
|
||||
'name': 'extra_score',
|
||||
'advanced': True,
|
||||
'label': 'Extra Score',
|
||||
'type': 'int',
|
||||
'default': 0,
|
||||
'description': 'Starting score for each release found via this provider.',
|
||||
}
|
||||
],
|
||||
},
|
||||
],
|
||||
}]
|
||||
@@ -24,9 +24,9 @@ class Base(TorrentProvider):
|
||||
|
||||
http_time_between_calls = 1 # Seconds
|
||||
|
||||
def _search(self, media, quality, results):
|
||||
def _searchOnTitle(self, title, media, quality, results):
|
||||
|
||||
url = self.buildUrl(media, quality)
|
||||
url = self.buildUrl(title, media, quality)
|
||||
data = self.getHTMLData(url)
|
||||
|
||||
if data:
|
||||
@@ -42,6 +42,7 @@ class Base(TorrentProvider):
|
||||
|
||||
link = result.find('td', attrs = {'class': 'ttr_name'}).find('a')
|
||||
url = result.find('td', attrs = {'class': 'td_dl'}).find('a')
|
||||
seeders = result.find('td', attrs = {'class': 'ttr_seeders'}).find('a')
|
||||
leechers = result.find('td', attrs = {'class': 'ttr_leechers'}).find('a')
|
||||
torrent_id = link['href'].replace('details?id=', '')
|
||||
|
||||
@@ -51,7 +52,7 @@ class Base(TorrentProvider):
|
||||
'url': self.urls['download'] % url['href'],
|
||||
'detail_url': self.urls['detail'] % torrent_id,
|
||||
'size': self.parseSize(result.find('td', attrs = {'class': 'ttr_size'}).contents[0]),
|
||||
'seeders': tryInt(result.find('td', attrs = {'class': 'ttr_seeders'}).find('a').string),
|
||||
'seeders': tryInt(seeders.string) if seeders else 0,
|
||||
'leechers': tryInt(leechers.string) if leechers else 0,
|
||||
'get_more_info': self.getMoreInfo,
|
||||
})
|
||||
@@ -89,8 +90,9 @@ config = [{
|
||||
'tab': 'searcher',
|
||||
'list': 'torrent_providers',
|
||||
'name': 'SceneAccess',
|
||||
'description': 'See <a href="https://sceneaccess.eu/">SceneAccess</a>',
|
||||
'description': '<a href="https://sceneaccess.eu/">SceneAccess</a>',
|
||||
'wizard': True,
|
||||
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAACT0lEQVR4AYVQS0sbURidO3OTmajJ5FElTTOkPmZ01GhHrIq0aoWAj1Vc+A/cuRMXbl24V9SlCGqrLhVFCrooEhCp2BAx0mobTY2kaR7qmOm87EXL1EWxh29xL+c7nPMdgGHYO5bF/gdbefnr6WlbWRnxluMwAB4Z0uEgXa7nwaDL7+/RNPzxbYvb/XJ0FBYVfd/ayh0fQ4qCGEHcm0KLRZUk7Pb2YRJPRwcsKMidnKD3t9VVT3s7BDh+z5FOZ3Vfn3h+Hltfx00mRRSRWFcUmmVNhYVqPn8dj3va2oh+txvcQRVF9ebm1fi4k+dRFbosY5rm4Hk7xxULQnJnx93S4g0EIEEQRoDLo6PrWEw8Pc0eHLwYGopMTDirqlJ7eyhYYGHhfgfHCcKYksZGVB/NcXI2mw6HhZERqrjYTNPHi4tFPh8aJIYIhgPlcCRDoZLW1s75+Z/7+59nZ/OJhLWigqAoKZX6Mjf3dXkZ3pydGYLc4aEoCCkInzQ1fRobS2xuvllaonkedfArnY5OTdGVldBkOADgqq2Nr6z8CIWaJietDHOhKB+HhwFKC6Gnq4ukKJvP9zcSbjYDXbeVlkKzuZBhnnV3e3t6UOmaJO0ODibW1hB1GYkg8R/gup7Z3TVZLJ5AILW9LcZiVpYtYBhw16O3t7cauckyeF9Tgz0ATpL2+nopmWycmbnY2LiKRjFk6/d7+/vRJfl4HGzV1T0UIM43MGBvaIBWK/YvwM5w+IMgGH8tkyEgvIpE7M3Nt6qqZrNyOq1kMmouh455Ggz+BhKY4GEc2CfwAAAAAElFTkSuQmCC',
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
|
||||
@@ -24,15 +24,18 @@ class Base(TorrentMagnetProvider):
|
||||
http_time_between_calls = 0
|
||||
|
||||
proxy_list = [
|
||||
'https://tpb.ipredator.se',
|
||||
'https://dieroschtibay.org',
|
||||
'https://thebay.al',
|
||||
'https://thepiratebay.se',
|
||||
'http://pirateproxy.ca',
|
||||
'http://tpb.al',
|
||||
'http://www.tpb.gr',
|
||||
'http://bayproxy.me',
|
||||
'http://proxybay.eu',
|
||||
'http://www.getpirate.com',
|
||||
'http://piratebay.io',
|
||||
'http://thepiratebay.se.net',
|
||||
'http://thebootlegbay.com',
|
||||
'http://tpb.ninja.so',
|
||||
'http://proxybay.fr',
|
||||
'http://pirateproxy.in',
|
||||
'http://piratebay.skey.sk',
|
||||
'http://pirateproxy.be',
|
||||
'http://bayproxy.li',
|
||||
'http://proxybay.pw',
|
||||
]
|
||||
|
||||
def _search(self, media, quality, results):
|
||||
@@ -65,7 +68,7 @@ class Base(TorrentMagnetProvider):
|
||||
pass
|
||||
|
||||
entries = results_table.find_all('tr')
|
||||
for result in entries[2:]:
|
||||
for result in entries[1:]:
|
||||
link = result.find(href = re.compile('torrent\/\d+\/'))
|
||||
download = result.find(href = re.compile('magnet:'))
|
||||
|
||||
@@ -109,7 +112,11 @@ class Base(TorrentMagnetProvider):
|
||||
full_description = self.getCache('tpb.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
|
||||
html = BeautifulSoup(full_description)
|
||||
nfo_pre = html.find('div', attrs = {'class': 'nfo'})
|
||||
description = toUnicode(nfo_pre.text) if nfo_pre else ''
|
||||
description = ''
|
||||
try:
|
||||
description = toUnicode(nfo_pre.text)
|
||||
except:
|
||||
pass
|
||||
|
||||
item['description'] = description
|
||||
return item
|
||||
@@ -122,8 +129,9 @@ config = [{
|
||||
'tab': 'searcher',
|
||||
'list': 'torrent_providers',
|
||||
'name': 'ThePirateBay',
|
||||
'description': 'The world\'s largest bittorrent tracker. See <a href="http://fucktimkuik.org/">ThePirateBay</a>',
|
||||
'description': 'The world\'s largest bittorrent tracker. <a href="http://fucktimkuik.org/">ThePirateBay</a>',
|
||||
'wizard': True,
|
||||
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAAAAAA6mKC9AAAA3UlEQVQY02P4DwT/YADIZvj//7qnozMYODmtAAusZoCDELDAegYGViZhAWZmRoYoqIDupfhNN1M3dTBEggXWMZg9jZRXV77YxhAOFpjDwMAPMoCXmcHsF1SAQZ6bQY2VgUEbKHClcAYzg3mINEO8jSCD478/DPsZmvqWblu1bOmStes3Pp0ezVDF4Gif0Hfx9///74/ObRZ2YNiZ47C8XIRBxFJR0jbSSUud4f9zAQWn8NTuziAt2zy5xIMM/z8LFX0E+fD/x0MRDCeA1v7Z++Y/FDzyvAtyBxIA+h8A8ZKLeT+lJroAAAAASUVORK5CYII=',
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import traceback
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
from couchpotato.core.helpers.encoding import tryUrlencode
|
||||
from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
|
||||
from couchpotato.core.helpers.variable import tryInt
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
|
||||
@@ -56,11 +56,12 @@ class Base(TorrentProvider):
|
||||
|
||||
full_id = link['href'].replace('details.php?id=', '')
|
||||
torrent_id = full_id[:6]
|
||||
name = toUnicode(link.get('title', link.contents[0]).encode('ISO-8859-1')).strip()
|
||||
|
||||
results.append({
|
||||
'id': torrent_id,
|
||||
'name': link.contents[0],
|
||||
'url': self.urls['download'] % (torrent_id, link.contents[0]),
|
||||
'name': name,
|
||||
'url': self.urls['download'] % (torrent_id, name),
|
||||
'detail_url': self.urls['detail'] % torrent_id,
|
||||
'size': self.parseSize(cells[6].contents[0] + cells[6].contents[2]),
|
||||
'seeders': tryInt(cells[8].find('span').contents[0]),
|
||||
@@ -90,8 +91,9 @@ config = [{
|
||||
'tab': 'searcher',
|
||||
'list': 'torrent_providers',
|
||||
'name': 'TorrentBytes',
|
||||
'description': 'See <a href="http://torrentbytes.net">TorrentBytes</a>',
|
||||
'description': '<a href="http://torrentbytes.net">TorrentBytes</a>',
|
||||
'wizard': True,
|
||||
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAAeFBMVEUAAAAAAEQAA1QAEmEAKnQALHYAMoEAOokAQpIASYsASZgAS5UATZwATosATpgAVJ0AWZwAYZ4AZKAAaZ8Ab7IAcbMAfccAgcQAgcsAhM4AiscAjMkAmt0AoOIApecAp/EAqvQAs+kAt+wA3P8A4f8A//8VAAAfDbiaAl08AAAAjUlEQVQYGQXBO04DQRAFwHqz7Z8sECIl5f73ISRD5GBs7UxTlWfg9vYXnvJRQJqOL88D6BAwJtMMumHUVCl60aa6H93IrIv0b+157f1lpk+fm87lMWrZH0vncKbXdRUQrRmrh9C6Iwkq6rg4PXZcyXmbizzeV/g+rDra0rGve8jPKLSOJNi2AQAwAGjwD7ApPkEHdtPQAAAAAElFTkSuQmCC',
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import re
|
||||
from couchpotato.core.helpers.variable import tryInt
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
|
||||
@@ -8,19 +9,19 @@ log = CPLog(__name__)
|
||||
class Base(TorrentProvider):
|
||||
|
||||
urls = {
|
||||
'test': 'http://www.td.af/',
|
||||
'login': 'http://www.td.af/torrents/',
|
||||
'login_check': 'http://www.torrentday.com/userdetails.php',
|
||||
'detail': 'http://www.td.af/details.php?id=%s',
|
||||
'search': 'http://www.td.af/V3/API/API.php',
|
||||
'download': 'http://www.td.af/download.php/%s/%s',
|
||||
'test': 'https://torrentday.eu/',
|
||||
'login': 'https://torrentday.eu/torrents/',
|
||||
'login_check': 'https://torrentday.eu/userdetails.php',
|
||||
'detail': 'https://torrentday.eu/details.php?id=%s',
|
||||
'search': 'https://torrentday.eu/V3/API/API.php',
|
||||
'download': 'https://torrentday.eu/download.php/%s/%s',
|
||||
}
|
||||
|
||||
http_time_between_calls = 1 # Seconds
|
||||
|
||||
def _search(self, media, quality, results):
|
||||
def _searchOnTitle(self, title, media, quality, results):
|
||||
|
||||
query = self.buildUrl(media)
|
||||
query = '"%s" %s' % (title, media['info']['year'])
|
||||
|
||||
data = {
|
||||
'/browse.php?': None,
|
||||
@@ -55,6 +56,10 @@ class Base(TorrentProvider):
|
||||
}
|
||||
|
||||
def loginSuccess(self, output):
|
||||
often = re.search('You tried too often, please wait .*</div>', output)
|
||||
if often:
|
||||
raise Exception(often.group(0)[:-6].strip())
|
||||
|
||||
return 'Password not correct' not in output
|
||||
|
||||
def loginCheckSuccess(self, output):
|
||||
@@ -68,8 +73,9 @@ config = [{
|
||||
'tab': 'searcher',
|
||||
'list': 'torrent_providers',
|
||||
'name': 'TorrentDay',
|
||||
'description': 'See <a href="http://www.td.af/">TorrentDay</a>',
|
||||
'description': '<a href="https://torrentday.eu/">TorrentDay</a>',
|
||||
'wizard': True,
|
||||
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAC5ElEQVQ4y12TXUgUURTH//fO7Di7foeQJH6gEEEIZZllVohfSG/6UA+RSFAQQj74VA8+Bj30lmAlRVSEvZRfhNhaka5ZUG1paKaW39tq5O6Ou+PM3M4o6m6X+XPPzD3zm/+dcy574r515WfIW8CZBM4YAA5Gc/aQC3yd7oXYEONcsISE5dTDh91HS0t7FEWhBUAeN9ynV/d9qJAgE4AECURAcVsGlCCnly26LMA0IQwTa52dje3d3e3hcPi8qqrrMjcVYI3EHCQZlkFOHBwR2QHh2ASAAIJxWGAQEDxjePhs3527XjJwnb37OHBq0T+Tyyjh+9KnEzNJ7nouc1Q/3A3HGsOvnJy+PSUlj81w2Lny9WuJ6+3AmTjD4HOcrdR2dWXLRQePvyaSLfQOPMPC8mC9iHCsOxSyzJCelzdSXlNzD5ujpb25Wbfc/XXJemTXF4+nnCNq+AMLe50uFfEJTiw4GXSFtiHL0SnIq66+p0kSArqO+eH3RdsAv9+f5vW7L7GICq6rmM8XBCAXlBw90rOyxibn5yzfkg/L09M52/jxqdESaIrBXHYZZbB1GX8cEpySxKIB8S5XcOnvqpli1zuwmrTtoLjw5LOK/eeuWsE4JH5IRPaPZKiKigmPp+5pa+u1aEjIMhEgrRkmi9mgxGUhM7LNJSzOzsE3+cOeExovXOjdytE0LV4zqNZUtV0uZzAGoGkhDH/2YHZiErmv4uyWQnZZWc+hoqL3WzlTExN5hhA8IEwkZWZOxwB++30YG/9GkYCPvqAaHAW5uWPROW86OmqCprUR7z1yZDAGQNuCvkoB/baIKUBWMTYymv+gra3eJNvjXu+B562tFyXqTJ6YuHK8rKwvBmC3vR7cOCPQLWFz8LnfXWUrJo9U19BwMyUlJRjTSMJ2ENxUiGxq9KXQfwqYlnWstvbR5aamG9g0uzM8Q4OFt++3NNixQ2NgYmeN03FOTUv7XVpV9aKisvLl1vN/WVhNc/Fi1NEAAAAASUVORK5CYII=',
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
|
||||
@@ -13,20 +13,20 @@ log = CPLog(__name__)
|
||||
class Base(TorrentProvider):
|
||||
|
||||
urls = {
|
||||
'test': 'http://www.torrentleech.org/',
|
||||
'login': 'http://www.torrentleech.org/user/account/login/',
|
||||
'login_check': 'http://torrentleech.org/user/messages',
|
||||
'detail': 'http://www.torrentleech.org/torrent/%s',
|
||||
'search': 'http://www.torrentleech.org/torrents/browse/index/query/%s/categories/%d',
|
||||
'download': 'http://www.torrentleech.org%s',
|
||||
'test': 'https://www.torrentleech.org/',
|
||||
'login': 'https://www.torrentleech.org/user/account/login/',
|
||||
'login_check': 'https://torrentleech.org/user/messages',
|
||||
'detail': 'https://www.torrentleech.org/torrent/%s',
|
||||
'search': 'https://www.torrentleech.org/torrents/browse/index/query/%s/categories/%s',
|
||||
'download': 'https://www.torrentleech.org%s',
|
||||
}
|
||||
|
||||
http_time_between_calls = 1 # Seconds
|
||||
cat_backup_id = None
|
||||
|
||||
def _search(self, media, quality, results):
|
||||
def _searchOnTitle(self, title, media, quality, results):
|
||||
|
||||
url = self.urls['search'] % self.buildUrl(media, quality)
|
||||
url = self.urls['search'] % self.buildUrl(title, media, quality)
|
||||
|
||||
data = self.getHTMLData(url)
|
||||
|
||||
@@ -80,8 +80,9 @@ config = [{
|
||||
'tab': 'searcher',
|
||||
'list': 'torrent_providers',
|
||||
'name': 'TorrentLeech',
|
||||
'description': 'See <a href="http://torrentleech.org">TorrentLeech</a>',
|
||||
'description': '<a href="http://torrentleech.org">TorrentLeech</a>',
|
||||
'wizard': True,
|
||||
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACHUlEQVR4AZVSO48SYRSdGTCBEMKzILLAWiybkKAGMZRUUJEoDZX7B9zsbuQPYEEjNLTQkYgJDwsoSaxspEBsCITXjjNAIKi8AkzceXgmbHQ1NJ5iMufmO9/9zrmXlCSJ+B8o75J8Pp/NZj0eTzweBy0Wi4PBYD6f12o1r9ebTCZx+22HcrnMsuxms7m6urTZ7LPZDMVYLBZ8ZV3yo8aq9Pq0wzCMTqe77dDv9y8uLyAWBH6xWOyL0K/56fcb+rrPgPZ6PZfLRe1fsl6vCUmGKIqoqNXqdDr9Dbjps9znUV0uTqdTjuPkDoVCIfcuJ4gizjMMm8u9vW+1nr04czqdK56c37CbKY9j2+1WEARZ0Gq1RFHAz2q1qlQqXxoN69HRcDjUarW8ZD6QUigUOnY8uKYH8N1sNkul9yiGw+F6vS4Rxn8EsodEIqHRaOSnq9T7ajQazWQycEIR1AEBYDabSZJyHDucJyegwWBQr9ebTCaKvHd4cCQANUU9evwQ1Ofz4YvUKUI43GE8HouSiFiNRhOowWBIpVLyHITJkuW3PwgAEf3pgIwxF5r+OplMEsk3CPT5szCMnY7EwUdhwUh/CXiej0Qi3idPz89fdrpdbsfBzH7S3Q9K5pP4c0sAKpVKoVAQGO1ut+t0OoFAQHkH2Da/3/+but3uarWK0ZMQoNdyucRutdttmqZxMTzY7XaYxsrgtUjEZrNhkSwWyy/0NCatZumrNQAAAABJRU5ErkJggg==',
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
|
||||
@@ -134,6 +134,7 @@ config = [{
|
||||
'order': 10,
|
||||
'description': 'CouchPotato torrent provider. Checkout <a href="https://github.com/RuudBurger/CouchPotatoServer/wiki/CouchPotato-Torrent-Provider">the wiki page about this provider</a> for more info.',
|
||||
'wizard': True,
|
||||
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAABSElEQVR4AZ2Nz0oCURTGv8t1YMpqUxt9ARFxoQ/gQtppgvUKcu/sxB5iBJkogspaBC6iVUplEC6kv+oiiKDNhAtt16roP0HQgdsMLgaxfvy4nHP4Pi48qE2g4v91JOqT1CH/UnA7w7icUlLawyEdj+ZI/7h6YluWbRiddHonHh9M70aj7VTKzuXuikUMci/EO/ACnAI15599oAk8AR/AgxBQNCzreD7bmpl+FOIVuAHqQDUcJo+AK+CZFKLt95/MpSmMt0TiW9POxse6UvYZ6zB2wFgjFiNpOGesR0rZ0PVPXf8KhUCl22CwClz4eN8weoZBb9c0bdPsOWvHx/cYu9Y0CoNoZTJrwAbn5DrnZc6XOV+igVbnsgo0IxEomlJuA1vUIYGyq3PZBChwmExCUSmVZgMBDIUCK4UCFIv5vHIhm/XUDeAf/ADbcpd5+aXSWQAAAABJRU5ErkJggg==',
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
|
||||
@@ -13,12 +13,12 @@ log = CPLog(__name__)
|
||||
class Base(TorrentProvider):
|
||||
|
||||
urls = {
|
||||
'test': 'https://torrentshack.net/',
|
||||
'login': 'https://torrentshack.net/login.php',
|
||||
'login_check': 'https://torrentshack.net/inbox.php',
|
||||
'detail': 'https://torrentshack.net/torrent/%s',
|
||||
'search': 'https://torrentshack.net/torrents.php?action=advanced&searchstr=%s&scene=%s&filter_cat[%d]=1',
|
||||
'download': 'https://torrentshack.net/%s',
|
||||
'test': 'https://theshack.us.to/',
|
||||
'login': 'https://theshack.us.to/login.php',
|
||||
'login_check': 'https://theshack.us.to/inbox.php',
|
||||
'detail': 'https://theshack.us.to/torrent/%s',
|
||||
'search': 'https://theshack.us.to/torrents.php?action=advanced&searchstr=%s&scene=%s&filter_cat[%d]=1',
|
||||
'download': 'https://theshack.us.to/%s',
|
||||
}
|
||||
|
||||
http_time_between_calls = 1 # Seconds
|
||||
@@ -42,15 +42,17 @@ class Base(TorrentProvider):
|
||||
|
||||
link = result.find('span', attrs = {'class': 'torrent_name_link'}).parent
|
||||
url = result.find('td', attrs = {'class': 'torrent_td'}).find('a')
|
||||
size = result.find('td', attrs = {'class': 'size'}).contents[0].strip('\n ')
|
||||
tds = result.find_all('td')
|
||||
|
||||
results.append({
|
||||
'id': link['href'].replace('torrents.php?torrentid=', ''),
|
||||
'name': six.text_type(link.span.string).translate({ord(six.u('\xad')): None}),
|
||||
'url': self.urls['download'] % url['href'],
|
||||
'detail_url': self.urls['download'] % link['href'],
|
||||
'size': self.parseSize(result.find_all('td')[4].string),
|
||||
'seeders': tryInt(result.find_all('td')[6].string),
|
||||
'leechers': tryInt(result.find_all('td')[7].string),
|
||||
'size': self.parseSize(size),
|
||||
'seeders': tryInt(tds[len(tds)-2].string),
|
||||
'leechers': tryInt(tds[len(tds)-1].string),
|
||||
})
|
||||
|
||||
except:
|
||||
@@ -80,7 +82,9 @@ config = [{
|
||||
'tab': 'searcher',
|
||||
'list': 'torrent_providers',
|
||||
'name': 'TorrentShack',
|
||||
'description': 'See <a href="https://www.torrentshack.net/">TorrentShack</a>',
|
||||
'description': '<a href="http://torrentshack.eu/">TorrentShack</a>',
|
||||
'wizard': True,
|
||||
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAABmElEQVQoFQXBzY2cVRiE0afqvd84CQiAnxWWtyxsS6ThINBYg2Dc7mZBMEjE4mzs6e9WcY5+ePNuVFJJodQAoLo+SaWCy9rcV8cmjah3CI6iYu7oRU30kE5xxELRfamklY3k1NL19sSm7vPzP/ZdNZzKVDaY2sPZJBh9fv5ITrmG2+Vp4e1sPchVqTCQZJnVXi+/L4uuAJGly1+Pw8CprLbi8Om7tbT19/XRqJUk11JP9uHj9ulxhXbvJbI9qJvr5YkGXFG2IBT8tXczt+sfzDZCp3765f3t9tHEHGEDACma77+8o4oATKk+/PfW9YmHruRFjWoVSFsVsGu1YSKq6Oc37+n98unPZSRlY7vsKDqN+92X3yR9+PdXee3iJNKMStqdcZqoTJbUSi5JOkpfRlhSI0mSpEmCFKoU7FqSNOLAk54uGwCStMUCgLrVic62g7oDoFmmdI+P3S0pDe1xvDqb6XrZqbtzShWNoh9fv/XQHaDdM9OqrZi2M7M3UrB2vlkPS1IbdEBk7UiSoD6VlZ6aKWer4aH4f/AvKoHUTjuyAAAAAElFTkSuQmCC',
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
|
||||
@@ -22,12 +22,12 @@ class Base(TorrentMagnetProvider, RSS):
|
||||
|
||||
http_time_between_calls = 0
|
||||
|
||||
def _search(self, media, quality, results):
|
||||
def _searchOnTitle(self, title, media, quality, results):
|
||||
|
||||
search_url = self.urls['verified_search'] if self.conf('verified_only') else self.urls['search']
|
||||
|
||||
# Create search parameters
|
||||
search_params = self.buildUrl(media)
|
||||
search_params = self.buildUrl(title, media, quality)
|
||||
|
||||
smin = quality.get('size_min')
|
||||
smax = quality.get('size_max')
|
||||
@@ -80,11 +80,12 @@ config = [{
|
||||
'name': 'Torrentz',
|
||||
'description': 'Torrentz is a free, fast and powerful meta-search engine. <a href="https://torrentz.eu/">Torrentz</a>',
|
||||
'wizard': True,
|
||||
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAQklEQVQ4y2NgAALjtJn/ycEMlGiGG0IVAxiwAKzOxaKGARcgxgC8YNSAwWoAzuRMjgsIugqfAUR5CZcBRIcHsWEAADSA96Ig020yAAAAAElFTkSuQmCC',
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
'type': 'enabler',
|
||||
'default': False
|
||||
'default': True
|
||||
},
|
||||
{
|
||||
'name': 'verified_only',
|
||||
|
||||
@@ -11,19 +11,16 @@ log = CPLog(__name__)
|
||||
class Base(TorrentProvider):
|
||||
|
||||
urls = {
|
||||
'test': '%s/api',
|
||||
'search': '%s/api/list.json?keywords=%s&quality=%s',
|
||||
'detail': '%s/api/movie.json?id=%s'
|
||||
'test': '%s/api/v2',
|
||||
'search': '%s/api/v2/list_movies.json?limit=50&query_term=%s'
|
||||
}
|
||||
|
||||
http_time_between_calls = 1 # seconds
|
||||
|
||||
proxy_list = [
|
||||
'http://yify.unlocktorrent.com',
|
||||
'http://yify-torrents.com.come.in',
|
||||
'http://yts.re',
|
||||
'http://yts.im'
|
||||
'http://yify-torrents.im',
|
||||
'https://yts.re',
|
||||
'https://yts.wf',
|
||||
'https://yts.im',
|
||||
]
|
||||
|
||||
def search(self, movie, quality):
|
||||
@@ -35,30 +32,35 @@ class Base(TorrentProvider):
|
||||
|
||||
def _search(self, movie, quality, results):
|
||||
|
||||
search_url = self.urls['search'] % (self.getDomain(), getIdentifier(movie), quality['identifier'])
|
||||
domain = self.getDomain()
|
||||
if not domain:
|
||||
return
|
||||
|
||||
search_url = self.urls['search'] % (domain, getIdentifier(movie))
|
||||
|
||||
data = self.getJsonData(search_url)
|
||||
data = data.get('data')
|
||||
|
||||
if data and data.get('MovieList'):
|
||||
if isinstance(data, dict) and data.get('movies'):
|
||||
try:
|
||||
for result in data.get('MovieList'):
|
||||
for result in data.get('movies'):
|
||||
|
||||
try:
|
||||
title = result['TorrentUrl'].split('/')[-1][:-8].replace('_', '.').strip('._')
|
||||
title = title.replace('.-.', '-')
|
||||
title = title.replace('..', '.')
|
||||
except:
|
||||
continue
|
||||
for release in result.get('torrents', []):
|
||||
|
||||
results.append({
|
||||
'id': result['MovieID'],
|
||||
'name': title,
|
||||
'url': result['TorrentMagnetUrl'],
|
||||
'detail_url': self.urls['detail'] % (self.getDomain(), result['MovieID']),
|
||||
'size': self.parseSize(result['Size']),
|
||||
'seeders': tryInt(result['TorrentSeeds']),
|
||||
'leechers': tryInt(result['TorrentPeers'])
|
||||
})
|
||||
if release['quality'] and release['quality'] not in result['title_long']:
|
||||
title = result['title_long'] + ' BRRip ' + release['quality']
|
||||
else:
|
||||
title = result['title_long'] + ' BRRip'
|
||||
|
||||
results.append({
|
||||
'id': release['hash'],
|
||||
'name': title,
|
||||
'url': release['url'],
|
||||
'detail_url': result['url'],
|
||||
'size': self.parseSize(release['size']),
|
||||
'seeders': tryInt(release['seeds']),
|
||||
'leechers': tryInt(release['peers']),
|
||||
})
|
||||
|
||||
except:
|
||||
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
|
||||
@@ -77,6 +79,7 @@ config = [{
|
||||
'name': 'Yify',
|
||||
'description': 'Free provider, less accurate. Small HD movies, encoded by <a href="https://yify-torrents.com/">Yify</a>.',
|
||||
'wizard': False,
|
||||
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACL0lEQVR4AS1SPW/UQBAd23fxne/Ld2dvzvHuzPocEBAKokCBqGiQ6IgACYmvUKRBFEQgKKGg4BAlUoggggYUEQpSHOI7CIEoQs/fYcbLaU/efTvvvZlnA1qydoxU5kcxX0CkgmQZtPy0hCUjvK+WgEByOZ5dns1O5bzna8fRVkgsxH8B0YouIvBhdD5T11NiVOoKrsttyUcpRW0InUrFnwe9HzuP2uaQZYhF2LQ76TTXw2RVMTK8mYYbjfh+zNquMVCrqn93aArLSixPxnafdGDLaz1tjY5rmNa8z5BczEQOxQfCl1GyoqoWxYRN1bkh7ELw3q/vhP6HIL4TG9KumpjgvwuyM7OsjSj98E/vszMfZ7xvPtMaWxGO5crwIumKCR5HxDtJ0AWKGG204RfUd/3smJYqwem/Q7BTS1ZGfM4LNpVwuKAz6cMeROst0S2EwNE7GjTehO2H3dxqIpdkydat15G3F8SXBi4GlpBNlSz012L/k2+W0CLLk/jbcf13rf41yJeMQ8QWUZiHCfCA9ad+81nEKPtoS9mJOf9v0NmMJHgUT6xayheK9EIK7JJeU/AF4scDF7Y5SPlJrRcxJ+um4ibNEdObxLiIwJim+eT2AL5D9CIcnZ5zvSJi9eIlNHVVtZ831dk5svPgvjPWTq+ktWkd/kD0qtm71x+sDQe3kt6DXnM7Ct+GajmTxKlkAokWljyAKSm5oWa2w+BH4P2UuVub7eTyiGOQYapY/wEztHduSDYz5gAAAABJRU5ErkJggg==',
|
||||
'options': [
|
||||
{
|
||||
'name': 'enabled',
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
from couchpotato.api import addApiView
|
||||
from couchpotato.core.event import fireEvent, addEvent
|
||||
from couchpotato.core.helpers.variable import mergeDicts
|
||||
from couchpotato.core.helpers.variable import mergeDicts, getImdb
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.plugins.base import Plugin
|
||||
|
||||
@@ -35,12 +35,21 @@ class Search(Plugin):
|
||||
elif isinstance(types, (list, tuple, set)):
|
||||
types = list(types)
|
||||
|
||||
imdb_identifier = getImdb(q)
|
||||
|
||||
if not types:
|
||||
result = fireEvent('info.search', q = q, merge = True)
|
||||
if imdb_identifier:
|
||||
result = fireEvent('movie.info', identifier = imdb_identifier, merge = True)
|
||||
result = {result['type']: [result]}
|
||||
else:
|
||||
result = fireEvent('info.search', q = q, merge = True)
|
||||
else:
|
||||
result = {}
|
||||
for media_type in types:
|
||||
result[media_type] = fireEvent('%s.search' % media_type)
|
||||
if imdb_identifier:
|
||||
result[media_type] = fireEvent('%s.info' % media_type, identifier = imdb_identifier)
|
||||
else:
|
||||
result[media_type] = fireEvent('%s.search' % media_type, q = q)
|
||||
|
||||
return mergeDicts({
|
||||
'success': True,
|
||||
|
||||
@@ -1,278 +0,0 @@
|
||||
.search_form {
|
||||
display: inline-block;
|
||||
vertical-align: middle;
|
||||
position: absolute;
|
||||
right: 105px;
|
||||
top: 0;
|
||||
text-align: right;
|
||||
height: 100%;
|
||||
transition: all .4s cubic-bezier(0.9,0,0.1,1);
|
||||
position: absolute;
|
||||
z-index: 20;
|
||||
border: 0 solid transparent;
|
||||
border-bottom-width: 4px;
|
||||
}
|
||||
.search_form:hover {
|
||||
border-color: #047792;
|
||||
}
|
||||
|
||||
@media all and (max-width: 480px) {
|
||||
.search_form {
|
||||
right: 44px;
|
||||
}
|
||||
}
|
||||
|
||||
.search_form.focused,
|
||||
.search_form.shown {
|
||||
border-color: #04bce6;
|
||||
}
|
||||
|
||||
.search_form .input {
|
||||
height: 100%;
|
||||
overflow: hidden;
|
||||
width: 45px;
|
||||
transition: all .4s cubic-bezier(0.9,0,0.1,1);
|
||||
}
|
||||
|
||||
.search_form.focused .input,
|
||||
.search_form.shown .input {
|
||||
width: 380px;
|
||||
background: #4e5969;
|
||||
}
|
||||
|
||||
.search_form .input input {
|
||||
border-radius: 0;
|
||||
display: block;
|
||||
border: 0;
|
||||
background: none;
|
||||
color: #FFF;
|
||||
font-size: 25px;
|
||||
height: 100%;
|
||||
width: 100%;
|
||||
opacity: 0;
|
||||
padding: 0 40px 0 10px;
|
||||
transition: all .4s ease-in-out .2s;
|
||||
}
|
||||
.search_form.focused .input input,
|
||||
.search_form.shown .input input {
|
||||
opacity: 1;
|
||||
}
|
||||
|
||||
.search_form input::-ms-clear {
|
||||
width : 0;
|
||||
height: 0;
|
||||
}
|
||||
|
||||
@media all and (max-width: 480px) {
|
||||
.search_form .input input {
|
||||
font-size: 15px;
|
||||
}
|
||||
|
||||
.search_form.focused .input,
|
||||
.search_form.shown .input {
|
||||
width: 277px;
|
||||
}
|
||||
}
|
||||
|
||||
.search_form .input a {
|
||||
position: absolute;
|
||||
top: 0;
|
||||
right: 0;
|
||||
width: 44px;
|
||||
height: 100%;
|
||||
cursor: pointer;
|
||||
vertical-align: middle;
|
||||
text-align: center;
|
||||
line-height: 66px;
|
||||
font-size: 15px;
|
||||
color: #FFF;
|
||||
}
|
||||
|
||||
.search_form .input a:after {
|
||||
content: "\e03e";
|
||||
}
|
||||
|
||||
.search_form.shown.filled .input a:after {
|
||||
content: "\e04e";
|
||||
}
|
||||
|
||||
@media all and (max-width: 480px) {
|
||||
.search_form .input a {
|
||||
line-height: 44px;
|
||||
}
|
||||
}
|
||||
|
||||
.search_form .results_container {
|
||||
text-align: left;
|
||||
position: absolute;
|
||||
background: #5c697b;
|
||||
margin: 4px 0 0;
|
||||
width: 470px;
|
||||
min-height: 50px;
|
||||
box-shadow: 0 20px 20px -10px rgba(0,0,0,0.55);
|
||||
display: none;
|
||||
}
|
||||
@media all and (max-width: 480px) {
|
||||
.search_form .results_container {
|
||||
width: 320px;
|
||||
}
|
||||
}
|
||||
.search_form.focused.filled .results_container,
|
||||
.search_form.shown.filled .results_container {
|
||||
display: block;
|
||||
}
|
||||
|
||||
.search_form .results {
|
||||
max-height: 570px;
|
||||
overflow-x: hidden;
|
||||
}
|
||||
|
||||
.media_result {
|
||||
overflow: hidden;
|
||||
height: 50px;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.media_result .options {
|
||||
position: absolute;
|
||||
height: 100%;
|
||||
top: 0;
|
||||
left: 30px;
|
||||
right: 0;
|
||||
padding: 13px;
|
||||
border: 1px solid transparent;
|
||||
border-width: 1px 0;
|
||||
border-radius: 0;
|
||||
box-shadow: inset 0 1px 8px rgba(0,0,0,0.25);
|
||||
}
|
||||
.media_result .options > .in_library_wanted {
|
||||
margin-top: -7px;
|
||||
}
|
||||
|
||||
.media_result .options > div {
|
||||
border: 0;
|
||||
}
|
||||
|
||||
.media_result .options .thumbnail {
|
||||
vertical-align: middle;
|
||||
}
|
||||
|
||||
.media_result .options select {
|
||||
vertical-align: middle;
|
||||
display: inline-block;
|
||||
margin-right: 10px;
|
||||
}
|
||||
.media_result .options select[name=title] { width: 170px; }
|
||||
.media_result .options select[name=profile] { width: 90px; }
|
||||
.media_result .options select[name=category] { width: 80px; }
|
||||
|
||||
@media all and (max-width: 480px) {
|
||||
|
||||
.media_result .options select[name=title] { width: 90px; }
|
||||
.media_result .options select[name=profile] { width: 50px; }
|
||||
.media_result .options select[name=category] { width: 50px; }
|
||||
|
||||
}
|
||||
|
||||
.media_result .options .button {
|
||||
vertical-align: middle;
|
||||
display: inline-block;
|
||||
}
|
||||
|
||||
.media_result .options .message {
|
||||
height: 100%;
|
||||
font-size: 20px;
|
||||
color: #fff;
|
||||
line-height: 20px;
|
||||
}
|
||||
|
||||
.media_result .data {
|
||||
position: absolute;
|
||||
height: 100%;
|
||||
top: 0;
|
||||
left: 30px;
|
||||
right: 0;
|
||||
background: #5c697b;
|
||||
cursor: pointer;
|
||||
border-top: 1px solid rgba(255,255,255, 0.08);
|
||||
transition: all .4s cubic-bezier(0.9,0,0.1,1);
|
||||
}
|
||||
.media_result .data.open {
|
||||
left: 100% !important;
|
||||
}
|
||||
|
||||
.media_result:last-child .data { border-bottom: 0; }
|
||||
|
||||
.media_result .in_wanted, .media_result .in_library {
|
||||
position: absolute;
|
||||
bottom: 2px;
|
||||
left: 14px;
|
||||
font-size: 11px;
|
||||
}
|
||||
|
||||
.media_result .thumbnail {
|
||||
width: 34px;
|
||||
min-height: 100%;
|
||||
display: block;
|
||||
margin: 0;
|
||||
vertical-align: top;
|
||||
}
|
||||
|
||||
.media_result .info {
|
||||
position: absolute;
|
||||
top: 20%;
|
||||
left: 15px;
|
||||
right: 7px;
|
||||
vertical-align: middle;
|
||||
}
|
||||
|
||||
.media_result .info h2 {
|
||||
margin: 0;
|
||||
font-weight: normal;
|
||||
font-size: 20px;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
.search_form .info h2 {
|
||||
position: absolute;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.media_result .info h2 .title {
|
||||
display: block;
|
||||
margin: 0;
|
||||
text-overflow: ellipsis;
|
||||
overflow: hidden;
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
.search_form .info h2 .title {
|
||||
position: absolute;
|
||||
width: 88%;
|
||||
}
|
||||
|
||||
.media_result .info h2 .year {
|
||||
padding: 0 5px;
|
||||
text-align: center;
|
||||
position: absolute;
|
||||
width: 12%;
|
||||
right: 0;
|
||||
}
|
||||
|
||||
@media all and (max-width: 480px) {
|
||||
|
||||
.search_form .info h2 .year {
|
||||
font-size: 12px;
|
||||
margin-top: 7px;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
.search_form .mask,
|
||||
.media_result .mask {
|
||||
position: absolute;
|
||||
height: 100%;
|
||||
width: 100%;
|
||||
left: 0;
|
||||
top: 0;
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
Block.Search = new Class({
|
||||
var BlockSearch = new Class({
|
||||
|
||||
Extends: BlockBase,
|
||||
|
||||
@@ -9,42 +9,46 @@ Block.Search = new Class({
|
||||
|
||||
var focus_timer = 0;
|
||||
self.el = new Element('div.search_form').adopt(
|
||||
new Element('div.input').adopt(
|
||||
self.input = new Element('input', {
|
||||
'placeholder': 'Search & add a new media',
|
||||
new Element('a.icon-search', {
|
||||
'events': {
|
||||
'click': self.clear.bind(self),
|
||||
'touchend': self.clear.bind(self)
|
||||
}
|
||||
}),
|
||||
new Element('div.wrapper').adopt(
|
||||
self.result_container = new Element('div.results_container', {
|
||||
'tween': {
|
||||
'duration': 200
|
||||
},
|
||||
'events': {
|
||||
'keyup': self.keyup.bind(self),
|
||||
'focus': function(){
|
||||
if(focus_timer) clearTimeout(focus_timer);
|
||||
self.el.addClass('focused');
|
||||
if(this.get('value'))
|
||||
self.hideResults(false)
|
||||
},
|
||||
'blur': function(){
|
||||
focus_timer = (function(){
|
||||
self.el.removeClass('focused')
|
||||
}).delay(100);
|
||||
'mousewheel': function(e){
|
||||
(e).stopPropagation();
|
||||
}
|
||||
}
|
||||
}),
|
||||
new Element('a.icon2', {
|
||||
'events': {
|
||||
'click': self.clear.bind(self),
|
||||
'touchend': self.clear.bind(self)
|
||||
}
|
||||
})
|
||||
),
|
||||
self.result_container = new Element('div.results_container', {
|
||||
'tween': {
|
||||
'duration': 200
|
||||
},
|
||||
'events': {
|
||||
'mousewheel': function(e){
|
||||
(e).stopPropagation();
|
||||
}
|
||||
}
|
||||
}).adopt(
|
||||
self.results = new Element('div.results')
|
||||
}).grab(
|
||||
self.results = new Element('div.results')
|
||||
),
|
||||
new Element('div.input').grab(
|
||||
self.input = new Element('input', {
|
||||
'placeholder': 'Search & add a new media',
|
||||
'events': {
|
||||
'input': self.keyup.bind(self),
|
||||
'paste': self.keyup.bind(self),
|
||||
'change': self.keyup.bind(self),
|
||||
'keyup': self.keyup.bind(self),
|
||||
'focus': function(){
|
||||
if(focus_timer) clearTimeout(focus_timer);
|
||||
if(this.get('value'))
|
||||
self.hideResults(false);
|
||||
},
|
||||
'blur': function(){
|
||||
focus_timer = (function(){
|
||||
self.el.removeClass('focused');
|
||||
}).delay(100);
|
||||
}
|
||||
}
|
||||
})
|
||||
)
|
||||
)
|
||||
);
|
||||
|
||||
@@ -64,11 +68,12 @@ Block.Search = new Class({
|
||||
|
||||
self.last_q = '';
|
||||
self.input.set('value', '');
|
||||
self.el.addClass('focused');
|
||||
self.input.focus();
|
||||
|
||||
self.media = {};
|
||||
self.results.empty();
|
||||
self.el.removeClass('filled')
|
||||
self.el.removeClass('filled');
|
||||
|
||||
}
|
||||
},
|
||||
@@ -102,7 +107,7 @@ Block.Search = new Class({
|
||||
self.api_request.cancel();
|
||||
|
||||
if(self.autocomplete_timer) clearTimeout(self.autocomplete_timer);
|
||||
self.autocomplete_timer = self.autocomplete.delay(300, self)
|
||||
self.autocomplete_timer = self.autocomplete.delay(300, self);
|
||||
}
|
||||
|
||||
},
|
||||
@@ -112,10 +117,10 @@ Block.Search = new Class({
|
||||
|
||||
if(!self.q()){
|
||||
self.hideResults(true);
|
||||
return
|
||||
return;
|
||||
}
|
||||
|
||||
self.list()
|
||||
self.list();
|
||||
},
|
||||
|
||||
list: function(){
|
||||
@@ -136,7 +141,7 @@ Block.Search = new Class({
|
||||
'q': q
|
||||
},
|
||||
'onComplete': self.fill.bind(self, q)
|
||||
})
|
||||
});
|
||||
}
|
||||
else
|
||||
self.fill(q, cache);
|
||||
@@ -155,30 +160,25 @@ Block.Search = new Class({
|
||||
|
||||
Object.each(json, function(media){
|
||||
if(typeOf(media) == 'array'){
|
||||
Object.each(media, function(m){
|
||||
Object.each(media, function(me){
|
||||
|
||||
var m = new Block.Search[m.type.capitalize() + 'Item'](m);
|
||||
var m = new window['BlockSearch' + me.type.capitalize() + 'Item'](me);
|
||||
$(m).inject(self.results);
|
||||
self.media[m.imdb || 'r-'+Math.floor(Math.random()*10000)] = m;
|
||||
|
||||
if(q == m.imdb)
|
||||
m.showOptions()
|
||||
m.showOptions();
|
||||
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// Calculate result heights
|
||||
var w = window.getSize(),
|
||||
rc = self.result_container.getCoordinates();
|
||||
|
||||
self.results.setStyle('max-height', (w.y - rc.top - 50) + 'px');
|
||||
self.mask.fade('out')
|
||||
self.mask.fade('out');
|
||||
|
||||
},
|
||||
|
||||
loading: function(bool){
|
||||
this.el[bool ? 'addClass' : 'removeClass']('loading')
|
||||
this.el[bool ? 'addClass' : 'removeClass']('loading');
|
||||
},
|
||||
|
||||
q: function(){
|
||||
|
||||
242
couchpotato/core/media/_base/search/static/search.scss
Normal file
242
couchpotato/core/media/_base/search/static/search.scss
Normal file
@@ -0,0 +1,242 @@
|
||||
@import "couchpotato/static/style/mixins";
|
||||
|
||||
.search_form {
|
||||
display: inline-block;
|
||||
z-index: 200;
|
||||
width: 44px;
|
||||
position: relative;
|
||||
|
||||
.icon-search {
|
||||
position: absolute;
|
||||
z-index: 2;
|
||||
top: 50%;
|
||||
left: 0;
|
||||
height: 100%;
|
||||
cursor: pointer;
|
||||
text-align: center;
|
||||
color: #FFF;
|
||||
font-size: 20px;
|
||||
|
||||
@include translateY(-50%);
|
||||
|
||||
}
|
||||
|
||||
.wrapper {
|
||||
position: absolute;
|
||||
left: 44px;
|
||||
bottom: 0;
|
||||
background: $primary_color;
|
||||
border-radius: $border_radius 0 0 $border_radius;
|
||||
display: none;
|
||||
box-shadow: 0 0 15px 2px rgba(0,0,0,.15);
|
||||
|
||||
&:before {
|
||||
@include transform(rotate(45deg));
|
||||
content: '';
|
||||
display: block;
|
||||
position: absolute;
|
||||
height: 10px;
|
||||
width: 10px;
|
||||
background: $primary_color;
|
||||
left: -6px;
|
||||
bottom: 16px;
|
||||
z-index: 1;
|
||||
}
|
||||
}
|
||||
|
||||
.input {
|
||||
background: $background_color;
|
||||
border-radius: $border_radius 0 0 $border_radius;
|
||||
position: relative;
|
||||
left: 4px;
|
||||
height: 44px;
|
||||
overflow: hidden;
|
||||
width: 100%;
|
||||
|
||||
input {
|
||||
position: absolute;
|
||||
top: 0;
|
||||
left: 0;
|
||||
height: 100%;
|
||||
width: 100%;
|
||||
z-index: 1;
|
||||
|
||||
&::-ms-clear {
|
||||
width : 0;
|
||||
height: 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
&.focused,
|
||||
&.shown {
|
||||
border-color: #04bce6;
|
||||
|
||||
.wrapper {
|
||||
display: block;
|
||||
width: 380px;
|
||||
}
|
||||
|
||||
.input {
|
||||
|
||||
input {
|
||||
opacity: 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
.results_container {
|
||||
min-height: 50px;
|
||||
text-align: left;
|
||||
position: relative;
|
||||
left: 4px;
|
||||
display: none;
|
||||
background: $background_color;
|
||||
border-radius: $border_radius 0 0 0;
|
||||
overflow: hidden;
|
||||
|
||||
.results {
|
||||
max-height: 280px;
|
||||
overflow-x: hidden;
|
||||
|
||||
.media_result {
|
||||
overflow: hidden;
|
||||
height: 50px;
|
||||
position: relative;
|
||||
|
||||
.options {
|
||||
position: absolute;
|
||||
height: 100%;
|
||||
top: 0;
|
||||
left: 30px;
|
||||
right: 0;
|
||||
padding: 10px;
|
||||
background: rgba(0,0,0,.3);
|
||||
|
||||
> .in_library_wanted {
|
||||
margin-top: -7px;
|
||||
}
|
||||
|
||||
> div {
|
||||
border: 0;
|
||||
@include flexbox();
|
||||
}
|
||||
|
||||
.thumbnail {
|
||||
vertical-align: middle;
|
||||
}
|
||||
|
||||
select {
|
||||
vertical-align: middle;
|
||||
display: inline-block;
|
||||
margin-right: 10px;
|
||||
min-width: 70px;
|
||||
@include flex(1 auto);
|
||||
}
|
||||
|
||||
.button {
|
||||
@include flex(1 auto);
|
||||
vertical-align: middle;
|
||||
display: inline-block;
|
||||
}
|
||||
|
||||
.message {
|
||||
height: 100%;
|
||||
font-size: 20px;
|
||||
color: #fff;
|
||||
line-height: 20px;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
.thumbnail {
|
||||
width: 30px;
|
||||
min-height: 100%;
|
||||
display: block;
|
||||
margin: 0;
|
||||
vertical-align: top;
|
||||
}
|
||||
|
||||
.data {
|
||||
position: absolute;
|
||||
height: 100%;
|
||||
top: 0;
|
||||
left: 30px;
|
||||
right: 0;
|
||||
cursor: pointer;
|
||||
border-top: 1px solid rgba(255,255,255, 0.08);
|
||||
transition: all .4s cubic-bezier(0.9,0,0.1,1);
|
||||
@include translateX(0%);
|
||||
background: $background_color;
|
||||
|
||||
&.open {
|
||||
@include translateX(100%);
|
||||
}
|
||||
|
||||
.in_wanted,
|
||||
.in_library {
|
||||
position: absolute;
|
||||
bottom: 2px;
|
||||
left: 14px;
|
||||
font-size: 11px;
|
||||
}
|
||||
|
||||
.info {
|
||||
position: absolute;
|
||||
top: 20%;
|
||||
left: 15px;
|
||||
right: 7px;
|
||||
vertical-align: middle;
|
||||
|
||||
h2 {
|
||||
margin: 0;
|
||||
font-weight: 300;
|
||||
font-size: 1.25em;
|
||||
padding: 0;
|
||||
position: absolute;
|
||||
width: 100%;
|
||||
@include flexbox();
|
||||
|
||||
.title {
|
||||
display: inline-block;
|
||||
margin: 0;
|
||||
text-overflow: ellipsis;
|
||||
overflow: hidden;
|
||||
white-space: nowrap;
|
||||
@include flex(1 auto);
|
||||
}
|
||||
|
||||
.year {
|
||||
opacity: .4;
|
||||
padding: 0 5px;
|
||||
width: auto;
|
||||
display: none;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
&:hover .info h2 .year {
|
||||
display: inline-block;
|
||||
}
|
||||
|
||||
&:last-child .data {
|
||||
border-bottom: 0;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
&.focused.filled,
|
||||
&.shown.filled {
|
||||
.results_container {
|
||||
display: block;
|
||||
}
|
||||
|
||||
.input {
|
||||
border-radius: 0 0 0 $border_radius;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
@@ -73,4 +73,24 @@ config = [{
|
||||
],
|
||||
},
|
||||
],
|
||||
}, {
|
||||
'name': 'torrent',
|
||||
'groups': [
|
||||
{
|
||||
'tab': 'searcher',
|
||||
'name': 'searcher',
|
||||
'wizard': True,
|
||||
'options': [
|
||||
{
|
||||
'name': 'minimum_seeders',
|
||||
'advanced': True,
|
||||
'label': 'Minimum seeders',
|
||||
'description': 'Ignore torrents with seeders below this number',
|
||||
'default': 1,
|
||||
'type': 'int',
|
||||
'unit': 'seeders'
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
}]
|
||||
|
||||
@@ -87,31 +87,23 @@ class Searcher(SearcherBase):
|
||||
def containsOtherQuality(self, nzb, movie_year = None, preferred_quality = None):
|
||||
if not preferred_quality: preferred_quality = {}
|
||||
|
||||
name = nzb['name']
|
||||
size = nzb.get('size', 0)
|
||||
nzb_words = re.split('\W+', simplifyString(name))
|
||||
|
||||
qualities = fireEvent('quality.all', single = True)
|
||||
|
||||
found = {}
|
||||
for quality in qualities:
|
||||
# Main in words
|
||||
if quality['identifier'] in nzb_words:
|
||||
found[quality['identifier']] = True
|
||||
|
||||
# Alt in words
|
||||
if list(set(nzb_words) & set(quality['alternative'])):
|
||||
found[quality['identifier']] = True
|
||||
|
||||
# Try guessing via quality tags
|
||||
guess = fireEvent('quality.guess', [nzb.get('name')], single = True)
|
||||
guess = fireEvent('quality.guess', files = [nzb.get('name')], size = nzb.get('size', None), single = True)
|
||||
if guess:
|
||||
found[guess['identifier']] = True
|
||||
|
||||
# Hack for older movies that don't contain quality tag
|
||||
name = nzb['name']
|
||||
size = nzb.get('size', 0)
|
||||
|
||||
year_name = fireEvent('scanner.name_year', name, single = True)
|
||||
if len(found) == 0 and movie_year < datetime.datetime.now().year - 3 and not year_name.get('year', None):
|
||||
if size > 3000: # Assume dvdr
|
||||
if size > 20000: # Assume bd50
|
||||
log.info('Quality was missing in name, assuming it\'s a BR-Disk based on the size: %s', size)
|
||||
found['bd50'] = True
|
||||
elif size > 3000: # Assume dvdr
|
||||
log.info('Quality was missing in name, assuming it\'s a DVD-R based on the size: %s', size)
|
||||
found['dvdr'] = True
|
||||
else: # Assume dvdrip
|
||||
@@ -123,7 +115,10 @@ class Searcher(SearcherBase):
|
||||
if found.get(allowed):
|
||||
del found[allowed]
|
||||
|
||||
return not (found.get(preferred_quality['identifier']) and len(found) == 1)
|
||||
if found.get(preferred_quality['identifier']) and len(found) == 1:
|
||||
return False
|
||||
|
||||
return found
|
||||
|
||||
def correct3D(self, nzb, preferred_quality = None):
|
||||
if not preferred_quality: preferred_quality = {}
|
||||
@@ -134,7 +129,11 @@ class Searcher(SearcherBase):
|
||||
# Try guessing via quality tags
|
||||
guess = fireEvent('quality.guess', [nzb.get('name')], single = True)
|
||||
|
||||
return threed == guess.get('is_3d')
|
||||
if guess:
|
||||
return threed == guess.get('is_3d')
|
||||
# If no quality guess, assume not 3d
|
||||
else:
|
||||
return threed == False
|
||||
|
||||
def correctYear(self, haystack, year, year_range):
|
||||
|
||||
@@ -179,6 +178,25 @@ class Searcher(SearcherBase):
|
||||
|
||||
return False
|
||||
|
||||
def containsWords(self, rel_name, rel_words, conf, media):
|
||||
|
||||
# Make sure it has required words
|
||||
words = splitString(self.conf('%s_words' % conf, section = 'searcher').lower())
|
||||
try: words = removeDuplicate(words + splitString(media['category'][conf].lower()))
|
||||
except: pass
|
||||
|
||||
req_match = 0
|
||||
for req_set in words:
|
||||
if len(req_set) >= 2 and (req_set[:1] + req_set[-1:]) == '//':
|
||||
if re.search(req_set[1:-1], rel_name):
|
||||
log.debug('Regex match: %s', req_set[1:-1])
|
||||
req_match += 1
|
||||
else:
|
||||
req = splitString(req_set, '&')
|
||||
req_match += len(list(set(rel_words) & set(req))) == len(req)
|
||||
|
||||
return words, req_match > 0
|
||||
|
||||
def correctWords(self, rel_name, media):
|
||||
media_title = fireEvent('searcher.get_search_title', media, single = True)
|
||||
media_words = re.split('\W+', simplifyString(media_title))
|
||||
@@ -186,31 +204,13 @@ class Searcher(SearcherBase):
|
||||
rel_name = simplifyString(rel_name)
|
||||
rel_words = re.split('\W+', rel_name)
|
||||
|
||||
# Make sure it has required words
|
||||
required_words = splitString(self.conf('required_words', section = 'searcher').lower())
|
||||
try: required_words = removeDuplicate(required_words + splitString(media['category']['required'].lower()))
|
||||
except: pass
|
||||
|
||||
req_match = 0
|
||||
for req_set in required_words:
|
||||
req = splitString(req_set, '&')
|
||||
req_match += len(list(set(rel_words) & set(req))) == len(req)
|
||||
|
||||
if len(required_words) > 0 and req_match == 0:
|
||||
required_words, contains_required = self.containsWords(rel_name, rel_words, 'required', media)
|
||||
if len(required_words) > 0 and not contains_required:
|
||||
log.info2('Wrong: Required word missing: %s', rel_name)
|
||||
return False
|
||||
|
||||
# Ignore releases
|
||||
ignored_words = splitString(self.conf('ignored_words', section = 'searcher').lower())
|
||||
try: ignored_words = removeDuplicate(ignored_words + splitString(media['category']['ignored'].lower()))
|
||||
except: pass
|
||||
|
||||
ignored_match = 0
|
||||
for ignored_set in ignored_words:
|
||||
ignored = splitString(ignored_set, '&')
|
||||
ignored_match += len(list(set(rel_words) & set(ignored))) == len(ignored)
|
||||
|
||||
if len(ignored_words) > 0 and ignored_match:
|
||||
ignored_words, contains_ignored = self.containsWords(rel_name, rel_words, 'ignored', media)
|
||||
if len(ignored_words) > 0 and contains_ignored:
|
||||
log.info2("Wrong: '%s' contains 'ignored words'", rel_name)
|
||||
return False
|
||||
|
||||
|
||||
80
couchpotato/core/media/movie/_base/main.py
Normal file → Executable file
80
couchpotato/core/media/movie/_base/main.py
Normal file → Executable file
@@ -1,7 +1,7 @@
|
||||
import os
|
||||
import traceback
|
||||
import time
|
||||
|
||||
from CodernityDB.database import RecordNotFound
|
||||
from couchpotato import get_db
|
||||
from couchpotato.api import addApiView
|
||||
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
|
||||
@@ -27,6 +27,10 @@ class MovieBase(MovieTypeBase):
|
||||
|
||||
addApiView('movie.add', self.addView, docs = {
|
||||
'desc': 'Add new movie to the wanted list',
|
||||
'return': {'type': 'object', 'example': """{
|
||||
'success': True,
|
||||
'movie': object
|
||||
}"""},
|
||||
'params': {
|
||||
'identifier': {'desc': 'IMDB id of the movie your want to add.'},
|
||||
'profile_id': {'desc': 'ID of quality profile you want the add the movie in. If empty will use the default profile.'},
|
||||
@@ -45,7 +49,7 @@ class MovieBase(MovieTypeBase):
|
||||
})
|
||||
|
||||
addEvent('movie.add', self.add)
|
||||
addEvent('movie.update_info', self.updateInfo)
|
||||
addEvent('movie.update', self.update)
|
||||
addEvent('movie.update_release_dates', self.updateReleaseDate)
|
||||
|
||||
def add(self, params = None, force_readd = True, search_after = True, update_after = True, notify_after = True, status = None):
|
||||
@@ -61,7 +65,7 @@ class MovieBase(MovieTypeBase):
|
||||
return False
|
||||
elif not params.get('info'):
|
||||
try:
|
||||
is_movie = fireEvent('movie.is_movie', identifier = params.get('identifier'), single = True)
|
||||
is_movie = fireEvent('movie.is_movie', identifier = params.get('identifier'), adding = True, single = True)
|
||||
if not is_movie:
|
||||
msg = 'Can\'t add movie, seems to be a TV show.'
|
||||
log.error(msg)
|
||||
@@ -90,7 +94,7 @@ class MovieBase(MovieTypeBase):
|
||||
|
||||
# Default profile and category
|
||||
default_profile = {}
|
||||
if not params.get('profile_id'):
|
||||
if (not params.get('profile_id') and status != 'done') or params.get('ignore_previous', False):
|
||||
default_profile = fireEvent('profile.default', single = True)
|
||||
cat_id = params.get('category_id')
|
||||
|
||||
@@ -105,7 +109,7 @@ class MovieBase(MovieTypeBase):
|
||||
'imdb': params.get('identifier')
|
||||
},
|
||||
'status': status if status else 'active',
|
||||
'profile_id': params.get('profile_id', default_profile.get('_id')),
|
||||
'profile_id': params.get('profile_id') or default_profile.get('_id'),
|
||||
'category_id': cat_id if cat_id is not None and len(cat_id) > 0 and cat_id != '-1' else None,
|
||||
}
|
||||
|
||||
@@ -117,8 +121,17 @@ class MovieBase(MovieTypeBase):
|
||||
media['info'] = info
|
||||
|
||||
new = False
|
||||
previous_profile = None
|
||||
try:
|
||||
m = db.get('media', 'imdb-%s' % params.get('identifier'), with_doc = True)['doc']
|
||||
|
||||
try:
|
||||
db.get('id', m.get('profile_id'))
|
||||
previous_profile = m.get('profile_id')
|
||||
except RecordNotFound:
|
||||
pass
|
||||
except:
|
||||
log.error('Failed getting previous profile: %s', traceback.format_exc())
|
||||
except:
|
||||
new = True
|
||||
m = db.insert(media)
|
||||
@@ -139,16 +152,16 @@ class MovieBase(MovieTypeBase):
|
||||
|
||||
# Clean snatched history
|
||||
for release in fireEvent('release.for_media', m['_id'], single = True):
|
||||
if release.get('status') in ['downloaded', 'snatched', 'done']:
|
||||
if release.get('status') in ['downloaded', 'snatched', 'seeding', 'done']:
|
||||
if params.get('ignore_previous', False):
|
||||
release['status'] = 'ignored'
|
||||
db.update(release)
|
||||
fireEvent('release.update_status', release['_id'], status = 'ignored')
|
||||
else:
|
||||
fireEvent('release.delete', release['_id'], single = True)
|
||||
|
||||
m['profile_id'] = params.get('profile_id', default_profile.get('id'))
|
||||
m['profile_id'] = (params.get('profile_id') or default_profile.get('_id')) if not previous_profile else previous_profile
|
||||
m['category_id'] = cat_id if cat_id is not None and len(cat_id) > 0 else (m.get('category_id') or None)
|
||||
m['last_edit'] = int(time.time())
|
||||
m['tags'] = []
|
||||
|
||||
do_search = True
|
||||
db.update(m)
|
||||
@@ -161,7 +174,7 @@ class MovieBase(MovieTypeBase):
|
||||
# Trigger update info
|
||||
if added and update_after:
|
||||
# Do full update to get images etc
|
||||
fireEventAsync('movie.update_info', m['_id'], default_title = params.get('title'), on_complete = onComplete)
|
||||
fireEventAsync('movie.update', m['_id'], default_title = params.get('title'), on_complete = onComplete)
|
||||
|
||||
# Remove releases
|
||||
for rel in fireEvent('release.for_media', m['_id'], single = True):
|
||||
@@ -169,6 +182,9 @@ class MovieBase(MovieTypeBase):
|
||||
db.delete(rel)
|
||||
|
||||
movie_dict = fireEvent('media.get', m['_id'], single = True)
|
||||
if not movie_dict:
|
||||
log.debug('Failed adding media, can\'t find it anymore')
|
||||
return False
|
||||
|
||||
if do_search and search_after:
|
||||
onComplete = self.createOnComplete(m['_id'])
|
||||
@@ -225,7 +241,7 @@ class MovieBase(MovieTypeBase):
|
||||
|
||||
db.update(m)
|
||||
|
||||
fireEvent('media.restatus', m['_id'])
|
||||
fireEvent('media.restatus', m['_id'], single = True)
|
||||
|
||||
m = db.get('id', media_id)
|
||||
|
||||
@@ -245,7 +261,7 @@ class MovieBase(MovieTypeBase):
|
||||
'success': False,
|
||||
}
|
||||
|
||||
def updateInfo(self, media_id = None, identifier = None, default_title = None, extended = False):
|
||||
def update(self, media_id = None, identifier = None, default_title = None, extended = False):
|
||||
"""
|
||||
Update movie information inside media['doc']['info']
|
||||
|
||||
@@ -258,6 +274,10 @@ class MovieBase(MovieTypeBase):
|
||||
if self.shuttingDown():
|
||||
return
|
||||
|
||||
lock_key = 'media.get.%s' % media_id if media_id else identifier
|
||||
self.acquireLock(lock_key)
|
||||
|
||||
media = {}
|
||||
try:
|
||||
db = get_db()
|
||||
|
||||
@@ -301,42 +321,16 @@ class MovieBase(MovieTypeBase):
|
||||
media['title'] = def_title
|
||||
|
||||
# Files
|
||||
images = info.get('images', [])
|
||||
media['files'] = media.get('files', {})
|
||||
for image_type in ['poster']:
|
||||
image_urls = info.get('images', [])
|
||||
|
||||
# Remove non-existing files
|
||||
file_type = 'image_%s' % image_type
|
||||
existing_files = list(set(media['files'].get(file_type, [])))
|
||||
for ef in media['files'].get(file_type, []):
|
||||
if not os.path.isfile(ef):
|
||||
existing_files.remove(ef)
|
||||
|
||||
# Replace new files list
|
||||
media['files'][file_type] = existing_files
|
||||
if len(existing_files) == 0:
|
||||
del media['files'][file_type]
|
||||
|
||||
# Loop over type
|
||||
for image in images.get(image_type, []):
|
||||
if not isinstance(image, (str, unicode)):
|
||||
continue
|
||||
|
||||
if file_type not in media['files'] or len(media['files'].get(file_type, [])) == 0:
|
||||
file_path = fireEvent('file.download', url = image, single = True)
|
||||
if file_path:
|
||||
media['files'][file_type] = [file_path]
|
||||
break
|
||||
else:
|
||||
break
|
||||
self.getPoster(media, image_urls)
|
||||
|
||||
db.update(media)
|
||||
|
||||
return media
|
||||
except:
|
||||
log.error('Failed update media: %s', traceback.format_exc())
|
||||
|
||||
return {}
|
||||
self.releaseLock(lock_key)
|
||||
return media
|
||||
|
||||
def updateReleaseDate(self, media_id):
|
||||
"""
|
||||
@@ -352,7 +346,7 @@ class MovieBase(MovieTypeBase):
|
||||
media = db.get('id', media_id)
|
||||
|
||||
if not media.get('info'):
|
||||
media = self.updateInfo(media_id)
|
||||
media = self.update(media_id)
|
||||
dates = media.get('info', {}).get('release_date')
|
||||
else:
|
||||
dates = media.get('info').get('release_date')
|
||||
|
||||
52
couchpotato/core/media/movie/_base/static/details.js
Normal file
52
couchpotato/core/media/movie/_base/static/details.js
Normal file
@@ -0,0 +1,52 @@
|
||||
var MovieDetails = new Class({
|
||||
|
||||
Extends: BlockBase,
|
||||
|
||||
sections: null,
|
||||
|
||||
initialize: function(parent, options){
|
||||
var self = this;
|
||||
|
||||
self.sections = {};
|
||||
|
||||
self.el = new Element('div',{
|
||||
'class': 'page active movie_details level_' + (options.level || 0)
|
||||
}).adopt(
|
||||
self.overlay = new Element('div.overlay', {
|
||||
'events': {
|
||||
'click': self.close.bind(self)
|
||||
}
|
||||
}).grab(
|
||||
new Element('a.close.icon-left-arrow')
|
||||
),
|
||||
self.content = new Element('div.content').grab(
|
||||
new Element('h1', {
|
||||
'text': parent.getTitle() + (parent.get('year') ? ' (' + parent.get('year') + ')' : '')
|
||||
})
|
||||
)
|
||||
);
|
||||
|
||||
self.addSection('description', new Element('div', {
|
||||
'text': parent.get('plot')
|
||||
}));
|
||||
|
||||
},
|
||||
|
||||
addSection: function(name, section_el){
|
||||
var self = this;
|
||||
name = name.toLowerCase();
|
||||
|
||||
self.content.grab(
|
||||
self.sections[name] = new Element('div', {
|
||||
'class': 'section section_' + name
|
||||
}).grab(section_el)
|
||||
);
|
||||
},
|
||||
|
||||
close: function(){
|
||||
var self = this;
|
||||
|
||||
self.el.dispose();
|
||||
}
|
||||
|
||||
});
|
||||
@@ -45,15 +45,16 @@ var MovieList = new Class({
|
||||
}) : null
|
||||
);
|
||||
|
||||
if($(window).getSize().x <= 480 && !self.options.force_view)
|
||||
self.changeView('list');
|
||||
else
|
||||
self.changeView(self.getSavedView() || self.options.view || 'details');
|
||||
self.changeView(self.getSavedView() || self.options.view || 'thumb');
|
||||
|
||||
// Create the alphabet nav
|
||||
if(self.options.navigation)
|
||||
self.createNavigation();
|
||||
|
||||
self.getMovies();
|
||||
|
||||
App.on('movie.added', self.movieAdded.bind(self));
|
||||
App.on('movie.deleted', self.movieDeleted.bind(self))
|
||||
App.on('movie.deleted', self.movieDeleted.bind(self));
|
||||
},
|
||||
|
||||
movieDeleted: function(notification){
|
||||
@@ -67,7 +68,7 @@ var MovieList = new Class({
|
||||
self.setCounter(self.counter_count-1);
|
||||
self.total_movies--;
|
||||
}
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
self.checkIfEmpty();
|
||||
@@ -89,15 +90,11 @@ var MovieList = new Class({
|
||||
create: function(){
|
||||
var self = this;
|
||||
|
||||
// Create the alphabet nav
|
||||
if(self.options.navigation)
|
||||
self.createNavigation();
|
||||
|
||||
if(self.options.load_more)
|
||||
self.scrollspy = new ScrollSpy({
|
||||
min: function(){
|
||||
var c = self.load_more.getCoordinates();
|
||||
return c.top - window.document.getSize().y - 300
|
||||
return c.top - window.document.getSize().y - 300;
|
||||
},
|
||||
onEnter: self.loadMore.bind(self)
|
||||
});
|
||||
@@ -138,7 +135,7 @@ var MovieList = new Class({
|
||||
self.empty_message = null;
|
||||
}
|
||||
|
||||
if(self.total_movies && count == 0 && !self.empty_message){
|
||||
if(self.total_movies && count === 0 && !self.empty_message){
|
||||
var message = (self.filter.search ? 'for "'+self.filter.search+'"' : '') +
|
||||
(self.filter.starts_with ? ' in <strong>'+self.filter.starts_with+'</strong>' : '');
|
||||
|
||||
@@ -230,30 +227,33 @@ var MovieList = new Class({
|
||||
),
|
||||
new Element('div.menus').adopt(
|
||||
self.navigation_counter = new Element('span.counter[title=Total]'),
|
||||
self.filter_menu = new Block.Menu(self, {
|
||||
'class': 'filter'
|
||||
self.filter_menu = new BlockMenu(self, {
|
||||
'class': 'filter',
|
||||
'button_class': 'icon-filter'
|
||||
}),
|
||||
self.navigation_actions = new Element('ul.actions', {
|
||||
self.navigation_actions = new Element('div.actions', {
|
||||
'events': {
|
||||
'click:relay(li)': function(e, el){
|
||||
'click': function(e, el){
|
||||
(e).stop();
|
||||
|
||||
var new_view = self.current_view == 'list' ? 'thumb' : 'list';
|
||||
|
||||
var a = 'active';
|
||||
self.navigation_actions.getElements('.'+a).removeClass(a);
|
||||
self.changeView(el.get('data-view'));
|
||||
this.addClass(a);
|
||||
self.changeView(new_view);
|
||||
|
||||
self.navigation_actions.getElement('[data-view='+new_view+']')
|
||||
.addClass(a);
|
||||
|
||||
el.inject(el.getParent(), 'top');
|
||||
el.getSiblings().hide();
|
||||
setTimeout(function(){
|
||||
el.getSiblings().setStyle('display', null);
|
||||
}, 100)
|
||||
}
|
||||
}
|
||||
}),
|
||||
self.navigation_menu = new Block.Menu(self, {
|
||||
'class': 'extra'
|
||||
self.navigation_menu = new BlockMenu(self, {
|
||||
'class': 'extra',
|
||||
'button_class': 'icon-dots'
|
||||
})
|
||||
)
|
||||
).inject(self.el, 'top');
|
||||
);
|
||||
|
||||
// Mass edit
|
||||
self.mass_edit_select_class = new Form.Check(self.mass_edit_select);
|
||||
@@ -261,7 +261,7 @@ var MovieList = new Class({
|
||||
new Element('option', {
|
||||
'value': profile.get('_id'),
|
||||
'text': profile.get('label')
|
||||
}).inject(self.mass_edit_quality)
|
||||
}).inject(self.mass_edit_quality);
|
||||
});
|
||||
|
||||
self.filter_menu.addLink(
|
||||
@@ -273,7 +273,7 @@ var MovieList = new Class({
|
||||
'change': self.search.bind(self)
|
||||
}
|
||||
})
|
||||
).addClass('search');
|
||||
).addClass('search icon-search');
|
||||
|
||||
var available_chars;
|
||||
self.filter_menu.addEvent('open', function(){
|
||||
@@ -289,8 +289,8 @@ var MovieList = new Class({
|
||||
available_chars = json.chars;
|
||||
|
||||
available_chars.each(function(c){
|
||||
self.letters[c.capitalize()].addClass('available')
|
||||
})
|
||||
self.letters[c.capitalize()].addClass('available');
|
||||
});
|
||||
|
||||
}
|
||||
});
|
||||
@@ -301,23 +301,23 @@ var MovieList = new Class({
|
||||
'events': {
|
||||
'click:relay(li.available)': function(e, el){
|
||||
self.activateLetter(el.get('data-letter'));
|
||||
self.getMovies(true)
|
||||
self.getMovies(true);
|
||||
}
|
||||
}
|
||||
})
|
||||
);
|
||||
|
||||
// Actions
|
||||
['mass_edit', 'details', 'list'].each(function(view){
|
||||
['thumb', 'list'].each(function(view){
|
||||
var current = self.current_view == view;
|
||||
new Element('li', {
|
||||
'class': 'icon2 ' + view + (current ? ' active ' : ''),
|
||||
new Element('a', {
|
||||
'class': 'icon-' + view + (current ? ' active ' : ''),
|
||||
'data-view': view
|
||||
}).inject(self.navigation_actions, current ? 'top' : 'bottom');
|
||||
});
|
||||
|
||||
// All
|
||||
self.letters['all'] = new Element('li.letter_all.available.active', {
|
||||
self.letters.all = new Element('li.letter_all.available.active', {
|
||||
'text': 'ALL'
|
||||
}).inject(self.navigation_alpha);
|
||||
|
||||
@@ -346,7 +346,7 @@ var MovieList = new Class({
|
||||
var selected = 0,
|
||||
movies = self.movies.length;
|
||||
self.movies.each(function(movie){
|
||||
selected += movie.isSelected() ? 1 : 0
|
||||
selected += movie.isSelected() ? 1 : 0;
|
||||
});
|
||||
|
||||
var indeterminate = selected > 0 && selected < movies,
|
||||
@@ -373,6 +373,7 @@ var MovieList = new Class({
|
||||
(e).preventDefault();
|
||||
this.set('text', 'Deleting..');
|
||||
Api.request('media.delete', {
|
||||
'method': 'post',
|
||||
'data': {
|
||||
'id': ids.join(','),
|
||||
'delete_from': self.options.identifier
|
||||
@@ -413,6 +414,7 @@ var MovieList = new Class({
|
||||
var ids = self.getSelectedMovies();
|
||||
|
||||
Api.request('movie.edit', {
|
||||
'method': 'post',
|
||||
'data': {
|
||||
'id': ids.join(','),
|
||||
'profile_id': self.mass_edit_quality.get('value')
|
||||
@@ -426,6 +428,7 @@ var MovieList = new Class({
|
||||
var ids = self.getSelectedMovies();
|
||||
|
||||
Api.request('media.refresh', {
|
||||
'method': 'post',
|
||||
'data': {
|
||||
'id': ids.join(',')
|
||||
}
|
||||
@@ -438,10 +441,10 @@ var MovieList = new Class({
|
||||
var ids = [];
|
||||
self.movies.each(function(movie){
|
||||
if (movie.isSelected())
|
||||
ids.include(movie.get('_id'))
|
||||
ids.include(movie.get('_id'));
|
||||
});
|
||||
|
||||
return ids
|
||||
return ids;
|
||||
},
|
||||
|
||||
massEditToggleAll: function(){
|
||||
@@ -450,10 +453,10 @@ var MovieList = new Class({
|
||||
var select = self.mass_edit_select.get('checked');
|
||||
|
||||
self.movies.each(function(movie){
|
||||
movie.select(select)
|
||||
movie.select(select);
|
||||
});
|
||||
|
||||
self.calculateSelected()
|
||||
self.calculateSelected();
|
||||
},
|
||||
|
||||
reset: function(){
|
||||
@@ -490,12 +493,12 @@ var MovieList = new Class({
|
||||
.addClass(new_view+'_list');
|
||||
|
||||
self.current_view = new_view;
|
||||
Cookie.write(self.options.identifier+'_view2', new_view, {duration: 1000});
|
||||
Cookie.write(self.options.identifier+'_view3', new_view, {duration: 1000});
|
||||
},
|
||||
|
||||
getSavedView: function(){
|
||||
var self = this;
|
||||
return Cookie.read(self.options.identifier+'_view2');
|
||||
return Cookie.read(self.options.identifier+'_view3');
|
||||
},
|
||||
|
||||
search: function(){
|
||||
@@ -534,7 +537,7 @@ var MovieList = new Class({
|
||||
self.load_more.set('text', 'loading...');
|
||||
}
|
||||
|
||||
if(self.movies.length == 0 && self.options.loader){
|
||||
if(self.movies.length === 0 && self.options.loader){
|
||||
|
||||
self.loader_first = new Element('div.loading').adopt(
|
||||
new Element('div.message', {'text': self.options.title ? 'Loading \'' + self.options.title + '\'' : 'Loading...'})
|
||||
@@ -587,7 +590,7 @@ var MovieList = new Class({
|
||||
loadMore: function(){
|
||||
var self = this;
|
||||
if(self.offset >= self.options.limit)
|
||||
self.getMovies()
|
||||
self.getMovies();
|
||||
},
|
||||
|
||||
store: function(movies){
|
||||
@@ -600,7 +603,7 @@ var MovieList = new Class({
|
||||
checkIfEmpty: function(){
|
||||
var self = this;
|
||||
|
||||
var is_empty = self.movies.length == 0 && (self.total_movies == 0 || self.total_movies === undefined);
|
||||
var is_empty = self.movies.length === 0 && (self.total_movies === 0 || self.total_movies === undefined);
|
||||
|
||||
if(self.title)
|
||||
self.title[is_empty ? 'hide' : 'show']();
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
Page.Manage = new Class({
|
||||
var MoviesManage = new Class({
|
||||
|
||||
Extends: PageBase,
|
||||
|
||||
order: 20,
|
||||
name: 'manage',
|
||||
title: 'Do stuff to your existing movies!',
|
||||
|
||||
@@ -125,12 +126,12 @@ Page.Manage = new Class({
|
||||
(folder_progress.eta > 0 ? ', ' + new Date ().increment('second', folder_progress.eta).timeDiffInWords().replace('from now', 'to go') : '')
|
||||
}),
|
||||
new Element('span.percentage', {'text': folder_progress.total ? Math.round(((folder_progress.total-folder_progress.to_go)/folder_progress.total)*100) + '%' : '0%'})
|
||||
).inject(self.progress_container)
|
||||
).inject(self.progress_container);
|
||||
});
|
||||
|
||||
}
|
||||
}
|
||||
})
|
||||
});
|
||||
|
||||
}, 1000);
|
||||
},
|
||||
@@ -140,10 +141,10 @@ Page.Manage = new Class({
|
||||
|
||||
for (folder in progress_object) {
|
||||
if (progress_object.hasOwnProperty(folder)) {
|
||||
temp_array.push(folder)
|
||||
temp_array.push(folder);
|
||||
}
|
||||
}
|
||||
return temp_array.stableSort()
|
||||
return temp_array.stableSort();
|
||||
}
|
||||
|
||||
});
|
||||
@@ -2,7 +2,10 @@ var MovieAction = new Class({
|
||||
|
||||
Implements: [Options],
|
||||
|
||||
class_name: 'action icon2',
|
||||
class_name: 'action',
|
||||
label: 'UNKNOWN',
|
||||
button: null,
|
||||
details: null,
|
||||
|
||||
initialize: function(movie, options){
|
||||
var self = this;
|
||||
@@ -11,20 +14,33 @@ var MovieAction = new Class({
|
||||
self.movie = movie;
|
||||
|
||||
self.create();
|
||||
if(self.el)
|
||||
self.el.addClass(self.class_name)
|
||||
|
||||
if(self.button)
|
||||
self.button.addClass(self.class_name);
|
||||
},
|
||||
|
||||
create: function(){},
|
||||
|
||||
getButton: function(){
|
||||
return this.button || null;
|
||||
},
|
||||
|
||||
getDetails: function(){
|
||||
return this.details || null;
|
||||
},
|
||||
|
||||
getLabel: function(){
|
||||
return this.label;
|
||||
},
|
||||
|
||||
disable: function(){
|
||||
if(this.el)
|
||||
this.el.addClass('disable')
|
||||
this.el.addClass('disable');
|
||||
},
|
||||
|
||||
enable: function(){
|
||||
if(this.el)
|
||||
this.el.removeClass('disable')
|
||||
this.el.removeClass('disable');
|
||||
},
|
||||
|
||||
getTitle: function(){
|
||||
@@ -37,7 +53,7 @@ var MovieAction = new Class({
|
||||
try {
|
||||
return self.movie.original_title ? self.movie.original_title : self.movie.titles[0];
|
||||
}
|
||||
catch(e){
|
||||
catch(e2){
|
||||
return 'Unknown';
|
||||
}
|
||||
}
|
||||
@@ -46,10 +62,10 @@ var MovieAction = new Class({
|
||||
get: function(key){
|
||||
var self = this;
|
||||
try {
|
||||
return self.movie.get(key)
|
||||
return self.movie.get(key);
|
||||
}
|
||||
catch(e){
|
||||
return self.movie[key]
|
||||
return self.movie[key];
|
||||
}
|
||||
},
|
||||
|
||||
@@ -63,7 +79,7 @@ var MovieAction = new Class({
|
||||
},
|
||||
|
||||
toElement: function(){
|
||||
return this.el || null
|
||||
return this.el || null;
|
||||
}
|
||||
|
||||
});
|
||||
@@ -78,9 +94,10 @@ MA.IMDB = new Class({
|
||||
create: function(){
|
||||
var self = this;
|
||||
|
||||
self.id = self.movie.get('imdb') || self.movie.get('identifier');
|
||||
self.id = self.movie.getIdentifier ? self.movie.getIdentifier() : self.get('imdb');
|
||||
|
||||
self.el = new Element('a.imdb', {
|
||||
self.button = new Element('a.imdb', {
|
||||
'text': 'IMDB',
|
||||
'title': 'Go to the IMDB page of ' + self.getTitle(),
|
||||
'href': 'http://www.imdb.com/title/'+self.id+'/',
|
||||
'target': '_blank'
|
||||
@@ -94,47 +111,34 @@ MA.IMDB = new Class({
|
||||
MA.Release = new Class({
|
||||
|
||||
Extends: MovieAction,
|
||||
label: 'Releases',
|
||||
|
||||
create: function(){
|
||||
var self = this;
|
||||
|
||||
self.el = new Element('a.releases.download', {
|
||||
'title': 'Show the releases that are available for ' + self.getTitle(),
|
||||
'events': {
|
||||
'click': self.show.bind(self)
|
||||
}
|
||||
});
|
||||
|
||||
if(!self.movie.data.releases || self.movie.data.releases.length == 0)
|
||||
self.el.hide();
|
||||
else
|
||||
self.showHelper();
|
||||
|
||||
App.on('movie.searcher.ended', function(notification){
|
||||
if(self.movie.data._id != notification.data._id) return;
|
||||
|
||||
self.releases = null;
|
||||
if(self.options_container){
|
||||
self.options_container.destroy();
|
||||
self.options_container = null;
|
||||
// Releases are currently displayed
|
||||
if(self.options_container.isDisplayed()){
|
||||
self.options_container.destroy();
|
||||
self.getDetails();
|
||||
}
|
||||
else {
|
||||
self.options_container.destroy();
|
||||
self.options_container = null;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
},
|
||||
|
||||
show: function(e){
|
||||
var self = this;
|
||||
if(e)
|
||||
(e).preventDefault();
|
||||
|
||||
self.createReleases();
|
||||
|
||||
},
|
||||
|
||||
createReleases: function(){
|
||||
getDetails: function(refresh){
|
||||
var self = this;
|
||||
|
||||
if(!self.options_container){
|
||||
if(!self.options_container || refresh){
|
||||
self.options_container = new Element('div.options').grab(
|
||||
self.release_container = new Element('div.releases.table')
|
||||
);
|
||||
@@ -155,14 +159,14 @@ MA.Release = new Class({
|
||||
|
||||
var quality = Quality.getQuality(release.quality) || {},
|
||||
info = release.info || {},
|
||||
provider = self.get(release, 'provider') + (info['provider_extra'] ? self.get(release, 'provider_extra') : '');
|
||||
provider = self.get(release, 'provider') + (info.provider_extra ? self.get(release, 'provider_extra') : '');
|
||||
|
||||
var release_name = self.get(release, 'name');
|
||||
if(release.files && release.files.length > 0){
|
||||
try {
|
||||
var movie_file = release.files.filter(function(file){
|
||||
var type = File.Type.get(file.type_id);
|
||||
return type && type.identifier == 'movie'
|
||||
return type && type.identifier == 'movie';
|
||||
}).pick();
|
||||
release_name = movie_file.path.split(Api.getOption('path_sep')).getLast();
|
||||
}
|
||||
@@ -170,19 +174,19 @@ MA.Release = new Class({
|
||||
}
|
||||
|
||||
// Create release
|
||||
release['el'] = new Element('div', {
|
||||
release.el = new Element('div', {
|
||||
'class': 'item '+release.status,
|
||||
'id': 'release_'+release._id
|
||||
}).adopt(
|
||||
new Element('span.name', {'text': release_name, 'title': release_name}),
|
||||
new Element('span.status', {'text': release.status, 'class': 'release_status '+release.status}),
|
||||
new Element('span.status', {'text': release.status, 'class': 'status '+release.status}),
|
||||
new Element('span.quality', {'text': quality.label + (release.is_3d ? ' 3D' : '') || 'n/a'}),
|
||||
new Element('span.size', {'text': info['size'] ? Math.floor(self.get(release, 'size')) : 'n/a'}),
|
||||
new Element('span.size', {'text': info.size ? Math.floor(self.get(release, 'size')) : 'n/a'}),
|
||||
new Element('span.age', {'text': self.get(release, 'age')}),
|
||||
new Element('span.score', {'text': self.get(release, 'score')}),
|
||||
new Element('span.provider', { 'text': provider, 'title': provider }),
|
||||
info['detail_url'] ? new Element('a.info.icon2', {
|
||||
'href': info['detail_url'],
|
||||
info.detail_url ? new Element('a.info.icon2', {
|
||||
'href': info.detail_url,
|
||||
'target': '_blank'
|
||||
}) : new Element('a'),
|
||||
new Element('a.download.icon2', {
|
||||
@@ -252,7 +256,7 @@ MA.Release = new Class({
|
||||
|
||||
self.trynext_container.adopt(
|
||||
new Element('span.or', {
|
||||
'text': 'This movie is snatched, if anything went wrong, download'
|
||||
'text': 'If anything went wrong, download'
|
||||
}),
|
||||
lr ? new Element('a.button.orange', {
|
||||
'text': 'the same release again',
|
||||
@@ -276,7 +280,7 @@ MA.Release = new Class({
|
||||
new Element('span.or', {
|
||||
'text': 'or pick one below'
|
||||
})] : null
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
self.last_release = null;
|
||||
@@ -284,9 +288,7 @@ MA.Release = new Class({
|
||||
|
||||
}
|
||||
|
||||
// Show it
|
||||
self.options_container.inject(self.movie, 'top');
|
||||
self.movie.slide('in', self.options_container);
|
||||
return self.options_container;
|
||||
|
||||
},
|
||||
|
||||
@@ -302,7 +304,7 @@ MA.Release = new Class({
|
||||
self.movie.data.releases.each(function(release){
|
||||
if(has_available && has_snatched) return;
|
||||
|
||||
if(['snatched', 'downloaded', 'seeding'].contains(release.status))
|
||||
if(['snatched', 'downloaded', 'seeding', 'done'].contains(release.status))
|
||||
has_snatched = true;
|
||||
|
||||
if(['available'].contains(release.status))
|
||||
@@ -335,13 +337,13 @@ MA.Release = new Class({
|
||||
'click': self.markMovieDone.bind(self)
|
||||
}
|
||||
})
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
},
|
||||
|
||||
get: function(release, type){
|
||||
return (release.info && release.info[type] !== undefined) ? release.info[type] : 'n/a'
|
||||
return (release.info && release.info[type] !== undefined) ? release.info[type] : 'n/a';
|
||||
},
|
||||
|
||||
download: function(release){
|
||||
@@ -379,7 +381,7 @@ MA.Release = new Class({
|
||||
'data': {
|
||||
'id': release._id
|
||||
}
|
||||
})
|
||||
});
|
||||
|
||||
},
|
||||
|
||||
@@ -396,7 +398,7 @@ MA.Release = new Class({
|
||||
movie.set('tween', {
|
||||
'duration': 300,
|
||||
'onComplete': function(){
|
||||
self.movie.destroy()
|
||||
self.movie.destroy();
|
||||
}
|
||||
});
|
||||
movie.tween('height', 0);
|
||||
@@ -422,49 +424,35 @@ MA.Trailer = new Class({
|
||||
|
||||
Extends: MovieAction,
|
||||
id: null,
|
||||
label: 'Trailer',
|
||||
|
||||
create: function(){
|
||||
getDetails: function(){
|
||||
var self = this;
|
||||
|
||||
self.el = new Element('a.trailer', {
|
||||
'title': 'Watch the trailer of ' + self.getTitle(),
|
||||
'events': {
|
||||
'click': self.watch.bind(self)
|
||||
}
|
||||
});
|
||||
if(!self.player_container){
|
||||
var id = 'trailer-'+randomString();
|
||||
self.player_container = new Element('div.icon-play[id='+id+']', {
|
||||
'events': {
|
||||
'click': function(e){
|
||||
self.watch(id);
|
||||
}
|
||||
}
|
||||
});
|
||||
self.container = new Element('div.trailer_container')
|
||||
.grab(self.player_container);
|
||||
}
|
||||
|
||||
return self.player_container;
|
||||
},
|
||||
|
||||
watch: function(offset){
|
||||
watch: function(){
|
||||
var self = this;
|
||||
|
||||
var data_url = 'https://gdata.youtube.com/feeds/videos?vq="{title}" {year} trailer&max-results=1&alt=json-in-script&orderby=relevance&sortorder=descending&format=5&fmt=18';
|
||||
var url = data_url.substitute({
|
||||
var data_url = 'https://gdata.youtube.com/feeds/videos?vq="{title}" {year} trailer&max-results=1&alt=json-in-script&orderby=relevance&sortorder=descending&format=5&fmt=18',
|
||||
url = data_url.substitute({
|
||||
'title': encodeURI(self.getTitle()),
|
||||
'year': self.get('year'),
|
||||
'offset': offset || 1
|
||||
}),
|
||||
size = $(self.movie).getSize(),
|
||||
height = self.options.height || (size.x/16)*9,
|
||||
id = 'trailer-'+randomString();
|
||||
|
||||
self.player_container = new Element('div[id='+id+']');
|
||||
self.container = new Element('div.hide.trailer_container')
|
||||
.adopt(self.player_container)
|
||||
.inject($(self.movie), 'top');
|
||||
|
||||
self.container.setStyle('height', 0);
|
||||
self.container.removeClass('hide');
|
||||
|
||||
self.close_button = new Element('a.hide.hide_trailer', {
|
||||
'text': 'Hide trailer',
|
||||
'events': {
|
||||
'click': self.stop.bind(self)
|
||||
}
|
||||
}).inject(self.movie);
|
||||
|
||||
self.container.setStyle('height', height);
|
||||
$(self.movie).setStyle('height', height);
|
||||
'year': self.get('year')
|
||||
});
|
||||
|
||||
new Request.JSONP({
|
||||
'url': url,
|
||||
@@ -484,8 +472,6 @@ MA.Trailer = new Class({
|
||||
}
|
||||
});
|
||||
|
||||
self.close_button.removeClass('hide');
|
||||
|
||||
var quality_set = false;
|
||||
var change_quality = function(state){
|
||||
if(!quality_set && (state.data == 1 || state.data || 2)){
|
||||
@@ -501,7 +487,9 @@ MA.Trailer = new Class({
|
||||
self.player.addEventListener('onStateChange', change_quality);
|
||||
|
||||
}
|
||||
}).send()
|
||||
}).send();
|
||||
|
||||
return self.container;
|
||||
|
||||
},
|
||||
|
||||
@@ -516,7 +504,7 @@ MA.Trailer = new Class({
|
||||
setTimeout(function(){
|
||||
self.container.destroy();
|
||||
self.close_button.destroy();
|
||||
}, 1800)
|
||||
}, 1800);
|
||||
}
|
||||
|
||||
|
||||
@@ -529,7 +517,8 @@ MA.Edit = new Class({
|
||||
create: function(){
|
||||
var self = this;
|
||||
|
||||
self.el = new Element('a.edit', {
|
||||
self.button = new Element('a.edit', {
|
||||
'text': 'Edit',
|
||||
'title': 'Change movie information, like title and quality.',
|
||||
'events': {
|
||||
'click': self.editMovie.bind(self)
|
||||
@@ -578,7 +567,7 @@ MA.Edit = new Class({
|
||||
// Fill categories
|
||||
var categories = CategoryList.getAll();
|
||||
|
||||
if(categories.length == 0)
|
||||
if(categories.length === 0)
|
||||
self.category_select.hide();
|
||||
else {
|
||||
self.category_select.show();
|
||||
@@ -652,7 +641,8 @@ MA.Refresh = new Class({
|
||||
create: function(){
|
||||
var self = this;
|
||||
|
||||
self.el = new Element('a.refresh', {
|
||||
self.button = new Element('a.refresh', {
|
||||
'text': 'Refresh',
|
||||
'title': 'Refresh the movie info and do a forced search',
|
||||
'events': {
|
||||
'click': self.doRefresh.bind(self)
|
||||
@@ -663,7 +653,7 @@ MA.Refresh = new Class({
|
||||
|
||||
doRefresh: function(e){
|
||||
var self = this;
|
||||
(e).preventDefault();
|
||||
(e).stop();
|
||||
|
||||
Api.request('media.refresh', {
|
||||
'data': {
|
||||
@@ -679,17 +669,18 @@ MA.Readd = new Class({
|
||||
Extends: MovieAction,
|
||||
|
||||
create: function(){
|
||||
var self = this;
|
||||
var self = this,
|
||||
movie_done = self.movie.data.status == 'done',
|
||||
snatched;
|
||||
|
||||
var movie_done = self.movie.data.status == 'done';
|
||||
if(self.movie.data.releases && !movie_done)
|
||||
var snatched = self.movie.data.releases.filter(function(release){
|
||||
return release.status && (release.status == 'snatched' || release.status == 'downloaded' || release.status == 'done');
|
||||
snatched = self.movie.data.releases.filter(function(release){
|
||||
return release.status && (release.status == 'snatched' || release.status == 'seeding' || release.status == 'downloaded' || release.status == 'done');
|
||||
}).length;
|
||||
|
||||
if(movie_done || snatched && snatched > 0)
|
||||
self.el = new Element('a.readd', {
|
||||
'title': 'Readd the movie and mark all previous snatched/downloaded as ignored',
|
||||
'title': 'Re-add the movie and mark all previous snatched/downloaded as ignored',
|
||||
'events': {
|
||||
'click': self.doReadd.bind(self)
|
||||
}
|
||||
@@ -703,7 +694,7 @@ MA.Readd = new Class({
|
||||
|
||||
Api.request('movie.add', {
|
||||
'data': {
|
||||
'identifier': self.movie.get('identifier'),
|
||||
'identifier': self.movie.getIdentifier(),
|
||||
'ignore_previous': 1
|
||||
}
|
||||
});
|
||||
@@ -785,7 +776,7 @@ MA.Delete = new Class({
|
||||
movie.set('tween', {
|
||||
'duration': 300,
|
||||
'onComplete': function(){
|
||||
self.movie.destroy()
|
||||
self.movie.destroy();
|
||||
}
|
||||
});
|
||||
movie.tween('height', 0);
|
||||
@@ -840,7 +831,7 @@ MA.Files = new Class({
|
||||
new Element('div.file.item').adopt(
|
||||
new Element('span.name', {'text': file}),
|
||||
new Element('span.type', {'text': type})
|
||||
).inject(rel)
|
||||
).inject(rel);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -2,22 +2,51 @@ var Movie = new Class({
|
||||
|
||||
Extends: BlockBase,
|
||||
|
||||
action: {},
|
||||
actions: [],
|
||||
details: null,
|
||||
|
||||
initialize: function(list, options, data){
|
||||
var self = this;
|
||||
|
||||
self.data = data;
|
||||
self.view = options.view || 'details';
|
||||
self.list = list;
|
||||
|
||||
self.el = new Element('div.movie');
|
||||
self.el = new Element('a.movie', {
|
||||
'events': {
|
||||
'click': function(e){
|
||||
(e).stop();
|
||||
self.openDetails();
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
self.profile = Quality.getProfile(data.profile_id) || {};
|
||||
self.category = CategoryList.getCategory(data.category_id) || {};
|
||||
self.parent(self, options);
|
||||
|
||||
self.addEvents();
|
||||
|
||||
if(data.identifiers.imdb == 'tt1228705')
|
||||
self.openDetails();
|
||||
},
|
||||
|
||||
openDetails: function(){
|
||||
var self = this;
|
||||
|
||||
if(!self.details){
|
||||
self.details = new MovieDetails(self, {
|
||||
'level': 3
|
||||
});
|
||||
|
||||
// Add action items
|
||||
self.actions.each(function(action, nr){
|
||||
var details = action.getDetails();
|
||||
if(details)
|
||||
self.details.addSection(action.getLabel(), details);
|
||||
});
|
||||
}
|
||||
|
||||
App.getPageContainer().grab(self.details);
|
||||
},
|
||||
|
||||
addEvents: function(){
|
||||
@@ -30,7 +59,6 @@ var Movie = new Class({
|
||||
if(self.data._id != notification.data._id) return;
|
||||
|
||||
self.busy(false);
|
||||
self.removeView();
|
||||
self.update.delay(2000, self, notification);
|
||||
};
|
||||
App.on('movie.update', self.global_events['movie.update']);
|
||||
@@ -47,20 +75,28 @@ var Movie = new Class({
|
||||
// Remove spinner
|
||||
self.global_events['movie.searcher.ended'] = function(notification){
|
||||
if(notification.data && self.data._id == notification.data._id)
|
||||
self.busy(false)
|
||||
self.busy(false);
|
||||
};
|
||||
App.on('movie.searcher.ended', self.global_events['movie.searcher.ended']);
|
||||
|
||||
// Reload when releases have updated
|
||||
self.global_events['release.update_status'] = function(notification){
|
||||
var data = notification.data;
|
||||
if(data && self.data._id == data.movie_id){
|
||||
if(data && self.data._id == data.media_id){
|
||||
|
||||
if(!self.data.releases)
|
||||
self.data.releases = [];
|
||||
|
||||
self.data.releases.push({'quality': data.quality, 'status': data.status});
|
||||
self.updateReleases();
|
||||
var updated = false;
|
||||
self.data.releases.each(function(release){
|
||||
if(release._id == data._id){
|
||||
release.status = data.status;
|
||||
updated = true;
|
||||
}
|
||||
});
|
||||
|
||||
if(updated)
|
||||
self.updateReleases();
|
||||
}
|
||||
};
|
||||
|
||||
@@ -94,12 +130,12 @@ var Movie = new Class({
|
||||
if(self.mask)
|
||||
self.mask.destroy();
|
||||
if(self.spinner)
|
||||
self.spinner.el.destroy();
|
||||
self.spinner.destroy();
|
||||
self.spinner = null;
|
||||
self.mask = null;
|
||||
}, timeout || 400);
|
||||
}
|
||||
}, timeout || 1000)
|
||||
}, timeout || 1000);
|
||||
}
|
||||
else if(!self.spinner) {
|
||||
self.createMask();
|
||||
@@ -122,7 +158,6 @@ var Movie = new Class({
|
||||
|
||||
self.data = notification.data;
|
||||
self.el.empty();
|
||||
self.removeView();
|
||||
|
||||
self.profile = Quality.getProfile(self.data.profile_id) || {};
|
||||
self.category = CategoryList.getCategory(self.data.category_id) || {};
|
||||
@@ -136,15 +171,30 @@ var Movie = new Class({
|
||||
|
||||
self.el.addClass('status_'+self.get('status'));
|
||||
|
||||
var eta = null,
|
||||
eta_date = null,
|
||||
now = Math.round(+new Date()/1000);
|
||||
|
||||
if(self.data.info.release_date)
|
||||
[self.data.info.release_date.dvd, self.data.info.release_date.theater].each(function(timestamp){
|
||||
if (timestamp > 0 && (eta === null || Math.abs(timestamp - now) < Math.abs(eta - now)))
|
||||
eta = timestamp;
|
||||
});
|
||||
|
||||
if(eta){
|
||||
eta_date = new Date(eta * 1000);
|
||||
eta_date = eta_date.toLocaleString('en-us', { month: "long" }) + ' ' + eta_date.getFullYear();
|
||||
}
|
||||
|
||||
self.el.adopt(
|
||||
self.select_checkbox = new Element('input[type=checkbox].inlay', {
|
||||
'events': {
|
||||
'change': function(){
|
||||
self.fireEvent('select')
|
||||
self.fireEvent('select');
|
||||
}
|
||||
}
|
||||
}),
|
||||
self.thumbnail = (self.data.files && self.data.files.image_poster) ? new Element('img', {
|
||||
self.thumbnail = (self.data.files && self.data.files.image_poster && self.data.files.image_poster.length > 0) ? new Element('img', {
|
||||
'class': 'type_image poster',
|
||||
'src': Api.createUrl('file.cache') + self.data.files.image_poster[0].split(Api.getOption('path_sep')).pop()
|
||||
}): null,
|
||||
@@ -158,27 +208,33 @@ var Movie = new Class({
|
||||
'text': self.data.info.year || 'n/a'
|
||||
})
|
||||
),
|
||||
self.description = new Element('div.description', {
|
||||
'text': self.data.info.plot
|
||||
}),
|
||||
self.eta = eta_date && (now+8035200 > eta) ? new Element('div.eta', {
|
||||
'text': eta_date,
|
||||
'title': 'ETA'
|
||||
}) : null,
|
||||
self.quality = new Element('div.quality', {
|
||||
'events': {
|
||||
'click': function(e){
|
||||
var releases = self.el.getElement('.actions .releases');
|
||||
if(releases.isVisible())
|
||||
releases.fireEvent('click', [e])
|
||||
releases.fireEvent('click', [e]);
|
||||
}
|
||||
}
|
||||
})
|
||||
),
|
||||
self.actions = new Element('div.actions')
|
||||
self.actions_el = new Element('div.actions', {
|
||||
'events': {
|
||||
'click': function(e){
|
||||
(e).stopPropagation();
|
||||
}
|
||||
}
|
||||
})
|
||||
)
|
||||
);
|
||||
|
||||
if(!self.thumbnail)
|
||||
self.el.addClass('no_thumbnail');
|
||||
|
||||
//self.changeView(self.view);
|
||||
self.select_checkbox_class = new Form.Check(self.select_checkbox);
|
||||
|
||||
// Add profile
|
||||
@@ -186,9 +242,9 @@ var Movie = new Class({
|
||||
self.profile.getTypes().each(function(type){
|
||||
|
||||
var q = self.addQuality(type.get('quality'), type.get('3d'));
|
||||
if((type.finish == true || type.get('finish')) && !q.hasClass('finish')){
|
||||
if((type.finish === true || type.get('finish')) && !q.hasClass('finish')){
|
||||
q.addClass('finish');
|
||||
q.set('title', q.get('title') + ' Will finish searching for this movie if this quality is found.')
|
||||
q.set('title', q.get('title') + ' Will finish searching for this movie if this quality is found.');
|
||||
}
|
||||
|
||||
});
|
||||
@@ -196,17 +252,20 @@ var Movie = new Class({
|
||||
// Add releases
|
||||
self.updateReleases();
|
||||
|
||||
Object.each(self.options.actions, function(action, key){
|
||||
self.action[key.toLowerCase()] = action = new self.options.actions[key](self);
|
||||
if(action.el)
|
||||
self.actions.adopt(action)
|
||||
self.options.actions.each(function(action){
|
||||
var action = new action(self),
|
||||
button = action.getButton();
|
||||
if(button)
|
||||
self.actions_el.grab(button);
|
||||
|
||||
self.actions.push(action);
|
||||
});
|
||||
|
||||
},
|
||||
|
||||
updateReleases: function(){
|
||||
var self = this;
|
||||
if(!self.data.releases || self.data.releases.length == 0) return;
|
||||
if(!self.data.releases || self.data.releases.length === 0) return;
|
||||
|
||||
self.data.releases.each(function(release){
|
||||
|
||||
@@ -218,7 +277,7 @@ var Movie = new Class({
|
||||
|
||||
if (q && !q.hasClass(status)){
|
||||
q.addClass(status);
|
||||
q.set('title', (q.get('title') ? q.get('title') : '') + ' status: '+ status)
|
||||
q.set('title', (q.get('title') ? q.get('title') : '') + ' status: '+ status);
|
||||
}
|
||||
|
||||
});
|
||||
@@ -244,65 +303,37 @@ var Movie = new Class({
|
||||
else if(self.data.info.titles.length > 0)
|
||||
return self.getUnprefixedTitle(self.data.info.titles[0]);
|
||||
|
||||
return 'Unknown movie'
|
||||
return 'Unknown movie';
|
||||
},
|
||||
|
||||
getUnprefixedTitle: function(t){
|
||||
if(t.substr(0, 4).toLowerCase() == 'the ')
|
||||
t = t.substr(4) + ', The';
|
||||
else if(t.substr(0, 3).toLowerCase() == 'an ')
|
||||
t = t.substr(3) + ', An';
|
||||
else if(t.substr(0, 2).toLowerCase() == 'a ')
|
||||
t = t.substr(2) + ', A';
|
||||
return t;
|
||||
},
|
||||
|
||||
slide: function(direction, el){
|
||||
getIdentifier: function(){
|
||||
var self = this;
|
||||
|
||||
if(direction == 'in'){
|
||||
self.temp_view = self.view;
|
||||
self.changeView('details');
|
||||
|
||||
self.el.addEvent('outerClick', function(){
|
||||
self.removeView();
|
||||
self.slide('out')
|
||||
});
|
||||
el.show();
|
||||
self.data_container.addClass('hide_right');
|
||||
try {
|
||||
return self.get('identifiers').imdb;
|
||||
}
|
||||
else {
|
||||
self.el.removeEvents('outerClick');
|
||||
catch (e){ }
|
||||
|
||||
setTimeout(function(){
|
||||
if(self.el)
|
||||
self.el.getElements('> :not(.data):not(.poster):not(.movie_container)').hide();
|
||||
}, 600);
|
||||
|
||||
self.data_container.removeClass('hide_right');
|
||||
}
|
||||
},
|
||||
|
||||
changeView: function(new_view){
|
||||
var self = this;
|
||||
|
||||
if(self.el)
|
||||
self.el
|
||||
.removeClass(self.view+'_view')
|
||||
.addClass(new_view+'_view');
|
||||
|
||||
self.view = new_view;
|
||||
},
|
||||
|
||||
removeView: function(){
|
||||
var self = this;
|
||||
|
||||
self.el.removeClass(self.view+'_view')
|
||||
return self.get('imdb');
|
||||
},
|
||||
|
||||
get: function(attr){
|
||||
return this.data[attr] || this.data.info[attr]
|
||||
return this.data[attr] || this.data.info[attr];
|
||||
},
|
||||
|
||||
select: function(bool){
|
||||
var self = this;
|
||||
self.select_checkbox_class[bool ? 'check' : 'uncheck']()
|
||||
self.select_checkbox_class[bool ? 'check' : 'uncheck']();
|
||||
},
|
||||
|
||||
isSelected: function(){
|
||||
|
||||
367
couchpotato/core/media/movie/_base/static/movie.scss
Normal file
367
couchpotato/core/media/movie/_base/static/movie.scss
Normal file
@@ -0,0 +1,367 @@
|
||||
@import "couchpotato/static/style/mixins";
|
||||
|
||||
.page.movies {
|
||||
z-index: 21; // Sets navigation above
|
||||
bottom: auto;
|
||||
}
|
||||
|
||||
.page.movies_wanted, .page.movies_manage {
|
||||
top: $header_height;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
.list_list {
|
||||
font-weight: 300;
|
||||
|
||||
.poster {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.movie {
|
||||
display: block;
|
||||
border-top: 1px solid $theme_off;
|
||||
position: relative;
|
||||
cursor: pointer;
|
||||
|
||||
&:last-child {
|
||||
border-bottom: none;
|
||||
}
|
||||
|
||||
&:hover {
|
||||
background: rgba(0,0,0,.1);
|
||||
}
|
||||
|
||||
.data {
|
||||
padding: $padding/2 $padding;
|
||||
|
||||
.info {
|
||||
|
||||
@include flexbox();
|
||||
flex-flow: row nowrap;
|
||||
|
||||
.title {
|
||||
@include flex(1 auto);
|
||||
|
||||
.year {
|
||||
display: inline-block;
|
||||
margin-left: 10px;
|
||||
opacity: .5;
|
||||
}
|
||||
}
|
||||
|
||||
.quality span {
|
||||
float: left;
|
||||
color: #FFF;
|
||||
font-size: .7em;
|
||||
padding: 2px 4px;
|
||||
background: rgba(0,0,0,.2);
|
||||
border-radius: 1px;
|
||||
margin: 2px 0 0 2px;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
.thumb_list {
|
||||
|
||||
font-size: 12px;
|
||||
padding: 0 $padding;
|
||||
|
||||
.movie {
|
||||
@include span(6);
|
||||
float: left;
|
||||
margin-bottom: $padding;
|
||||
position: relative;
|
||||
|
||||
&:nth-child(4n+4){
|
||||
@include span(last);
|
||||
}
|
||||
|
||||
&:nth-child(4n+5){
|
||||
clear: both;
|
||||
}
|
||||
|
||||
.poster {
|
||||
border-radius: $border_radius;
|
||||
overflow: hidden;
|
||||
width: 100%;
|
||||
float: left;
|
||||
}
|
||||
|
||||
.data {
|
||||
clear: both;
|
||||
|
||||
.info {
|
||||
height: 44px;
|
||||
|
||||
.title {
|
||||
@include flexbox();
|
||||
padding: 3px 0;
|
||||
|
||||
span {
|
||||
@include flex(1 auto);
|
||||
white-space: nowrap;
|
||||
overflow: hidden;
|
||||
text-overflow: ellipsis;
|
||||
}
|
||||
|
||||
.year {
|
||||
display: inline-block;
|
||||
margin-left: 5px;
|
||||
opacity: .5;
|
||||
}
|
||||
}
|
||||
|
||||
.quality {
|
||||
white-space: nowrap;
|
||||
overflow: hidden;
|
||||
|
||||
span {
|
||||
color: #FFF;
|
||||
font-size: .8em;
|
||||
padding: 2px 4px;
|
||||
background: rgba(0,0,0,.2);
|
||||
border-radius: 1px;
|
||||
margin-right: 2px;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
.actions {
|
||||
position: absolute;
|
||||
top: $padding / 2;
|
||||
right: $padding / 2;
|
||||
display: none;
|
||||
|
||||
a {
|
||||
display: block;
|
||||
background: $background_color;
|
||||
padding: $padding / 3;
|
||||
width: auto;
|
||||
margin-bottom: 1px;
|
||||
clear: both;
|
||||
float: right;
|
||||
}
|
||||
}
|
||||
|
||||
&:hover .actions {
|
||||
display: block;
|
||||
}
|
||||
|
||||
.mask {
|
||||
bottom: 44px;
|
||||
border-radius: $border_radius;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
.check {
|
||||
position: absolute;
|
||||
top: 0;
|
||||
left: $padding;
|
||||
display: none;
|
||||
}
|
||||
|
||||
.eta {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.page.movie_details {
|
||||
|
||||
$gab-width: $header_width/3;
|
||||
|
||||
.overlay {
|
||||
position: fixed;
|
||||
top: 0;
|
||||
bottom: 0;
|
||||
right: 0;
|
||||
left: $header_width;
|
||||
background: rgba(0,0,0,.6);
|
||||
border-radius: 3px 0 0 3px;
|
||||
|
||||
.close {
|
||||
display: inline-block;
|
||||
text-align: center;
|
||||
font-size: 60px;
|
||||
line-height: $header_height;
|
||||
color: #FFF;
|
||||
width: $gab-width;
|
||||
cursor: pointer;
|
||||
height: 100%;
|
||||
}
|
||||
}
|
||||
|
||||
.content {
|
||||
position: fixed;
|
||||
top: 0;
|
||||
bottom: 0;
|
||||
right: 0;
|
||||
left: $header_width + $gab-width;
|
||||
background: $background_color;
|
||||
z-index: 200;
|
||||
border-radius: 3px 0 0 3px;
|
||||
|
||||
h1 {
|
||||
margin: 0;
|
||||
padding: 0 $padding;
|
||||
font-size: 24px;
|
||||
line-height: $header_height;
|
||||
color: rgba(0,0,0,.5);
|
||||
font-weight: 300;
|
||||
}
|
||||
|
||||
.section {
|
||||
padding: $padding $padding;
|
||||
border-top: 1px solid rgba(0,0,0,.1);
|
||||
}
|
||||
}
|
||||
|
||||
.releases {
|
||||
|
||||
.buttons {
|
||||
margin-bottom: $padding/2;
|
||||
}
|
||||
|
||||
.item span {
|
||||
overflow: hidden;
|
||||
white-space: nowrap;
|
||||
text-overflow: ellipsis;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.item .name {
|
||||
@include flex(1 auto);
|
||||
text-align: left;
|
||||
}
|
||||
.status { min-width: 70px; max-width: 70px; }
|
||||
.quality { min-width: 60px; max-width: 60px; }
|
||||
.size { min-width: 40px; max-width: 40px; }
|
||||
.age { min-width: 40px; max-width: 40px; }
|
||||
.score { min-width: 45px; max-width: 45px; }
|
||||
.provider { min-width: 110px; max-width: 110px; }
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
.alph_nav {
|
||||
|
||||
.mass_edit_form {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.menus {
|
||||
margin-right: $padding;
|
||||
|
||||
.button {
|
||||
padding: 0 $padding/2;
|
||||
line-height: $header_height;
|
||||
color: rgba(0, 0, 0, 0.5);
|
||||
}
|
||||
|
||||
.counter, .more_menu, .actions {
|
||||
float: left;
|
||||
}
|
||||
|
||||
.counter {
|
||||
line-height: $header_height;
|
||||
}
|
||||
|
||||
.actions {
|
||||
|
||||
a {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.active {
|
||||
display: inline-block;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
.filter {
|
||||
.wrapper {
|
||||
width: 320px;
|
||||
}
|
||||
|
||||
.button {
|
||||
margin-top: -2px;
|
||||
}
|
||||
|
||||
.search {
|
||||
position: relative;
|
||||
|
||||
&:before {
|
||||
position: absolute;
|
||||
height: 100%;
|
||||
line-height: 38px;
|
||||
padding-left: $padding/2;
|
||||
font-size: 16px;
|
||||
opacity: .5;
|
||||
}
|
||||
|
||||
input {
|
||||
width: 100%;
|
||||
padding: $padding/2 $padding/2 $padding/2 $padding*1.5;
|
||||
background: $background_color;
|
||||
border: none;
|
||||
border-bottom: 1px solid $theme_off;
|
||||
}
|
||||
}
|
||||
|
||||
.numbers {
|
||||
padding: $padding/2;
|
||||
|
||||
li {
|
||||
float: left;
|
||||
width: 10%;
|
||||
height: 30px;
|
||||
line-height: 30px;
|
||||
text-align: center;
|
||||
color: rgba(0,0,0,.2);
|
||||
cursor: default;
|
||||
|
||||
&.active {
|
||||
background: $theme_off;
|
||||
}
|
||||
|
||||
&.available {
|
||||
color: rgba(0,0,0,1);
|
||||
cursor: pointer;
|
||||
|
||||
&:hover {
|
||||
background: $theme_off;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
.more_menu {
|
||||
|
||||
&.show .button {
|
||||
color: rgba(0, 0, 0, 1);
|
||||
}
|
||||
|
||||
.wrapper {
|
||||
top: $header_height - 10px;
|
||||
padding-top: 4px;
|
||||
border-radius: $border_radius $border_radius 0 0;
|
||||
|
||||
&:before {
|
||||
top: 0;
|
||||
left: auto;
|
||||
right: 22px;
|
||||
}
|
||||
|
||||
ul {
|
||||
border-radius: $border_radius $border_radius 0 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
49
couchpotato/core/media/movie/_base/static/page.js
Normal file
49
couchpotato/core/media/movie/_base/static/page.js
Normal file
@@ -0,0 +1,49 @@
|
||||
Page.Movies = new Class({
|
||||
|
||||
Extends: PageBase,
|
||||
|
||||
name: 'movies',
|
||||
sub_pages: ['Wanted', 'Manage'],
|
||||
default_page: 'Wanted',
|
||||
current_page: null,
|
||||
|
||||
initialize: function(parent, options){
|
||||
var self = this;
|
||||
self.parent(parent, options);
|
||||
|
||||
self.navigation = new BlockNavigation();
|
||||
$(self.navigation).inject(self.el, 'top');
|
||||
|
||||
},
|
||||
|
||||
defaultAction: function(action, params){
|
||||
var self = this;
|
||||
|
||||
if(self.current_page){
|
||||
self.current_page.hide();
|
||||
|
||||
if(self.current_page.list && self.current_page.list.navigation)
|
||||
self.current_page.list.navigation.dispose();
|
||||
}
|
||||
|
||||
var route = new Route();
|
||||
route.parse(action);
|
||||
|
||||
var page_name = route.getPage() != 'index' ? route.getPage().capitalize() : self.default_page;
|
||||
|
||||
var page = self.sub_pages.filter(function(page){
|
||||
return page.name == page_name;
|
||||
}).pick()['class'];
|
||||
|
||||
page.open(route.getAction() || 'index', params);
|
||||
page.show();
|
||||
|
||||
if(page.list && page.list.navigation)
|
||||
page.list.navigation.inject(self.navigation);
|
||||
|
||||
self.current_page = page;
|
||||
self.navigation.activate(page_name.toLowerCase());
|
||||
|
||||
}
|
||||
|
||||
});
|
||||
@@ -1,4 +1,4 @@
|
||||
Block.Search.MovieItem = new Class({
|
||||
var BlockSearchMovieItem = new Class({
|
||||
|
||||
Implements: [Options, Events],
|
||||
|
||||
@@ -31,9 +31,11 @@ Block.Search.MovieItem = new Class({
|
||||
}
|
||||
}).adopt(
|
||||
self.info_container = new Element('div.info').adopt(
|
||||
new Element('h2').adopt(
|
||||
new Element('h2', {
|
||||
'title': self.getTitle()
|
||||
}).adopt(
|
||||
self.title = new Element('span.title', {
|
||||
'text': info.titles && info.titles.length > 0 ? info.titles[0] : 'Unknown'
|
||||
'text': self.getTitle()
|
||||
}),
|
||||
self.year = info.year ? new Element('span.year', {
|
||||
'text': info.year
|
||||
@@ -48,7 +50,7 @@ Block.Search.MovieItem = new Class({
|
||||
self.alternativeTitle({
|
||||
'title': title
|
||||
});
|
||||
})
|
||||
});
|
||||
},
|
||||
|
||||
alternativeTitle: function(alternative){
|
||||
@@ -68,7 +70,7 @@ Block.Search.MovieItem = new Class({
|
||||
},
|
||||
|
||||
get: function(key){
|
||||
return this.info[key]
|
||||
return this.info[key];
|
||||
},
|
||||
|
||||
showOptions: function(){
|
||||
@@ -77,7 +79,7 @@ Block.Search.MovieItem = new Class({
|
||||
self.createOptions();
|
||||
|
||||
self.data_container.addClass('open');
|
||||
self.el.addEvent('outerClick', self.closeOptions.bind(self))
|
||||
self.el.addEvent('outerClick', self.closeOptions.bind(self));
|
||||
|
||||
},
|
||||
|
||||
@@ -85,7 +87,7 @@ Block.Search.MovieItem = new Class({
|
||||
var self = this;
|
||||
|
||||
self.data_container.removeClass('open');
|
||||
self.el.removeEvents('outerClick')
|
||||
self.el.removeEvents('outerClick');
|
||||
},
|
||||
|
||||
add: function(e){
|
||||
@@ -132,10 +134,11 @@ Block.Search.MovieItem = new Class({
|
||||
|
||||
if(!self.options_el.hasClass('set')){
|
||||
|
||||
var in_library;
|
||||
if(info.in_library){
|
||||
var in_library = [];
|
||||
in_library = [];
|
||||
(info.in_library.releases || []).each(function(release){
|
||||
in_library.include(release.quality)
|
||||
in_library.include(release.quality);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -171,14 +174,14 @@ Block.Search.MovieItem = new Class({
|
||||
Array.each(self.alternative_titles, function(alt){
|
||||
new Element('option', {
|
||||
'text': alt.title
|
||||
}).inject(self.title_select)
|
||||
}).inject(self.title_select);
|
||||
});
|
||||
|
||||
|
||||
// Fill categories
|
||||
var categories = CategoryList.getAll();
|
||||
|
||||
if(categories.length == 0)
|
||||
if(categories.length === 0)
|
||||
self.category_select.hide();
|
||||
else {
|
||||
self.category_select.show();
|
||||
@@ -199,12 +202,12 @@ Block.Search.MovieItem = new Class({
|
||||
new Element('option', {
|
||||
'value': profile.get('_id'),
|
||||
'text': profile.get('label')
|
||||
}).inject(self.profile_select)
|
||||
}).inject(self.profile_select);
|
||||
});
|
||||
|
||||
self.options_el.addClass('set');
|
||||
|
||||
if(categories.length == 0 && self.title_select.getElements('option').length == 1 && profiles.length == 1 &&
|
||||
if(categories.length === 0 && self.title_select.getElements('option').length == 1 && profiles.length == 1 &&
|
||||
!(self.info.in_wanted && self.info.in_wanted.profile_id || in_library))
|
||||
self.add();
|
||||
|
||||
@@ -218,12 +221,12 @@ Block.Search.MovieItem = new Class({
|
||||
self.mask = new Element('div.mask').inject(self.el).fade('hide');
|
||||
|
||||
createSpinner(self.mask);
|
||||
self.mask.fade('in')
|
||||
self.mask.fade('in');
|
||||
|
||||
},
|
||||
|
||||
toElement: function(){
|
||||
return this.el
|
||||
return this.el;
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
Page.Wanted = new Class({
|
||||
var MoviesWanted = new Class({
|
||||
|
||||
Extends: PageBase,
|
||||
|
||||
order: 10,
|
||||
name: 'wanted',
|
||||
title: 'Gimmy gimmy gimmy!',
|
||||
folder_browser: null,
|
||||
@@ -9,7 +10,7 @@ Page.Wanted = new Class({
|
||||
indexAction: function(){
|
||||
var self = this;
|
||||
|
||||
if(!self.wanted){
|
||||
if(!self.list){
|
||||
|
||||
self.manual_search = new Element('a', {
|
||||
'title': 'Force a search for the full wanted list',
|
||||
@@ -19,7 +20,6 @@ Page.Wanted = new Class({
|
||||
}
|
||||
});
|
||||
|
||||
|
||||
self.scan_folder = new Element('a', {
|
||||
'title': 'Scan a folder and rename all movies in it',
|
||||
'text': 'Manual folder scan',
|
||||
@@ -29,7 +29,7 @@ Page.Wanted = new Class({
|
||||
});
|
||||
|
||||
// Wanted movies
|
||||
self.wanted = new MovieList({
|
||||
self.list = new MovieList({
|
||||
'identifier': 'wanted',
|
||||
'status': 'active',
|
||||
'actions': [MA.IMDB, MA.Trailer, MA.Release, MA.Edit, MA.Refresh, MA.Readd, MA.Delete],
|
||||
@@ -37,7 +37,7 @@ Page.Wanted = new Class({
|
||||
'menu': [self.manual_search, self.scan_folder],
|
||||
'on_empty_element': App.createUserscriptButtons().addClass('empty_wanted')
|
||||
});
|
||||
$(self.wanted).inject(self.el);
|
||||
$(self.list).inject(self.el);
|
||||
|
||||
// Check if search is in progress
|
||||
self.startProgressInterval.delay(4000, self);
|
||||
@@ -90,7 +90,7 @@ Page.Wanted = new Class({
|
||||
};
|
||||
|
||||
if(!self.folder_browser){
|
||||
self.folder_browser = new Option['Directory']("Scan", "folder", "", options);
|
||||
self.folder_browser = new Option.Directory("Scan", "folder", "", options);
|
||||
|
||||
self.folder_browser.save = function() {
|
||||
var folder = self.folder_browser.getValue();
|
||||
@@ -22,11 +22,18 @@ config = [{
|
||||
'description': 'Maximum number of items displayed from each chart.',
|
||||
},
|
||||
{
|
||||
'name': 'update_interval',
|
||||
'default': 12,
|
||||
'type': 'int',
|
||||
'name': 'hide_wanted',
|
||||
'default': False,
|
||||
'type': 'bool',
|
||||
'advanced': True,
|
||||
'description': '(hours)',
|
||||
'description': 'Hide the chart movies that are already in your wanted list.',
|
||||
},
|
||||
{
|
||||
'name': 'hide_library',
|
||||
'default': False,
|
||||
'type': 'bool',
|
||||
'advanced': True,
|
||||
'description': 'Hide the chart movies that are already in your library.',
|
||||
},
|
||||
],
|
||||
},
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
import time
|
||||
|
||||
from couchpotato import tryInt
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.api import addApiView
|
||||
from couchpotato.core.event import addEvent,fireEvent
|
||||
@@ -13,13 +12,14 @@ log = CPLog(__name__)
|
||||
class Charts(Plugin):
|
||||
|
||||
update_in_progress = False
|
||||
update_interval = 72 # hours
|
||||
|
||||
def __init__(self):
|
||||
addApiView('charts.view', self.automationView)
|
||||
addEvent('app.load', self.setCrons)
|
||||
|
||||
def setCrons(self):
|
||||
fireEvent('schedule.interval', 'charts.update_cache', self.updateViewCache, hours = self.conf('update_interval', default = 12))
|
||||
fireEvent('schedule.interval', 'charts.update_cache', self.updateViewCache, hours = self.update_interval)
|
||||
|
||||
def automationView(self, force_update = False, **kwargs):
|
||||
|
||||
@@ -36,7 +36,6 @@ class Charts(Plugin):
|
||||
'charts': charts
|
||||
}
|
||||
|
||||
|
||||
def updateViewCache(self):
|
||||
|
||||
if self.update_in_progress:
|
||||
@@ -46,10 +45,14 @@ class Charts(Plugin):
|
||||
if catched_charts:
|
||||
return catched_charts
|
||||
|
||||
charts = []
|
||||
try:
|
||||
self.update_in_progress = True
|
||||
charts = fireEvent('automation.get_chart_list', merge = True)
|
||||
self.setCache('charts_cached', charts, timeout = 7200 * tryInt(self.conf('update_interval', default = 12)))
|
||||
for chart in charts:
|
||||
chart['hide_wanted'] = self.conf('hide_wanted')
|
||||
chart['hide_library'] = self.conf('hide_library')
|
||||
self.setCache('charts_cached', charts, timeout = self.update_interval * 3600)
|
||||
except:
|
||||
log.error('Failed refreshing charts')
|
||||
|
||||
|
||||
@@ -2,6 +2,8 @@ var Charts = new Class({
|
||||
|
||||
Implements: [Options, Events],
|
||||
|
||||
shown_once: false,
|
||||
|
||||
initialize: function(options){
|
||||
var self = this;
|
||||
self.setOptions(options);
|
||||
@@ -22,9 +24,11 @@ var Charts = new Class({
|
||||
'events': {
|
||||
'click': function(e) {
|
||||
e.preventDefault();
|
||||
self.el.getChildren('div.chart').destroy();
|
||||
|
||||
self.el.getElements('.chart').destroy();
|
||||
self.el_refreshing_text.show();
|
||||
self.el_refresh_link.hide();
|
||||
|
||||
self.api_request = Api.request('charts.view', {
|
||||
'data': { 'force_update': 1 },
|
||||
'onComplete': self.fill.bind(self)
|
||||
@@ -38,15 +42,12 @@ var Charts = new Class({
|
||||
)
|
||||
);
|
||||
|
||||
if( Cookie.read('suggestions_charts_menu_selected') === 'charts')
|
||||
self.el.show();
|
||||
if( Cookie.read('suggestions_charts_menu_selected') === 'charts'){
|
||||
self.show();
|
||||
}
|
||||
else
|
||||
self.el.hide();
|
||||
|
||||
self.api_request = Api.request('charts.view', {
|
||||
'onComplete': self.fill.bind(self)
|
||||
});
|
||||
|
||||
self.fireEvent.delay(0, self, 'created');
|
||||
|
||||
},
|
||||
@@ -58,7 +59,7 @@ var Charts = new Class({
|
||||
self.el_refreshing_text.hide();
|
||||
self.el_refresh_link.show();
|
||||
|
||||
if(!json || json.count == 0){
|
||||
if(!json || json.count === 0){
|
||||
self.el_no_charts_enabled.show();
|
||||
self.el_refresh_link.show();
|
||||
self.el_refreshing_text.hide();
|
||||
@@ -72,7 +73,7 @@ var Charts = new Class({
|
||||
|
||||
Object.each(json.charts, function(chart){
|
||||
|
||||
var c = new Element('div.chart').grab(
|
||||
var c = new Element('div.chart.tiny_scroll').grab(
|
||||
new Element('h3').grab( new Element('a', {
|
||||
'text': chart.name,
|
||||
'href': chart.url
|
||||
@@ -83,17 +84,16 @@ var Charts = new Class({
|
||||
|
||||
Object.each(chart.list, function(movie){
|
||||
|
||||
var m = new Block.Search.MovieItem(movie, {
|
||||
var m = new BlockSearchMovieItem(movie, {
|
||||
'onAdded': function(){
|
||||
self.afterAdded(m, movie)
|
||||
self.afterAdded(m, movie);
|
||||
}
|
||||
});
|
||||
|
||||
var in_database_class = movie.in_wanted ? 'chart_in_wanted' : (movie.in_library ? 'chart_in_library' : ''),
|
||||
var in_database_class = (chart.hide_wanted && movie.in_wanted) ? 'hidden' : (movie.in_wanted ? 'chart_in_wanted' : ((chart.hide_library && movie.in_library) ? 'hidden': (movie.in_library ? 'chart_in_library' : ''))),
|
||||
in_database_title = movie.in_wanted ? 'Movie in wanted list' : (movie.in_library ? 'Movie in library' : '');
|
||||
|
||||
m.el
|
||||
.addClass(in_database_class)
|
||||
m.el.addClass(in_database_class)
|
||||
.grab(
|
||||
new Element('div.chart_number', {
|
||||
'text': it++,
|
||||
@@ -135,7 +135,7 @@ var Charts = new Class({
|
||||
'text': plot,
|
||||
'events': {
|
||||
'click': function(){
|
||||
this.toggleClass('full')
|
||||
this.toggleClass('full');
|
||||
}
|
||||
}
|
||||
}) : null
|
||||
@@ -155,6 +155,24 @@ var Charts = new Class({
|
||||
|
||||
},
|
||||
|
||||
show: function(){
|
||||
var self = this;
|
||||
|
||||
self.el.show();
|
||||
|
||||
if(!self.shown_once){
|
||||
self.api_request = Api.request('charts.view', {
|
||||
'onComplete': self.fill.bind(self)
|
||||
});
|
||||
|
||||
self.shown_once = true;
|
||||
}
|
||||
},
|
||||
|
||||
hide: function(){
|
||||
this.el.hide();
|
||||
},
|
||||
|
||||
afterAdded: function(m){
|
||||
|
||||
$(m).getElement('div.chart_number')
|
||||
|
||||
@@ -3,15 +3,21 @@
|
||||
margin-bottom: 30px;
|
||||
}
|
||||
|
||||
.charts > h2 {
|
||||
height: 40px;
|
||||
}
|
||||
.charts > h2 {
|
||||
height: 40px;
|
||||
}
|
||||
|
||||
.charts .chart {
|
||||
display: inline-block;
|
||||
width: 50%;
|
||||
vertical-align: top;
|
||||
}
|
||||
.charts .chart {
|
||||
display: inline-block;
|
||||
width: 50%;
|
||||
vertical-align: top;
|
||||
max-height: 510px;
|
||||
scrollbar-base-color: #4e5969;
|
||||
}
|
||||
|
||||
.charts .chart .media_result.hidden {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.charts .refresh {
|
||||
clear:both;
|
||||
@@ -25,30 +31,30 @@
|
||||
text-align:center;
|
||||
}
|
||||
|
||||
.charts .refresh a {
|
||||
text-align: center;
|
||||
padding: 0;
|
||||
display: none;
|
||||
width: 30px;
|
||||
height: 30px;
|
||||
position: absolute;
|
||||
right: 10px;
|
||||
top: -40px;
|
||||
opacity: .7;
|
||||
}
|
||||
.charts .refresh a {
|
||||
text-align: center;
|
||||
padding: 0;
|
||||
display: none;
|
||||
width: 30px;
|
||||
height: 30px;
|
||||
position: absolute;
|
||||
right: 10px;
|
||||
top: -40px;
|
||||
opacity: .7;
|
||||
}
|
||||
|
||||
.charts .refresh a:hover {
|
||||
opacity: 1;
|
||||
}
|
||||
.charts .refresh a:hover {
|
||||
opacity: 1;
|
||||
}
|
||||
|
||||
.charts p.no_charts_enabled {
|
||||
padding: 0.7em 1em;
|
||||
display: none;
|
||||
}
|
||||
.charts p.no_charts_enabled {
|
||||
padding: 0.7em 1em;
|
||||
display: none;
|
||||
}
|
||||
|
||||
.charts .chart h3 a {
|
||||
color: #fff;
|
||||
}
|
||||
.charts .chart h3 a {
|
||||
color: #fff;
|
||||
}
|
||||
|
||||
|
||||
.charts .chart .media_result {
|
||||
@@ -137,7 +143,6 @@
|
||||
padding: 0 3px 10px 0;
|
||||
}
|
||||
.charts .chart .media_result .data:before {
|
||||
bottom: 0;
|
||||
content: '';
|
||||
display: block;
|
||||
height: 10px;
|
||||
@@ -259,3 +264,11 @@
|
||||
height: 40px;
|
||||
}
|
||||
|
||||
@media all and (max-width: 480px) {
|
||||
.toggle_menu h2 {
|
||||
font-size: 16px;
|
||||
text-align: center;
|
||||
height: 30px;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
from couchpotato.core.event import addEvent
|
||||
from couchpotato.core.helpers.variable import getTitle
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.media._base.library.base import LibraryBase
|
||||
|
||||
@@ -17,7 +18,9 @@ class MovieLibraryPlugin(LibraryBase):
|
||||
if media.get('type') != 'movie':
|
||||
return
|
||||
|
||||
default_title = getTitle(media)
|
||||
titles = media['info'].get('titles', [])
|
||||
titles.insert(0, default_title)
|
||||
|
||||
# Add year identifier to titles
|
||||
if include_year:
|
||||
|
||||
@@ -1,9 +1,13 @@
|
||||
import traceback
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
from couchpotato import fireEvent
|
||||
from couchpotato.core.helpers.rss import RSS
|
||||
from couchpotato.core.helpers.variable import tryInt
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.media.movie.providers.automation.base import Automation
|
||||
|
||||
|
||||
log = CPLog(__name__)
|
||||
|
||||
autoload = 'Bluray'
|
||||
@@ -33,27 +37,49 @@ class Bluray(Automation, RSS):
|
||||
|
||||
try:
|
||||
# Stop if the release year is before the minimal year
|
||||
page_year = soup.body.find_all('center')[3].table.tr.find_all('td', recursive = False)[3].h3.get_text().split(', ')[1]
|
||||
if tryInt(page_year) < self.getMinimal('year'):
|
||||
brk = False
|
||||
h3s = soup.body.find_all('h3')
|
||||
for h3 in h3s:
|
||||
if h3.parent.name != 'a':
|
||||
|
||||
try:
|
||||
page_year = tryInt(h3.get_text()[-4:])
|
||||
if page_year > 0 and page_year < self.getMinimal('year'):
|
||||
brk = True
|
||||
except:
|
||||
log.error('Failed determining page year: %s', traceback.format_exc())
|
||||
brk = True
|
||||
break
|
||||
|
||||
if brk:
|
||||
break
|
||||
|
||||
for table in soup.body.find_all('center')[3].table.tr.find_all('td', recursive = False)[3].find_all('table')[1:20]:
|
||||
name = table.h3.get_text().lower().split('blu-ray')[0].strip()
|
||||
year = table.small.get_text().split('|')[1].strip()
|
||||
for h3 in h3s:
|
||||
try:
|
||||
if h3.parent.name == 'a':
|
||||
name = h3.get_text().lower().split('blu-ray')[0].strip()
|
||||
|
||||
if not name.find('/') == -1: # make sure it is not a double movie release
|
||||
continue
|
||||
if not name.find('/') == -1: # make sure it is not a double movie release
|
||||
continue
|
||||
|
||||
if tryInt(year) < self.getMinimal('year'):
|
||||
continue
|
||||
if not h3.parent.parent.small: # ignore non-movie tables
|
||||
continue
|
||||
|
||||
imdb = self.search(name, year)
|
||||
year = h3.parent.parent.small.get_text().split('|')[1].strip()
|
||||
|
||||
if imdb:
|
||||
if self.isMinimalMovie(imdb):
|
||||
movies.append(imdb['imdb'])
|
||||
if tryInt(year) < self.getMinimal('year'):
|
||||
continue
|
||||
|
||||
imdb = self.search(name, year)
|
||||
|
||||
if imdb:
|
||||
if self.isMinimalMovie(imdb):
|
||||
movies.append(imdb['imdb'])
|
||||
except:
|
||||
log.debug('Error parsing movie html: %s', traceback.format_exc())
|
||||
break
|
||||
except:
|
||||
log.debug('Error loading page: %s', page)
|
||||
log.debug('Error loading page %s: %s', (page, traceback.format_exc()))
|
||||
break
|
||||
|
||||
self.conf('backlog', value = False)
|
||||
@@ -82,6 +108,7 @@ class Bluray(Automation, RSS):
|
||||
def getChartList(self):
|
||||
# Nearly identical to 'getIMDBids', but we don't care about minimalMovie and return all movie data (not just id)
|
||||
movie_list = {'name': 'Blu-ray.com - New Releases', 'url': self.display_url, 'order': self.chart_order, 'list': []}
|
||||
movie_ids = []
|
||||
max_items = int(self.conf('max_items', section='charts', default=5))
|
||||
rss_movies = self.getRSSData(self.rss_url)
|
||||
|
||||
@@ -95,6 +122,15 @@ class Bluray(Automation, RSS):
|
||||
movie = self.search(name, year)
|
||||
|
||||
if movie:
|
||||
|
||||
if movie.get('imdb') in movie_ids:
|
||||
continue
|
||||
|
||||
is_movie = fireEvent('movie.is_movie', identifier = movie.get('imdb'), single = True)
|
||||
if not is_movie:
|
||||
continue
|
||||
|
||||
movie_ids.append(movie.get('imdb'))
|
||||
movie_list['list'].append( movie )
|
||||
if len(movie_list['list']) >= max_items:
|
||||
break
|
||||
@@ -123,7 +159,7 @@ config = [{
|
||||
{
|
||||
'name': 'backlog',
|
||||
'advanced': True,
|
||||
'description': 'Parses the history until the minimum movie year is reached. (Will be disabled once it has completed)',
|
||||
'description': ('Parses the history until the minimum movie year is reached. (Takes a while)', 'Will be disabled once it has completed'),
|
||||
'default': False,
|
||||
'type': 'bool',
|
||||
},
|
||||
|
||||
89
couchpotato/core/media/movie/providers/automation/crowdai.py
Normal file
89
couchpotato/core/media/movie/providers/automation/crowdai.py
Normal file
@@ -0,0 +1,89 @@
|
||||
import re
|
||||
|
||||
from couchpotato.core.helpers.rss import RSS
|
||||
from couchpotato.core.helpers.variable import tryInt, splitString
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.media.movie.providers.automation.base import Automation
|
||||
|
||||
|
||||
log = CPLog(__name__)
|
||||
|
||||
autoload = 'CrowdAI'
|
||||
|
||||
|
||||
class CrowdAI(Automation, RSS):
|
||||
|
||||
interval = 1800
|
||||
|
||||
def getIMDBids(self):
|
||||
|
||||
movies = []
|
||||
|
||||
urls = dict(zip(splitString(self.conf('automation_urls')), [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]))
|
||||
|
||||
for url in urls:
|
||||
|
||||
if not urls[url]:
|
||||
continue
|
||||
|
||||
rss_movies = self.getRSSData(url)
|
||||
|
||||
for movie in rss_movies:
|
||||
|
||||
description = self.getTextElement(movie, 'description')
|
||||
grabs = 0
|
||||
|
||||
for item in movie:
|
||||
if item.attrib.get('name') == 'grabs':
|
||||
grabs = item.attrib.get('value')
|
||||
break
|
||||
|
||||
if int(grabs) > tryInt(self.conf('number_grabs')):
|
||||
title = re.match(r'.*Title: .a href.*/">(.*) \(\d{4}\).*', description).group(1)
|
||||
log.info2('%s grabs for movie: %s, enqueue...', (grabs, title))
|
||||
year = re.match(r'.*Year: (\d{4}).*', description).group(1)
|
||||
imdb = self.search(title, year)
|
||||
|
||||
if imdb and self.isMinimalMovie(imdb):
|
||||
movies.append(imdb['imdb'])
|
||||
|
||||
return movies
|
||||
|
||||
|
||||
config = [{
|
||||
'name': 'crowdai',
|
||||
'groups': [
|
||||
{
|
||||
'tab': 'automation',
|
||||
'list': 'automation_providers',
|
||||
'name': 'crowdai_automation',
|
||||
'label': 'CrowdAI',
|
||||
'description': 'Imports from any newznab powered NZB providers RSS feed depending on the number of grabs per movie. Go to your newznab site and find the RSS section. Then copy the copy paste the link under "Movies > x264 feed" here.',
|
||||
'options': [
|
||||
{
|
||||
'name': 'automation_enabled',
|
||||
'default': False,
|
||||
'type': 'enabler',
|
||||
},
|
||||
{
|
||||
'name': 'automation_urls_use',
|
||||
'label': 'Use',
|
||||
'default': '1',
|
||||
},
|
||||
{
|
||||
'name': 'automation_urls',
|
||||
'label': 'url',
|
||||
'type': 'combined',
|
||||
'combine': ['automation_urls_use', 'automation_urls'],
|
||||
'default': 'http://YOUR_PROVIDER/rss?t=THE_MOVIE_CATEGORY&i=YOUR_USER_ID&r=YOUR_API_KEY&res=2&rls=2&num=100',
|
||||
},
|
||||
{
|
||||
'name': 'number_grabs',
|
||||
'default': '500',
|
||||
'label': 'Grab threshold',
|
||||
'description': 'Number of grabs required',
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
}]
|
||||
@@ -3,6 +3,7 @@ import re
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
from couchpotato import fireEvent
|
||||
from couchpotato.core.helpers.encoding import ss
|
||||
from couchpotato.core.helpers.rss import RSS
|
||||
from couchpotato.core.helpers.variable import getImdb, splitString, tryInt
|
||||
from couchpotato.core.logger import CPLog
|
||||
@@ -28,6 +29,39 @@ class IMDBBase(Automation, RSS):
|
||||
def getInfo(self, imdb_id):
|
||||
return fireEvent('movie.info', identifier = imdb_id, extended = False, merge = True)
|
||||
|
||||
def getFromURL(self, url):
|
||||
log.debug('Getting IMDBs from: %s', url)
|
||||
html = self.getHTMLData(url)
|
||||
|
||||
try:
|
||||
split = splitString(html, split_on = "<div class=\"list compact\">")[1]
|
||||
html = splitString(split, split_on = "<div class=\"pages\">")[0]
|
||||
except:
|
||||
try:
|
||||
split = splitString(html, split_on = "<div id=\"main\">")
|
||||
|
||||
if len(split) < 2:
|
||||
log.error('Failed parsing IMDB page "%s", unexpected html.', url)
|
||||
return []
|
||||
|
||||
html = BeautifulSoup(split[1])
|
||||
for x in ['list compact', 'lister', 'list detail sub-list']:
|
||||
html2 = html.find('div', attrs = {
|
||||
'class': x
|
||||
})
|
||||
|
||||
if html2:
|
||||
html = html2.contents
|
||||
html = ''.join([str(x) for x in html])
|
||||
break
|
||||
except:
|
||||
log.error('Failed parsing IMDB page "%s": %s', (url, traceback.format_exc()))
|
||||
|
||||
html = ss(html)
|
||||
imdbs = getImdb(html, multiple = True) if html else []
|
||||
|
||||
return imdbs
|
||||
|
||||
|
||||
class IMDBWatchlist(IMDBBase):
|
||||
|
||||
@@ -65,16 +99,7 @@ class IMDBWatchlist(IMDBBase):
|
||||
try:
|
||||
|
||||
w_url = '%s&start=%s' % (watchlist_url, start)
|
||||
log.debug('Started IMDB watchlists: %s', w_url)
|
||||
html = self.getHTMLData(w_url)
|
||||
|
||||
try:
|
||||
split = splitString(html, split_on="<div class=\"list compact\">")[1]
|
||||
html = splitString(split, split_on="<div class=\"pages\">")[0]
|
||||
except:
|
||||
pass
|
||||
|
||||
imdbs = getImdb(html, multiple = True) if html else []
|
||||
imdbs = self.getFromURL(w_url)
|
||||
|
||||
for imdb in imdbs:
|
||||
if imdb not in movies:
|
||||
@@ -85,13 +110,14 @@ class IMDBWatchlist(IMDBBase):
|
||||
|
||||
log.debug('Found %s movies on %s', (len(imdbs), w_url))
|
||||
|
||||
if len(imdbs) < 250:
|
||||
if len(imdbs) < 225:
|
||||
break
|
||||
|
||||
start += 250
|
||||
start = len(movies)
|
||||
|
||||
except:
|
||||
log.error('Failed loading IMDB watchlist: %s %s', (watchlist_url, traceback.format_exc()))
|
||||
break
|
||||
|
||||
return movies
|
||||
|
||||
@@ -100,95 +126,88 @@ class IMDBAutomation(IMDBBase):
|
||||
|
||||
enabled_option = 'automation_providers_enabled'
|
||||
|
||||
chart_urls = {
|
||||
'theater': 'http://www.imdb.com/movies-in-theaters/',
|
||||
'top250': 'http://www.imdb.com/chart/top',
|
||||
'boxoffice': 'http://www.imdb.com/chart/',
|
||||
charts = {
|
||||
'theater': {
|
||||
'order': 1,
|
||||
'name': 'IMDB - Movies in Theaters',
|
||||
'url': 'http://www.imdb.com/movies-in-theaters/',
|
||||
},
|
||||
'boxoffice': {
|
||||
'order': 2,
|
||||
'name': 'IMDB - Box Office',
|
||||
'url': 'http://www.imdb.com/boxoffice/',
|
||||
},
|
||||
'rentals': {
|
||||
'order': 3,
|
||||
'name': 'IMDB - Top DVD rentals',
|
||||
'url': 'http://www.imdb.com/boxoffice/rentals',
|
||||
'type': 'json',
|
||||
},
|
||||
'top250': {
|
||||
'order': 4,
|
||||
'name': 'IMDB - Top 250 Movies',
|
||||
'url': 'http://www.imdb.com/chart/top',
|
||||
},
|
||||
}
|
||||
chart_names = {
|
||||
'theater': 'IMDB - Movies in Theaters',
|
||||
'top250': 'IMDB - Top 250 Movies',
|
||||
'boxoffice': 'IMDB - Box Office',
|
||||
}
|
||||
chart_order = {
|
||||
'theater': 2,
|
||||
'top250': 4,
|
||||
'boxoffice': 3,
|
||||
}
|
||||
|
||||
first_table = ['boxoffice']
|
||||
|
||||
def getIMDBids(self):
|
||||
|
||||
movies = []
|
||||
|
||||
for url in self.chart_urls:
|
||||
if self.conf('automation_charts_%s' % url):
|
||||
data = self.getHTMLData(self.chart_urls[url])
|
||||
if data:
|
||||
html = BeautifulSoup(data)
|
||||
for name in self.charts:
|
||||
chart = self.charts[name]
|
||||
url = chart.get('url')
|
||||
|
||||
try:
|
||||
result_div = html.find('div', attrs = {'id': 'main'})
|
||||
if self.conf('automation_charts_%s' % name):
|
||||
imdb_ids = self.getFromURL(url)
|
||||
|
||||
try:
|
||||
if url in self.first_table:
|
||||
table = result_div.find('table')
|
||||
result_div = table if table else result_div
|
||||
except:
|
||||
pass
|
||||
try:
|
||||
for imdb_id in imdb_ids:
|
||||
info = self.getInfo(imdb_id)
|
||||
if info and self.isMinimalMovie(info):
|
||||
movies.append(imdb_id)
|
||||
|
||||
imdb_ids = getImdb(str(result_div), multiple = True)
|
||||
if self.shuttingDown():
|
||||
break
|
||||
|
||||
for imdb_id in imdb_ids:
|
||||
info = self.getInfo(imdb_id)
|
||||
if info and self.isMinimalMovie(info):
|
||||
movies.append(imdb_id)
|
||||
|
||||
if self.shuttingDown():
|
||||
break
|
||||
|
||||
except:
|
||||
log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc()))
|
||||
except:
|
||||
log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc()))
|
||||
|
||||
return movies
|
||||
|
||||
|
||||
def getChartList(self):
|
||||
|
||||
# Nearly identical to 'getIMDBids', but we don't care about minimalMovie and return all movie data (not just id)
|
||||
movie_lists = []
|
||||
max_items = int(self.conf('max_items', section='charts', default=5))
|
||||
max_items = int(self.conf('max_items', section = 'charts', default=5))
|
||||
|
||||
for url in self.chart_urls:
|
||||
if self.conf('chart_display_%s' % url):
|
||||
movie_list = {'name': self.chart_names[url], 'url': self.chart_urls[url], 'order': self.chart_order[url], 'list': []}
|
||||
data = self.getHTMLData(self.chart_urls[url])
|
||||
if data:
|
||||
html = BeautifulSoup(data)
|
||||
for name in self.charts:
|
||||
chart = self.charts[name].copy()
|
||||
url = chart.get('url')
|
||||
|
||||
try:
|
||||
result_div = html.find('div', attrs = {'id': 'main'})
|
||||
if self.conf('chart_display_%s' % name):
|
||||
|
||||
try:
|
||||
if url in self.first_table:
|
||||
table = result_div.find('table')
|
||||
result_div = table if table else result_div
|
||||
except:
|
||||
pass
|
||||
chart['list'] = []
|
||||
|
||||
imdb_ids = getImdb(str(result_div), multiple = True)
|
||||
imdb_ids = self.getFromURL(url)
|
||||
|
||||
for imdb_id in imdb_ids[0:max_items]:
|
||||
info = self.getInfo(imdb_id)
|
||||
movie_list['list'].append(info)
|
||||
try:
|
||||
for imdb_id in imdb_ids[0:max_items]:
|
||||
|
||||
if self.shuttingDown():
|
||||
break
|
||||
except:
|
||||
log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc()))
|
||||
is_movie = fireEvent('movie.is_movie', identifier = imdb_id, single = True)
|
||||
if not is_movie:
|
||||
continue
|
||||
|
||||
if movie_list['list']:
|
||||
movie_lists.append(movie_list)
|
||||
info = self.getInfo(imdb_id)
|
||||
chart['list'].append(info)
|
||||
|
||||
if self.shuttingDown():
|
||||
break
|
||||
except:
|
||||
log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc()))
|
||||
|
||||
if chart['list']:
|
||||
movie_lists.append(chart)
|
||||
|
||||
|
||||
return movie_lists
|
||||
@@ -240,12 +259,19 @@ config = [{
|
||||
'description': 'New Movies <a href="http://www.imdb.com/movies-in-theaters/">In-Theaters</a> chart',
|
||||
'default': True,
|
||||
},
|
||||
{
|
||||
'name': 'automation_charts_rentals',
|
||||
'type': 'bool',
|
||||
'label': 'DVD Rentals',
|
||||
'description': 'Top DVD <a href="http://www.imdb.com/boxoffice/rentals">rentals</a> chart',
|
||||
'default': True,
|
||||
},
|
||||
{
|
||||
'name': 'automation_charts_top250',
|
||||
'type': 'bool',
|
||||
'label': 'TOP 250',
|
||||
'description': 'IMDB <a href="http://www.imdb.com/chart/top/">TOP 250</a> chart',
|
||||
'default': True,
|
||||
'default': False,
|
||||
},
|
||||
{
|
||||
'name': 'automation_charts_boxoffice',
|
||||
@@ -282,6 +308,13 @@ config = [{
|
||||
'description': 'IMDB <a href="http://www.imdb.com/chart/top/">TOP 250</a> chart',
|
||||
'default': False,
|
||||
},
|
||||
{
|
||||
'name': 'chart_display_rentals',
|
||||
'type': 'bool',
|
||||
'label': 'DVD Rentals',
|
||||
'description': 'Top DVD <a href="http://www.imdb.com/boxoffice/rentals">rentals</a> chart',
|
||||
'default': True,
|
||||
},
|
||||
{
|
||||
'name': 'chart_display_boxoffice',
|
||||
'type': 'bool',
|
||||
|
||||
@@ -48,11 +48,12 @@ class Letterboxd(Automation):
|
||||
|
||||
soup = BeautifulSoup(self.getHTMLData(self.url % username))
|
||||
|
||||
for movie in soup.find_all('a', attrs = {'class': 'frame'}):
|
||||
match = removeEmpty(self.pattern.split(movie['title']))
|
||||
for movie in soup.find_all('li', attrs = {'class': 'poster-container'}):
|
||||
img = movie.find('img', movie)
|
||||
title = img.get('alt')
|
||||
|
||||
movies.append({
|
||||
'title': match[0],
|
||||
'year': match[1]
|
||||
'title': title
|
||||
})
|
||||
|
||||
return movies
|
||||
|
||||
@@ -21,11 +21,15 @@ class Moviemeter(Automation, RSS):
|
||||
|
||||
for movie in rss_movies:
|
||||
|
||||
name_year = fireEvent('scanner.name_year', self.getTextElement(movie, 'title'), single = True)
|
||||
imdb = self.search(name_year.get('name'), name_year.get('year'))
|
||||
title = self.getTextElement(movie, 'title')
|
||||
name_year = fireEvent('scanner.name_year', title, single = True)
|
||||
if name_year.get('name') and name_year.get('year'):
|
||||
imdb = self.search(name_year.get('name'), name_year.get('year'))
|
||||
|
||||
if imdb and self.isMinimalMovie(imdb):
|
||||
movies.append(imdb['imdb'])
|
||||
if imdb and self.isMinimalMovie(imdb):
|
||||
movies.append(imdb['imdb'])
|
||||
else:
|
||||
log.error('Failed getting name and year from: %s', title)
|
||||
|
||||
return movies
|
||||
|
||||
|
||||
@@ -0,0 +1,47 @@
|
||||
from couchpotato import fireEvent
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.media.movie.providers.automation.base import Automation
|
||||
|
||||
log = CPLog(__name__)
|
||||
|
||||
autoload = 'PopularMovies'
|
||||
|
||||
|
||||
class PopularMovies(Automation):
|
||||
|
||||
interval = 1800
|
||||
url = 'https://s3.amazonaws.com/popular-movies/movies.json'
|
||||
|
||||
def getIMDBids(self):
|
||||
|
||||
movies = []
|
||||
retrieved_movies = self.getJsonData(self.url)
|
||||
|
||||
for movie in retrieved_movies.get('movies'):
|
||||
imdb_id = movie.get('imdb_id')
|
||||
info = fireEvent('movie.info', identifier = imdb_id, extended = False, merge = True)
|
||||
if self.isMinimalMovie(info):
|
||||
movies.append(imdb_id)
|
||||
|
||||
return movies
|
||||
|
||||
|
||||
config = [{
|
||||
'name': 'popularmovies',
|
||||
'groups': [
|
||||
{
|
||||
'tab': 'automation',
|
||||
'list': 'automation_providers',
|
||||
'name': 'popularmovies_automation',
|
||||
'label': 'Popular Movies',
|
||||
'description': 'Imports the <a href="http://movies.stevenlu.com/">top titles of movies that have been in theaters</a>. Script provided by <a href="https://github.com/sjlu/popular-movies">Steven Lu</a>',
|
||||
'options': [
|
||||
{
|
||||
'name': 'automation_enabled',
|
||||
'default': False,
|
||||
'type': 'enabler',
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
}]
|
||||
@@ -39,15 +39,14 @@ class Rottentomatoes(Automation, RSS):
|
||||
|
||||
if result:
|
||||
|
||||
log.info2('Something smells...')
|
||||
rating = tryInt(self.getTextElement(movie, rating_tag))
|
||||
name = result.group(0)
|
||||
|
||||
print rating, tryInt(self.conf('tomatometer_percent'))
|
||||
if rating < tryInt(self.conf('tomatometer_percent')):
|
||||
log.info2('%s seems to be rotten...', name)
|
||||
else:
|
||||
|
||||
log.info2('Found %s fresh enough movies, enqueuing: %s', (rating, name))
|
||||
log.info2('Found %s with fresh rating %s', (name, rating))
|
||||
year = datetime.datetime.now().strftime("%Y")
|
||||
imdb = self.search(name, year)
|
||||
|
||||
|
||||
@@ -26,7 +26,14 @@ class MovieResultModifier(Plugin):
|
||||
'backdrop': [],
|
||||
'poster_original': [],
|
||||
'backdrop_original': [],
|
||||
'actors': {}
|
||||
'actors': {},
|
||||
'landscape': [],
|
||||
'logo': [],
|
||||
'clear_art': [],
|
||||
'disc_art': [],
|
||||
'banner': [],
|
||||
'extra_thumbs': [],
|
||||
'extra_fanart': []
|
||||
},
|
||||
'runtime': 0,
|
||||
'plot': '',
|
||||
|
||||
@@ -2,7 +2,7 @@ import base64
|
||||
import time
|
||||
|
||||
from couchpotato.core.event import addEvent, fireEvent
|
||||
from couchpotato.core.helpers.encoding import tryUrlencode
|
||||
from couchpotato.core.helpers.encoding import tryUrlencode, ss
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.media.movie.providers.base import MovieProvider
|
||||
from couchpotato.environment import Env
|
||||
@@ -29,7 +29,7 @@ class CouchPotatoApi(MovieProvider):
|
||||
api_version = 1
|
||||
|
||||
def __init__(self):
|
||||
addEvent('movie.info', self.getInfo, priority = 1)
|
||||
addEvent('movie.info', self.getInfo, priority = 2)
|
||||
addEvent('movie.info.release_date', self.getReleaseDate)
|
||||
|
||||
addEvent('info.search', self.search, priority = 1)
|
||||
@@ -66,15 +66,18 @@ class CouchPotatoApi(MovieProvider):
|
||||
if not name:
|
||||
return
|
||||
|
||||
name_enc = base64.b64encode(name)
|
||||
name_enc = base64.b64encode(ss(name))
|
||||
return self.getJsonData(self.urls['validate'] % name_enc, headers = self.getRequestHeaders())
|
||||
|
||||
def isMovie(self, identifier = None):
|
||||
def isMovie(self, identifier = None, adding = False):
|
||||
|
||||
if not identifier:
|
||||
return
|
||||
|
||||
data = self.getJsonData(self.urls['is_movie'] % identifier, headers = self.getRequestHeaders())
|
||||
url = self.urls['is_movie'] % identifier
|
||||
url += '?adding=1' if adding else ''
|
||||
|
||||
data = self.getJsonData(url, headers = self.getRequestHeaders())
|
||||
if data:
|
||||
return data.get('is_movie', True)
|
||||
|
||||
|
||||
133
couchpotato/core/media/movie/providers/info/fanarttv.py
Normal file
133
couchpotato/core/media/movie/providers/info/fanarttv.py
Normal file
@@ -0,0 +1,133 @@
|
||||
import traceback
|
||||
|
||||
from couchpotato import tryInt
|
||||
from couchpotato.core.event import addEvent
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.media.movie.providers.base import MovieProvider
|
||||
from requests import HTTPError
|
||||
|
||||
|
||||
log = CPLog(__name__)
|
||||
|
||||
autoload = 'FanartTV'
|
||||
|
||||
|
||||
class FanartTV(MovieProvider):
|
||||
|
||||
urls = {
|
||||
'api': 'http://webservice.fanart.tv/v3/movies/%s?api_key=b28b14e9be662e027cfbc7c3dd600405'
|
||||
}
|
||||
|
||||
MAX_EXTRAFANART = 20
|
||||
http_time_between_calls = 0
|
||||
|
||||
def __init__(self):
|
||||
addEvent('movie.info', self.getArt, priority = 1)
|
||||
|
||||
def getArt(self, identifier = None, extended = True, **kwargs):
|
||||
|
||||
if not identifier or not extended:
|
||||
return {}
|
||||
|
||||
images = {}
|
||||
|
||||
try:
|
||||
url = self.urls['api'] % identifier
|
||||
fanart_data = self.getJsonData(url, show_error = False)
|
||||
|
||||
if fanart_data:
|
||||
log.debug('Found images for %s', fanart_data.get('name'))
|
||||
images = self._parseMovie(fanart_data)
|
||||
except HTTPError as e:
|
||||
log.debug('Failed getting extra art for %s: %s',
|
||||
(identifier, e))
|
||||
except:
|
||||
log.error('Failed getting extra art for %s: %s',
|
||||
(identifier, traceback.format_exc()))
|
||||
return {}
|
||||
|
||||
return {
|
||||
'images': images
|
||||
}
|
||||
|
||||
def _parseMovie(self, movie):
|
||||
images = {
|
||||
'landscape': self._getMultImages(movie.get('moviethumb', []), 1),
|
||||
'logo': [],
|
||||
'disc_art': self._getMultImages(self._trimDiscs(movie.get('moviedisc', [])), 1),
|
||||
'clear_art': self._getMultImages(movie.get('hdmovieart', []), 1),
|
||||
'banner': self._getMultImages(movie.get('moviebanner', []), 1),
|
||||
'extra_fanart': [],
|
||||
}
|
||||
|
||||
if len(images['clear_art']) == 0:
|
||||
images['clear_art'] = self._getMultImages(movie.get('movieart', []), 1)
|
||||
|
||||
images['logo'] = self._getMultImages(movie.get('hdmovielogo', []), 1)
|
||||
if len(images['logo']) == 0:
|
||||
images['logo'] = self._getMultImages(movie.get('movielogo', []), 1)
|
||||
|
||||
fanarts = self._getMultImages(movie.get('moviebackground', []), self.MAX_EXTRAFANART + 1)
|
||||
|
||||
if fanarts:
|
||||
images['backdrop_original'] = [fanarts[0]]
|
||||
images['extra_fanart'] = fanarts[1:]
|
||||
|
||||
return images
|
||||
|
||||
def _trimDiscs(self, disc_images):
|
||||
"""
|
||||
Return a subset of discImages. Only bluray disc images will be returned.
|
||||
"""
|
||||
|
||||
trimmed = []
|
||||
for disc in disc_images:
|
||||
if disc.get('disc_type') == 'bluray':
|
||||
trimmed.append(disc)
|
||||
|
||||
if len(trimmed) == 0:
|
||||
return disc_images
|
||||
|
||||
return trimmed
|
||||
|
||||
def _getImage(self, images):
|
||||
image_url = None
|
||||
highscore = -1
|
||||
for image in images:
|
||||
if tryInt(image.get('likes')) > highscore:
|
||||
highscore = tryInt(image.get('likes'))
|
||||
image_url = image.get('url') or image.get('href')
|
||||
|
||||
return image_url
|
||||
|
||||
def _getMultImages(self, images, n):
|
||||
"""
|
||||
Chooses the best n images and returns them as a list.
|
||||
If n<0, all images will be returned.
|
||||
"""
|
||||
image_urls = []
|
||||
pool = []
|
||||
for image in images:
|
||||
if image.get('lang') == 'en':
|
||||
pool.append(image)
|
||||
orig_pool_size = len(pool)
|
||||
|
||||
while len(pool) > 0 and (n < 0 or orig_pool_size - len(pool) < n):
|
||||
best = None
|
||||
highscore = -1
|
||||
for image in pool:
|
||||
if tryInt(image.get('likes')) > highscore:
|
||||
highscore = tryInt(image.get('likes'))
|
||||
best = image
|
||||
url = best.get('url') or best.get('href')
|
||||
if url:
|
||||
image_urls.append(url)
|
||||
pool.remove(best)
|
||||
|
||||
return image_urls
|
||||
|
||||
def isDisabled(self):
|
||||
if self.conf('api_key') == '':
|
||||
log.error('No API key provided.')
|
||||
return True
|
||||
return False
|
||||
@@ -2,6 +2,7 @@ import json
|
||||
import re
|
||||
import traceback
|
||||
|
||||
from couchpotato import Env
|
||||
from couchpotato.core.event import addEvent, fireEvent
|
||||
from couchpotato.core.helpers.encoding import tryUrlencode
|
||||
from couchpotato.core.helpers.variable import tryInt, tryFloat, splitString
|
||||
@@ -17,8 +18,8 @@ autoload = 'OMDBAPI'
|
||||
class OMDBAPI(MovieProvider):
|
||||
|
||||
urls = {
|
||||
'search': 'http://www.omdbapi.com/?%s',
|
||||
'info': 'http://www.omdbapi.com/?i=%s',
|
||||
'search': 'http://www.omdbapi.com/?type=movie&%s',
|
||||
'info': 'http://www.omdbapi.com/?type=movie&i=%s',
|
||||
}
|
||||
|
||||
http_time_between_calls = 0
|
||||
@@ -38,7 +39,8 @@ class OMDBAPI(MovieProvider):
|
||||
}
|
||||
|
||||
cache_key = 'omdbapi.cache.%s' % q
|
||||
cached = self.getCache(cache_key, self.urls['search'] % tryUrlencode({'t': name_year.get('name'), 'y': name_year.get('year', '')}), timeout = 3)
|
||||
url = self.urls['search'] % tryUrlencode({'t': name_year.get('name'), 'y': name_year.get('year', '')})
|
||||
cached = self.getCache(cache_key, url, timeout = 3, headers = {'User-Agent': Env.getIdentifier()})
|
||||
|
||||
if cached:
|
||||
result = self.parseMovie(cached)
|
||||
@@ -56,7 +58,7 @@ class OMDBAPI(MovieProvider):
|
||||
return {}
|
||||
|
||||
cache_key = 'omdbapi.cache.%s' % identifier
|
||||
cached = self.getCache(cache_key, self.urls['info'] % identifier, timeout = 3)
|
||||
cached = self.getCache(cache_key, self.urls['info'] % identifier, timeout = 3, headers = {'User-Agent': Env.getIdentifier()})
|
||||
|
||||
if cached:
|
||||
result = self.parseMovie(cached)
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
import traceback
|
||||
|
||||
from couchpotato.core.event import addEvent
|
||||
from couchpotato.core.helpers.encoding import simplifyString, toUnicode, ss
|
||||
from couchpotato.core.event import addEvent, fireEvent
|
||||
from couchpotato.core.helpers.encoding import toUnicode, ss, tryUrlencode
|
||||
from couchpotato.core.helpers.variable import tryInt
|
||||
from couchpotato.core.logger import CPLog
|
||||
from couchpotato.core.media.movie.providers.base import MovieProvider
|
||||
import tmdb3
|
||||
|
||||
log = CPLog(__name__)
|
||||
|
||||
@@ -14,52 +13,65 @@ autoload = 'TheMovieDb'
|
||||
|
||||
class TheMovieDb(MovieProvider):
|
||||
|
||||
http_time_between_calls = .35
|
||||
|
||||
configuration = {
|
||||
'images': {
|
||||
'secure_base_url': 'https://image.tmdb.org/t/p/',
|
||||
},
|
||||
}
|
||||
|
||||
def __init__(self):
|
||||
addEvent('movie.info', self.getInfo, priority = 2)
|
||||
addEvent('info.search', self.search, priority = 3)
|
||||
addEvent('movie.search', self.search, priority = 3)
|
||||
addEvent('movie.info', self.getInfo, priority = 3)
|
||||
addEvent('movie.info_by_tmdb', self.getInfo)
|
||||
addEvent('app.load', self.config)
|
||||
|
||||
# Configure TMDB settings
|
||||
tmdb3.set_key(self.conf('api_key'))
|
||||
tmdb3.set_cache('null')
|
||||
def config(self):
|
||||
configuration = self.request('configuration')
|
||||
if configuration:
|
||||
self.configuration = configuration
|
||||
|
||||
def search(self, q, limit = 12):
|
||||
def search(self, q, limit = 3):
|
||||
""" Find movie by name """
|
||||
|
||||
if self.isDisabled():
|
||||
return False
|
||||
|
||||
search_string = simplifyString(q)
|
||||
cache_key = 'tmdb.cache.%s.%s' % (search_string, limit)
|
||||
results = self.getCache(cache_key)
|
||||
log.debug('Searching for movie: %s', q)
|
||||
|
||||
if not results:
|
||||
log.debug('Searching for movie: %s', q)
|
||||
raw = None
|
||||
try:
|
||||
name_year = fireEvent('scanner.name_year', q, single = True)
|
||||
raw = self.request('search/movie', {
|
||||
'query': name_year.get('name', q),
|
||||
'year': name_year.get('year'),
|
||||
'search_type': 'ngram' if limit > 1 else 'phrase'
|
||||
}, return_key = 'results')
|
||||
except:
|
||||
log.error('Failed searching TMDB for "%s": %s', (q, traceback.format_exc()))
|
||||
|
||||
raw = None
|
||||
results = []
|
||||
if raw:
|
||||
try:
|
||||
raw = tmdb3.searchMovie(search_string)
|
||||
except:
|
||||
log.error('Failed searching TMDB for "%s": %s', (search_string, traceback.format_exc()))
|
||||
nr = 0
|
||||
|
||||
results = []
|
||||
if raw:
|
||||
try:
|
||||
nr = 0
|
||||
for movie in raw:
|
||||
parsed_movie = self.parseMovie(movie, extended = False)
|
||||
if parsed_movie:
|
||||
results.append(parsed_movie)
|
||||
|
||||
for movie in raw:
|
||||
results.append(self.parseMovie(movie, extended = False))
|
||||
nr += 1
|
||||
if nr == limit:
|
||||
break
|
||||
|
||||
nr += 1
|
||||
if nr == limit:
|
||||
break
|
||||
log.info('Found: %s', [result['titles'][0] + ' (' + str(result.get('year', 0)) + ')' for result in results])
|
||||
|
||||
log.info('Found: %s', [result['titles'][0] + ' (' + str(result.get('year', 0)) + ')' for result in results])
|
||||
|
||||
self.setCache(cache_key, results)
|
||||
return results
|
||||
except SyntaxError as e:
|
||||
log.error('Failed to parse XML response: %s', e)
|
||||
return False
|
||||
return results
|
||||
except SyntaxError as e:
|
||||
log.error('Failed to parse XML response: %s', e)
|
||||
return False
|
||||
|
||||
return results
|
||||
|
||||
@@ -68,97 +80,91 @@ class TheMovieDb(MovieProvider):
|
||||
if not identifier:
|
||||
return {}
|
||||
|
||||
cache_key = 'tmdb.cache.%s%s' % (identifier, '.ex' if extended else '')
|
||||
result = self.getCache(cache_key)
|
||||
result = self.parseMovie({
|
||||
'id': identifier
|
||||
}, extended = extended)
|
||||
|
||||
if not result:
|
||||
try:
|
||||
log.debug('Getting info: %s', cache_key)
|
||||
# noinspection PyArgumentList
|
||||
movie = tmdb3.Movie(identifier)
|
||||
try: exists = movie.title is not None
|
||||
except: exists = False
|
||||
|
||||
if exists:
|
||||
result = self.parseMovie(movie, extended = extended)
|
||||
self.setCache(cache_key, result)
|
||||
else:
|
||||
result = {}
|
||||
except:
|
||||
log.error('Failed getting info for %s: %s', (identifier, traceback.format_exc()))
|
||||
|
||||
return result
|
||||
return result or {}
|
||||
|
||||
def parseMovie(self, movie, extended = True):
|
||||
|
||||
cache_key = 'tmdb.cache.%s%s' % (movie.id, '.ex' if extended else '')
|
||||
movie_data = self.getCache(cache_key)
|
||||
# Do request, append other items
|
||||
movie = self.request('movie/%s' % movie.get('id'), {
|
||||
'append_to_response': 'alternative_titles' + (',images,casts' if extended else '')
|
||||
})
|
||||
if not movie:
|
||||
return
|
||||
|
||||
if not movie_data:
|
||||
# Images
|
||||
poster = self.getImage(movie, type = 'poster', size = 'w154')
|
||||
poster_original = self.getImage(movie, type = 'poster', size = 'original')
|
||||
backdrop_original = self.getImage(movie, type = 'backdrop', size = 'original')
|
||||
extra_thumbs = self.getMultImages(movie, type = 'backdrops', size = 'original') if extended else []
|
||||
|
||||
# Images
|
||||
poster = self.getImage(movie, type = 'poster', size = 'poster')
|
||||
poster_original = self.getImage(movie, type = 'poster', size = 'original')
|
||||
backdrop_original = self.getImage(movie, type = 'backdrop', size = 'original')
|
||||
images = {
|
||||
'poster': [poster] if poster else [],
|
||||
#'backdrop': [backdrop] if backdrop else [],
|
||||
'poster_original': [poster_original] if poster_original else [],
|
||||
'backdrop_original': [backdrop_original] if backdrop_original else [],
|
||||
'actors': {},
|
||||
'extra_thumbs': extra_thumbs
|
||||
}
|
||||
|
||||
images = {
|
||||
'poster': [poster] if poster else [],
|
||||
#'backdrop': [backdrop] if backdrop else [],
|
||||
'poster_original': [poster_original] if poster_original else [],
|
||||
'backdrop_original': [backdrop_original] if backdrop_original else [],
|
||||
'actors': {}
|
||||
}
|
||||
# Genres
|
||||
try:
|
||||
genres = [genre.get('name') for genre in movie.get('genres', [])]
|
||||
except:
|
||||
genres = []
|
||||
|
||||
# Genres
|
||||
try:
|
||||
genres = [genre.name for genre in movie.genres]
|
||||
except:
|
||||
genres = []
|
||||
# 1900 is the same as None
|
||||
year = str(movie.get('release_date') or '')[:4]
|
||||
if not movie.get('release_date') or year == '1900' or year.lower() == 'none':
|
||||
year = None
|
||||
|
||||
# 1900 is the same as None
|
||||
year = str(movie.releasedate or '')[:4]
|
||||
if not movie.releasedate or year == '1900' or year.lower() == 'none':
|
||||
year = None
|
||||
# Gather actors data
|
||||
actors = {}
|
||||
if extended:
|
||||
|
||||
# Gather actors data
|
||||
actors = {}
|
||||
if extended:
|
||||
for cast_item in movie.cast:
|
||||
try:
|
||||
actors[toUnicode(cast_item.name)] = toUnicode(cast_item.character)
|
||||
images['actors'][toUnicode(cast_item.name)] = self.getImage(cast_item, type = 'profile', size = 'original')
|
||||
except:
|
||||
log.debug('Error getting cast info for %s: %s', (cast_item, traceback.format_exc()))
|
||||
# Full data
|
||||
cast = movie.get('casts', {}).get('cast', [])
|
||||
|
||||
movie_data = {
|
||||
'type': 'movie',
|
||||
'via_tmdb': True,
|
||||
'tmdb_id': movie.id,
|
||||
'titles': [toUnicode(movie.title)],
|
||||
'original_title': movie.originaltitle,
|
||||
'images': images,
|
||||
'imdb': movie.imdb,
|
||||
'runtime': movie.runtime,
|
||||
'released': str(movie.releasedate),
|
||||
'year': tryInt(year, None),
|
||||
'plot': movie.overview,
|
||||
'genres': genres,
|
||||
'collection': getattr(movie.collection, 'name', None),
|
||||
'actor_roles': actors
|
||||
}
|
||||
for cast_item in cast:
|
||||
try:
|
||||
actors[toUnicode(cast_item.get('name'))] = toUnicode(cast_item.get('character'))
|
||||
images['actors'][toUnicode(cast_item.get('name'))] = self.getImage(cast_item, type = 'profile', size = 'original')
|
||||
except:
|
||||
log.debug('Error getting cast info for %s: %s', (cast_item, traceback.format_exc()))
|
||||
|
||||
movie_data = dict((k, v) for k, v in movie_data.items() if v)
|
||||
movie_data = {
|
||||
'type': 'movie',
|
||||
'via_tmdb': True,
|
||||
'tmdb_id': movie.get('id'),
|
||||
'titles': [toUnicode(movie.get('title'))],
|
||||
'original_title': movie.get('original_title'),
|
||||
'images': images,
|
||||
'imdb': movie.get('imdb_id'),
|
||||
'runtime': movie.get('runtime'),
|
||||
'released': str(movie.get('release_date')),
|
||||
'year': tryInt(year, None),
|
||||
'plot': movie.get('overview'),
|
||||
'genres': genres,
|
||||
'collection': getattr(movie.get('belongs_to_collection'), 'name', None),
|
||||
'actor_roles': actors
|
||||
}
|
||||
|
||||
# Add alternative names
|
||||
if extended:
|
||||
movie_data['titles'].append(movie.originaltitle)
|
||||
for alt in movie.alternate_titles:
|
||||
alt_name = alt.title
|
||||
if alt_name and alt_name not in movie_data['titles'] and alt_name.lower() != 'none' and alt_name is not None:
|
||||
movie_data['titles'].append(alt_name)
|
||||
movie_data = dict((k, v) for k, v in movie_data.items() if v)
|
||||
|
||||
# Cache movie parsed
|
||||
self.setCache(cache_key, movie_data)
|
||||
# Add alternative names
|
||||
if movie_data['original_title'] and movie_data['original_title'] not in movie_data['titles']:
|
||||
movie_data['titles'].append(movie_data['original_title'])
|
||||
|
||||
# Add alternative titles
|
||||
alternate_titles = movie.get('alternative_titles', {}).get('titles', [])
|
||||
|
||||
for alt in alternate_titles:
|
||||
alt_name = alt.get('title')
|
||||
if alt_name and alt_name not in movie_data['titles'] and alt_name.lower() != 'none' and alt_name is not None:
|
||||
movie_data['titles'].append(alt_name)
|
||||
|
||||
return movie_data
|
||||
|
||||
@@ -166,12 +172,41 @@ class TheMovieDb(MovieProvider):
|
||||
|
||||
image_url = ''
|
||||
try:
|
||||
image_url = getattr(movie, type).geturl(size = size)
|
||||
path = movie.get('%s_path' % type)
|
||||
image_url = '%s%s%s' % (self.configuration['images']['secure_base_url'], size, path)
|
||||
except:
|
||||
log.debug('Failed getting %s.%s for "%s"', (type, size, ss(str(movie))))
|
||||
|
||||
return image_url
|
||||
|
||||
def getMultImages(self, movie, type = 'backdrops', size = 'original'):
|
||||
|
||||
image_urls = []
|
||||
try:
|
||||
for image in movie.get('images', {}).get(type, [])[1:5]:
|
||||
image_urls.append(self.getImage(image, 'file', size))
|
||||
except:
|
||||
log.debug('Failed getting %s.%s for "%s"', (type, size, ss(str(movie))))
|
||||
|
||||
return image_urls
|
||||
|
||||
def request(self, call = '', params = {}, return_key = None):
|
||||
|
||||
params = dict((k, v) for k, v in params.items() if v)
|
||||
params = tryUrlencode(params)
|
||||
|
||||
try:
|
||||
url = 'http://api.themoviedb.org/3/%s?api_key=%s%s' % (call, self.conf('api_key'), '&%s' % params if params else '')
|
||||
data = self.getJsonData(url, show_error = False)
|
||||
except:
|
||||
log.debug('Movie not found: %s, %s', (call, params))
|
||||
data = None
|
||||
|
||||
if data and return_key and return_key in data:
|
||||
data = data.get(return_key)
|
||||
|
||||
return data
|
||||
|
||||
def isDisabled(self):
|
||||
if self.conf('api_key') == '':
|
||||
log.error('No API key provided.')
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user