#!/usr/bin/env perl

package pcurse;

use strict;
use warnings;

use Archive::Extract;
use Data::Dumper;
use Getopt::Long;
use HTML::HTML5::Parser;
use JSON;
use LWP::UserAgent ();
|
|
|
|
# Parse command-line switches into a flat configuration hash.
# Recognised options:
#   --verbose        repeatable; the value counts occurrences
#   --wowpath PATH   the WoW AddOns directory
#   --baseuri URI    base URI for the curseforge site
#   --config FILE    config file location
sub parse_arguments {
    my %opts;
    GetOptions(
        'verbose+'  => \$opts{'verbose'},
        'wowpath=s' => \$opts{'wowpath'},
        'baseuri=s' => \$opts{'baseuri'},
        'config=s'  => \$opts{'config'},
    );
    return %opts;
}
|
|
|
|
# Load the JSON config at $file. When the file does not exist yet, create
# its parent directory (the config itself is written later by save_config);
# otherwise decode it. Either way, missing keys are filled in by
# sane_defaults(). Returns the config hashref.
# NOTE: the per-key print loop is debug output retained from the original.
sub load_config {
    my $file = shift;
    my $toret;
    unless (-e $file) {
        my @parts = split /\//, $file;
        pop @parts;    # discard the filename component
                       # (original redeclared `my $file` here, masking the outer one)
        my $path = join '/', @parts;
        unless (-d $path) {
            print 'Will create path: ' . $path . "\n";
            # List form: no shell involved, path cannot be interpolated.
            system('mkdir', '-p', $path);
        }
    }
    else {
        $toret = pcurse::import_json($file);
    }
    $toret = pcurse::sane_defaults($toret);
    foreach my $k (keys %{$toret}) {
        print "k: $k, val: $toret->{$k}\n";
    }
    return $toret;
}
|
|
|
|
# Make sure the config knows where the AddOns directory lives. If it is
# missing, prompt on standard input until an existing path is entered.
# Returns the (possibly updated) config hashref.
sub check_config {
    my $conf = shift;
    return $conf if $conf->{'wowpath'};
    print 'Where is your addons installed? (complete path, including AddOns on the end): ';
    while (my $line = <>) {
        chomp $line;
        if (-e $line) {
            $conf->{'wowpath'} = $line;
            return $conf;
        }
        print 'You sure? Cannot read that path. Try again: ';
    }
    return $conf;
}
|
|
|
|
# Fill in default values for any config key the user did not supply.
# Existing keys are never overwritten. Returns the hashref.
sub sane_defaults {
    my $in = shift;
    my %defaults = (
        'baseuri' => 'http://www.curseforge.com',
        'config'  => $ENV{'HOME'} . '/.pcurse/config.json',
        'addons'  => $ENV{'HOME'} . '/.pcurse/addons.json',
    );
    for my $key (keys %defaults) {
        $in->{$key} = $defaults{$key} unless exists $in->{$key};
    }
    return $in;
}
|
|
|
|
# Load the addon list from $addons_file. When it does not exist, create
# its parent directory and try to adopt an existing list from lcurse
# (~/.lcurse/addons.json), stripping absolute curseforge URIs down to
# relative paths. Returns an arrayref of addon hashes, or 0 when no list
# could be found.
sub load_addons {
    my $addons_file = shift;
    if (-e $addons_file) {
        # Call without the legacy & sigil (it would pass our @_ through).
        return import_json($addons_file);
    }
    my @parts = split /\//, $addons_file;
    pop @parts;    # discard the filename component
    my $dir = join '/', @parts;
    unless (-d $dir) {    # was -e: a plain file at $dir must not suppress mkdir
        system('mkdir', '-p', $dir);
        print 'Created ' . $dir . "\n";
    }
    my $lcurse_file = $ENV{'HOME'} . '/.lcurse/addons.json';
    if (-e $lcurse_file) {
        print 'There seems to be an addons.json from lcurse around, and we have no list ourself yet. Stealing it:)' . "\n";
        my $json = import_json($lcurse_file);
        $json = $json->{'addons'};
        foreach my $addon (@{$json}) {
            # Escape the dots: the original pattern let '.' match any char.
            $addon->{'uri'} =~ s{^http://www\.curseforge\.com}{};
        }
        return $json;
    }
    return 0;
}
|
|
|
|
# Serialise $json_data as pretty-printed JSON into $file.
# Returns (1, success message) on success, (0, error message) on failure.
sub save_config {
    my $file = shift;
    my $json_data = shift;
    my $json = JSON->new;
    my $text = $json->pretty->encode($json_data);
    open my $fh, '>', $file
        or return (0, 'Could not open ' . $file . ' for writing: ' . $!);
    print {$fh} $text;
    # Buffered write errors only surface at close, so it must be checked
    # (the original ignored close failures and reported success anyway).
    close $fh
        or return (0, 'Could not write ' . $file . ': ' . $!);
    return (1, $file . ' saved successfully');
}
|
|
|
|
# Slurp $file in one read and decode it as JSON. Dies when the file cannot
# be opened; the JSON decoder dies on malformed input.
sub import_json {
    my $file = shift;
    open my $fh, '<', $file or die 'Could not read file ' . $file . ': ' . $!;
    my $raw = do { local $/; <$fh> };    # undef $/ = slurp mode
    close $fh;
    return JSON->new->decode($raw);
}
|
|
|
|
# Parse an HTML string into a DOM document. When the parser finds no
# doctype in the result, return the raw string instead so callers can fall
# back to line-based scanning (see find_in_html).
sub html_parse {
    my $html = shift;
    my $parser = HTML::HTML5::Parser->new();
    my $doc = $parser->parse_string($html, {});
    my $doctype = $parser->dtd_element($doc);
    return defined($doctype) ? $doc : $html;
}
|
|
|
|
# GET $uri with a 10 second timeout, masquerading as a mobile browser.
# Returns the HTTP::Response on success, undef on any failure.
sub http_get {
    my $uri = shift;
    my $agent = LWP::UserAgent->new(timeout => 10);
    $agent->agent('Mozilla/5.0 (Linux; Android 5.1.1; Nexus 5 Build/LMY48B; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/43.0.2357.65 Mobile Safari/537.36');
    my $response = $agent->get($uri);
    return $response->is_success ? $response : undef;
}
|
|
|
|
# Fetch $uri and run the body through html_parse. Returns a DOM document
# (or raw HTML string — see html_parse), or undef when the request failed.
sub html_get {
    my $uri = shift;
    my $response = pcurse::http_get($uri);
    return undef unless $response;
    return pcurse::html_parse($response->decoded_content);
}
|
|
|
|
# Extract the id of the newest downloadable file from an addon page by
# scanning for the link under $uri . '/download/'.
sub get_latest_file_id {
    my ($html, $uri) = @_;
    return pcurse::find_in_html('dlstring', $html, $uri . '/download/');
}
|
|
|
|
# Extract the version name for file $fileid from an addon page by scanning
# for the link under $uri . '/files/' . $fileid.
sub get_product_version {
    my ($html, $uri, $fileid) = @_;
    return pcurse::find_in_html('vstring', $html, $uri . '/files/' . $fileid);
}
|
|
|
|
# Search an addon page for a matching <a> element. Modes:
#   'dlstring' — return the part of the href after $sstring (the file id)
#   'vstring'  — return the link's data-name attribute (the version name)
# $html is either a parsed DOM document or a raw HTML string (whichever
# html_parse produced). Returns the extracted string, or undef.
sub find_in_html {
    my $mode = shift;
    my $html = shift;
    my $sstring = shift;
    # $sstring is a URI path, not a pattern: quote regex metacharacters.
    # (The original interpolated it raw, so '.' and '+' in the path would
    # be treated as regex operators.)
    my $needle = qr/\Q$sstring\E/;
    my $retstr;
    if (ref $html) {
        # Parsed document: walk every anchor and inspect its href.
        my @nodes = $html->getElementsByTagName('a')->get_nodelist;
        foreach my $context (@nodes) {
            my $href = $context->getAttribute('href');
            next unless $href;
            if ($href =~ $needle) {
                if ($mode eq 'dlstring') {
                    $retstr = (split /$needle/, $href, 2)[1];
                }
                elsif ($mode eq 'vstring') {
                    $retstr = $context->getAttribute('data-name');
                }
                return $retstr if $retstr;
            }
        }
    }
    else {
        # Raw HTML string: scan line by line, parsing only matching lines.
        my $parser = HTML::HTML5::Parser->new();
        foreach my $line (split /\n/, $html) {
            next unless $line =~ $needle;
            my $parsed = $parser->parse_balanced_chunk($line);
            foreach my $node ($parsed->nonBlankChildNodes()) {
                if ($mode eq 'dlstring') {
                    my $href = $node->getAttribute('href');
                    $retstr = (split /$needle/, $href, 2)[1];
                }
                elsif ($mode eq 'vstring') {
                    $retstr = $node->getAttribute('data-name');
                }
                return $retstr if $retstr;
            }
        }
    }
    return undef;
}
|
|
|
|
# Download file $fileid for the addon at $uri and extract the archive into
# $targetpath. Returns 1 on success, 0 on any failure.
sub update {
    my $uri = shift;
    my $fileid = shift;
    my $targetpath = shift;
    $uri .= '/download/' . $fileid . '/file';
    my ($filename, $file) = pcurse::download($uri);
    # download() comes back empty when the HTTP request failed; without this
    # guard we would write to the bare path "/tmp/".
    return 0 unless defined $filename && length $filename;
    my $archive = "/tmp/$filename";
    unless (-e $archive) {
        open my $fh, '>', $archive or return 0;
        binmode $fh;    # archives are binary; keep PerlIO layers from mangling bytes
        print {$fh} $file;
        # A failed close means a short/failed write: the archive is unusable.
        close $fh or return 0;
    }
    if (-e $archive) {
        my $ae = Archive::Extract->new(archive => $archive);
        return 1 if $ae->extract(to => $targetpath);
    }
    return 0;
}
|
|
|
|
# Fetch $uri and return (suggested filename, decoded body).
# Returns an empty list when the request fails — the original called
# ->filename on undef and crashed, since http_get returns undef on failure.
sub download {
    my $uri = shift;
    my $response = pcurse::http_get($uri);
    return unless $response;
    return ($response->filename, $response->decoded_content);
}
|
|
|
|
1;
|