#!/usr/bin/env python3
# Start a YaCy crawl of a single URL via the Crawler_p.html servlet.

import requests
from requests.auth import HTTPDigestAuth

siteroot = "http://search.stefans.computer:8090/Crawler_p.html"
username = "[USERNAME]"  # fill in your YaCy admin username
password = "[PASSWORD]"  # fill in your YaCy admin password

# Query-string parameters for the crawl-start form.
qs = {}
qs['countryMustMatchSwitch'] = 0
qs['indexText'] = 'on'
qs['deleteold'] = 'off'
qs['crawlingDepth'] = 0  # just index the one page
qs['directDocByURL'] = 'on'
#qs['ignoreclassname']
qs['range'] = 'wide'
qs['agentName'] = 'Random Browser'
qs['snapshotsMaxDepth'] = -1
qs['cachePolicy'] = 'iffresh'
qs['snapshotsReplaceOld'] = 'on'
qs['crawlingstart'] = 1
qs['obeyHtmlRobotsNoIndex'] = 'on'
#qs['snapshotsMustnotmatch']=
qs['snapshotsLoadImage'] = 'false'
qs['timezoneOffset'] = 240
qs['indexMedia'] = 'on'
qs['crawlerAlwaysCheckMediaType'] = 'true'
qs['crawlingQ'] = 'on'
qs['recrawl'] = 'nodoubles'
qs['crawlingMode'] = 'url'
qs['storeHTCache'] = 'on'

# The URL to crawl is supplied interactively.
bookmark = input("Enter bookmark: ")
qs['crawlingURL'] = bookmark

r = requests.get(
    siteroot,
    auth=HTTPDigestAuth(username, password),
    params=qs,
)
print(r.status_code)
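
# A minimal hedged variant of the request above: credentials are read from
# environment variables (YACY_USER / YACY_PASS are assumed names, not part of
# the original script) with a fallback to the hardcoded values, and non-2xx
# responses raise instead of being silently printed. This is a sketch, not
# the script's canonical behavior.
import os

def start_crawl(url: str) -> int:
    """Submit one URL to the crawl-start endpoint and return the HTTP status."""
    auth = HTTPDigestAuth(
        os.environ.get("YACY_USER", username),  # prefer env var, fall back to constant
        os.environ.get("YACY_PASS", password),
    )
    params = dict(qs, crawlingURL=url)  # copy qs, swapping in the target URL
    resp = requests.get(siteroot, auth=auth, params=params, timeout=30)
    resp.raise_for_status()  # raise on 4xx/5xx (e.g. bad digest credentials)
    return resp.status_code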