User:R. Hillgentleman/scripta

出自維基百科,自由嘅百科全書

user-config.py[編輯]

# Pywikibot (compat) user configuration: default wiki family and language.
family='wikipedia'
mylang='zh-yue'

# Bot account name per (family, language) pair.
# NOTE(review): `usernames` is predeclared by the framework's config
# machinery before this file is executed — it is not defined here.
usernames['wikipedia']['en'] = 'R. Hillgentleman'
usernames['wikipedia']['zh-yue'] = 'R. Hillgentleman'
usernames['wikiversity']['beta'] = 'R. Hillgentleman'
usernames['wikipedia']['test'] = 'R. Hillgentleman'


# Encoding used for terminal input/output.
console_encoding = 'utf-8'

readYueHead.py[編輯]

import wikipedia
enWikisrcSite = wikipedia.getSite('zh-yue', 'wikipedia') # loading a defined project's page
page = wikipedia.Page(enWikisrcSite, '%E9%A0%AD%E7%89%88')
text = page.get() # Taking the text of the page
print text
wikipedia.stopme()

testEditYue.py[編輯]

import wikipedia

# Define the main function
def main():
    """Append a test line to the zh-yue sandbox page.

    Loads [[wikipedia:Sandbox]], appends "Hello, World!" and saves it
    back as a minor bot edit.  A missing page is treated as empty text;
    a redirect or any other framework error aborts without saving.
    """
    site = wikipedia.getSite()
    pagename = 'wikipedia:Sandbox'
    page = wikipedia.Page(site, pagename)
    wikipedia.output(u"Loading %s..." % pagename)  # u"" keeps non-ASCII console output safe
    try:
        # All keyword arguments below are the defaults, spelled out for
        # documentation; plain page.get() is equivalent.
        text = page.get(force = False, get_redirect=False, throttle = True, sysop = False, 
                                             nofollow_redirects=False, change_edit_time = True)
    except wikipedia.NoPage:
        # Page does not exist yet: start from empty text.
        text = ''
    except wikipedia.IsRedirectPage:
        wikipedia.output(u'%s is a redirect!' % pagename)
        # Use return, not exit(): the caller's finally-block already
        # runs wikipedia.stopme(), and a plain return is the normal way
        # to abort a function.
        return
    except wikipedia.Error:
        wikipedia.output(u"Some Error, skipping..")
        return
    newtext = text + '\nHello, World!'
    page.put(newtext, comment='Bot: Test', watchArticle = None, minorEdit = True)  # page.put(newtext, 'Bot: Test') <-- is the same
 
# Script entry point: run main() and always release the framework's
# edit throttle, even if main() raises or calls exit().
if __name__ == '__main__':
    try:
        main()
    finally:
        wikipedia.stopme()

查新文[編輯]

#to get a list of newpages from wikipedia.newpages() 
#and decide whether to add the tags {{notcantonese}}{{cleanup}}{{wikify}}

import wikipedia

# Walk the new-pages feed and interactively decide whether to tag each
# page with {{notcantonese}} / {{cleanup}} / {{wikify}} (or just the
# latter two), saving the tagged text back with a bot edit summary.
site = wikipedia.getSite()

newPageList = site.newpages()
for entry in newPageList:
    # Each entry is a tuple: (Page, timestamp, length, empty unicode
    # string, username or IP, edit comment).
    page, timestamp, length, empty, username, comment = entry
    t = page.title()
    try:
        y = page.get()
    except wikipedia.NoPage:
        continue  # deleted between listing and fetch — skip it
    except wikipedia.IsRedirectPage:
        continue  # redirects need no content tagging — skip them
    intro = y[0:200]
    # NOTE(review): print may raise UnicodeEncodeError on some consoles;
    # original author accepted the occasional crash here.
    print(intro)
    print('User:' + username + '.....Title:' + t)

    x = raw_input('tag ? (space for {{notcantonese}} ')
    if x != '':
        if x == ' ':
            y = u'{{唔係廣東話}}{{清理}}{{維基化}}\n' + y
        else:
            # Reuse the text already fetched above instead of a second
            # round-trip via page.get().
            y = u'{{清理}}{{維基化}}\n' + y
        page.put(y, u'機械清道夫:嘜住')
        wikipedia.output('...tagged\n')
#####################################################################
#             commented-out scratch code, kept for reference:
#
#sand = wikipedia.Page(site, 'wikipedia:sandbox')
#y = sand.get() #getting the current sandbox
#for i in x:
#  y = y+i   #appending the crap
#sand.put( y , 'Robot testing: getting the contributing users of the page [[template:copyvio]] and dump the result on [[wikipedia:sandbox]]')

#The objects yielded are tuples composed of the Page object,
#        timestamp (unicode), length (int), an empty unicode string, username
#        or IP address (str), comment (unicode).

anniversaryInterwiki.py[編輯]

  • 好原始咁(用 .find('match'),case sensitive)喺wikipedia:當年今日拎住嘅頁中揾下有無呢串字:'[[en:Wikipedia:Selected anniversaries/January'
#anniversaryInterwiki.py
import wikipedia
#import pagegenerators

site=wikipedia.getSite()
wikipedia.output('pages linked from Dong Nin Gum Yat:')
page = wikipedia.Page(site,u'Wikipedia:當年今日')
links = page.linkedPages()
for i in links:
 if i.isRedirectPage():
  print('Page:'+i.title()+ '...is redirect!')
 else:
  t = i.title() # t= title
  y = i.get()   # y=text
  print '\n=====================================\nPAGE: '+ t
  print t[-3:]  #e.g. ... 10號
  print t[-2:]  #e.g. ...  3號
  string = '[[en:Wikipedia:Selected anniversaries/January'
  n = y.find(string)
  if n == -1 :
     print('January interwiki links not found!\n') 
  else:
    print('interwiki link found: \n')
    y_tail = (y[n:])
    wikipedia.output(y_tail+'\n')

wikipedia.output('stopme')
wikipedia.stopme()

replaceOnce.py[編輯]

轉字

#replaceOnce.py
import wikipedia
# One-shot term replacement: swap "critically endangered" for its
# Cantonese equivalent on [[中國哺乳動物]] and save the result.
target_site = wikipedia.getSite()
target_page = wikipedia.Page(target_site, u'中國哺乳動物')
current_text = target_page.get()
# Case-insensitive replace that skips no exception regions ([] = none).
updated_text = wikipedia.replaceExcept(current_text, 'critically endangered', u'極度瀕危', [], caseInsensitive=True)
target_page.put(updated_text, u'機械人轉字:極度瀕危')

wikipedia.stopme()

new[編輯]