nodebox-web (1.9.4.6-1) standalone_examples.diff

Summary

 standalone-examples/_web_example1.py |   20 ++++++++++++++++++++
 standalone-examples/_web_example2.py |   24 ++++++++++++++++++++++++
 standalone-examples/_web_example3.py |   24 ++++++++++++++++++++++++
 standalone-examples/_web_example4.py |   22 ++++++++++++++++++++++
 standalone-examples/_web_example5.py |   25 +++++++++++++++++++++++++
 standalone-examples/_web_example6.py |   16 ++++++++++++++++
 standalone-examples/_web_example7.py |   18 ++++++++++++++++++
 standalone-examples/_web_example9.py |   13 +++++++++++++
 8 files changed, 162 insertions(+)

    
download this patch

Patch contents

# Description: create standalone examples
# Author: Serafeim Zanikolas <serzan@hellug.gr>
# Last-Update: 2009-02-20
# Forwarded: not-needed
diff -Nur nodebox-web-1.9.2/standalone-examples/_web_example1.py blah/standalone-examples/_web_example1.py
--- nodebox-web-1.9.2/standalone-examples/_web_example1.py	1970-01-01 01:00:00.000000000 +0100
+++ blah/standalone-examples/_web_example1.py	2008-04-05 23:12:08.000000000 +0100
@@ -0,0 +1,20 @@
+# Working with URLs.
+
+from nodebox_web import web
+
+# Is this a valid URL?
+print web.is_url("http://nodebox.net")
+
+# Does the page exist?
+print web.url.not_found("http://nodebox.net/nothing")
+
+# Split the URL into different components.
+url = web.url.parse("http://nodebox.net/code/index.php/Home")
+print "domain:", url.domain
+print "page:", url.page
+
+# Retrieve data from the web.
+url = "http://nodebox.net/code/data/media/header.jpg"
+print web.url.is_image(url)
+img = web.url.retrieve(url)
+print "download errors:", img.error
diff -Nur nodebox-web-1.9.2/standalone-examples/_web_example2.py blah/standalone-examples/_web_example2.py
--- nodebox-web-1.9.2/standalone-examples/_web_example2.py	1970-01-01 01:00:00.000000000 +0100
+++ blah/standalone-examples/_web_example2.py	2008-04-05 23:13:08.000000000 +0100
@@ -0,0 +1,24 @@
+# Parsing web pages.
+
+from nodebox_web import web
+
+url = "http://nodebox.net"
+print web.url.is_webpage(url)
+
+# Retrieve the data from the web page and put it in an easy object.
+html = web.page.parse(url)
+
+# The actual URL you are redirected to.
+# This will be None when the page is retrieved from cache.
+print html.redirect
+
+# Get the web page title.
+print html.title
+
+# Get all the links, including internal links in the same site.
+print html.links(external=False)
+
+# Browse through the HTML tree, find <div id="content">,
+# strip tags from it and print out the contents.
+content = html.find(id="content")
+web.html.plain(content)
diff -Nur nodebox-web-1.9.2/standalone-examples/_web_example3.py blah/standalone-examples/_web_example3.py
--- nodebox-web-1.9.2/standalone-examples/_web_example3.py	1970-01-01 01:00:00.000000000 +0100
+++ blah/standalone-examples/_web_example3.py	2008-04-05 23:14:54.000000000 +0100
@@ -0,0 +1,24 @@
+# Querying Yahoo!
+
+from nodebox_web import web
+from nodebox_web.web import yahoo
+
+# Get a list of links for a search query.
+links = yahoo.search_images("food")
+print links
+
+# Retrieve a random image.
+img = web.url.retrieve(links)
+
+# We can't always trust the validity of data from the web,
+# the site may be down, the image removed, etc.
+# If you're going to do a lot of batch operations and
+# you don't want the script to halt on an error,
+# put your code inside a try/except statement.
+try:
+    data=img.data
+except:
+    print str(img.error)
+    
+# An easier command is web.download():
+img = web.download(links)
diff -Nur nodebox-web-1.9.2/standalone-examples/_web_example4.py blah/standalone-examples/_web_example4.py
--- nodebox-web-1.9.2/standalone-examples/_web_example4.py	1970-01-01 01:00:00.000000000 +0100
+++ blah/standalone-examples/_web_example4.py	2008-04-05 23:16:00.000000000 +0100
@@ -0,0 +1,22 @@
+# Reading newsfeeds.
+
+from nodebox_web import web
+from nodebox_web.web import newsfeed
+
+url = "http://rss.slashdot.org/Slashdot/slashdot"
+
+# Parse the newsfeed data into a handy object.
+feed = newsfeed.parse(url)
+
+# Get the title and the description of the feed.
+print feed.title, "|", feed.description
+
+for item in feed.items:
+    print "-" * 40
+    print "- Title       :", item.title
+    print "- Link        :", item.link
+    print "- Description :", web.html.plain(item.description)
+    print "- Date        :", item.date
+    print "- Author      :", item.author
+
+print item.description
diff -Nur nodebox-web-1.9.2/standalone-examples/_web_example5.py blah/standalone-examples/_web_example5.py
--- nodebox-web-1.9.2/standalone-examples/_web_example5.py	1970-01-01 01:00:00.000000000 +0100
+++ blah/standalone-examples/_web_example5.py	2008-04-05 23:17:15.000000000 +0100
@@ -0,0 +1,25 @@
+# Wikipedia articles.
+
+from nodebox_web import web
+from nodebox_web.web import wikipedia
+
+q = "Finland"
+article = wikipedia.search(q, language="nl")
+
+# Print the article title.
+print article.title
+
+# Get a list of all the links to other articles.
+# We can supply these to a new search.
+print article.links
+
+# The title of each paragraph
+for p in article.paragraphs: 
+    print p.title
+    #print "-"*40
+    #print p
+
+print article.paragraphs[0]
+
+print
+print article.references[0]
diff -Nur nodebox-web-1.9.2/standalone-examples/_web_example6.py blah/standalone-examples/_web_example6.py
--- nodebox-web-1.9.2/standalone-examples/_web_example6.py	1970-01-01 01:00:00.000000000 +0100
+++ blah/standalone-examples/_web_example6.py	2008-04-05 23:28:50.000000000 +0100
@@ -0,0 +1,16 @@
+# Retrieve images from MorgueFile.
+
+from nodebox_web import web
+from nodebox_web.web import morguefile
+
+q = "cloud"
+img = morguefile.search(q)[0]
+
+print img
+
+# A MorgueFile image in the list has
+# a number of methods and properties.
+# The download() method caches the image locally 
+# and returns the path to the file.
+img = img.download()
+
diff -Nur nodebox-web-1.9.2/standalone-examples/_web_example7.py blah/standalone-examples/_web_example7.py
--- nodebox-web-1.9.2/standalone-examples/_web_example7.py	1970-01-01 01:00:00.000000000 +0100
+++ blah/standalone-examples/_web_example7.py	2008-04-06 13:56:13.000000000 +0100
@@ -0,0 +1,18 @@
+# Color themes from Kuler.
+
+from nodebox_web import web
+from nodebox_web.web import kuler
+
+# Get the current most popular themes.
+themes = kuler.search_by_popularity()
+
+# the code below assumes the availability of methods that are defined in other
+# parts of the nodebox library
+#
+# Display colors from the first theme.
+#for i in range(100):
+#    for r, g, b in themes[0]:
+#        fill(r, g, b, 0.8)
+#        rotate(random(360))
+#        s = random(50) + 10
+#        oval(random(300), random(300), s, s)
diff -Nur nodebox-web-1.9.2/standalone-examples/_web_example9.py blah/standalone-examples/_web_example9.py
--- nodebox-web-1.9.2/standalone-examples/_web_example9.py	1970-01-01 01:00:00.000000000 +0100
+++ blah/standalone-examples/_web_example9.py	2008-04-05 23:38:11.000000000 +0100
@@ -0,0 +1,13 @@
+# Clearing the cache.
+
+from nodebox_web import web
+
+# Queries and images are cached locally for speed,
+# so it's a good idea to empty the cache now and then.
+# Also, when a query fails (internet is down etc.),
+# this "bad" query is also cached.
+# Then you may want to clear the cache of the specific
+# portion of the library you're working with,
+# for example: morguefile.clear_cache()
+
+web.clear_cache()