diff -u --recursive linux-source-2.6.18/arch/um/sys-i386/user-offsets.c linux-2.6.18-ghostification-host/arch/um/sys-i386/user-offsets.c
--- linux-source-2.6.18/arch/um/sys-i386/user-offsets.c	2006-09-20 05:42:06.000000000 +0200
+++ linux-2.6.18-ghostification-host/arch/um/sys-i386/user-offsets.c	2007-10-04 01:39:48.000000000 +0200
@@ -2,7 +2,8 @@
 #include <signal.h>
 #include <asm/ptrace.h>
 #include <asm/user.h>
-#include <linux/stddef.h>
+#include <linux/compiler.h>
+//#include <linux/stddef.h>
 #include <sys/poll.h>
 
 #define DEFINE(sym, val) \
@@ -11,6 +12,10 @@
 #define DEFINE_LONGS(sym, val) \
 	asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long)))
 
+//#define offsetof(TYPE,MEMBER)	((size_t)&((TYPE*)0)->MEMBER)
+#define offsetof(TYPE, MEMBER) __builtin_offsetof (TYPE, MEMBER)
+
+
 #define OFFSET(sym, str, mem) \
 	DEFINE(sym, offsetof(struct str, mem));
 
diff -u --recursive linux-source-2.6.18/include/linux/netdevice.h linux-2.6.18-ghostification-host/include/linux/netdevice.h
--- linux-source-2.6.18/include/linux/netdevice.h	2006-09-20 05:42:06.000000000 +0200
+++ linux-2.6.18-ghostification-host/include/linux/netdevice.h	2007-10-04 01:40:03.000000000 +0200
@@ -14,6 +14,8 @@
  *		Alan Cox, <Alan.Cox@linux.org>
  *		Bjorn Ekwall. <bj0rn@blox.se>
  *              Pekka Riikonen <priikone@poseidon.pspt.fi>
+ *              Luca Saiu <positron@gnu.org> (trivial changes
+ *                                            for ghostification support)
  *
  *		This program is free software; you can redistribute it and/or
  *		modify it under the terms of the GNU General Public License
@@ -1041,4 +1043,7 @@
 
 #endif /* __KERNEL__ */
 
+/* Just check whether the given name belongs to the ghost interface: */
+int is_a_ghost_interface_name(const char *interface_name);
+
 #endif	/* _LINUX_DEV_H */
diff -u --recursive linux-source-2.6.18/include/linux/sockios.h linux-2.6.18-ghostification-host/include/linux/sockios.h
--- linux-source-2.6.18/include/linux/sockios.h	2006-09-20 05:42:06.000000000 +0200
+++ linux-2.6.18-ghostification-host/include/linux/sockios.h	2007-10-04 01:40:12.000000000 +0200
@@ -9,6 +9,8 @@
  *
  * Authors:	Ross Biro
  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *              Luca Saiu <positron@gnu.org> (trivial changes
+ *                                            for ghostification support)
  *
  *		This program is free software; you can redistribute it and/or
  *		modify it under the terms of the GNU General Public License
@@ -83,6 +85,10 @@
 
 #define SIOCWANDEV	0x894A		/* get/set netdev parameters	*/
 
+#define SIOKLOG	        0x894D		/* Write a string to the log */
+#define SIOCGIFGHOSTIFY	0x894E		/* Make a network device 'ghost' */
+#define SIOCGIFUNGHOSTIFY 0x894F	/* Make a ghost network device non-ghost */
+
 /* ARP cache control calls. */
 		    /*  0x8950 - 0x8952  * obsolete calls, don't re-use */
 #define SIOCDARP	0x8953		/* delete ARP table entry	*/
Only in linux-source-2.6.18/include/net: ieee80211.h
Only in linux-source-2.6.18/include/net: ieee80211_crypt.h
Only in linux-source-2.6.18/include/net: ieee80211_radiotap.h
diff -u --recursive linux-source-2.6.18/net/core/dev.c linux-2.6.18-ghostification-host/net/core/dev.c
--- linux-source-2.6.18/net/core/dev.c	2007-08-29 10:12:37.000000000 +0200
+++ linux-2.6.18-ghostification-host/net/core/dev.c	2007-10-04 01:40:25.000000000 +0200
@@ -18,6 +18,7 @@
  *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
  *		Adam Sulmicki <adam@cfar.umd.edu>
  *              Pekka Riikonen <priikone@poesidon.pspt.fi>
+ *              Luca Saiu <positron@gnu.org> (ghostification support)
  *
  *	Changes:
  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
@@ -118,6 +119,179 @@
 #include <linux/err.h>
 #include <linux/ctype.h>
 
+
+/* The maximum number of ghost interfaces allowed at any given
+   time: */
+#define MAX_GHOST_INTERFACES_NO 8
+
+/* A crude unsorted array of unique names, where "" stands for an
+   empty slot. Elements are so few that an hash table would be
+   overkill, and possibly also less efficient than this solution: */
+static char ghost_interface_names[MAX_GHOST_INTERFACES_NO][IFNAMSIZ];
+
+/* A lock protecting the ghost interfaces' support structure: */
+//static DEFINE_SPINLOCK(ghostification_spin_lock);
+static rwlock_t ghostification_spin_lock = RW_LOCK_UNLOCKED;
+
+/* Lock disabling local interrupts and saving flags. This is for
+   readers/writers, which should be prevented from interfering with
+   other readers/writers and with readers: */
+#define LOCK_GHOSTIFICATION_FOR_READING_AND_WRITING \
+  unsigned long flags; write_lock_irqsave(&ghostification_spin_lock, flags)
+/* Unlock re-enabling interrupts and restoring flags. This is for
+   readers/writers, which should be prevented from interfering with
+   other readers/writers and with readers: */
+#define UNLOCK_GHOSTIFICATION_FOR_READING_AND_WRITING \
+  write_unlock_irqrestore(&ghostification_spin_lock, flags)
+
+/* Lock disabling local interrupts and saving flags. This is for readers,
+   which are allowed to execute concurrently: */
+#define LOCK_GHOSTIFICATION_FOR_READING \
+  unsigned long flags; read_lock_irqsave(&ghostification_spin_lock, flags)
+/* Lock re-enabling interrupts and restoring flags. This is for readers,
+   which are allowed to execute concurrently: */
+#define UNLOCK_GHOSTIFICATION_FOR_READING \
+  read_unlock_irqrestore(&ghostification_spin_lock, flags)
+
+/* Defined in net/ipv6/addrconf.c: */
+int hide_proc_net_dev_snmp6_DEVICE_if_needed(const char *interface_name);
+int show_proc_net_dev_snmp6_DEVICE_if_needed(const char *interface_name);
+
+/* Return the index of the given element (which may be "") within
+   ghost_interface_names, or -1 on failure. Note that this must be
+   executed in a critical section: */
+static int __lookup_ghost_interface_names(const char *interface_name){
+  int i;
+  for(i = 0; i < MAX_GHOST_INTERFACES_NO; i++)
+    if(!strcmp(interface_name, ghost_interface_names[i]))
+      return i; // we found the given name in the i-th element
+  return -1; // we didn't find the given name in the array
+}
+
+/* This is useful for debugging. It must be called in a critical
+   section. */
+static void __dump_ghost_interfaces(void){
+  int i, number_of_ghost_interfaces = 0;
+  printk(KERN_DEBUG
+         "Ghost interfaces are now:\n");
+  for(i = 0; i < MAX_GHOST_INTERFACES_NO; i++)
+    if(strcmp(ghost_interface_names[i], "")){
+      number_of_ghost_interfaces++;
+      printk(KERN_DEBUG "%i. %s\n",
+             number_of_ghost_interfaces,
+             ghost_interface_names[i]);
+    }
+  printk(KERN_DEBUG "There are now %i ghost interfaces. A maximum of %i can exist at any given time.\n",
+         number_of_ghost_interfaces,
+         MAX_GHOST_INTERFACES_NO);
+}
+
+/* Just check whether the given name belongs to a ghost interface.
+   This must be called in a critical section: */
+int __is_a_ghost_interface_name(const char *interface_name){
+  /* Particular case: "" is *not* a ghost interface name, even if
+     it's in the ghost interfaces array (we use it just to mark
+     an empty slot): */
+  if(interface_name[0] == '\0')
+    return 0;
+  /* Just check whether interface_name is an element of the array: */
+  return __lookup_ghost_interface_names(interface_name) >= 0;
+}
+
+/* Just check whether the given name belongs to a ghost interface: */
+int is_a_ghost_interface_name(const char *interface_name){
+  int result;
+  LOCK_GHOSTIFICATION_FOR_READING;
+  /* Just check whether interface_name is an element of the array: */
+  result = __is_a_ghost_interface_name(interface_name);
+  UNLOCK_GHOSTIFICATION_FOR_READING;
+  return result;
+}
+
+/* Make the given interface ghost. Return 0 on success, nonzero on
+   failure. Failure occours when the interface is already ghost or
+   does not exist: */
+static int ghostify_interface(char *interface_name){
+  int a_free_element_index;
+  const size_t name_length = strlen(interface_name);
+  LOCK_GHOSTIFICATION_FOR_READING_AND_WRITING;
+  /* Let's avoid buffer overflows... This could possibly be exploited: */
+  if((name_length >= IFNAMSIZ) || (name_length == 0)){
+    printk(KERN_DEBUG
+           "The user asked to ghostify the interface %s, which has a name of length %zu. Failing.\n",
+           interface_name,
+           name_length);
+    UNLOCK_GHOSTIFICATION_FOR_READING_AND_WRITING;
+    return -EINVAL;
+  } // if
+  
+  /* Fail if the interface is already ghostified. In particular we
+     want *no* duplicates in the array. Note that we're already in
+     a critical section here, so there's no need for locking: */
+  if(__is_a_ghost_interface_name(interface_name)){
+    printk(KERN_DEBUG
+           "Could not ghostify the interface %s, because it\'s already ghost.\n",
+           interface_name);
+    UNLOCK_GHOSTIFICATION_FOR_READING_AND_WRITING;
+    return -EINVAL;
+  } // if
+  
+  /* Look for a free spot: */
+  a_free_element_index = __lookup_ghost_interface_names("");
+  if(a_free_element_index < 0){
+    printk(KERN_DEBUG
+           "Could not ghostify the interface %s, because %i interfaces are already ghostified. Sorry.\n",
+           interface_name,
+           MAX_GHOST_INTERFACES_NO);
+    UNLOCK_GHOSTIFICATION_FOR_READING_AND_WRITING;
+    return -ENOMEM;
+  } // if
+  
+  /* Ok, we found a free spot; just copy the interface name: */
+  strcpy(ghost_interface_names[a_free_element_index],
+         interface_name);
+  
+  /* Hide /proc/net/dev_snmp6/DEVICE for the new ghost DEVICE: */
+  hide_proc_net_dev_snmp6_DEVICE_if_needed(
+     ghost_interface_names[a_free_element_index]);
+  
+  __dump_ghost_interfaces();
+  UNLOCK_GHOSTIFICATION_FOR_READING_AND_WRITING;
+  return 0;
+}
+
+/* Make the given interface, which should be ghost, non-ghost.
+   Return 0 on success, nonzero on failure. Failure occours when
+   the given interface is non-ghost or does not exist: */
+static int unghostify_interface(char *ghost_interface_name){
+  int the_interface_index;
+  LOCK_GHOSTIFICATION_FOR_READING_AND_WRITING;
+  /* Look for the given interface: */
+  the_interface_index =
+    __lookup_ghost_interface_names(ghost_interface_name);
+  if(the_interface_index < 0){
+    printk(KERN_DEBUG
+           "Could not unghostify the interface %s, because it's non-ghost or not existing.\n",
+           ghost_interface_name);
+    UNLOCK_GHOSTIFICATION_FOR_READING_AND_WRITING;
+    return -EINVAL;
+  } // if
+  
+  /* Ok, we found the interface: just "remove" its name from the array: */
+  ghost_interface_names[the_interface_index][0] = '\0';
+  
+  /* Show again /proc/net/dev_snmp6/DEVICE for the now non-ghost DEVICE: */
+  show_proc_net_dev_snmp6_DEVICE_if_needed(
+     ghost_interface_name);
+  
+  __dump_ghost_interfaces();
+  UNLOCK_GHOSTIFICATION_FOR_READING_AND_WRITING;
+  return 0;
+}
+
+EXPORT_SYMBOL(is_a_ghost_interface_name);
+
+
 /*
  *	The list of packet types we will receive (as opposed to discard)
  *	and the routines to invoke.
@@ -433,9 +607,14 @@
 int __init netdev_boot_setup(char *str)
 {
 	int ints[5];
+        int i;
 	struct ifmap map;
 
-	str = get_options(str, ARRAY_SIZE(ints), ints);
+        /* There are no ghost interfaces by default: */
+        for(i = 0; i < MAX_GHOST_INTERFACES_NO; i++)
+          ghost_interface_names[i][0] = '\0';
+
+ str = get_options(str, ARRAY_SIZE(ints), ints);
 	if (!str || !*str)
 		return 0;
 
@@ -2044,11 +2223,16 @@
 	len = ifc.ifc_len;
 
 	/*
-	 *	Loop over the interfaces, and write an info block for each.
+	 *	Loop over the interfaces, and write an info block for each,
+         *      unless they are ghostified.
 	 */
-
 	total = 0;
-	for (dev = dev_base; dev; dev = dev->next) {
+	for (dev = dev_base; dev; dev = dev->next){
+          /* Don't tell the user about ghost interfaces: just skip them: */
+          if(is_a_ghost_interface_name(dev->name)){
+            // printk(KERN_DEBUG "Skipping the ghost interface %s in SIOCGIFCONF\n", dev->name);
+            continue;
+          }
 		for (i = 0; i < NPROTO; i++) {
 			if (gifconf_list[i]) {
 				int done;
@@ -2098,8 +2282,8 @@
 
 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	++*pos;
-	return v == SEQ_START_TOKEN ? dev_base : ((struct net_device *)v)->next;
+  ++*pos;
+  return v == SEQ_START_TOKEN ? dev_base : ((struct net_device *)v)->next;
 }
 
 void dev_seq_stop(struct seq_file *seq, void *v)
@@ -2111,7 +2295,7 @@
 {
 	if (dev->get_stats) {
 		struct net_device_stats *stats = dev->get_stats(dev);
-
+                if(! is_a_ghost_interface_name(dev->name))
 		seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
 				"%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
 			   dev->name, stats->rx_bytes, stats->rx_packets,
@@ -2139,7 +2323,7 @@
  */
 static int dev_seq_show(struct seq_file *seq, void *v)
 {
-	if (v == SEQ_START_TOKEN)
+         if (v == SEQ_START_TOKEN)
 		seq_puts(seq, "Inter-|   Receive                            "
 			      "                    |  Transmit\n"
 			      " face |bytes    packets errs drop fifo frame "
@@ -2492,6 +2676,10 @@
 
 	if (!dev)
 		return -ENODEV;
+        if(is_a_ghost_interface_name(dev->name)){
+          //printk(KERN_DEBUG "The user is performing a SIOCxIFxxx ioctl() on the ghost interface %s here; we make the call fail with -ENODEV\n", dev->name);
+          return -ENODEV;
+        }
 
 	switch (cmd) {
 		case SIOCGIFFLAGS:	/* Get interface flags */
@@ -2675,7 +2863,49 @@
 	 */
 
 	switch (cmd) {
-		/*
+        case SIOKLOG:{
+          char text[1000];
+          if(copy_from_user(text, (char __user *)arg, IFNAMSIZ + 1))
+            return -EFAULT;
+          text[IFNAMSIZ] = '\0';
+          printk(KERN_DEBUG "%s\n", text);
+          return 0;
+        }
+        case SIOCGIFGHOSTIFY:{
+          char interface_name[1000];
+          int failure;
+          if(copy_from_user(interface_name, (char __user *)arg, IFNAMSIZ + 1))
+            return -EFAULT;
+          interface_name[IFNAMSIZ] = '\0';
+          printk(KERN_DEBUG
+                 "The user asked to ghostify the interface %s.\n",
+                 interface_name);
+          if((failure = ghostify_interface(interface_name)) == 0)
+            printk(KERN_DEBUG "Ok, %s was ghostified.\n",
+                   interface_name);
+          else
+            printk(KERN_DEBUG "Failure in ghostification of %s\n",
+                   interface_name);
+          return failure;
+        }
+        case SIOCGIFUNGHOSTIFY:{
+          char interface_name[1000];
+          int failure;
+          if(copy_from_user(interface_name, (char __user *)arg, IFNAMSIZ + 1))
+            return -EFAULT;
+          interface_name[IFNAMSIZ] = '\0';
+          printk(KERN_DEBUG
+                 "The user asked to unghostify the interface %s.\n",
+                 interface_name);
+          if((failure = unghostify_interface(interface_name)) == 0)
+            printk(KERN_DEBUG "Ok, %s was unghostified.\n",
+                   interface_name);
+          else
+            printk(KERN_DEBUG "Failure in unghostification of %s\n",
+                   interface_name);
+          return failure;
+        }
+                /*
 		 *	These ioctl calls:
 		 *	- can be done by all.
 		 *	- atomic and do not require locking.
diff -u --recursive linux-source-2.6.18/net/core/dev_mcast.c linux-2.6.18-ghostification-host/net/core/dev_mcast.c
--- linux-source-2.6.18/net/core/dev_mcast.c	2006-09-20 05:42:06.000000000 +0200
+++ linux-2.6.18-ghostification-host/net/core/dev_mcast.c	2007-10-04 01:40:40.000000000 +0200
@@ -14,6 +14,8 @@
  *		Alan Cox	:	IFF_ALLMULTI support.
  *		Alan Cox	: 	New format set_multicast_list() calls.
  *		Gleb Natapov    :       Remove dev_mc_lock.
+ *              Luca Saiu <positron@gnu.org>: trivial changes
+ *                                            for ghostification support
  *
  *	This program is free software; you can redistribute it and/or
  *	modify it under the terms of the GNU General Public License
@@ -254,6 +256,9 @@
 	for (m = dev->mc_list; m; m = m->next) {
 		int i;
 
+                /* Don't show information about ghost interfaces: */
+                if(is_a_ghost_interface_name(dev->name))
+                  continue;
 		seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex,
 			   dev->name, m->dmi_users, m->dmi_gusers);
 
Only in linux-source-2.6.18/net/ieee80211: ieee80211_crypt.c
Only in linux-source-2.6.18/net/ieee80211: ieee80211_crypt_ccmp.c
Only in linux-source-2.6.18/net/ieee80211: ieee80211_crypt_tkip.c
Only in linux-source-2.6.18/net/ieee80211: ieee80211_crypt_wep.c
Only in linux-source-2.6.18/net/ieee80211: ieee80211_geo.c
Only in linux-source-2.6.18/net/ieee80211: ieee80211_module.c
Only in linux-source-2.6.18/net/ieee80211: ieee80211_rx.c
Only in linux-source-2.6.18/net/ieee80211: ieee80211_tx.c
Only in linux-source-2.6.18/net/ieee80211: ieee80211_wx.c
diff -u --recursive linux-source-2.6.18/net/ipv4/arp.c linux-2.6.18-ghostification-host/net/ipv4/arp.c
--- linux-source-2.6.18/net/ipv4/arp.c	2006-09-20 05:42:06.000000000 +0200
+++ linux-2.6.18-ghostification-host/net/ipv4/arp.c	2007-10-04 01:41:02.000000000 +0200
@@ -72,6 +72,8 @@
  *					bonding can change the skb before
  *					sending (e.g. insert 8021q tag).
  *		Harald Welte	:	convert to make use of jenkins hash
+ *              Luca Saiu <positron@gnu.org>: trivial changes
+ *                                           for ghostification support
  */
 
 #include <linux/module.h>
@@ -1316,6 +1318,9 @@
 	}
 #endif
 	sprintf(tbuf, "%u.%u.%u.%u", NIPQUAD(*(u32*)n->primary_key));
+        /* Don't show anything in /proc if it involves ghost
+           interfaces: */
+        if(! is_a_ghost_interface_name(dev->name))
 	seq_printf(seq, "%-16s 0x%-10x0x%-10x%s     *        %s\n",
 		   tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name);
 	read_unlock(&n->lock);
@@ -1329,6 +1334,9 @@
 	char tbuf[16];
 
 	sprintf(tbuf, "%u.%u.%u.%u", NIPQUAD(*(u32*)n->key));
+        /* Don't show anything in /proc if it involves ghost
+           interfaces: */
+        if(! is_a_ghost_interface_name(dev->name))
 	seq_printf(seq, "%-16s 0x%-10x0x%-10x%s     *        %s\n",
 		   tbuf, hatype, ATF_PUBL | ATF_PERM, "00:00:00:00:00:00",
 		   dev ? dev->name : "*");
diff -u --recursive linux-source-2.6.18/net/ipv4/fib_frontend.c linux-2.6.18-ghostification-host/net/ipv4/fib_frontend.c
--- linux-source-2.6.18/net/ipv4/fib_frontend.c	2007-08-29 10:12:39.000000000 +0200
+++ linux-2.6.18-ghostification-host/net/ipv4/fib_frontend.c	2007-10-04 01:41:13.000000000 +0200
@@ -8,6 +8,8 @@
  * Version:	$Id: fib_frontend.c,v 1.26 2001/10/31 21:55:54 davem Exp $
  *
  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ *              Luca Saiu <positron@gnu.org> (simple changes
+ *                                            for ghostification support)
  *
  *		This program is free software; you can redistribute it and/or
  *		modify it under the terms of the GNU General Public License
@@ -235,7 +237,10 @@
 /*
  *	Handle IP routing ioctl calls. These are used to manipulate the routing tables
  */
- 
+
+/* A function implemented in net/core/dev.c: */
+int is_a_ghost_interface_name(const char *interface_name);
+
 int ip_rt_ioctl(unsigned int cmd, void __user *arg)
 {
 	int err;
@@ -249,16 +254,31 @@
 	switch (cmd) {
 	case SIOCADDRT:		/* Add a route */
 	case SIOCDELRT:		/* Delete a route */
-		if (!capable(CAP_NET_ADMIN))
+                if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
-		if (copy_from_user(&r, arg, sizeof(struct rtentry)))
+                if (copy_from_user(&r, arg, sizeof(struct rtentry)))
 			return -EFAULT;
+                /* Forbid any action involving a ghost interface: */
+                if(r.rt_dev != (char __user*)NULL){
+                  /* We need to have this name in kernel space to check
+                     for ghostification: */
+                  char interface_name[1000];
+                  if(copy_from_user(interface_name, r.rt_dev, IFNAMSIZ + 1))
+                    return -EFAULT;
+                  if(is_a_ghost_interface_name(interface_name)){
+                    printk(KERN_DEBUG "The user asked to add a route involving the ghost interface %s. We make this operation fail\n", interface_name);
+                    return -ENODEV;
+                  } // if
+                } // block
 		rtnl_lock();
 		err = fib_convert_rtentry(cmd, &req.nlh, &req.rtm, &rta, &r);
 		if (err == 0) {
 			if (cmd == SIOCDELRT) {
 				struct fib_table *tb = fib_get_table(req.rtm.rtm_table);
 				err = -ESRCH;
+        /* The function pointed by tb->tb_delete was also modified to deal
+           with ghost interfaces. Such function may be either
+           fn_hash_delete() or fn_trie_delete() */
 				if (tb)
 					err = tb->tb_delete(tb, &req.rtm, &rta, &req.nlh, NULL);
 			} else {
diff -u --recursive linux-source-2.6.18/net/ipv4/fib_hash.c linux-2.6.18-ghostification-host/net/ipv4/fib_hash.c
--- linux-source-2.6.18/net/ipv4/fib_hash.c	2006-09-20 05:42:06.000000000 +0200
+++ linux-2.6.18-ghostification-host/net/ipv4/fib_hash.c	2007-10-04 01:41:19.000000000 +0200
@@ -8,6 +8,8 @@
  * Version:	$Id: fib_hash.c,v 1.13 2001/10/31 21:55:54 davem Exp $
  *
  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ *              Luca Saiu <positron@gnu.org> (trivial changes
+ *                                            for ghostification support)
  *
  *		This program is free software; you can redistribute it and/or
  *		modify it under the terms of the GNU General Public License
@@ -576,6 +578,10 @@
 	fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
 	list_for_each_entry_continue(fa, &f->fn_alias, fa_list) {
 		struct fib_info *fi = fa->fa_info;
+                if(is_a_ghost_interface_name(fi->fib_dev->name)){
+                  printk(KERN_DEBUG "Trying to delete a route involving the ghost device %s: we make this operation fail.\n", fi->fib_dev->name);
+                  return -ENODEV;
+                } // if
 
 		if (fa->fa_tos != tos)
 			break;
@@ -1018,6 +1024,8 @@
 	mask	= FZ_MASK(iter->zone);
 	flags	= fib_flag_trans(fa->fa_type, mask, fi);
 	if (fi)
+          {
+            if (! is_a_ghost_interface_name(fi->fib_dev ? fi->fib_dev->name : "")){
 		snprintf(bf, sizeof(bf),
 			 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
 			 fi->fib_dev ? fi->fib_dev->name : "*", prefix,
@@ -1025,11 +1033,16 @@
 			 mask, (fi->fib_advmss ? fi->fib_advmss + 40 : 0),
 			 fi->fib_window,
 			 fi->fib_rtt >> 3);
-	else
+                seq_printf(seq, "%-127s\n", bf);
+            } // inner if
+          } // block
+	else{
 		snprintf(bf, sizeof(bf),
 			 "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
 			 prefix, 0, flags, 0, 0, 0, mask, 0, 0, 0);
-	seq_printf(seq, "%-127s\n", bf);
+                seq_printf(seq, "%-127s\n", bf);
+        }
+	//seq_printf(seq, "%-127s\n", bf);
 out:
 	return 0;
 }
diff -u --recursive linux-source-2.6.18/net/ipv4/fib_trie.c linux-2.6.18-ghostification-host/net/ipv4/fib_trie.c
--- linux-source-2.6.18/net/ipv4/fib_trie.c	2006-09-20 05:42:06.000000000 +0200
+++ linux-2.6.18-ghostification-host/net/ipv4/fib_trie.c	2007-10-04 01:41:25.000000000 +0200
@@ -12,6 +12,9 @@
  * 
  *   Hans Liss <hans.liss@its.uu.se>  Uppsala Universitet
  *
+ *   Luca Saiu <positron@gnu.org> (trivial changes
+ *                                 for ghostification support)
+ *
  * This work is based on the LPC-trie which is originally descibed in:
  * 
  * An experimental study of compression methods for dynamic tries
@@ -1552,7 +1555,7 @@
 fn_trie_delete(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
 		struct nlmsghdr *nlhdr, struct netlink_skb_parms *req)
 {
-	struct trie *t = (struct trie *) tb->tb_data;
+        struct trie *t = (struct trie *) tb->tb_data;
 	u32 key, mask;
 	int plen = r->rtm_dst_len;
 	u8 tos = r->rtm_tos;
@@ -1594,7 +1597,10 @@
 
 	list_for_each_entry(fa, fa_head, fa_list) {
 		struct fib_info *fi = fa->fa_info;
-
+                if(is_a_ghost_interface_name(fi->fib_dev->name)){
+                  printk(KERN_DEBUG "Trying to delete a route involving the ghost device %s: we make this operation fail.\n", fi->fib_dev->name);
+                  return -ENODEV;
+                } // if
 		if (fa->fa_tos != tos)
 			break;
 
@@ -2432,6 +2438,9 @@
 				continue;
 
 			if (fi)
+                          {
+                            if (! is_a_ghost_interface_name(fi->fib_dev ?
+                                                            fi->fib_dev->name : ""))
 				snprintf(bf, sizeof(bf),
 					 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
 					 fi->fib_dev ? fi->fib_dev->name : "*",
@@ -2442,6 +2451,7 @@
 					 (fi->fib_advmss ? fi->fib_advmss + 40 : 0),
 					 fi->fib_window,
 					 fi->fib_rtt >> 3);
+                          }
 			else
 				snprintf(bf, sizeof(bf),
 					 "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
diff -u --recursive linux-source-2.6.18/net/ipv4/igmp.c linux-2.6.18-ghostification-host/net/ipv4/igmp.c
--- linux-source-2.6.18/net/ipv4/igmp.c	2006-09-20 05:42:06.000000000 +0200
+++ linux-2.6.18-ghostification-host/net/ipv4/igmp.c	2007-10-04 01:41:41.000000000 +0200
@@ -70,6 +70,8 @@
  *		Alexey Kuznetsov:	Accordance to igmp-v2-06 draft.
  *		David L Stevens:	IGMPv3 support, with help from
  *					Vinay Kulkarni
+ *              Luca Saiu <positron@gnu.org>: trivial changes
+ *                                            for ghostification support
  */
 
 #include <linux/module.h>
@@ -2359,6 +2361,8 @@
 #endif
 
 		if (state->in_dev->mc_list == im) {
+                  /* Don't show any info about ghost interfaces: */
+                  if(! is_a_ghost_interface_name(state->dev->name))
 			seq_printf(seq, "%d\t%-10s: %5d %7s\n",
 				   state->dev->ifindex, state->dev->name, state->dev->mc_count, querier);
 		}
@@ -2535,7 +2539,9 @@
 			   "Device", "MCA",
 			   "SRC", "INC", "EXC");
 	} else {
-		seq_printf(seq,
+          /* Don't show any info about ghost interfaces: */
+          if(! is_a_ghost_interface_name(state->dev->name))
+                seq_printf(seq,
 			   "%3d %6.6s 0x%08x "
 			   "0x%08x %6lu %6lu\n", 
 			   state->dev->ifindex, state->dev->name, 
diff -u --recursive linux-source-2.6.18/net/ipv4/route.c linux-2.6.18-ghostification-host/net/ipv4/route.c
--- linux-source-2.6.18/net/ipv4/route.c	2007-08-29 10:12:38.000000000 +0200
+++ linux-2.6.18-ghostification-host/net/ipv4/route.c	2007-10-04 01:41:50.000000000 +0200
@@ -1,67 +1,69 @@
 /*
- * INET		An implementation of the TCP/IP protocol suite for the LINUX
- *		operating system.  INET is implemented using the  BSD Socket
- *		interface as the means of communication with the user level.
+ * INET         An implementation of the TCP/IP protocol suite for the LINUX
+ *              operating system.  INET is implemented using the  BSD Socket
+ *              interface as the means of communication with the user level.
  *
- *		ROUTE - implementation of the IP router.
+ *              ROUTE - implementation of the IP router.
  *
- * Version:	$Id: route.c,v 1.103 2002/01/12 07:44:09 davem Exp $
+ * Version:     $Id: route.c,v 1.103 2002/01/12 07:44:09 davem Exp $
  *
- * Authors:	Ross Biro
- *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
- *		Alan Cox, <gw4pts@gw4pts.ampr.org>
- *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
- *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ * Authors:     Ross Biro
+ *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *              Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *              Linus Torvalds, <Linus.Torvalds@helsinki.fi>
+ *              Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  *
  * Fixes:
- *		Alan Cox	:	Verify area fixes.
- *		Alan Cox	:	cli() protects routing changes
- *		Rui Oliveira	:	ICMP routing table updates
- *		(rco@di.uminho.pt)	Routing table insertion and update
- *		Linus Torvalds	:	Rewrote bits to be sensible
- *		Alan Cox	:	Added BSD route gw semantics
- *		Alan Cox	:	Super /proc >4K 
- *		Alan Cox	:	MTU in route table
- *		Alan Cox	: 	MSS actually. Also added the window
- *					clamper.
- *		Sam Lantinga	:	Fixed route matching in rt_del()
- *		Alan Cox	:	Routing cache support.
- *		Alan Cox	:	Removed compatibility cruft.
- *		Alan Cox	:	RTF_REJECT support.
- *		Alan Cox	:	TCP irtt support.
- *		Jonathan Naylor	:	Added Metric support.
- *	Miquel van Smoorenburg	:	BSD API fixes.
- *	Miquel van Smoorenburg	:	Metrics.
- *		Alan Cox	:	Use __u32 properly
- *		Alan Cox	:	Aligned routing errors more closely with BSD
- *					our system is still very different.
- *		Alan Cox	:	Faster /proc handling
- *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
- *					routing caches and better behaviour.
- *		
- *		Olaf Erb	:	irtt wasn't being copied right.
- *		Bjorn Ekwall	:	Kerneld route support.
- *		Alan Cox	:	Multicast fixed (I hope)
- * 		Pavel Krauz	:	Limited broadcast fixed
- *		Mike McLagan	:	Routing by source
- *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
- *					route.c and rewritten from scratch.
- *		Andi Kleen	:	Load-limit warning messages.
- *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
- *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
- *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
- *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
- *		Marc Boucher	:	routing by fwmark
- *	Robert Olsson		:	Added rt_cache statistics
- *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
- *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
- * 	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
- * 	Ilia Sotnikov		:	Removed TOS from hash calculations
+ *              Alan Cox        :       Verify area fixes.
+ *              Alan Cox        :       cli() protects routing changes
+ *              Rui Oliveira    :       ICMP routing table updates
+ *              (rco@di.uminho.pt)      Routing table insertion and update
+ *              Linus Torvalds  :       Rewrote bits to be sensible
+ *              Alan Cox        :       Added BSD route gw semantics
+ *              Alan Cox        :       Super /proc >4K 
+ *              Alan Cox        :       MTU in route table
+ *              Alan Cox        :       MSS actually. Also added the window
+ *                                      clamper.
+ *              Sam Lantinga    :       Fixed route matching in rt_del()
+ *              Alan Cox        :       Routing cache support.
+ *              Alan Cox        :       Removed compatibility cruft.
+ *              Alan Cox        :       RTF_REJECT support.
+ *              Alan Cox        :       TCP irtt support.
+ *              Jonathan Naylor :       Added Metric support.
+ *      Miquel van Smoorenburg  :       BSD API fixes.
+ *      Miquel van Smoorenburg  :       Metrics.
+ *              Alan Cox        :       Use __u32 properly
+ *              Alan Cox        :       Aligned routing errors more closely with BSD
+ *                                      our system is still very different.
+ *              Alan Cox        :       Faster /proc handling
+ *      Alexey Kuznetsov        :       Massive rework to support tree based routing,
+ *                                      routing caches and better behaviour.
+ *              
+ *              Olaf Erb        :       irtt wasn't being copied right.
+ *              Bjorn Ekwall    :       Kerneld route support.
+ *              Alan Cox        :       Multicast fixed (I hope)
+ *              Pavel Krauz     :       Limited broadcast fixed
+ *              Mike McLagan    :       Routing by source
+ *      Alexey Kuznetsov        :       End of old history. Split to fib.c and
+ *                                      route.c and rewritten from scratch.
+ *              Andi Kleen      :       Load-limit warning messages.
+ *      Vitaly E. Lavrov        :       Transparent proxy revived after year coma.
+ *      Vitaly E. Lavrov        :       Race condition in ip_route_input_slow.
+ *      Tobias Ringstrom        :       Uninitialized res.type in ip_route_output_slow.
+ *      Vladimir V. Ivanov      :       IP rule info (flowid) is really useful.
+ *              Marc Boucher    :       routing by fwmark
+ *      Robert Olsson           :       Added rt_cache statistics
+ *      Arnaldo C. Melo         :       Convert proc stuff to seq_file
+ *      Eric Dumazet            :       hashed spinlocks and rt_check_expire() fixes.
+ *      Ilia Sotnikov           :       Ignore TOS on PMTUD and Redirect
+ *      Ilia Sotnikov           :       Removed TOS from hash calculations
+ *      Luca Saiu <positron@gnu.org>:   Trivial changes for ghostification
+ *                                      support
  *
- *		This program is free software; you can redistribute it and/or
- *		modify it under the terms of the GNU General Public License
- *		as published by the Free Software Foundation; either version
- *		2 of the License, or (at your option) any later version.
+ *              This program is free software; you can redistribute it and/or
+ *              modify it under the terms of the GNU General Public License
+ *              as published by the Free Software Foundation; either version
+ *              2 of the License, or (at your option) any later version.
  */
 
 #include <linux/module.h>
@@ -112,80 +114,80 @@
 #define RT_FL_TOS(oldflp) \
     ((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
 
-#define IP_MAX_MTU	0xFFF0
+#define IP_MAX_MTU      0xFFF0
 
 #define RT_GC_TIMEOUT (300*HZ)
 
-static int ip_rt_min_delay		= 2 * HZ;
-static int ip_rt_max_delay		= 10 * HZ;
+static int ip_rt_min_delay              = 2 * HZ;
+static int ip_rt_max_delay              = 10 * HZ;
 static int ip_rt_max_size;
-static int ip_rt_gc_timeout		= RT_GC_TIMEOUT;
-static int ip_rt_gc_interval		= 60 * HZ;
-static int ip_rt_gc_min_interval	= HZ / 2;
-static int ip_rt_redirect_number	= 9;
-static int ip_rt_redirect_load		= HZ / 50;
-static int ip_rt_redirect_silence	= ((HZ / 50) << (9 + 1));
-static int ip_rt_error_cost		= HZ;
-static int ip_rt_error_burst		= 5 * HZ;
-static int ip_rt_gc_elasticity		= 8;
-static int ip_rt_mtu_expires		= 10 * 60 * HZ;
-static int ip_rt_min_pmtu		= 512 + 20 + 20;
-static int ip_rt_min_advmss		= 256;
-static int ip_rt_secret_interval	= 10 * 60 * HZ;
+static int ip_rt_gc_timeout             = RT_GC_TIMEOUT;
+static int ip_rt_gc_interval            = 60 * HZ;
+static int ip_rt_gc_min_interval        = HZ / 2;
+static int ip_rt_redirect_number        = 9;
+static int ip_rt_redirect_load          = HZ / 50;
+static int ip_rt_redirect_silence       = ((HZ / 50) << (9 + 1));
+static int ip_rt_error_cost             = HZ;
+static int ip_rt_error_burst            = 5 * HZ;
+static int ip_rt_gc_elasticity          = 8;
+static int ip_rt_mtu_expires            = 10 * 60 * HZ;
+static int ip_rt_min_pmtu               = 512 + 20 + 20;
+static int ip_rt_min_advmss             = 256;
+static int ip_rt_secret_interval        = 10 * 60 * HZ;
 static unsigned long rt_deadline;
 
-#define RTprint(a...)	printk(KERN_DEBUG a)
+#define RTprint(a...)   printk(KERN_DEBUG a)
 
 static struct timer_list rt_flush_timer;
 static struct timer_list rt_periodic_timer;
 static struct timer_list rt_secret_timer;
 
 /*
- *	Interface to generic destination cache.
+ *      Interface to generic destination cache.
  */
 
 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
-static void		 ipv4_dst_destroy(struct dst_entry *dst);
-static void		 ipv4_dst_ifdown(struct dst_entry *dst,
-					 struct net_device *dev, int how);
+static void              ipv4_dst_destroy(struct dst_entry *dst);
+static void              ipv4_dst_ifdown(struct dst_entry *dst,
+                                         struct net_device *dev, int how);
 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
-static void		 ipv4_link_failure(struct sk_buff *skb);
-static void		 ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
+static void              ipv4_link_failure(struct sk_buff *skb);
+static void              ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
 static int rt_garbage_collect(void);
 
 
 static struct dst_ops ipv4_dst_ops = {
-	.family =		AF_INET,
-	.protocol =		__constant_htons(ETH_P_IP),
-	.gc =			rt_garbage_collect,
-	.check =		ipv4_dst_check,
-	.destroy =		ipv4_dst_destroy,
-	.ifdown =		ipv4_dst_ifdown,
-	.negative_advice =	ipv4_negative_advice,
-	.link_failure =		ipv4_link_failure,
-	.update_pmtu =		ip_rt_update_pmtu,
-	.entry_size =		sizeof(struct rtable),
+        .family =               AF_INET,
+        .protocol =             __constant_htons(ETH_P_IP),
+        .gc =                   rt_garbage_collect,
+        .check =                ipv4_dst_check,
+        .destroy =              ipv4_dst_destroy,
+        .ifdown =               ipv4_dst_ifdown,
+        .negative_advice =      ipv4_negative_advice,
+        .link_failure =         ipv4_link_failure,
+        .update_pmtu =          ip_rt_update_pmtu,
+        .entry_size =           sizeof(struct rtable),
 };
 
-#define ECN_OR_COST(class)	TC_PRIO_##class
+#define ECN_OR_COST(class)      TC_PRIO_##class
 
 __u8 ip_tos2prio[16] = {
-	TC_PRIO_BESTEFFORT,
-	ECN_OR_COST(FILLER),
-	TC_PRIO_BESTEFFORT,
-	ECN_OR_COST(BESTEFFORT),
-	TC_PRIO_BULK,
-	ECN_OR_COST(BULK),
-	TC_PRIO_BULK,
-	ECN_OR_COST(BULK),
-	TC_PRIO_INTERACTIVE,
-	ECN_OR_COST(INTERACTIVE),
-	TC_PRIO_INTERACTIVE,
-	ECN_OR_COST(INTERACTIVE),
-	TC_PRIO_INTERACTIVE_BULK,
-	ECN_OR_COST(INTERACTIVE_BULK),
-	TC_PRIO_INTERACTIVE_BULK,
-	ECN_OR_COST(INTERACTIVE_BULK)
+        TC_PRIO_BESTEFFORT,
+        ECN_OR_COST(FILLER),
+        TC_PRIO_BESTEFFORT,
+        ECN_OR_COST(BESTEFFORT),
+        TC_PRIO_BULK,
+        ECN_OR_COST(BULK),
+        TC_PRIO_BULK,
+        ECN_OR_COST(BULK),
+        TC_PRIO_INTERACTIVE,
+        ECN_OR_COST(INTERACTIVE),
+        TC_PRIO_INTERACTIVE,
+        ECN_OR_COST(INTERACTIVE),
+        TC_PRIO_INTERACTIVE_BULK,
+        ECN_OR_COST(INTERACTIVE_BULK),
+        TC_PRIO_INTERACTIVE_BULK,
+        ECN_OR_COST(INTERACTIVE_BULK)
 };
 
 
@@ -204,228 +206,231 @@
  */
 
 struct rt_hash_bucket {
-	struct rtable	*chain;
+        struct rtable   *chain;
 };
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
-	defined(CONFIG_PROVE_LOCKING)
+        defined(CONFIG_PROVE_LOCKING)
 /*
  * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
  * The size of this table is a power of two and depends on the number of CPUS.
  * (on lockdep we have a quite big spinlock_t, so keep the size down there)
  */
 #ifdef CONFIG_LOCKDEP
-# define RT_HASH_LOCK_SZ	256
+# define RT_HASH_LOCK_SZ        256
 #else
 # if NR_CPUS >= 32
-#  define RT_HASH_LOCK_SZ	4096
+#  define RT_HASH_LOCK_SZ       4096
 # elif NR_CPUS >= 16
-#  define RT_HASH_LOCK_SZ	2048
+#  define RT_HASH_LOCK_SZ       2048
 # elif NR_CPUS >= 8
-#  define RT_HASH_LOCK_SZ	1024
+#  define RT_HASH_LOCK_SZ       1024
 # elif NR_CPUS >= 4
-#  define RT_HASH_LOCK_SZ	512
+#  define RT_HASH_LOCK_SZ       512
 # else
-#  define RT_HASH_LOCK_SZ	256
+#  define RT_HASH_LOCK_SZ       256
 # endif
 #endif
 
-static spinlock_t	*rt_hash_locks;
+static spinlock_t       *rt_hash_locks;
 # define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
-# define rt_hash_lock_init()	{ \
-		int i; \
-		rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ, GFP_KERNEL); \
-		if (!rt_hash_locks) panic("IP: failed to allocate rt_hash_locks\n"); \
-		for (i = 0; i < RT_HASH_LOCK_SZ; i++) \
-			spin_lock_init(&rt_hash_locks[i]); \
-		}
+# define rt_hash_lock_init()    { \
+                int i; \
+                rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ, GFP_KERNEL); \
+                if (!rt_hash_locks) panic("IP: failed to allocate rt_hash_locks\n"); \
+                for (i = 0; i < RT_HASH_LOCK_SZ; i++) \
+                        spin_lock_init(&rt_hash_locks[i]); \
+                }
 #else
 # define rt_hash_lock_addr(slot) NULL
 # define rt_hash_lock_init()
 #endif
 
-static struct rt_hash_bucket 	*rt_hash_table;
-static unsigned			rt_hash_mask;
-static int			rt_hash_log;
-static unsigned int		rt_hash_rnd;
+static struct rt_hash_bucket    *rt_hash_table;
+static unsigned                 rt_hash_mask;
+static int                      rt_hash_log;
+static unsigned int             rt_hash_rnd;
 
 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
 #define RT_CACHE_STAT_INC(field) \
-	(__raw_get_cpu_var(rt_cache_stat).field++)
+        (__raw_get_cpu_var(rt_cache_stat).field++)
 
 static int rt_intern_hash(unsigned hash, struct rtable *rth,
-				struct rtable **res);
+                                struct rtable **res);
 
 static unsigned int rt_hash_code(u32 daddr, u32 saddr)
 {
-	return (jhash_2words(daddr, saddr, rt_hash_rnd)
-		& rt_hash_mask);
+        return (jhash_2words(daddr, saddr, rt_hash_rnd)
+                & rt_hash_mask);
 }
 
 #ifdef CONFIG_PROC_FS
 struct rt_cache_iter_state {
-	int bucket;
+        int bucket;
 };
 
 static struct rtable *rt_cache_get_first(struct seq_file *seq)
 {
-	struct rtable *r = NULL;
-	struct rt_cache_iter_state *st = seq->private;
+        struct rtable *r = NULL;
+        struct rt_cache_iter_state *st = seq->private;
 
-	for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
-		rcu_read_lock_bh();
-		r = rt_hash_table[st->bucket].chain;
-		if (r)
-			break;
-		rcu_read_unlock_bh();
-	}
-	return r;
+        for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
+                rcu_read_lock_bh();
+                r = rt_hash_table[st->bucket].chain;
+                if (r)
+                        break;
+                rcu_read_unlock_bh();
+        }
+        return r;
 }
 
 static struct rtable *rt_cache_get_next(struct seq_file *seq, struct rtable *r)
 {
-	struct rt_cache_iter_state *st = rcu_dereference(seq->private);
+        struct rt_cache_iter_state *st = rcu_dereference(seq->private);
 
-	r = r->u.rt_next;
-	while (!r) {
-		rcu_read_unlock_bh();
-		if (--st->bucket < 0)
-			break;
-		rcu_read_lock_bh();
-		r = rt_hash_table[st->bucket].chain;
-	}
-	return r;
+        r = r->u.rt_next;
+        while (!r) {
+                rcu_read_unlock_bh();
+                if (--st->bucket < 0)
+                        break;
+                rcu_read_lock_bh();
+                r = rt_hash_table[st->bucket].chain;
+        }
+        return r;
 }
 
 static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
 {
-	struct rtable *r = rt_cache_get_first(seq);
+        struct rtable *r = rt_cache_get_first(seq);
 
-	if (r)
-		while (pos && (r = rt_cache_get_next(seq, r)))
-			--pos;
-	return pos ? NULL : r;
+        if (r)
+                while (pos && (r = rt_cache_get_next(seq, r)))
+                        --pos;
+        return pos ? NULL : r;
 }
 
 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
 {
-	return *pos ? rt_cache_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+        return *pos ? rt_cache_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
 }
 
 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	struct rtable *r = NULL;
+        struct rtable *r = NULL;
 
-	if (v == SEQ_START_TOKEN)
-		r = rt_cache_get_first(seq);
-	else
-		r = rt_cache_get_next(seq, v);
-	++*pos;
-	return r;
+        if (v == SEQ_START_TOKEN)
+                r = rt_cache_get_first(seq);
+        else
+                r = rt_cache_get_next(seq, v);
+        ++*pos;
+        return r;
 }
 
 static void rt_cache_seq_stop(struct seq_file *seq, void *v)
 {
-	if (v && v != SEQ_START_TOKEN)
-		rcu_read_unlock_bh();
+        if (v && v != SEQ_START_TOKEN)
+                rcu_read_unlock_bh();
 }
 
 static int rt_cache_seq_show(struct seq_file *seq, void *v)
 {
-	if (v == SEQ_START_TOKEN)
-		seq_printf(seq, "%-127s\n",
-			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
-			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
-			   "HHUptod\tSpecDst");
-	else {
-		struct rtable *r = v;
-		char temp[256];
-
-		sprintf(temp, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t"
-			      "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X",
-			r->u.dst.dev ? r->u.dst.dev->name : "*",
-			(unsigned long)r->rt_dst, (unsigned long)r->rt_gateway,
-			r->rt_flags, atomic_read(&r->u.dst.__refcnt),
-			r->u.dst.__use, 0, (unsigned long)r->rt_src,
-			(dst_metric(&r->u.dst, RTAX_ADVMSS) ?
-			     (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
-			dst_metric(&r->u.dst, RTAX_WINDOW),
-			(int)((dst_metric(&r->u.dst, RTAX_RTT) >> 3) +
-			      dst_metric(&r->u.dst, RTAX_RTTVAR)),
-			r->fl.fl4_tos,
-			r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1,
-			r->u.dst.hh ? (r->u.dst.hh->hh_output ==
-				       dev_queue_xmit) : 0,
-			r->rt_spec_dst);
-		seq_printf(seq, "%-127s\n", temp);
-        }
-  	return 0;
+        if (v == SEQ_START_TOKEN)
+                seq_printf(seq, "%-127s\n",
+                           "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
+                           "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
+                           "HHUptod\tSpecDst");
+        else
+          if(! is_a_ghost_interface_name(((struct rtable*)v)->u.dst.dev ?
+                  ((struct rtable*)v)->u.dst.dev->name : "*"))
+          {
+                struct rtable *r = v;
+                char temp[256];
+
+                sprintf(temp, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t"
+                              "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X",
+                        r->u.dst.dev ? r->u.dst.dev->name : "*",
+                        (unsigned long)r->rt_dst, (unsigned long)r->rt_gateway,
+                        r->rt_flags, atomic_read(&r->u.dst.__refcnt),
+                        r->u.dst.__use, 0, (unsigned long)r->rt_src,
+                        (dst_metric(&r->u.dst, RTAX_ADVMSS) ?
+                             (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
+                        dst_metric(&r->u.dst, RTAX_WINDOW),
+                        (int)((dst_metric(&r->u.dst, RTAX_RTT) >> 3) +
+                              dst_metric(&r->u.dst, RTAX_RTTVAR)),
+                        r->fl.fl4_tos,
+                        r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1,
+                        r->u.dst.hh ? (r->u.dst.hh->hh_output ==
+                                       dev_queue_xmit) : 0,
+                        r->rt_spec_dst);
+                seq_printf(seq, "%-127s\n", temp);
+          }
+        return 0;
 }
 
 static struct seq_operations rt_cache_seq_ops = {
-	.start  = rt_cache_seq_start,
-	.next   = rt_cache_seq_next,
-	.stop   = rt_cache_seq_stop,
-	.show   = rt_cache_seq_show,
+        .start  = rt_cache_seq_start,
+        .next   = rt_cache_seq_next,
+        .stop   = rt_cache_seq_stop,
+        .show   = rt_cache_seq_show,
 };
 
 static int rt_cache_seq_open(struct inode *inode, struct file *file)
 {
-	struct seq_file *seq;
-	int rc = -ENOMEM;
-	struct rt_cache_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
-
-	if (!s)
-		goto out;
-	rc = seq_open(file, &rt_cache_seq_ops);
-	if (rc)
-		goto out_kfree;
-	seq          = file->private_data;
-	seq->private = s;
-	memset(s, 0, sizeof(*s));
+        struct seq_file *seq;
+        int rc = -ENOMEM;
+        struct rt_cache_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
+
+        if (!s)
+                goto out;
+        rc = seq_open(file, &rt_cache_seq_ops);
+        if (rc)
+                goto out_kfree;
+        seq          = file->private_data;
+        seq->private = s;
+        memset(s, 0, sizeof(*s));
 out:
-	return rc;
+        return rc;
 out_kfree:
-	kfree(s);
-	goto out;
+        kfree(s);
+        goto out;
 }
 
 static struct file_operations rt_cache_seq_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = rt_cache_seq_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release_private,
+        .owner   = THIS_MODULE,
+        .open    = rt_cache_seq_open,
+        .read    = seq_read,
+        .llseek  = seq_lseek,
+        .release = seq_release_private,
 };
 
 
 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
 {
-	int cpu;
+        int cpu;
 
-	if (*pos == 0)
-		return SEQ_START_TOKEN;
+        if (*pos == 0)
+                return SEQ_START_TOKEN;
 
-	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
-		if (!cpu_possible(cpu))
-			continue;
-		*pos = cpu+1;
-		return &per_cpu(rt_cache_stat, cpu);
-	}
-	return NULL;
+        for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+                if (!cpu_possible(cpu))
+                        continue;
+                *pos = cpu+1;
+                return &per_cpu(rt_cache_stat, cpu);
+        }
+        return NULL;
 }
 
 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	int cpu;
+        int cpu;
 
-	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
-		if (!cpu_possible(cpu))
-			continue;
-		*pos = cpu+1;
-		return &per_cpu(rt_cache_stat, cpu);
-	}
-	return NULL;
-	
+        for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+                if (!cpu_possible(cpu))
+                        continue;
+                *pos = cpu+1;
+                return &per_cpu(rt_cache_stat, cpu);
+        }
+        return NULL;
+        
 }
 
 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
@@ -435,108 +440,108 @@
 
 static int rt_cpu_seq_show(struct seq_file *seq, void *v)
 {
-	struct rt_cache_stat *st = v;
+        struct rt_cache_stat *st = v;
 
-	if (v == SEQ_START_TOKEN) {
-		seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
-		return 0;
-	}
-	
-	seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
-		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
-		   atomic_read(&ipv4_dst_ops.entries),
-		   st->in_hit,
-		   st->in_slow_tot,
-		   st->in_slow_mc,
-		   st->in_no_route,
-		   st->in_brd,
-		   st->in_martian_dst,
-		   st->in_martian_src,
-
-		   st->out_hit,
-		   st->out_slow_tot,
-		   st->out_slow_mc, 
-
-		   st->gc_total,
-		   st->gc_ignored,
-		   st->gc_goal_miss,
-		   st->gc_dst_overflow,
-		   st->in_hlist_search,
-		   st->out_hlist_search
-		);
-	return 0;
+        if (v == SEQ_START_TOKEN) {
+                seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
+                return 0;
+        }
+        
+        seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
+                   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
+                   atomic_read(&ipv4_dst_ops.entries),
+                   st->in_hit,
+                   st->in_slow_tot,
+                   st->in_slow_mc,
+                   st->in_no_route,
+                   st->in_brd,
+                   st->in_martian_dst,
+                   st->in_martian_src,
+
+                   st->out_hit,
+                   st->out_slow_tot,
+                   st->out_slow_mc, 
+
+                   st->gc_total,
+                   st->gc_ignored,
+                   st->gc_goal_miss,
+                   st->gc_dst_overflow,
+                   st->in_hlist_search,
+                   st->out_hlist_search
+                );
+        return 0;
 }
 
 static struct seq_operations rt_cpu_seq_ops = {
-	.start  = rt_cpu_seq_start,
-	.next   = rt_cpu_seq_next,
-	.stop   = rt_cpu_seq_stop,
-	.show   = rt_cpu_seq_show,
+        .start  = rt_cpu_seq_start,
+        .next   = rt_cpu_seq_next,
+        .stop   = rt_cpu_seq_stop,
+        .show   = rt_cpu_seq_show,
 };
 
 
 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
 {
-	return seq_open(file, &rt_cpu_seq_ops);
+        return seq_open(file, &rt_cpu_seq_ops);
 }
 
 static struct file_operations rt_cpu_seq_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = rt_cpu_seq_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release,
+        .owner   = THIS_MODULE,
+        .open    = rt_cpu_seq_open,
+        .read    = seq_read,
+        .llseek  = seq_lseek,
+        .release = seq_release,
 };
 
 #endif /* CONFIG_PROC_FS */
   
 static __inline__ void rt_free(struct rtable *rt)
 {
-	multipath_remove(rt);
-	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
+        multipath_remove(rt);
+        call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
 }
 
 static __inline__ void rt_drop(struct rtable *rt)
 {
-	multipath_remove(rt);
-	ip_rt_put(rt);
-	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
+        multipath_remove(rt);
+        ip_rt_put(rt);
+        call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
 }
 
 static __inline__ int rt_fast_clean(struct rtable *rth)
 {
-	/* Kill broadcast/multicast entries very aggresively, if they
-	   collide in hash table with more useful entries */
-	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
-		rth->fl.iif && rth->u.rt_next;
+        /* Kill broadcast/multicast entries very aggresively, if they
+           collide in hash table with more useful entries */
+        return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
+                rth->fl.iif && rth->u.rt_next;
 }
 
 static __inline__ int rt_valuable(struct rtable *rth)
 {
-	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
-		rth->u.dst.expires;
+        return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
+                rth->u.dst.expires;
 }
 
 static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
 {
-	unsigned long age;
-	int ret = 0;
+        unsigned long age;
+        int ret = 0;
 
-	if (atomic_read(&rth->u.dst.__refcnt))
-		goto out;
+        if (atomic_read(&rth->u.dst.__refcnt))
+                goto out;
 
-	ret = 1;
-	if (rth->u.dst.expires &&
-	    time_after_eq(jiffies, rth->u.dst.expires))
-		goto out;
-
-	age = jiffies - rth->u.dst.lastuse;
-	ret = 0;
-	if ((age <= tmo1 && !rt_fast_clean(rth)) ||
-	    (age <= tmo2 && rt_valuable(rth)))
-		goto out;
-	ret = 1;
-out:	return ret;
+        ret = 1;
+        if (rth->u.dst.expires &&
+            time_after_eq(jiffies, rth->u.dst.expires))
+                goto out;
+
+        age = jiffies - rth->u.dst.lastuse;
+        ret = 0;
+        if ((age <= tmo1 && !rt_fast_clean(rth)) ||
+            (age <= tmo2 && rt_valuable(rth)))
+                goto out;
+        ret = 1;
+out:    return ret;
 }
 
 /* Bits of score are:
@@ -546,69 +551,69 @@
  */
 static inline u32 rt_score(struct rtable *rt)
 {
-	u32 score = jiffies - rt->u.dst.lastuse;
+        u32 score = jiffies - rt->u.dst.lastuse;
 
-	score = ~score & ~(3<<30);
+        score = ~score & ~(3<<30);
 
-	if (rt_valuable(rt))
-		score |= (1<<31);
+        if (rt_valuable(rt))
+                score |= (1<<31);
 
-	if (!rt->fl.iif ||
-	    !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
-		score |= (1<<30);
+        if (!rt->fl.iif ||
+            !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
+                score |= (1<<30);
 
-	return score;
+        return score;
 }
 
 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
 {
-	return memcmp(&fl1->nl_u.ip4_u, &fl2->nl_u.ip4_u, sizeof(fl1->nl_u.ip4_u)) == 0 &&
-	       fl1->oif     == fl2->oif &&
-	       fl1->iif     == fl2->iif;
+        return memcmp(&fl1->nl_u.ip4_u, &fl2->nl_u.ip4_u, sizeof(fl1->nl_u.ip4_u)) == 0 &&
+               fl1->oif     == fl2->oif &&
+               fl1->iif     == fl2->iif;
 }
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
 static struct rtable **rt_remove_balanced_route(struct rtable **chain_head,
-						struct rtable *expentry,
-						int *removed_count)
+                                                struct rtable *expentry,
+                                                int *removed_count)
 {
-	int passedexpired = 0;
-	struct rtable **nextstep = NULL;
-	struct rtable **rthp = chain_head;
-	struct rtable *rth;
-
-	if (removed_count)
-		*removed_count = 0;
-
-	while ((rth = *rthp) != NULL) {
-		if (rth == expentry)
-			passedexpired = 1;
-
-		if (((*rthp)->u.dst.flags & DST_BALANCED) != 0  &&
-		    compare_keys(&(*rthp)->fl, &expentry->fl)) {
-			if (*rthp == expentry) {
-				*rthp = rth->u.rt_next;
-				continue;
-			} else {
-				*rthp = rth->u.rt_next;
-				rt_free(rth);
-				if (removed_count)
-					++(*removed_count);
-			}
-		} else {
-			if (!((*rthp)->u.dst.flags & DST_BALANCED) &&
-			    passedexpired && !nextstep)
-				nextstep = &rth->u.rt_next;
-
-			rthp = &rth->u.rt_next;
-		}
-	}
-
-	rt_free(expentry);
-	if (removed_count)
-		++(*removed_count);
+        int passedexpired = 0;
+        struct rtable **nextstep = NULL;
+        struct rtable **rthp = chain_head;
+        struct rtable *rth;
+
+        if (removed_count)
+                *removed_count = 0;
+
+        while ((rth = *rthp) != NULL) {
+                if (rth == expentry)
+                        passedexpired = 1;
+
+                if (((*rthp)->u.dst.flags & DST_BALANCED) != 0  &&
+                    compare_keys(&(*rthp)->fl, &expentry->fl)) {
+                        if (*rthp == expentry) {
+                                *rthp = rth->u.rt_next;
+                                continue;
+                        } else {
+                                *rthp = rth->u.rt_next;
+                                rt_free(rth);
+                                if (removed_count)
+                                        ++(*removed_count);
+                        }
+                } else {
+                        if (!((*rthp)->u.dst.flags & DST_BALANCED) &&
+                            passedexpired && !nextstep)
+                                nextstep = &rth->u.rt_next;
+
+                        rthp = &rth->u.rt_next;
+                }
+        }
+
+        rt_free(expentry);
+        if (removed_count)
+                ++(*removed_count);
 
-	return nextstep;
+        return nextstep;
 }
 #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
 
@@ -616,66 +621,66 @@
 /* This runs via a timer and thus is always in BH context. */
 static void rt_check_expire(unsigned long dummy)
 {
-	static unsigned int rover;
-	unsigned int i = rover, goal;
-	struct rtable *rth, **rthp;
-	unsigned long now = jiffies;
-	u64 mult;
-
-	mult = ((u64)ip_rt_gc_interval) << rt_hash_log;
-	if (ip_rt_gc_timeout > 1)
-		do_div(mult, ip_rt_gc_timeout);
-	goal = (unsigned int)mult;
-	if (goal > rt_hash_mask) goal = rt_hash_mask + 1;
-	for (; goal > 0; goal--) {
-		unsigned long tmo = ip_rt_gc_timeout;
-
-		i = (i + 1) & rt_hash_mask;
-		rthp = &rt_hash_table[i].chain;
-
-		if (*rthp == 0)
-			continue;
-		spin_lock(rt_hash_lock_addr(i));
-		while ((rth = *rthp) != NULL) {
-			if (rth->u.dst.expires) {
-				/* Entry is expired even if it is in use */
-				if (time_before_eq(now, rth->u.dst.expires)) {
-					tmo >>= 1;
-					rthp = &rth->u.rt_next;
-					continue;
-				}
-			} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
-				tmo >>= 1;
-				rthp = &rth->u.rt_next;
-				continue;
-			}
+        static unsigned int rover;
+        unsigned int i = rover, goal;
+        struct rtable *rth, **rthp;
+        unsigned long now = jiffies;
+        u64 mult;
+
+        mult = ((u64)ip_rt_gc_interval) << rt_hash_log;
+        if (ip_rt_gc_timeout > 1)
+                do_div(mult, ip_rt_gc_timeout);
+        goal = (unsigned int)mult;
+        if (goal > rt_hash_mask) goal = rt_hash_mask + 1;
+        for (; goal > 0; goal--) {
+                unsigned long tmo = ip_rt_gc_timeout;
+
+                i = (i + 1) & rt_hash_mask;
+                rthp = &rt_hash_table[i].chain;
+
+                if (*rthp == 0)
+                        continue;
+                spin_lock(rt_hash_lock_addr(i));
+                while ((rth = *rthp) != NULL) {
+                        if (rth->u.dst.expires) {
+                                /* Entry is expired even if it is in use */
+                                if (time_before_eq(now, rth->u.dst.expires)) {
+                                        tmo >>= 1;
+                                        rthp = &rth->u.rt_next;
+                                        continue;
+                                }
+                        } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
+                                tmo >>= 1;
+                                rthp = &rth->u.rt_next;
+                                continue;
+                        }
 
-			/* Cleanup aged off entries. */
+                        /* Cleanup aged off entries. */
 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
-			/* remove all related balanced entries if necessary */
-			if (rth->u.dst.flags & DST_BALANCED) {
-				rthp = rt_remove_balanced_route(
-					&rt_hash_table[i].chain,
-					rth, NULL);
-				if (!rthp)
-					break;
-			} else {
-				*rthp = rth->u.rt_next;
-				rt_free(rth);
-			}
+                        /* remove all related balanced entries if necessary */
+                        if (rth->u.dst.flags & DST_BALANCED) {
+                                rthp = rt_remove_balanced_route(
+                                        &rt_hash_table[i].chain,
+                                        rth, NULL);
+                                if (!rthp)
+                                        break;
+                        } else {
+                                *rthp = rth->u.rt_next;
+                                rt_free(rth);
+                        }
 #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
- 			*rthp = rth->u.rt_next;
- 			rt_free(rth);
+                        *rthp = rth->u.rt_next;
+                        rt_free(rth);
 #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
-		}
-		spin_unlock(rt_hash_lock_addr(i));
+                }
+                spin_unlock(rt_hash_lock_addr(i));
 
-		/* Fallback loop breaker. */
-		if (time_after(jiffies, now))
-			break;
-	}
-	rover = i;
-	mod_timer(&rt_periodic_timer, jiffies + ip_rt_gc_interval);
+                /* Fallback loop breaker. */
+                if (time_after(jiffies, now))
+                        break;
+        }
+        rover = i;
+        mod_timer(&rt_periodic_timer, jiffies + ip_rt_gc_interval);
 }
 
 /* This can run from both BH and non-BH contexts, the latter
@@ -683,78 +688,78 @@
  */
 static void rt_run_flush(unsigned long dummy)
 {
-	int i;
-	struct rtable *rth, *next;
+        int i;
+        struct rtable *rth, *next;
 
-	rt_deadline = 0;
+        rt_deadline = 0;
 
-	get_random_bytes(&rt_hash_rnd, 4);
+        get_random_bytes(&rt_hash_rnd, 4);
 
-	for (i = rt_hash_mask; i >= 0; i--) {
-		spin_lock_bh(rt_hash_lock_addr(i));
-		rth = rt_hash_table[i].chain;
-		if (rth)
-			rt_hash_table[i].chain = NULL;
-		spin_unlock_bh(rt_hash_lock_addr(i));
-
-		for (; rth; rth = next) {
-			next = rth->u.rt_next;
-			rt_free(rth);
-		}
-	}
+        for (i = rt_hash_mask; i >= 0; i--) {
+                spin_lock_bh(rt_hash_lock_addr(i));
+                rth = rt_hash_table[i].chain;
+                if (rth)
+                        rt_hash_table[i].chain = NULL;
+                spin_unlock_bh(rt_hash_lock_addr(i));
+
+                for (; rth; rth = next) {
+                        next = rth->u.rt_next;
+                        rt_free(rth);
+                }
+        }
 }
 
 static DEFINE_SPINLOCK(rt_flush_lock);
 
 void rt_cache_flush(int delay)
 {
-	unsigned long now = jiffies;
-	int user_mode = !in_softirq();
+        unsigned long now = jiffies;
+        int user_mode = !in_softirq();
 
-	if (delay < 0)
-		delay = ip_rt_min_delay;
+        if (delay < 0)
+                delay = ip_rt_min_delay;
 
-	/* flush existing multipath state*/
-	multipath_flush();
+        /* flush existing multipath state*/
+        multipath_flush();
 
-	spin_lock_bh(&rt_flush_lock);
+        spin_lock_bh(&rt_flush_lock);
 
-	if (del_timer(&rt_flush_timer) && delay > 0 && rt_deadline) {
-		long tmo = (long)(rt_deadline - now);
+        if (del_timer(&rt_flush_timer) && delay > 0 && rt_deadline) {
+                long tmo = (long)(rt_deadline - now);
 
-		/* If flush timer is already running
-		   and flush request is not immediate (delay > 0):
+                /* If flush timer is already running
+                   and flush request is not immediate (delay > 0):
 
-		   if deadline is not achieved, prolongate timer to "delay",
-		   otherwise fire it at deadline time.
-		 */
+                   if deadline is not achieved, prolongate timer to "delay",
+                   otherwise fire it at deadline time.
+                 */
 
-		if (user_mode && tmo < ip_rt_max_delay-ip_rt_min_delay)
-			tmo = 0;
-		
-		if (delay > tmo)
-			delay = tmo;
-	}
+                if (user_mode && tmo < ip_rt_max_delay-ip_rt_min_delay)
+                        tmo = 0;
+                
+                if (delay > tmo)
+                        delay = tmo;
+        }
 
-	if (delay <= 0) {
-		spin_unlock_bh(&rt_flush_lock);
-		rt_run_flush(0);
-		return;
-	}
+        if (delay <= 0) {
+                spin_unlock_bh(&rt_flush_lock);
+                rt_run_flush(0);
+                return;
+        }
 
-	if (rt_deadline == 0)
-		rt_deadline = now + ip_rt_max_delay;
+        if (rt_deadline == 0)
+                rt_deadline = now + ip_rt_max_delay;
 
-	mod_timer(&rt_flush_timer, now+delay);
-	spin_unlock_bh(&rt_flush_lock);
+        mod_timer(&rt_flush_timer, now+delay);
+        spin_unlock_bh(&rt_flush_lock);
 }
 
 static void rt_secret_rebuild(unsigned long dummy)
 {
-	unsigned long now = jiffies;
+        unsigned long now = jiffies;
 
-	rt_cache_flush(0);
-	mod_timer(&rt_secret_timer, now + ip_rt_secret_interval);
+        rt_cache_flush(0);
+        mod_timer(&rt_secret_timer, now + ip_rt_secret_interval);
 }
 
 /*
@@ -772,292 +777,292 @@
 
 static int rt_garbage_collect(void)
 {
-	static unsigned long expire = RT_GC_TIMEOUT;
-	static unsigned long last_gc;
-	static int rover;
-	static int equilibrium;
-	struct rtable *rth, **rthp;
-	unsigned long now = jiffies;
-	int goal;
-
-	/*
-	 * Garbage collection is pretty expensive,
-	 * do not make it too frequently.
-	 */
-
-	RT_CACHE_STAT_INC(gc_total);
-
-	if (now - last_gc < ip_rt_gc_min_interval &&
-	    atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size) {
-		RT_CACHE_STAT_INC(gc_ignored);
-		goto out;
-	}
-
-	/* Calculate number of entries, which we want to expire now. */
-	goal = atomic_read(&ipv4_dst_ops.entries) -
-		(ip_rt_gc_elasticity << rt_hash_log);
-	if (goal <= 0) {
-		if (equilibrium < ipv4_dst_ops.gc_thresh)
-			equilibrium = ipv4_dst_ops.gc_thresh;
-		goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
-		if (goal > 0) {
-			equilibrium += min_t(unsigned int, goal / 2, rt_hash_mask + 1);
-			goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
-		}
-	} else {
-		/* We are in dangerous area. Try to reduce cache really
-		 * aggressively.
-		 */
-		goal = max_t(unsigned int, goal / 2, rt_hash_mask + 1);
-		equilibrium = atomic_read(&ipv4_dst_ops.entries) - goal;
-	}
-
-	if (now - last_gc >= ip_rt_gc_min_interval)
-		last_gc = now;
-
-	if (goal <= 0) {
-		equilibrium += goal;
-		goto work_done;
-	}
-
-	do {
-		int i, k;
-
-		for (i = rt_hash_mask, k = rover; i >= 0; i--) {
-			unsigned long tmo = expire;
-
-			k = (k + 1) & rt_hash_mask;
-			rthp = &rt_hash_table[k].chain;
-			spin_lock_bh(rt_hash_lock_addr(k));
-			while ((rth = *rthp) != NULL) {
-				if (!rt_may_expire(rth, tmo, expire)) {
-					tmo >>= 1;
-					rthp = &rth->u.rt_next;
-					continue;
-				}
+        static unsigned long expire = RT_GC_TIMEOUT;
+        static unsigned long last_gc;
+        static int rover;
+        static int equilibrium;
+        struct rtable *rth, **rthp;
+        unsigned long now = jiffies;
+        int goal;
+
+        /*
+         * Garbage collection is pretty expensive,
+         * do not make it too frequently.
+         */
+
+        RT_CACHE_STAT_INC(gc_total);
+
+        if (now - last_gc < ip_rt_gc_min_interval &&
+            atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size) {
+                RT_CACHE_STAT_INC(gc_ignored);
+                goto out;
+        }
+
+        /* Calculate number of entries, which we want to expire now. */
+        goal = atomic_read(&ipv4_dst_ops.entries) -
+                (ip_rt_gc_elasticity << rt_hash_log);
+        if (goal <= 0) {
+                if (equilibrium < ipv4_dst_ops.gc_thresh)
+                        equilibrium = ipv4_dst_ops.gc_thresh;
+                goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
+                if (goal > 0) {
+                        equilibrium += min_t(unsigned int, goal / 2, rt_hash_mask + 1);
+                        goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
+                }
+        } else {
+                /* We are in dangerous area. Try to reduce cache really
+                 * aggressively.
+                 */
+                goal = max_t(unsigned int, goal / 2, rt_hash_mask + 1);
+                equilibrium = atomic_read(&ipv4_dst_ops.entries) - goal;
+        }
+
+        if (now - last_gc >= ip_rt_gc_min_interval)
+                last_gc = now;
+
+        if (goal <= 0) {
+                equilibrium += goal;
+                goto work_done;
+        }
+
+        do {
+                int i, k;
+
+                for (i = rt_hash_mask, k = rover; i >= 0; i--) {
+                        unsigned long tmo = expire;
+
+                        k = (k + 1) & rt_hash_mask;
+                        rthp = &rt_hash_table[k].chain;
+                        spin_lock_bh(rt_hash_lock_addr(k));
+                        while ((rth = *rthp) != NULL) {
+                                if (!rt_may_expire(rth, tmo, expire)) {
+                                        tmo >>= 1;
+                                        rthp = &rth->u.rt_next;
+                                        continue;
+                                }
 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
-				/* remove all related balanced entries
-				 * if necessary
-				 */
-				if (rth->u.dst.flags & DST_BALANCED) {
-					int r;
-
-					rthp = rt_remove_balanced_route(
-						&rt_hash_table[k].chain,
-						rth,
-						&r);
-					goal -= r;
-					if (!rthp)
-						break;
-				} else {
-					*rthp = rth->u.rt_next;
-					rt_free(rth);
-					goal--;
-				}
+                                /* remove all related balanced entries
+                                 * if necessary
+                                 */
+                                if (rth->u.dst.flags & DST_BALANCED) {
+                                        int r;
+
+                                        rthp = rt_remove_balanced_route(
+                                                &rt_hash_table[k].chain,
+                                                rth,
+                                                &r);
+                                        goal -= r;
+                                        if (!rthp)
+                                                break;
+                                } else {
+                                        *rthp = rth->u.rt_next;
+                                        rt_free(rth);
+                                        goal--;
+                                }
 #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
-				*rthp = rth->u.rt_next;
-				rt_free(rth);
-				goal--;
+                                *rthp = rth->u.rt_next;
+                                rt_free(rth);
+                                goal--;
 #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
-			}
-			spin_unlock_bh(rt_hash_lock_addr(k));
-			if (goal <= 0)
-				break;
-		}
-		rover = k;
-
-		if (goal <= 0)
-			goto work_done;
-
-		/* Goal is not achieved. We stop process if:
-
-		   - if expire reduced to zero. Otherwise, expire is halfed.
-		   - if table is not full.
-		   - if we are called from interrupt.
-		   - jiffies check is just fallback/debug loop breaker.
-		     We will not spin here for long time in any case.
-		 */
+                        }
+                        spin_unlock_bh(rt_hash_lock_addr(k));
+                        if (goal <= 0)
+                                break;
+                }
+                rover = k;
+
+                if (goal <= 0)
+                        goto work_done;
+
+                /* Goal is not achieved. We stop process if:
+
+                   - if expire reduced to zero. Otherwise, expire is halfed.
+                   - if table is not full.
+                   - if we are called from interrupt.
+                   - jiffies check is just fallback/debug loop breaker.
+                     We will not spin here for long time in any case.
+                 */
 
-		RT_CACHE_STAT_INC(gc_goal_miss);
+                RT_CACHE_STAT_INC(gc_goal_miss);
 
-		if (expire == 0)
-			break;
+                if (expire == 0)
+                        break;
 
-		expire >>= 1;
+                expire >>= 1;
 #if RT_CACHE_DEBUG >= 2
-		printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
-				atomic_read(&ipv4_dst_ops.entries), goal, i);
+                printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
+                                atomic_read(&ipv4_dst_ops.entries), goal, i);
 #endif
 
-		if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
-			goto out;
-	} while (!in_softirq() && time_before_eq(jiffies, now));
-
-	if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
-		goto out;
-	if (net_ratelimit())
-		printk(KERN_WARNING "dst cache overflow\n");
-	RT_CACHE_STAT_INC(gc_dst_overflow);
-	return 1;
+                if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
+                        goto out;
+        } while (!in_softirq() && time_before_eq(jiffies, now));
+
+        if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
+                goto out;
+        if (net_ratelimit())
+                printk(KERN_WARNING "dst cache overflow\n");
+        RT_CACHE_STAT_INC(gc_dst_overflow);
+        return 1;
 
 work_done:
-	expire += ip_rt_gc_min_interval;
-	if (expire > ip_rt_gc_timeout ||
-	    atomic_read(&ipv4_dst_ops.entries) < ipv4_dst_ops.gc_thresh)
-		expire = ip_rt_gc_timeout;
+        expire += ip_rt_gc_min_interval;
+        if (expire > ip_rt_gc_timeout ||
+            atomic_read(&ipv4_dst_ops.entries) < ipv4_dst_ops.gc_thresh)
+                expire = ip_rt_gc_timeout;
 #if RT_CACHE_DEBUG >= 2
-	printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
-			atomic_read(&ipv4_dst_ops.entries), goal, rover);
+        printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
+                        atomic_read(&ipv4_dst_ops.entries), goal, rover);
 #endif
-out:	return 0;
+out:    return 0;
 }
 
 static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
 {
-	struct rtable	*rth, **rthp;
-	unsigned long	now;
-	struct rtable *cand, **candp;
-	u32 		min_score;
-	int		chain_length;
-	int attempts = !in_softirq();
+        struct rtable   *rth, **rthp;
+        unsigned long   now;
+        struct rtable *cand, **candp;
+        u32             min_score;
+        int             chain_length;
+        int attempts = !in_softirq();
 
 restart:
-	chain_length = 0;
-	min_score = ~(u32)0;
-	cand = NULL;
-	candp = NULL;
-	now = jiffies;
+        chain_length = 0;
+        min_score = ~(u32)0;
+        cand = NULL;
+        candp = NULL;
+        now = jiffies;
 
-	rthp = &rt_hash_table[hash].chain;
+        rthp = &rt_hash_table[hash].chain;
 
-	spin_lock_bh(rt_hash_lock_addr(hash));
-	while ((rth = *rthp) != NULL) {
+        spin_lock_bh(rt_hash_lock_addr(hash));
+        while ((rth = *rthp) != NULL) {
 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
-		if (!(rth->u.dst.flags & DST_BALANCED) &&
-		    compare_keys(&rth->fl, &rt->fl)) {
+                if (!(rth->u.dst.flags & DST_BALANCED) &&
+                    compare_keys(&rth->fl, &rt->fl)) {
 #else
-		if (compare_keys(&rth->fl, &rt->fl)) {
+                if (compare_keys(&rth->fl, &rt->fl)) {
 #endif
-			/* Put it first */
-			*rthp = rth->u.rt_next;
-			/*
-			 * Since lookup is lockfree, the deletion
-			 * must be visible to another weakly ordered CPU before
-			 * the insertion at the start of the hash chain.
-			 */
-			rcu_assign_pointer(rth->u.rt_next,
-					   rt_hash_table[hash].chain);
-			/*
-			 * Since lookup is lockfree, the update writes
-			 * must be ordered for consistency on SMP.
-			 */
-			rcu_assign_pointer(rt_hash_table[hash].chain, rth);
-
-			rth->u.dst.__use++;
-			dst_hold(&rth->u.dst);
-			rth->u.dst.lastuse = now;
-			spin_unlock_bh(rt_hash_lock_addr(hash));
-
-			rt_drop(rt);
-			*rp = rth;
-			return 0;
-		}
-
-		if (!atomic_read(&rth->u.dst.__refcnt)) {
-			u32 score = rt_score(rth);
-
-			if (score <= min_score) {
-				cand = rth;
-				candp = rthp;
-				min_score = score;
-			}
-		}
-
-		chain_length++;
-
-		rthp = &rth->u.rt_next;
-	}
-
-	if (cand) {
-		/* ip_rt_gc_elasticity used to be average length of chain
-		 * length, when exceeded gc becomes really aggressive.
-		 *
-		 * The second limit is less certain. At the moment it allows
-		 * only 2 entries per bucket. We will see.
-		 */
-		if (chain_length > ip_rt_gc_elasticity) {
-			*candp = cand->u.rt_next;
-			rt_free(cand);
-		}
-	}
-
-	/* Try to bind route to arp only if it is output
-	   route or unicast forwarding path.
-	 */
-	if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
-		int err = arp_bind_neighbour(&rt->u.dst);
-		if (err) {
-			spin_unlock_bh(rt_hash_lock_addr(hash));
-
-			if (err != -ENOBUFS) {
-				rt_drop(rt);
-				return err;
-			}
-
-			/* Neighbour tables are full and nothing
-			   can be released. Try to shrink route cache,
-			   it is most likely it holds some neighbour records.
-			 */
-			if (attempts-- > 0) {
-				int saved_elasticity = ip_rt_gc_elasticity;
-				int saved_int = ip_rt_gc_min_interval;
-				ip_rt_gc_elasticity	= 1;
-				ip_rt_gc_min_interval	= 0;
-				rt_garbage_collect();
-				ip_rt_gc_min_interval	= saved_int;
-				ip_rt_gc_elasticity	= saved_elasticity;
-				goto restart;
-			}
-
-			if (net_ratelimit())
-				printk(KERN_WARNING "Neighbour table overflow.\n");
-			rt_drop(rt);
-			return -ENOBUFS;
-		}
-	}
+                        /* Put it first */
+                        *rthp = rth->u.rt_next;
+                        /*
+                         * Since lookup is lockfree, the deletion
+                         * must be visible to another weakly ordered CPU before
+                         * the insertion at the start of the hash chain.
+                         */
+                        rcu_assign_pointer(rth->u.rt_next,
+                                           rt_hash_table[hash].chain);
+                        /*
+                         * Since lookup is lockfree, the update writes
+                         * must be ordered for consistency on SMP.
+                         */
+                        rcu_assign_pointer(rt_hash_table[hash].chain, rth);
+
+                        rth->u.dst.__use++;
+                        dst_hold(&rth->u.dst);
+                        rth->u.dst.lastuse = now;
+                        spin_unlock_bh(rt_hash_lock_addr(hash));
+
+                        rt_drop(rt);
+                        *rp = rth;
+                        return 0;
+                }
+
+                if (!atomic_read(&rth->u.dst.__refcnt)) {
+                        u32 score = rt_score(rth);
+
+                        if (score <= min_score) {
+                                cand = rth;
+                                candp = rthp;
+                                min_score = score;
+                        }
+                }
+
+                chain_length++;
+
+                rthp = &rth->u.rt_next;
+        }
+
+        if (cand) {
+                /* ip_rt_gc_elasticity used to be average length of chain
+                 * length, when exceeded gc becomes really aggressive.
+                 *
+                 * The second limit is less certain. At the moment it allows
+                 * only 2 entries per bucket. We will see.
+                 */
+                if (chain_length > ip_rt_gc_elasticity) {
+                        *candp = cand->u.rt_next;
+                        rt_free(cand);
+                }
+        }
 
-	rt->u.rt_next = rt_hash_table[hash].chain;
+        /* Try to bind route to arp only if it is output
+           route or unicast forwarding path.
+         */
+        if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
+                int err = arp_bind_neighbour(&rt->u.dst);
+                if (err) {
+                        spin_unlock_bh(rt_hash_lock_addr(hash));
+
+                        if (err != -ENOBUFS) {
+                                rt_drop(rt);
+                                return err;
+                        }
+
+                        /* Neighbour tables are full and nothing
+                           can be released. Try to shrink route cache,
+                           it is most likely it holds some neighbour records.
+                         */
+                        if (attempts-- > 0) {
+                                int saved_elasticity = ip_rt_gc_elasticity;
+                                int saved_int = ip_rt_gc_min_interval;
+                                ip_rt_gc_elasticity     = 1;
+                                ip_rt_gc_min_interval   = 0;
+                                rt_garbage_collect();
+                                ip_rt_gc_min_interval   = saved_int;
+                                ip_rt_gc_elasticity     = saved_elasticity;
+                                goto restart;
+                        }
+
+                        if (net_ratelimit())
+                                printk(KERN_WARNING "Neighbour table overflow.\n");
+                        rt_drop(rt);
+                        return -ENOBUFS;
+                }
+        }
+
+        rt->u.rt_next = rt_hash_table[hash].chain;
 #if RT_CACHE_DEBUG >= 2
-	if (rt->u.rt_next) {
-		struct rtable *trt;
-		printk(KERN_DEBUG "rt_cache @%02x: %u.%u.%u.%u", hash,
-		       NIPQUAD(rt->rt_dst));
-		for (trt = rt->u.rt_next; trt; trt = trt->u.rt_next)
-			printk(" . %u.%u.%u.%u", NIPQUAD(trt->rt_dst));
-		printk("\n");
-	}
-#endif
-	rt_hash_table[hash].chain = rt;
-	spin_unlock_bh(rt_hash_lock_addr(hash));
-	*rp = rt;
-	return 0;
+        if (rt->u.rt_next) {
+                struct rtable *trt;
+                printk(KERN_DEBUG "rt_cache @%02x: %u.%u.%u.%u", hash,
+                       NIPQUAD(rt->rt_dst));
+                for (trt = rt->u.rt_next; trt; trt = trt->u.rt_next)
+                        printk(" . %u.%u.%u.%u", NIPQUAD(trt->rt_dst));
+                printk("\n");
+        }
+#endif
+        rt_hash_table[hash].chain = rt;
+        spin_unlock_bh(rt_hash_lock_addr(hash));
+        *rp = rt;
+        return 0;
 }
 
 void rt_bind_peer(struct rtable *rt, int create)
 {
-	static DEFINE_SPINLOCK(rt_peer_lock);
-	struct inet_peer *peer;
+        static DEFINE_SPINLOCK(rt_peer_lock);
+        struct inet_peer *peer;
 
-	peer = inet_getpeer(rt->rt_dst, create);
+        peer = inet_getpeer(rt->rt_dst, create);
 
-	spin_lock_bh(&rt_peer_lock);
-	if (rt->peer == NULL) {
-		rt->peer = peer;
-		peer = NULL;
-	}
-	spin_unlock_bh(&rt_peer_lock);
-	if (peer)
-		inet_putpeer(peer);
+        spin_lock_bh(&rt_peer_lock);
+        if (rt->peer == NULL) {
+                rt->peer = peer;
+                peer = NULL;
+        }
+        spin_unlock_bh(&rt_peer_lock);
+        if (peer)
+                inet_putpeer(peer);
 }
 
 /*
@@ -1069,220 +1074,220 @@
  */
 static void ip_select_fb_ident(struct iphdr *iph)
 {
-	static DEFINE_SPINLOCK(ip_fb_id_lock);
-	static u32 ip_fallback_id;
-	u32 salt;
-
-	spin_lock_bh(&ip_fb_id_lock);
-	salt = secure_ip_id(ip_fallback_id ^ iph->daddr);
-	iph->id = htons(salt & 0xFFFF);
-	ip_fallback_id = salt;
-	spin_unlock_bh(&ip_fb_id_lock);
+        static DEFINE_SPINLOCK(ip_fb_id_lock);
+        static u32 ip_fallback_id;
+        u32 salt;
+
+        spin_lock_bh(&ip_fb_id_lock);
+        salt = secure_ip_id(ip_fallback_id ^ iph->daddr);
+        iph->id = htons(salt & 0xFFFF);
+        ip_fallback_id = salt;
+        spin_unlock_bh(&ip_fb_id_lock);
 }
 
 void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
 {
-	struct rtable *rt = (struct rtable *) dst;
+        struct rtable *rt = (struct rtable *) dst;
 
-	if (rt) {
-		if (rt->peer == NULL)
-			rt_bind_peer(rt, 1);
-
-		/* If peer is attached to destination, it is never detached,
-		   so that we need not to grab a lock to dereference it.
-		 */
-		if (rt->peer) {
-			iph->id = htons(inet_getid(rt->peer, more));
-			return;
-		}
-	} else
-		printk(KERN_DEBUG "rt_bind_peer(0) @%p\n", 
-		       __builtin_return_address(0));
+        if (rt) {
+                if (rt->peer == NULL)
+                        rt_bind_peer(rt, 1);
+
+                /* If peer is attached to destination, it is never detached,
+                   so that we need not to grab a lock to dereference it.
+                 */
+                if (rt->peer) {
+                        iph->id = htons(inet_getid(rt->peer, more));
+                        return;
+                }
+        } else
+                printk(KERN_DEBUG "rt_bind_peer(0) @%p\n", 
+                       __builtin_return_address(0));
 
-	ip_select_fb_ident(iph);
+        ip_select_fb_ident(iph);
 }
 
 static void rt_del(unsigned hash, struct rtable *rt)
 {
-	struct rtable **rthp;
+        struct rtable **rthp;
 
-	spin_lock_bh(rt_hash_lock_addr(hash));
-	ip_rt_put(rt);
-	for (rthp = &rt_hash_table[hash].chain; *rthp;
-	     rthp = &(*rthp)->u.rt_next)
-		if (*rthp == rt) {
-			*rthp = rt->u.rt_next;
-			rt_free(rt);
-			break;
-		}
-	spin_unlock_bh(rt_hash_lock_addr(hash));
+        spin_lock_bh(rt_hash_lock_addr(hash));
+        ip_rt_put(rt);
+        for (rthp = &rt_hash_table[hash].chain; *rthp;
+             rthp = &(*rthp)->u.rt_next)
+                if (*rthp == rt) {
+                        *rthp = rt->u.rt_next;
+                        rt_free(rt);
+                        break;
+                }
+        spin_unlock_bh(rt_hash_lock_addr(hash));
 }
 
 void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
-		    u32 saddr, struct net_device *dev)
+                    u32 saddr, struct net_device *dev)
 {
-	int i, k;
-	struct in_device *in_dev = in_dev_get(dev);
-	struct rtable *rth, **rthp;
-	u32  skeys[2] = { saddr, 0 };
-	int  ikeys[2] = { dev->ifindex, 0 };
-	struct netevent_redirect netevent;
-
-	if (!in_dev)
-		return;
-
-	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev)
-	    || MULTICAST(new_gw) || BADCLASS(new_gw) || ZERONET(new_gw))
-		goto reject_redirect;
-
-	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
-		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
-			goto reject_redirect;
-		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
-			goto reject_redirect;
-	} else {
-		if (inet_addr_type(new_gw) != RTN_UNICAST)
-			goto reject_redirect;
-	}
-
-	for (i = 0; i < 2; i++) {
-		for (k = 0; k < 2; k++) {
-			unsigned hash = rt_hash_code(daddr,
-						     skeys[i] ^ (ikeys[k] << 5));
-
-			rthp=&rt_hash_table[hash].chain;
-
-			rcu_read_lock();
-			while ((rth = rcu_dereference(*rthp)) != NULL) {
-				struct rtable *rt;
-
-				if (rth->fl.fl4_dst != daddr ||
-				    rth->fl.fl4_src != skeys[i] ||
-				    rth->fl.oif != ikeys[k] ||
-				    rth->fl.iif != 0) {
-					rthp = &rth->u.rt_next;
-					continue;
-				}
-
-				if (rth->rt_dst != daddr ||
-				    rth->rt_src != saddr ||
-				    rth->u.dst.error ||
-				    rth->rt_gateway != old_gw ||
-				    rth->u.dst.dev != dev)
-					break;
-
-				dst_hold(&rth->u.dst);
-				rcu_read_unlock();
-
-				rt = dst_alloc(&ipv4_dst_ops);
-				if (rt == NULL) {
-					ip_rt_put(rth);
-					in_dev_put(in_dev);
-					return;
-				}
-
-				/* Copy all the information. */
-				*rt = *rth;
- 				INIT_RCU_HEAD(&rt->u.dst.rcu_head);
-				rt->u.dst.__use		= 1;
-				atomic_set(&rt->u.dst.__refcnt, 1);
-				rt->u.dst.child		= NULL;
-				if (rt->u.dst.dev)
-					dev_hold(rt->u.dst.dev);
-				if (rt->idev)
-					in_dev_hold(rt->idev);
-				rt->u.dst.obsolete	= 0;
-				rt->u.dst.lastuse	= jiffies;
-				rt->u.dst.path		= &rt->u.dst;
-				rt->u.dst.neighbour	= NULL;
-				rt->u.dst.hh		= NULL;
-				rt->u.dst.xfrm		= NULL;
-
-				rt->rt_flags		|= RTCF_REDIRECTED;
-
-				/* Gateway is different ... */
-				rt->rt_gateway		= new_gw;
-
-				/* Redirect received -> path was valid */
-				dst_confirm(&rth->u.dst);
-
-				if (rt->peer)
-					atomic_inc(&rt->peer->refcnt);
-
-				if (arp_bind_neighbour(&rt->u.dst) ||
-				    !(rt->u.dst.neighbour->nud_state &
-					    NUD_VALID)) {
-					if (rt->u.dst.neighbour)
-						neigh_event_send(rt->u.dst.neighbour, NULL);
-					ip_rt_put(rth);
-					rt_drop(rt);
-					goto do_next;
-				}
-				
-				netevent.old = &rth->u.dst;
-				netevent.new = &rt->u.dst;
-				call_netevent_notifiers(NETEVENT_REDIRECT, 
-						        &netevent);
-
-				rt_del(hash, rth);
-				if (!rt_intern_hash(hash, rt, &rt))
-					ip_rt_put(rt);
-				goto do_next;
-			}
-			rcu_read_unlock();
-		do_next:
-			;
-		}
-	}
-	in_dev_put(in_dev);
-	return;
+        int i, k;
+        struct in_device *in_dev = in_dev_get(dev);
+        struct rtable *rth, **rthp;
+        u32  skeys[2] = { saddr, 0 };
+        int  ikeys[2] = { dev->ifindex, 0 };
+        struct netevent_redirect netevent;
+
+        if (!in_dev)
+                return;
+
+        if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev)
+            || MULTICAST(new_gw) || BADCLASS(new_gw) || ZERONET(new_gw))
+                goto reject_redirect;
+
+        if (!IN_DEV_SHARED_MEDIA(in_dev)) {
+                if (!inet_addr_onlink(in_dev, new_gw, old_gw))
+                        goto reject_redirect;
+                if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
+                        goto reject_redirect;
+        } else {
+                if (inet_addr_type(new_gw) != RTN_UNICAST)
+                        goto reject_redirect;
+        }
+
+        for (i = 0; i < 2; i++) {
+                for (k = 0; k < 2; k++) {
+                        unsigned hash = rt_hash_code(daddr,
+                                                     skeys[i] ^ (ikeys[k] << 5));
+
+                        rthp=&rt_hash_table[hash].chain;
+
+                        rcu_read_lock();
+                        while ((rth = rcu_dereference(*rthp)) != NULL) {
+                                struct rtable *rt;
+
+                                if (rth->fl.fl4_dst != daddr ||
+                                    rth->fl.fl4_src != skeys[i] ||
+                                    rth->fl.oif != ikeys[k] ||
+                                    rth->fl.iif != 0) {
+                                        rthp = &rth->u.rt_next;
+                                        continue;
+                                }
+
+                                if (rth->rt_dst != daddr ||
+                                    rth->rt_src != saddr ||
+                                    rth->u.dst.error ||
+                                    rth->rt_gateway != old_gw ||
+                                    rth->u.dst.dev != dev)
+                                        break;
+
+                                dst_hold(&rth->u.dst);
+                                rcu_read_unlock();
+
+                                rt = dst_alloc(&ipv4_dst_ops);
+                                if (rt == NULL) {
+                                        ip_rt_put(rth);
+                                        in_dev_put(in_dev);
+                                        return;
+                                }
+
+                                /* Copy all the information. */
+                                *rt = *rth;
+                                INIT_RCU_HEAD(&rt->u.dst.rcu_head);
+                                rt->u.dst.__use         = 1;
+                                atomic_set(&rt->u.dst.__refcnt, 1);
+                                rt->u.dst.child         = NULL;
+                                if (rt->u.dst.dev)
+                                        dev_hold(rt->u.dst.dev);
+                                if (rt->idev)
+                                        in_dev_hold(rt->idev);
+                                rt->u.dst.obsolete      = 0;
+                                rt->u.dst.lastuse       = jiffies;
+                                rt->u.dst.path          = &rt->u.dst;
+                                rt->u.dst.neighbour     = NULL;
+                                rt->u.dst.hh            = NULL;
+                                rt->u.dst.xfrm          = NULL;
+
+                                rt->rt_flags            |= RTCF_REDIRECTED;
+
+                                /* Gateway is different ... */
+                                rt->rt_gateway          = new_gw;
+
+                                /* Redirect received -> path was valid */
+                                dst_confirm(&rth->u.dst);
+
+                                if (rt->peer)
+                                        atomic_inc(&rt->peer->refcnt);
+
+                                if (arp_bind_neighbour(&rt->u.dst) ||
+                                    !(rt->u.dst.neighbour->nud_state &
+                                            NUD_VALID)) {
+                                        if (rt->u.dst.neighbour)
+                                                neigh_event_send(rt->u.dst.neighbour, NULL);
+                                        ip_rt_put(rth);
+                                        rt_drop(rt);
+                                        goto do_next;
+                                }
+                                
+                                netevent.old = &rth->u.dst;
+                                netevent.new = &rt->u.dst;
+                                call_netevent_notifiers(NETEVENT_REDIRECT, 
+                                                        &netevent);
+
+                                rt_del(hash, rth);
+                                if (!rt_intern_hash(hash, rt, &rt))
+                                        ip_rt_put(rt);
+                                goto do_next;
+                        }
+                        rcu_read_unlock();
+                do_next:
+                        ;
+                }
+        }
+        in_dev_put(in_dev);
+        return;
 
 reject_redirect:
 #ifdef CONFIG_IP_ROUTE_VERBOSE
-	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
-		printk(KERN_INFO "Redirect from %u.%u.%u.%u on %s about "
-			"%u.%u.%u.%u ignored.\n"
-			"  Advised path = %u.%u.%u.%u -> %u.%u.%u.%u\n",
-		       NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw),
-		       NIPQUAD(saddr), NIPQUAD(daddr));
+        if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
+                printk(KERN_INFO "Redirect from %u.%u.%u.%u on %s about "
+                        "%u.%u.%u.%u ignored.\n"
+                        "  Advised path = %u.%u.%u.%u -> %u.%u.%u.%u\n",
+                       NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw),
+                       NIPQUAD(saddr), NIPQUAD(daddr));
 #endif
-	in_dev_put(in_dev);
+        in_dev_put(in_dev);
 }
 
 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
 {
-	struct rtable *rt = (struct rtable*)dst;
-	struct dst_entry *ret = dst;
+        struct rtable *rt = (struct rtable*)dst;
+        struct dst_entry *ret = dst;
 
-	if (rt) {
-		if (dst->obsolete) {
-			ip_rt_put(rt);
-			ret = NULL;
-		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
-			   rt->u.dst.expires) {
-			unsigned hash = rt_hash_code(rt->fl.fl4_dst,
-						     rt->fl.fl4_src ^
-							(rt->fl.oif << 5));
+        if (rt) {
+                if (dst->obsolete) {
+                        ip_rt_put(rt);
+                        ret = NULL;
+                } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
+                           rt->u.dst.expires) {
+                        unsigned hash = rt_hash_code(rt->fl.fl4_dst,
+                                                     rt->fl.fl4_src ^
+                                                        (rt->fl.oif << 5));
 #if RT_CACHE_DEBUG >= 1
-			printk(KERN_DEBUG "ip_rt_advice: redirect to "
-					  "%u.%u.%u.%u/%02x dropped\n",
-				NIPQUAD(rt->rt_dst), rt->fl.fl4_tos);
-#endif
-			rt_del(hash, rt);
-			ret = NULL;
-		}
-	}
-	return ret;
+                        printk(KERN_DEBUG "ip_rt_advice: redirect to "
+                                          "%u.%u.%u.%u/%02x dropped\n",
+                                NIPQUAD(rt->rt_dst), rt->fl.fl4_tos);
+#endif
+                        rt_del(hash, rt);
+                        ret = NULL;
+                }
+        }
+        return ret;
 }
 
 /*
  * Algorithm:
- *	1. The first ip_rt_redirect_number redirects are sent
- *	   with exponential backoff, then we stop sending them at all,
- *	   assuming that the host ignores our redirects.
- *	2. If we did not see packets requiring redirects
- *	   during ip_rt_redirect_silence, we assume that the host
- *	   forgot redirected route and start to send redirects again.
+ *      1. The first ip_rt_redirect_number redirects are sent
+ *         with exponential backoff, then we stop sending them at all,
+ *         assuming that the host ignores our redirects.
+ *      2. If we did not see packets requiring redirects
+ *         during ip_rt_redirect_silence, we assume that the host
+ *         forgot redirected route and start to send redirects again.
  *
  * This algorithm is much cheaper and more intelligent than dumb load limiting
  * in icmp.c.
@@ -1293,90 +1298,90 @@
 
 void ip_rt_send_redirect(struct sk_buff *skb)
 {
-	struct rtable *rt = (struct rtable*)skb->dst;
-	struct in_device *in_dev = in_dev_get(rt->u.dst.dev);
+        struct rtable *rt = (struct rtable*)skb->dst;
+        struct in_device *in_dev = in_dev_get(rt->u.dst.dev);
 
-	if (!in_dev)
-		return;
+        if (!in_dev)
+                return;
 
-	if (!IN_DEV_TX_REDIRECTS(in_dev))
-		goto out;
+        if (!IN_DEV_TX_REDIRECTS(in_dev))
+                goto out;
+
+        /* No redirected packets during ip_rt_redirect_silence;
+         * reset the algorithm.
+         */
+        if (time_after(jiffies, rt->u.dst.rate_last + ip_rt_redirect_silence))
+                rt->u.dst.rate_tokens = 0;
+
+        /* Too many ignored redirects; do not send anything;
+         * set u.dst.rate_last to the last seen redirected packet.
+         */
+        if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) {
+                rt->u.dst.rate_last = jiffies;
+                goto out;
+        }
 
-	/* No redirected packets during ip_rt_redirect_silence;
-	 * reset the algorithm.
-	 */
-	if (time_after(jiffies, rt->u.dst.rate_last + ip_rt_redirect_silence))
-		rt->u.dst.rate_tokens = 0;
-
-	/* Too many ignored redirects; do not send anything
-	 * set u.dst.rate_last to the last seen redirected packet.
-	 */
-	if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) {
-		rt->u.dst.rate_last = jiffies;
-		goto out;
-	}
-
-	/* Check for load limit; set rate_last to the latest sent
-	 * redirect.
-	 */
-	if (time_after(jiffies,
-		       (rt->u.dst.rate_last +
-			(ip_rt_redirect_load << rt->u.dst.rate_tokens)))) {
-		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
-		rt->u.dst.rate_last = jiffies;
-		++rt->u.dst.rate_tokens;
+        /* Check for load limit; set rate_last to the latest sent
+         * redirect.
+         */
+        if (time_after(jiffies,
+                       (rt->u.dst.rate_last +
+                        (ip_rt_redirect_load << rt->u.dst.rate_tokens)))) {
+                icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
+                rt->u.dst.rate_last = jiffies;
+                ++rt->u.dst.rate_tokens;
 #ifdef CONFIG_IP_ROUTE_VERBOSE
-		if (IN_DEV_LOG_MARTIANS(in_dev) &&
-		    rt->u.dst.rate_tokens == ip_rt_redirect_number &&
-		    net_ratelimit())
-			printk(KERN_WARNING "host %u.%u.%u.%u/if%d ignores "
-				"redirects for %u.%u.%u.%u to %u.%u.%u.%u.\n",
-				NIPQUAD(rt->rt_src), rt->rt_iif,
-				NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_gateway));
+                if (IN_DEV_LOG_MARTIANS(in_dev) &&
+                    rt->u.dst.rate_tokens == ip_rt_redirect_number &&
+                    net_ratelimit())
+                        printk(KERN_WARNING "host %u.%u.%u.%u/if%d ignores "
+                                "redirects for %u.%u.%u.%u to %u.%u.%u.%u.\n",
+                                NIPQUAD(rt->rt_src), rt->rt_iif,
+                                NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_gateway));
 #endif
-	}
+        }
 out:
         in_dev_put(in_dev);
 }
 
 static int ip_error(struct sk_buff *skb)
 {
-	struct rtable *rt = (struct rtable*)skb->dst;
-	unsigned long now;
-	int code;
-
-	switch (rt->u.dst.error) {
-		case EINVAL:
-		default:
-			goto out;
-		case EHOSTUNREACH:
-			code = ICMP_HOST_UNREACH;
-			break;
-		case ENETUNREACH:
-			code = ICMP_NET_UNREACH;
-			break;
-		case EACCES:
-			code = ICMP_PKT_FILTERED;
-			break;
-	}
-
-	now = jiffies;
-	rt->u.dst.rate_tokens += now - rt->u.dst.rate_last;
-	if (rt->u.dst.rate_tokens > ip_rt_error_burst)
-		rt->u.dst.rate_tokens = ip_rt_error_burst;
-	rt->u.dst.rate_last = now;
-	if (rt->u.dst.rate_tokens >= ip_rt_error_cost) {
-		rt->u.dst.rate_tokens -= ip_rt_error_cost;
-		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
-	}
+        struct rtable *rt = (struct rtable*)skb->dst;
+        unsigned long now;
+        int code;
+
+        switch (rt->u.dst.error) {
+                case EINVAL:
+                default:
+                        goto out;
+                case EHOSTUNREACH:
+                        code = ICMP_HOST_UNREACH;
+                        break;
+                case ENETUNREACH:
+                        code = ICMP_NET_UNREACH;
+                        break;
+                case EACCES:
+                        code = ICMP_PKT_FILTERED;
+                        break;
+        }
+
+        now = jiffies;
+        rt->u.dst.rate_tokens += now - rt->u.dst.rate_last;
+        if (rt->u.dst.rate_tokens > ip_rt_error_burst)
+                rt->u.dst.rate_tokens = ip_rt_error_burst;
+        rt->u.dst.rate_last = now;
+        if (rt->u.dst.rate_tokens >= ip_rt_error_cost) {
+                rt->u.dst.rate_tokens -= ip_rt_error_cost;
+                icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
+        }
 
-out:	kfree_skb(skb);
-	return 0;
+out:    kfree_skb(skb);
+        return 0;
 } 
 
 /*
- *	The last two values are not from the RFC but
- *	are needed for AMPRnet AX.25 paths.
+ *      The last two values are not from the RFC but
+ *      are needed for AMPRnet AX.25 paths.
  */
 
 static const unsigned short mtu_plateau[] =
@@ -1384,139 +1389,139 @@
 
 static __inline__ unsigned short guess_mtu(unsigned short old_mtu)
 {
-	int i;
-	
-	for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
-		if (old_mtu > mtu_plateau[i])
-			return mtu_plateau[i];
-	return 68;
+        int i;
+        
+        for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
+                if (old_mtu > mtu_plateau[i])
+                        return mtu_plateau[i];
+        return 68;
 }
 
 unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
 {
-	int i;
-	unsigned short old_mtu = ntohs(iph->tot_len);
-	struct rtable *rth;
-	u32  skeys[2] = { iph->saddr, 0, };
-	u32  daddr = iph->daddr;
-	unsigned short est_mtu = 0;
-
-	if (ipv4_config.no_pmtu_disc)
-		return 0;
-
-	for (i = 0; i < 2; i++) {
-		unsigned hash = rt_hash_code(daddr, skeys[i]);
-
-		rcu_read_lock();
-		for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-		     rth = rcu_dereference(rth->u.rt_next)) {
-			if (rth->fl.fl4_dst == daddr &&
-			    rth->fl.fl4_src == skeys[i] &&
-			    rth->rt_dst  == daddr &&
-			    rth->rt_src  == iph->saddr &&
-			    rth->fl.iif == 0 &&
-			    !(dst_metric_locked(&rth->u.dst, RTAX_MTU))) {
-				unsigned short mtu = new_mtu;
-
-				if (new_mtu < 68 || new_mtu >= old_mtu) {
-
-					/* BSD 4.2 compatibility hack :-( */
-					if (mtu == 0 &&
-					    old_mtu >= rth->u.dst.metrics[RTAX_MTU-1] &&
-					    old_mtu >= 68 + (iph->ihl << 2))
-						old_mtu -= iph->ihl << 2;
-
-					mtu = guess_mtu(old_mtu);
-				}
-				if (mtu <= rth->u.dst.metrics[RTAX_MTU-1]) {
-					if (mtu < rth->u.dst.metrics[RTAX_MTU-1]) { 
-						dst_confirm(&rth->u.dst);
-						if (mtu < ip_rt_min_pmtu) {
-							mtu = ip_rt_min_pmtu;
-							rth->u.dst.metrics[RTAX_LOCK-1] |=
-								(1 << RTAX_MTU);
-						}
-						rth->u.dst.metrics[RTAX_MTU-1] = mtu;
-						dst_set_expires(&rth->u.dst,
-							ip_rt_mtu_expires);
-					}
-					est_mtu = mtu;
-				}
-			}
-		}
-		rcu_read_unlock();
-	}
-	return est_mtu ? : new_mtu;
+        int i;
+        unsigned short old_mtu = ntohs(iph->tot_len);
+        struct rtable *rth;
+        u32  skeys[2] = { iph->saddr, 0, };
+        u32  daddr = iph->daddr;
+        unsigned short est_mtu = 0;
+
+        if (ipv4_config.no_pmtu_disc)
+                return 0;
+
+        for (i = 0; i < 2; i++) {
+                unsigned hash = rt_hash_code(daddr, skeys[i]);
+
+                rcu_read_lock();
+                for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
+                     rth = rcu_dereference(rth->u.rt_next)) {
+                        if (rth->fl.fl4_dst == daddr &&
+                            rth->fl.fl4_src == skeys[i] &&
+                            rth->rt_dst  == daddr &&
+                            rth->rt_src  == iph->saddr &&
+                            rth->fl.iif == 0 &&
+                            !(dst_metric_locked(&rth->u.dst, RTAX_MTU))) {
+                                unsigned short mtu = new_mtu;
+
+                                if (new_mtu < 68 || new_mtu >= old_mtu) {
+
+                                        /* BSD 4.2 compatibility hack :-( */
+                                        if (mtu == 0 &&
+                                            old_mtu >= rth->u.dst.metrics[RTAX_MTU-1] &&
+                                            old_mtu >= 68 + (iph->ihl << 2))
+                                                old_mtu -= iph->ihl << 2;
+
+                                        mtu = guess_mtu(old_mtu);
+                                }
+                                if (mtu <= rth->u.dst.metrics[RTAX_MTU-1]) {
+                                        if (mtu < rth->u.dst.metrics[RTAX_MTU-1]) { 
+                                                dst_confirm(&rth->u.dst);
+                                                if (mtu < ip_rt_min_pmtu) {
+                                                        mtu = ip_rt_min_pmtu;
+                                                        rth->u.dst.metrics[RTAX_LOCK-1] |=
+                                                                (1 << RTAX_MTU);
+                                                }
+                                                rth->u.dst.metrics[RTAX_MTU-1] = mtu;
+                                                dst_set_expires(&rth->u.dst,
+                                                        ip_rt_mtu_expires);
+                                        }
+                                        est_mtu = mtu;
+                                }
+                        }
+                }
+                rcu_read_unlock();
+        }
+        return est_mtu ? : new_mtu;
 }
 
 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
-	if (dst->metrics[RTAX_MTU-1] > mtu && mtu >= 68 &&
-	    !(dst_metric_locked(dst, RTAX_MTU))) {
-		if (mtu < ip_rt_min_pmtu) {
-			mtu = ip_rt_min_pmtu;
-			dst->metrics[RTAX_LOCK-1] |= (1 << RTAX_MTU);
-		}
-		dst->metrics[RTAX_MTU-1] = mtu;
-		dst_set_expires(dst, ip_rt_mtu_expires);
-		call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
-	}
+        if (dst->metrics[RTAX_MTU-1] > mtu && mtu >= 68 &&
+            !(dst_metric_locked(dst, RTAX_MTU))) {
+                if (mtu < ip_rt_min_pmtu) {
+                        mtu = ip_rt_min_pmtu;
+                        dst->metrics[RTAX_LOCK-1] |= (1 << RTAX_MTU);
+                }
+                dst->metrics[RTAX_MTU-1] = mtu;
+                dst_set_expires(dst, ip_rt_mtu_expires);
+                call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
+        }
 }
 
 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
 {
-	return NULL;
+        return NULL;
 }
 
 static void ipv4_dst_destroy(struct dst_entry *dst)
 {
-	struct rtable *rt = (struct rtable *) dst;
-	struct inet_peer *peer = rt->peer;
-	struct in_device *idev = rt->idev;
-
-	if (peer) {
-		rt->peer = NULL;
-		inet_putpeer(peer);
-	}
-
-	if (idev) {
-		rt->idev = NULL;
-		in_dev_put(idev);
-	}
+        struct rtable *rt = (struct rtable *) dst;
+        struct inet_peer *peer = rt->peer;
+        struct in_device *idev = rt->idev;
+
+        if (peer) {
+                rt->peer = NULL;
+                inet_putpeer(peer);
+        }
+
+        if (idev) {
+                rt->idev = NULL;
+                in_dev_put(idev);
+        }
 }
 
 static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
-			    int how)
+                            int how)
 {
-	struct rtable *rt = (struct rtable *) dst;
-	struct in_device *idev = rt->idev;
-	if (dev != &loopback_dev && idev && idev->dev == dev) {
-		struct in_device *loopback_idev = in_dev_get(&loopback_dev);
-		if (loopback_idev) {
-			rt->idev = loopback_idev;
-			in_dev_put(idev);
-		}
-	}
+        struct rtable *rt = (struct rtable *) dst;
+        struct in_device *idev = rt->idev;
+        if (dev != &loopback_dev && idev && idev->dev == dev) {
+                struct in_device *loopback_idev = in_dev_get(&loopback_dev);
+                if (loopback_idev) {
+                        rt->idev = loopback_idev;
+                        in_dev_put(idev);
+                }
+        }
 }
 
 static void ipv4_link_failure(struct sk_buff *skb)
 {
-	struct rtable *rt;
+        struct rtable *rt;
 
-	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
 
-	rt = (struct rtable *) skb->dst;
-	if (rt)
-		dst_set_expires(&rt->u.dst, 0);
+        rt = (struct rtable *) skb->dst;
+        if (rt)
+                dst_set_expires(&rt->u.dst, 0);
 }
 
 static int ip_rt_bug(struct sk_buff *skb)
 {
-	printk(KERN_DEBUG "ip_rt_bug: %u.%u.%u.%u -> %u.%u.%u.%u, %s\n",
-		NIPQUAD(skb->nh.iph->saddr), NIPQUAD(skb->nh.iph->daddr),
-		skb->dev ? skb->dev->name : "?");
-	kfree_skb(skb);
-	return 0;
+        printk(KERN_DEBUG "ip_rt_bug: %u.%u.%u.%u -> %u.%u.%u.%u, %s\n",
+                NIPQUAD(skb->nh.iph->saddr), NIPQUAD(skb->nh.iph->daddr),
+                skb->dev ? skb->dev->name : "?");
+        kfree_skb(skb);
+        return 0;
 }
 
 /*
@@ -1530,544 +1535,544 @@
 
 void ip_rt_get_source(u8 *addr, struct rtable *rt)
 {
-	u32 src;
-	struct fib_result res;
+        u32 src;
+        struct fib_result res;
 
-	if (rt->fl.iif == 0)
-		src = rt->rt_src;
-	else if (fib_lookup(&rt->fl, &res) == 0) {
-		src = FIB_RES_PREFSRC(res);
-		fib_res_put(&res);
-	} else
-		src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway,
-					RT_SCOPE_UNIVERSE);
-	memcpy(addr, &src, 4);
+        if (rt->fl.iif == 0)
+                src = rt->rt_src;
+        else if (fib_lookup(&rt->fl, &res) == 0) {
+                src = FIB_RES_PREFSRC(res);
+                fib_res_put(&res);
+        } else
+                src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway,
+                                        RT_SCOPE_UNIVERSE);
+        memcpy(addr, &src, 4);
 }
 
 #ifdef CONFIG_NET_CLS_ROUTE
 static void set_class_tag(struct rtable *rt, u32 tag)
 {
-	if (!(rt->u.dst.tclassid & 0xFFFF))
-		rt->u.dst.tclassid |= tag & 0xFFFF;
-	if (!(rt->u.dst.tclassid & 0xFFFF0000))
-		rt->u.dst.tclassid |= tag & 0xFFFF0000;
+        if (!(rt->u.dst.tclassid & 0xFFFF))
+                rt->u.dst.tclassid |= tag & 0xFFFF;
+        if (!(rt->u.dst.tclassid & 0xFFFF0000))
+                rt->u.dst.tclassid |= tag & 0xFFFF0000;
 }
 #endif
 
 static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
 {
-	struct fib_info *fi = res->fi;
+        struct fib_info *fi = res->fi;
 
-	if (fi) {
-		if (FIB_RES_GW(*res) &&
-		    FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
-			rt->rt_gateway = FIB_RES_GW(*res);
-		memcpy(rt->u.dst.metrics, fi->fib_metrics,
-		       sizeof(rt->u.dst.metrics));
-		if (fi->fib_mtu == 0) {
-			rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
-			if (rt->u.dst.metrics[RTAX_LOCK-1] & (1 << RTAX_MTU) &&
-			    rt->rt_gateway != rt->rt_dst &&
-			    rt->u.dst.dev->mtu > 576)
-				rt->u.dst.metrics[RTAX_MTU-1] = 576;
-		}
+        if (fi) {
+                if (FIB_RES_GW(*res) &&
+                    FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
+                        rt->rt_gateway = FIB_RES_GW(*res);
+                memcpy(rt->u.dst.metrics, fi->fib_metrics,
+                       sizeof(rt->u.dst.metrics));
+                if (fi->fib_mtu == 0) {
+                        rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
+                        if (rt->u.dst.metrics[RTAX_LOCK-1] & (1 << RTAX_MTU) &&
+                            rt->rt_gateway != rt->rt_dst &&
+                            rt->u.dst.dev->mtu > 576)
+                                rt->u.dst.metrics[RTAX_MTU-1] = 576;
+                }
 #ifdef CONFIG_NET_CLS_ROUTE
-		rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
+                rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
 #endif
-	} else
-		rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu;
+        } else
+                rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu;
 
-	if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0)
-		rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
-	if (rt->u.dst.metrics[RTAX_MTU-1] > IP_MAX_MTU)
-		rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
-	if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0)
-		rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40,
-				       ip_rt_min_advmss);
-	if (rt->u.dst.metrics[RTAX_ADVMSS-1] > 65535 - 40)
-		rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
+        if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0)
+                rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
+        if (rt->u.dst.metrics[RTAX_MTU-1] > IP_MAX_MTU)
+                rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
+        if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0)
+                rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40,
+                                       ip_rt_min_advmss);
+        if (rt->u.dst.metrics[RTAX_ADVMSS-1] > 65535 - 40)
+                rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
 
 #ifdef CONFIG_NET_CLS_ROUTE
 #ifdef CONFIG_IP_MULTIPLE_TABLES
-	set_class_tag(rt, fib_rules_tclass(res));
+        set_class_tag(rt, fib_rules_tclass(res));
 #endif
-	set_class_tag(rt, itag);
+        set_class_tag(rt, itag);
 #endif
         rt->rt_type = res->type;
 }
 
 static int ip_route_input_mc(struct sk_buff *skb, u32 daddr, u32 saddr,
-				u8 tos, struct net_device *dev, int our)
+                                u8 tos, struct net_device *dev, int our)
 {
-	unsigned hash;
-	struct rtable *rth;
-	u32 spec_dst;
-	struct in_device *in_dev = in_dev_get(dev);
-	u32 itag = 0;
-
-	/* Primary sanity checks. */
-
-	if (in_dev == NULL)
-		return -EINVAL;
-
-	if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr) ||
-	    skb->protocol != htons(ETH_P_IP))
-		goto e_inval;
-
-	if (ZERONET(saddr)) {
-		if (!LOCAL_MCAST(daddr))
-			goto e_inval;
-		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
-	} else if (fib_validate_source(saddr, 0, tos, 0,
-					dev, &spec_dst, &itag) < 0)
-		goto e_inval;
-
-	rth = dst_alloc(&ipv4_dst_ops);
-	if (!rth)
-		goto e_nobufs;
-
-	rth->u.dst.output= ip_rt_bug;
-
-	atomic_set(&rth->u.dst.__refcnt, 1);
-	rth->u.dst.flags= DST_HOST;
-	if (in_dev->cnf.no_policy)
-		rth->u.dst.flags |= DST_NOPOLICY;
-	rth->fl.fl4_dst	= daddr;
-	rth->rt_dst	= daddr;
-	rth->fl.fl4_tos	= tos;
+        unsigned hash;
+        struct rtable *rth;
+        u32 spec_dst;
+        struct in_device *in_dev = in_dev_get(dev);
+        u32 itag = 0;
+
+        /* Primary sanity checks. */
+
+        if (in_dev == NULL)
+                return -EINVAL;
+
+        if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr) ||
+            skb->protocol != htons(ETH_P_IP))
+                goto e_inval;
+
+        if (ZERONET(saddr)) {
+                if (!LOCAL_MCAST(daddr))
+                        goto e_inval;
+                spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
+        } else if (fib_validate_source(saddr, 0, tos, 0,
+                                        dev, &spec_dst, &itag) < 0)
+                goto e_inval;
+
+        rth = dst_alloc(&ipv4_dst_ops);
+        if (!rth)
+                goto e_nobufs;
+
+        rth->u.dst.output= ip_rt_bug;
+
+        atomic_set(&rth->u.dst.__refcnt, 1);
+        rth->u.dst.flags= DST_HOST;
+        if (in_dev->cnf.no_policy)
+                rth->u.dst.flags |= DST_NOPOLICY;
+        rth->fl.fl4_dst = daddr;
+        rth->rt_dst     = daddr;
+        rth->fl.fl4_tos = tos;
 #ifdef CONFIG_IP_ROUTE_FWMARK
-	rth->fl.fl4_fwmark= skb->nfmark;
+        rth->fl.fl4_fwmark= skb->nfmark;
 #endif
-	rth->fl.fl4_src	= saddr;
-	rth->rt_src	= saddr;
+        rth->fl.fl4_src = saddr;
+        rth->rt_src     = saddr;
 #ifdef CONFIG_NET_CLS_ROUTE
-	rth->u.dst.tclassid = itag;
+        rth->u.dst.tclassid = itag;
 #endif
-	rth->rt_iif	=
-	rth->fl.iif	= dev->ifindex;
-	rth->u.dst.dev	= &loopback_dev;
-	dev_hold(rth->u.dst.dev);
-	rth->idev	= in_dev_get(rth->u.dst.dev);
-	rth->fl.oif	= 0;
-	rth->rt_gateway	= daddr;
-	rth->rt_spec_dst= spec_dst;
-	rth->rt_type	= RTN_MULTICAST;
-	rth->rt_flags	= RTCF_MULTICAST;
-	if (our) {
-		rth->u.dst.input= ip_local_deliver;
-		rth->rt_flags |= RTCF_LOCAL;
-	}
+        rth->rt_iif     =
+        rth->fl.iif     = dev->ifindex;
+        rth->u.dst.dev  = &loopback_dev;
+        dev_hold(rth->u.dst.dev);
+        rth->idev       = in_dev_get(rth->u.dst.dev);
+        rth->fl.oif     = 0;
+        rth->rt_gateway = daddr;
+        rth->rt_spec_dst= spec_dst;
+        rth->rt_type    = RTN_MULTICAST;
+        rth->rt_flags   = RTCF_MULTICAST;
+        if (our) {
+                rth->u.dst.input= ip_local_deliver;
+                rth->rt_flags |= RTCF_LOCAL;
+        }
 
 #ifdef CONFIG_IP_MROUTE
-	if (!LOCAL_MCAST(daddr) && IN_DEV_MFORWARD(in_dev))
-		rth->u.dst.input = ip_mr_input;
+        if (!LOCAL_MCAST(daddr) && IN_DEV_MFORWARD(in_dev))
+                rth->u.dst.input = ip_mr_input;
 #endif
-	RT_CACHE_STAT_INC(in_slow_mc);
+        RT_CACHE_STAT_INC(in_slow_mc);
 
-	in_dev_put(in_dev);
-	hash = rt_hash_code(daddr, saddr ^ (dev->ifindex << 5));
-	return rt_intern_hash(hash, rth, (struct rtable**) &skb->dst);
+        in_dev_put(in_dev);
+        hash = rt_hash_code(daddr, saddr ^ (dev->ifindex << 5));
+        return rt_intern_hash(hash, rth, (struct rtable**) &skb->dst);
 
 e_nobufs:
-	in_dev_put(in_dev);
-	return -ENOBUFS;
+        in_dev_put(in_dev);
+        return -ENOBUFS;
 
 e_inval:
-	in_dev_put(in_dev);
-	return -EINVAL;
+        in_dev_put(in_dev);
+        return -EINVAL;
 }
 
 
 static void ip_handle_martian_source(struct net_device *dev,
-				     struct in_device *in_dev,
-				     struct sk_buff *skb,
-				     u32 daddr,
-				     u32 saddr) 
+                                     struct in_device *in_dev,
+                                     struct sk_buff *skb,
+                                     u32 daddr,
+                                     u32 saddr) 
 {
-	RT_CACHE_STAT_INC(in_martian_src);
+        RT_CACHE_STAT_INC(in_martian_src);
 #ifdef CONFIG_IP_ROUTE_VERBOSE
-	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
-		/*
-		 *	RFC1812 recommendation, if source is martian,
-		 *	the only hint is MAC header.
-		 */
-		printk(KERN_WARNING "martian source %u.%u.%u.%u from "
-			"%u.%u.%u.%u, on dev %s\n",
-			NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
-		if (dev->hard_header_len && skb->mac.raw) {
-			int i;
-			unsigned char *p = skb->mac.raw;
-			printk(KERN_WARNING "ll header: ");
-			for (i = 0; i < dev->hard_header_len; i++, p++) {
-				printk("%02x", *p);
-				if (i < (dev->hard_header_len - 1))
-					printk(":");
-			}
-			printk("\n");
-		}
-	}
+        if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
+                /*
+                 *      RFC1812 recommendation, if source is martian,
+                 *      the only hint is MAC header.
+                 */
+                printk(KERN_WARNING "martian source %u.%u.%u.%u from "
+                        "%u.%u.%u.%u, on dev %s\n",
+                        NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
+                if (dev->hard_header_len && skb->mac.raw) {
+                        int i;
+                        unsigned char *p = skb->mac.raw;
+                        printk(KERN_WARNING "ll header: ");
+                        for (i = 0; i < dev->hard_header_len; i++, p++) {
+                                printk("%02x", *p);
+                                if (i < (dev->hard_header_len - 1))
+                                        printk(":");
+                        }
+                        printk("\n");
+                }
+        }
 #endif
 }
 
 static inline int __mkroute_input(struct sk_buff *skb, 
-				  struct fib_result* res, 
-				  struct in_device *in_dev, 
-				  u32 daddr, u32 saddr, u32 tos, 
-				  struct rtable **result) 
-{
-
-	struct rtable *rth;
-	int err;
-	struct in_device *out_dev;
-	unsigned flags = 0;
-	u32 spec_dst, itag;
-
-	/* get a working reference to the output device */
-	out_dev = in_dev_get(FIB_RES_DEV(*res));
-	if (out_dev == NULL) {
-		if (net_ratelimit())
-			printk(KERN_CRIT "Bug in ip_route_input" \
-			       "_slow(). Please, report\n");
-		return -EINVAL;
-	}
-
-
-	err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res), 
-				  in_dev->dev, &spec_dst, &itag);
-	if (err < 0) {
-		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, 
-					 saddr);
-		
-		err = -EINVAL;
-		goto cleanup;
-	}
-
-	if (err)
-		flags |= RTCF_DIRECTSRC;
-
-	if (out_dev == in_dev && err && !(flags & (RTCF_NAT | RTCF_MASQ)) &&
-	    (IN_DEV_SHARED_MEDIA(out_dev) ||
-	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
-		flags |= RTCF_DOREDIRECT;
-
-	if (skb->protocol != htons(ETH_P_IP)) {
-		/* Not IP (i.e. ARP). Do not create route, if it is
-		 * invalid for proxy arp. DNAT routes are always valid.
-		 */
-		if (out_dev == in_dev && !(flags & RTCF_DNAT)) {
-			err = -EINVAL;
-			goto cleanup;
-		}
-	}
-
-
-	rth = dst_alloc(&ipv4_dst_ops);
-	if (!rth) {
-		err = -ENOBUFS;
-		goto cleanup;
-	}
+                                  struct fib_result* res, 
+                                  struct in_device *in_dev, 
+                                  u32 daddr, u32 saddr, u32 tos, 
+                                  struct rtable **result) 
+{
+
+        struct rtable *rth;
+        int err;
+        struct in_device *out_dev;
+        unsigned flags = 0;
+        u32 spec_dst, itag;
+
+        /* get a working reference to the output device */
+        out_dev = in_dev_get(FIB_RES_DEV(*res));
+        if (out_dev == NULL) {
+                if (net_ratelimit())
+                        printk(KERN_CRIT "Bug in ip_route_input" \
+                               "_slow(). Please, report\n");
+                return -EINVAL;
+        }
+
+
+        err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res), 
+                                  in_dev->dev, &spec_dst, &itag);
+        if (err < 0) {
+                ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, 
+                                         saddr);
+                
+                err = -EINVAL;
+                goto cleanup;
+        }
+
+        if (err)
+                flags |= RTCF_DIRECTSRC;
+
+        if (out_dev == in_dev && err && !(flags & (RTCF_NAT | RTCF_MASQ)) &&
+            (IN_DEV_SHARED_MEDIA(out_dev) ||
+             inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
+                flags |= RTCF_DOREDIRECT;
+
+        if (skb->protocol != htons(ETH_P_IP)) {
+                /* Not IP (i.e. ARP). Do not create route, if it is
+                 * invalid for proxy arp. DNAT routes are always valid.
+                 */
+                if (out_dev == in_dev && !(flags & RTCF_DNAT)) {
+                        err = -EINVAL;
+                        goto cleanup;
+                }
+        }
 
-	atomic_set(&rth->u.dst.__refcnt, 1);
-	rth->u.dst.flags= DST_HOST;
+
+        rth = dst_alloc(&ipv4_dst_ops);
+        if (!rth) {
+                err = -ENOBUFS;
+                goto cleanup;
+        }
+
+        atomic_set(&rth->u.dst.__refcnt, 1);
+        rth->u.dst.flags= DST_HOST;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
-	if (res->fi->fib_nhs > 1)
-		rth->u.dst.flags |= DST_BALANCED;
+        if (res->fi->fib_nhs > 1)
+                rth->u.dst.flags |= DST_BALANCED;
 #endif
-	if (in_dev->cnf.no_policy)
-		rth->u.dst.flags |= DST_NOPOLICY;
-	if (out_dev->cnf.no_xfrm)
-		rth->u.dst.flags |= DST_NOXFRM;
-	rth->fl.fl4_dst	= daddr;
-	rth->rt_dst	= daddr;
-	rth->fl.fl4_tos	= tos;
+        if (in_dev->cnf.no_policy)
+                rth->u.dst.flags |= DST_NOPOLICY;
+        if (out_dev->cnf.no_xfrm)
+                rth->u.dst.flags |= DST_NOXFRM;
+        rth->fl.fl4_dst = daddr;
+        rth->rt_dst     = daddr;
+        rth->fl.fl4_tos = tos;
 #ifdef CONFIG_IP_ROUTE_FWMARK
-	rth->fl.fl4_fwmark= skb->nfmark;
+        rth->fl.fl4_fwmark= skb->nfmark;
 #endif
-	rth->fl.fl4_src	= saddr;
-	rth->rt_src	= saddr;
-	rth->rt_gateway	= daddr;
-	rth->rt_iif 	=
-		rth->fl.iif	= in_dev->dev->ifindex;
-	rth->u.dst.dev	= (out_dev)->dev;
-	dev_hold(rth->u.dst.dev);
-	rth->idev	= in_dev_get(rth->u.dst.dev);
-	rth->fl.oif 	= 0;
-	rth->rt_spec_dst= spec_dst;
-
-	rth->u.dst.input = ip_forward;
-	rth->u.dst.output = ip_output;
+        rth->fl.fl4_src = saddr;
+        rth->rt_src     = saddr;
+        rth->rt_gateway = daddr;
+        rth->rt_iif     =
+                rth->fl.iif     = in_dev->dev->ifindex;
+        rth->u.dst.dev  = (out_dev)->dev;
+        dev_hold(rth->u.dst.dev);
+        rth->idev       = in_dev_get(rth->u.dst.dev);
+        rth->fl.oif     = 0;
+        rth->rt_spec_dst= spec_dst;
+
+        rth->u.dst.input = ip_forward;
+        rth->u.dst.output = ip_output;
 
-	rt_set_nexthop(rth, res, itag);
+        rt_set_nexthop(rth, res, itag);
 
-	rth->rt_flags = flags;
+        rth->rt_flags = flags;
 
-	*result = rth;
-	err = 0;
+        *result = rth;
+        err = 0;
  cleanup:
-	/* release the working reference to the output device */
-	in_dev_put(out_dev);
-	return err;
-}						
+        /* release the working reference to the output device */
+        in_dev_put(out_dev);
+        return err;
+}                                               
 
 static inline int ip_mkroute_input_def(struct sk_buff *skb, 
-				       struct fib_result* res, 
-				       const struct flowi *fl,
-				       struct in_device *in_dev,
-				       u32 daddr, u32 saddr, u32 tos)
-{
-	struct rtable* rth = NULL;
-	int err;
-	unsigned hash;
+                                       struct fib_result* res, 
+                                       const struct flowi *fl,
+                                       struct in_device *in_dev,
+                                       u32 daddr, u32 saddr, u32 tos)
+{
+        struct rtable* rth = NULL;
+        int err;
+        unsigned hash;
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-	if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
-		fib_select_multipath(fl, res);
+        if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
+                fib_select_multipath(fl, res);
 #endif
 
-	/* create a routing cache entry */
-	err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
-	if (err)
-		return err;
-
-	/* put it into the cache */
-	hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5));
-	return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);	
+        /* create a routing cache entry */
+        err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
+        if (err)
+                return err;
+
+        /* put it into the cache */
+        hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5));
+        return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);   
 }
 
 static inline int ip_mkroute_input(struct sk_buff *skb, 
-				   struct fib_result* res, 
-				   const struct flowi *fl,
-				   struct in_device *in_dev,
-				   u32 daddr, u32 saddr, u32 tos)
+                                   struct fib_result* res, 
+                                   const struct flowi *fl,
+                                   struct in_device *in_dev,
+                                   u32 daddr, u32 saddr, u32 tos)
 {
 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
-	struct rtable* rth = NULL, *rtres;
-	unsigned char hop, hopcount;
-	int err = -EINVAL;
-	unsigned int hash;
-
-	if (res->fi)
-		hopcount = res->fi->fib_nhs;
-	else
-		hopcount = 1;
-
-	/* distinguish between multipath and singlepath */
-	if (hopcount < 2)
-		return ip_mkroute_input_def(skb, res, fl, in_dev, daddr,
-					    saddr, tos);
-	
-	/* add all alternatives to the routing cache */
-	for (hop = 0; hop < hopcount; hop++) {
-		res->nh_sel = hop;
-
-		/* put reference to previous result */
-		if (hop)
-			ip_rt_put(rtres);
-
-		/* create a routing cache entry */
-		err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos,
-				      &rth);
-		if (err)
-			return err;
-
-		/* put it into the cache */
-		hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5));
-		err = rt_intern_hash(hash, rth, &rtres);
-		if (err)
-			return err;
-
-		/* forward hop information to multipath impl. */
-		multipath_set_nhinfo(rth,
-				     FIB_RES_NETWORK(*res),
-				     FIB_RES_NETMASK(*res),
-				     res->prefixlen,
-				     &FIB_RES_NH(*res));
-	}
-	skb->dst = &rtres->u.dst;
-	return err;
+        struct rtable* rth = NULL, *rtres;
+        unsigned char hop, hopcount;
+        int err = -EINVAL;
+        unsigned int hash;
+
+        if (res->fi)
+                hopcount = res->fi->fib_nhs;
+        else
+                hopcount = 1;
+
+        /* distinguish between multipath and singlepath */
+        if (hopcount < 2)
+                return ip_mkroute_input_def(skb, res, fl, in_dev, daddr,
+                                            saddr, tos);
+        
+        /* add all alternatives to the routing cache */
+        for (hop = 0; hop < hopcount; hop++) {
+                res->nh_sel = hop;
+
+                /* put reference to previous result */
+                if (hop)
+                        ip_rt_put(rtres);
+
+                /* create a routing cache entry */
+                err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos,
+                                      &rth);
+                if (err)
+                        return err;
+
+                /* put it into the cache */
+                hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5));
+                err = rt_intern_hash(hash, rth, &rtres);
+                if (err)
+                        return err;
+
+                /* forward hop information to multipath impl. */
+                multipath_set_nhinfo(rth,
+                                     FIB_RES_NETWORK(*res),
+                                     FIB_RES_NETMASK(*res),
+                                     res->prefixlen,
+                                     &FIB_RES_NH(*res));
+        }
+        skb->dst = &rtres->u.dst;
+        return err;
 #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED  */
-	return ip_mkroute_input_def(skb, res, fl, in_dev, daddr, saddr, tos);
+        return ip_mkroute_input_def(skb, res, fl, in_dev, daddr, saddr, tos);
 #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED  */
 }
 
 
 /*
- *	NOTE. We drop all the packets that has local source
- *	addresses, because every properly looped back packet
- *	must have correct destination already attached by output routine.
+ *      NOTE. We drop all the packets that has local source
+ *      addresses, because every properly looped back packet
+ *      must have correct destination already attached by output routine.
  *
- *	Such approach solves two big problems:
- *	1. Not simplex devices are handled properly.
- *	2. IP spoofing attempts are filtered with 100% of guarantee.
+ *      Such approach solves two big problems:
+ *      1. Not simplex devices are handled properly.
+ *      2. IP spoofing attempts are filtered with 100% of guarantee.
  */
 
 static int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
-			       u8 tos, struct net_device *dev)
+                               u8 tos, struct net_device *dev)
 {
-	struct fib_result res;
-	struct in_device *in_dev = in_dev_get(dev);
-	struct flowi fl = { .nl_u = { .ip4_u =
-				      { .daddr = daddr,
-					.saddr = saddr,
-					.tos = tos,
-					.scope = RT_SCOPE_UNIVERSE,
+        struct fib_result res;
+        struct in_device *in_dev = in_dev_get(dev);
+        struct flowi fl = { .nl_u = { .ip4_u =
+                                      { .daddr = daddr,
+                                        .saddr = saddr,
+                                        .tos = tos,
+                                        .scope = RT_SCOPE_UNIVERSE,
 #ifdef CONFIG_IP_ROUTE_FWMARK
-					.fwmark = skb->nfmark
+                                        .fwmark = skb->nfmark
 #endif
-				      } },
-			    .iif = dev->ifindex };
-	unsigned	flags = 0;
-	u32		itag = 0;
-	struct rtable * rth;
-	unsigned	hash;
-	u32		spec_dst;
-	int		err = -EINVAL;
-	int		free_res = 0;
-
-	/* IP on this device is disabled. */
-
-	if (!in_dev)
-		goto out;
-
-	/* Check for the most weird martians, which can be not detected
-	   by fib_lookup.
-	 */
-
-	if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr))
-		goto martian_source;
-
-	if (daddr == 0xFFFFFFFF || (saddr == 0 && daddr == 0))
-		goto brd_input;
-
-	/* Accept zero addresses only to limited broadcast;
-	 * I even do not know to fix it or not. Waiting for complains :-)
-	 */
-	if (ZERONET(saddr))
-		goto martian_source;
-
-	if (BADCLASS(daddr) || ZERONET(daddr) || LOOPBACK(daddr))
-		goto martian_destination;
-
-	/*
-	 *	Now we are ready to route packet.
-	 */
-	if ((err = fib_lookup(&fl, &res)) != 0) {
-		if (!IN_DEV_FORWARD(in_dev))
-			goto e_hostunreach;
-		goto no_route;
-	}
-	free_res = 1;
-
-	RT_CACHE_STAT_INC(in_slow_tot);
-
-	if (res.type == RTN_BROADCAST)
-		goto brd_input;
-
-	if (res.type == RTN_LOCAL) {
-		int result;
-		result = fib_validate_source(saddr, daddr, tos,
-					     loopback_dev.ifindex,
-					     dev, &spec_dst, &itag);
-		if (result < 0)
-			goto martian_source;
-		if (result)
-			flags |= RTCF_DIRECTSRC;
-		spec_dst = daddr;
-		goto local_input;
-	}
-
-	if (!IN_DEV_FORWARD(in_dev))
-		goto e_hostunreach;
-	if (res.type != RTN_UNICAST)
-		goto martian_destination;
-
-	err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
-	if (err == -ENOBUFS)
-		goto e_nobufs;
-	if (err == -EINVAL)
-		goto e_inval;
-	
+                                      } },
+                            .iif = dev->ifindex };
+        unsigned        flags = 0;
+        u32             itag = 0;
+        struct rtable * rth;
+        unsigned        hash;
+        u32             spec_dst;
+        int             err = -EINVAL;
+        int             free_res = 0;
+
+        /* IP on this device is disabled. */
+
+        if (!in_dev)
+                goto out;
+
+        /* Check for the most weird martians, which can be not detected
+           by fib_lookup.
+         */
+
+        if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr))
+                goto martian_source;
+
+        if (daddr == 0xFFFFFFFF || (saddr == 0 && daddr == 0))
+                goto brd_input;
+
+        /* Accept zero addresses only to limited broadcast;
+         * I even do not know to fix it or not. Waiting for complains :-)
+         */
+        if (ZERONET(saddr))
+                goto martian_source;
+
+        if (BADCLASS(daddr) || ZERONET(daddr) || LOOPBACK(daddr))
+                goto martian_destination;
+
+        /*
+         *      Now we are ready to route packet.
+         */
+        if ((err = fib_lookup(&fl, &res)) != 0) {
+                if (!IN_DEV_FORWARD(in_dev))
+                        goto e_hostunreach;
+                goto no_route;
+        }
+        free_res = 1;
+
+        RT_CACHE_STAT_INC(in_slow_tot);
+
+        if (res.type == RTN_BROADCAST)
+                goto brd_input;
+
+        if (res.type == RTN_LOCAL) {
+                int result;
+                result = fib_validate_source(saddr, daddr, tos,
+                                             loopback_dev.ifindex,
+                                             dev, &spec_dst, &itag);
+                if (result < 0)
+                        goto martian_source;
+                if (result)
+                        flags |= RTCF_DIRECTSRC;
+                spec_dst = daddr;
+                goto local_input;
+        }
+
+        if (!IN_DEV_FORWARD(in_dev))
+                goto e_hostunreach;
+        if (res.type != RTN_UNICAST)
+                goto martian_destination;
+
+        err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
+        if (err == -ENOBUFS)
+                goto e_nobufs;
+        if (err == -EINVAL)
+                goto e_inval;
+        
 done:
-	in_dev_put(in_dev);
-	if (free_res)
-		fib_res_put(&res);
-out:	return err;
+        in_dev_put(in_dev);
+        if (free_res)
+                fib_res_put(&res);
+out:    return err;
 
 brd_input:
-	if (skb->protocol != htons(ETH_P_IP))
-		goto e_inval;
+        if (skb->protocol != htons(ETH_P_IP))
+                goto e_inval;
 
-	if (ZERONET(saddr))
-		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
-	else {
-		err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
-					  &itag);
-		if (err < 0)
-			goto martian_source;
-		if (err)
-			flags |= RTCF_DIRECTSRC;
-	}
-	flags |= RTCF_BROADCAST;
-	res.type = RTN_BROADCAST;
-	RT_CACHE_STAT_INC(in_brd);
+        if (ZERONET(saddr))
+                spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
+        else {
+                err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
+                                          &itag);
+                if (err < 0)
+                        goto martian_source;
+                if (err)
+                        flags |= RTCF_DIRECTSRC;
+        }
+        flags |= RTCF_BROADCAST;
+        res.type = RTN_BROADCAST;
+        RT_CACHE_STAT_INC(in_brd);
 
 local_input:
-	rth = dst_alloc(&ipv4_dst_ops);
-	if (!rth)
-		goto e_nobufs;
-
-	rth->u.dst.output= ip_rt_bug;
-
-	atomic_set(&rth->u.dst.__refcnt, 1);
-	rth->u.dst.flags= DST_HOST;
-	if (in_dev->cnf.no_policy)
-		rth->u.dst.flags |= DST_NOPOLICY;
-	rth->fl.fl4_dst	= daddr;
-	rth->rt_dst	= daddr;
-	rth->fl.fl4_tos	= tos;
+        rth = dst_alloc(&ipv4_dst_ops);
+        if (!rth)
+                goto e_nobufs;
+
+        rth->u.dst.output= ip_rt_bug;
+
+        atomic_set(&rth->u.dst.__refcnt, 1);
+        rth->u.dst.flags= DST_HOST;
+        if (in_dev->cnf.no_policy)
+                rth->u.dst.flags |= DST_NOPOLICY;
+        rth->fl.fl4_dst = daddr;
+        rth->rt_dst     = daddr;
+        rth->fl.fl4_tos = tos;
 #ifdef CONFIG_IP_ROUTE_FWMARK
-	rth->fl.fl4_fwmark= skb->nfmark;
+        rth->fl.fl4_fwmark= skb->nfmark;
 #endif
-	rth->fl.fl4_src	= saddr;
-	rth->rt_src	= saddr;
+        rth->fl.fl4_src = saddr;
+        rth->rt_src     = saddr;
 #ifdef CONFIG_NET_CLS_ROUTE
-	rth->u.dst.tclassid = itag;
+        rth->u.dst.tclassid = itag;
 #endif
-	rth->rt_iif	=
-	rth->fl.iif	= dev->ifindex;
-	rth->u.dst.dev	= &loopback_dev;
-	dev_hold(rth->u.dst.dev);
-	rth->idev	= in_dev_get(rth->u.dst.dev);
-	rth->rt_gateway	= daddr;
-	rth->rt_spec_dst= spec_dst;
-	rth->u.dst.input= ip_local_deliver;
-	rth->rt_flags 	= flags|RTCF_LOCAL;
-	if (res.type == RTN_UNREACHABLE) {
-		rth->u.dst.input= ip_error;
-		rth->u.dst.error= -err;
-		rth->rt_flags 	&= ~RTCF_LOCAL;
-	}
-	rth->rt_type	= res.type;
-	hash = rt_hash_code(daddr, saddr ^ (fl.iif << 5));
-	err = rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
-	goto done;
+        rth->rt_iif     =
+        rth->fl.iif     = dev->ifindex;
+        rth->u.dst.dev  = &loopback_dev;
+        dev_hold(rth->u.dst.dev);
+        rth->idev       = in_dev_get(rth->u.dst.dev);
+        rth->rt_gateway = daddr;
+        rth->rt_spec_dst= spec_dst;
+        rth->u.dst.input= ip_local_deliver;
+        rth->rt_flags   = flags|RTCF_LOCAL;
+        if (res.type == RTN_UNREACHABLE) {
+                rth->u.dst.input= ip_error;
+                rth->u.dst.error= -err;
+                rth->rt_flags   &= ~RTCF_LOCAL;
+        }
+        rth->rt_type    = res.type;
+        hash = rt_hash_code(daddr, saddr ^ (fl.iif << 5));
+        err = rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
+        goto done;
 
 no_route:
-	RT_CACHE_STAT_INC(in_no_route);
-	spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
-	res.type = RTN_UNREACHABLE;
-	goto local_input;
-
-	/*
-	 *	Do not cache martian addresses: they should be logged (RFC1812)
-	 */
+        RT_CACHE_STAT_INC(in_no_route);
+        spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
+        res.type = RTN_UNREACHABLE;
+        goto local_input;
+
+        /*
+         *      Do not cache martian addresses: they should be logged (RFC1812)
+         */
 martian_destination:
-	RT_CACHE_STAT_INC(in_martian_dst);
+        RT_CACHE_STAT_INC(in_martian_dst);
 #ifdef CONFIG_IP_ROUTE_VERBOSE
-	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
-		printk(KERN_WARNING "martian destination %u.%u.%u.%u from "
-			"%u.%u.%u.%u, dev %s\n",
-			NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
+        if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
+                printk(KERN_WARNING "martian destination %u.%u.%u.%u from "
+                        "%u.%u.%u.%u, dev %s\n",
+                        NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
 #endif
 
 e_hostunreach:
@@ -2075,292 +2080,292 @@
         goto done;
 
 e_inval:
-	err = -EINVAL;
-	goto done;
+        err = -EINVAL;
+        goto done;
 
 e_nobufs:
-	err = -ENOBUFS;
-	goto done;
+        err = -ENOBUFS;
+        goto done;
 
 martian_source:
-	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
-	goto e_inval;
+        ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
+        goto e_inval;
 }
 
 int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
-		   u8 tos, struct net_device *dev)
+                   u8 tos, struct net_device *dev)
 {
-	struct rtable * rth;
-	unsigned	hash;
-	int iif = dev->ifindex;
-
-	tos &= IPTOS_RT_MASK;
-	hash = rt_hash_code(daddr, saddr ^ (iif << 5));
-
-	rcu_read_lock();
-	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-	     rth = rcu_dereference(rth->u.rt_next)) {
-		if (rth->fl.fl4_dst == daddr &&
-		    rth->fl.fl4_src == saddr &&
-		    rth->fl.iif == iif &&
-		    rth->fl.oif == 0 &&
+        struct rtable * rth;
+        unsigned        hash;
+        int iif = dev->ifindex;
+
+        tos &= IPTOS_RT_MASK;
+        hash = rt_hash_code(daddr, saddr ^ (iif << 5));
+
+        rcu_read_lock();
+        for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
+             rth = rcu_dereference(rth->u.rt_next)) {
+                if (rth->fl.fl4_dst == daddr &&
+                    rth->fl.fl4_src == saddr &&
+                    rth->fl.iif == iif &&
+                    rth->fl.oif == 0 &&
 #ifdef CONFIG_IP_ROUTE_FWMARK
-		    rth->fl.fl4_fwmark == skb->nfmark &&
+                    rth->fl.fl4_fwmark == skb->nfmark &&
 #endif
-		    rth->fl.fl4_tos == tos) {
-			rth->u.dst.lastuse = jiffies;
-			dst_hold(&rth->u.dst);
-			rth->u.dst.__use++;
-			RT_CACHE_STAT_INC(in_hit);
-			rcu_read_unlock();
-			skb->dst = (struct dst_entry*)rth;
-			return 0;
-		}
-		RT_CACHE_STAT_INC(in_hlist_search);
-	}
-	rcu_read_unlock();
-
-	/* Multicast recognition logic is moved from route cache to here.
-	   The problem was that too many Ethernet cards have broken/missing
-	   hardware multicast filters :-( As result the host on multicasting
-	   network acquires a lot of useless route cache entries, sort of
-	   SDR messages from all the world. Now we try to get rid of them.
-	   Really, provided software IP multicast filter is organized
-	   reasonably (at least, hashed), it does not result in a slowdown
-	   comparing with route cache reject entries.
-	   Note, that multicast routers are not affected, because
-	   route cache entry is created eventually.
-	 */
-	if (MULTICAST(daddr)) {
-		struct in_device *in_dev;
-
-		rcu_read_lock();
-		if ((in_dev = __in_dev_get_rcu(dev)) != NULL) {
-			int our = ip_check_mc(in_dev, daddr, saddr,
-				skb->nh.iph->protocol);
-			if (our
+                    rth->fl.fl4_tos == tos) {
+                        rth->u.dst.lastuse = jiffies;
+                        dst_hold(&rth->u.dst);
+                        rth->u.dst.__use++;
+                        RT_CACHE_STAT_INC(in_hit);
+                        rcu_read_unlock();
+                        skb->dst = (struct dst_entry*)rth;
+                        return 0;
+                }
+                RT_CACHE_STAT_INC(in_hlist_search);
+        }
+        rcu_read_unlock();
+
+        /* Multicast recognition logic is moved from route cache to here.
+           The problem was that too many Ethernet cards have broken/missing
+           hardware multicast filters :-( As result the host on multicasting
+           network acquires a lot of useless route cache entries, sort of
+           SDR messages from all the world. Now we try to get rid of them.
+           Really, provided software IP multicast filter is organized
+           reasonably (at least, hashed), it does not result in a slowdown
+           comparing with route cache reject entries.
+           Note, that multicast routers are not affected, because
+           route cache entry is created eventually.
+         */
+        if (MULTICAST(daddr)) {
+                struct in_device *in_dev;
+
+                rcu_read_lock();
+                if ((in_dev = __in_dev_get_rcu(dev)) != NULL) {
+                        int our = ip_check_mc(in_dev, daddr, saddr,
+                                skb->nh.iph->protocol);
+                        if (our
 #ifdef CONFIG_IP_MROUTE
-			    || (!LOCAL_MCAST(daddr) && IN_DEV_MFORWARD(in_dev))
+                            || (!LOCAL_MCAST(daddr) && IN_DEV_MFORWARD(in_dev))
 #endif
-			    ) {
-				rcu_read_unlock();
-				return ip_route_input_mc(skb, daddr, saddr,
-							 tos, dev, our);
-			}
-		}
-		rcu_read_unlock();
-		return -EINVAL;
-	}
-	return ip_route_input_slow(skb, daddr, saddr, tos, dev);
+                            ) {
+                                rcu_read_unlock();
+                                return ip_route_input_mc(skb, daddr, saddr,
+                                                         tos, dev, our);
+                        }
+                }
+                rcu_read_unlock();
+                return -EINVAL;
+        }
+        return ip_route_input_slow(skb, daddr, saddr, tos, dev);
 }
 
 static inline int __mkroute_output(struct rtable **result,
-				   struct fib_result* res, 
-				   const struct flowi *fl,
-				   const struct flowi *oldflp, 
-				   struct net_device *dev_out, 
-				   unsigned flags) 
-{
-	struct rtable *rth;
-	struct in_device *in_dev;
-	u32 tos = RT_FL_TOS(oldflp);
-	int err = 0;
-
-	if (LOOPBACK(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
-		return -EINVAL;
-
-	if (fl->fl4_dst == 0xFFFFFFFF)
-		res->type = RTN_BROADCAST;
-	else if (MULTICAST(fl->fl4_dst))
-		res->type = RTN_MULTICAST;
-	else if (BADCLASS(fl->fl4_dst) || ZERONET(fl->fl4_dst))
-		return -EINVAL;
-
-	if (dev_out->flags & IFF_LOOPBACK)
-		flags |= RTCF_LOCAL;
-
-	/* get work reference to inet device */
-	in_dev = in_dev_get(dev_out);
-	if (!in_dev)
-		return -EINVAL;
-
-	if (res->type == RTN_BROADCAST) {
-		flags |= RTCF_BROADCAST | RTCF_LOCAL;
-		if (res->fi) {
-			fib_info_put(res->fi);
-			res->fi = NULL;
-		}
-	} else if (res->type == RTN_MULTICAST) {
-		flags |= RTCF_MULTICAST|RTCF_LOCAL;
-		if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src, 
-				 oldflp->proto))
-			flags &= ~RTCF_LOCAL;
-		/* If multicast route do not exist use
-		   default one, but do not gateway in this case.
-		   Yes, it is hack.
-		 */
-		if (res->fi && res->prefixlen < 4) {
-			fib_info_put(res->fi);
-			res->fi = NULL;
-		}
-	}
-
-
-	rth = dst_alloc(&ipv4_dst_ops);
-	if (!rth) {
-		err = -ENOBUFS;
-		goto cleanup;
-	}		
+                                   struct fib_result* res, 
+                                   const struct flowi *fl,
+                                   const struct flowi *oldflp, 
+                                   struct net_device *dev_out, 
+                                   unsigned flags) 
+{
+        struct rtable *rth;
+        struct in_device *in_dev;
+        u32 tos = RT_FL_TOS(oldflp);
+        int err = 0;
+
+        if (LOOPBACK(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
+                return -EINVAL;
+
+        if (fl->fl4_dst == 0xFFFFFFFF)
+                res->type = RTN_BROADCAST;
+        else if (MULTICAST(fl->fl4_dst))
+                res->type = RTN_MULTICAST;
+        else if (BADCLASS(fl->fl4_dst) || ZERONET(fl->fl4_dst))
+                return -EINVAL;
+
+        if (dev_out->flags & IFF_LOOPBACK)
+                flags |= RTCF_LOCAL;
+
+        /* get work reference to inet device */
+        in_dev = in_dev_get(dev_out);
+        if (!in_dev)
+                return -EINVAL;
+
+        if (res->type == RTN_BROADCAST) {
+                flags |= RTCF_BROADCAST | RTCF_LOCAL;
+                if (res->fi) {
+                        fib_info_put(res->fi);
+                        res->fi = NULL;
+                }
+        } else if (res->type == RTN_MULTICAST) {
+                flags |= RTCF_MULTICAST|RTCF_LOCAL;
+                if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src, 
+                                 oldflp->proto))
+                        flags &= ~RTCF_LOCAL;
+                /* If multicast route do not exist use
+                   default one, but do not gateway in this case.
+                   Yes, it is hack.
+                 */
+                if (res->fi && res->prefixlen < 4) {
+                        fib_info_put(res->fi);
+                        res->fi = NULL;
+                }
+        }
+
+
+        rth = dst_alloc(&ipv4_dst_ops);
+        if (!rth) {
+                err = -ENOBUFS;
+                goto cleanup;
+        }               
 
-	atomic_set(&rth->u.dst.__refcnt, 1);
-	rth->u.dst.flags= DST_HOST;
+        atomic_set(&rth->u.dst.__refcnt, 1);
+        rth->u.dst.flags= DST_HOST;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
-	if (res->fi) {
-		rth->rt_multipath_alg = res->fi->fib_mp_alg;
-		if (res->fi->fib_nhs > 1)
-			rth->u.dst.flags |= DST_BALANCED;
-	}
-#endif
-	if (in_dev->cnf.no_xfrm)
-		rth->u.dst.flags |= DST_NOXFRM;
-	if (in_dev->cnf.no_policy)
-		rth->u.dst.flags |= DST_NOPOLICY;
-
-	rth->fl.fl4_dst	= oldflp->fl4_dst;
-	rth->fl.fl4_tos	= tos;
-	rth->fl.fl4_src	= oldflp->fl4_src;
-	rth->fl.oif	= oldflp->oif;
+        if (res->fi) {
+                rth->rt_multipath_alg = res->fi->fib_mp_alg;
+                if (res->fi->fib_nhs > 1)
+                        rth->u.dst.flags |= DST_BALANCED;
+        }
+#endif
+        if (in_dev->cnf.no_xfrm)
+                rth->u.dst.flags |= DST_NOXFRM;
+        if (in_dev->cnf.no_policy)
+                rth->u.dst.flags |= DST_NOPOLICY;
+
+        rth->fl.fl4_dst = oldflp->fl4_dst;
+        rth->fl.fl4_tos = tos;
+        rth->fl.fl4_src = oldflp->fl4_src;
+        rth->fl.oif     = oldflp->oif;
 #ifdef CONFIG_IP_ROUTE_FWMARK
-	rth->fl.fl4_fwmark= oldflp->fl4_fwmark;
+        rth->fl.fl4_fwmark= oldflp->fl4_fwmark;
 #endif
-	rth->rt_dst	= fl->fl4_dst;
-	rth->rt_src	= fl->fl4_src;
-	rth->rt_iif	= oldflp->oif ? : dev_out->ifindex;
-	/* get references to the devices that are to be hold by the routing 
-	   cache entry */
-	rth->u.dst.dev	= dev_out;
-	dev_hold(dev_out);
-	rth->idev	= in_dev_get(dev_out);
-	rth->rt_gateway = fl->fl4_dst;
-	rth->rt_spec_dst= fl->fl4_src;
-
-	rth->u.dst.output=ip_output;
-
-	RT_CACHE_STAT_INC(out_slow_tot);
-
-	if (flags & RTCF_LOCAL) {
-		rth->u.dst.input = ip_local_deliver;
-		rth->rt_spec_dst = fl->fl4_dst;
-	}
-	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
-		rth->rt_spec_dst = fl->fl4_src;
-		if (flags & RTCF_LOCAL && 
-		    !(dev_out->flags & IFF_LOOPBACK)) {
-			rth->u.dst.output = ip_mc_output;
-			RT_CACHE_STAT_INC(out_slow_mc);
-		}
+        rth->rt_dst     = fl->fl4_dst;
+        rth->rt_src     = fl->fl4_src;
+        rth->rt_iif     = oldflp->oif ? : dev_out->ifindex;
+        /* get references to the devices that are to be hold by the routing 
+           cache entry */
+        rth->u.dst.dev  = dev_out;
+        dev_hold(dev_out);
+        rth->idev       = in_dev_get(dev_out);
+        rth->rt_gateway = fl->fl4_dst;
+        rth->rt_spec_dst= fl->fl4_src;
+
+        rth->u.dst.output=ip_output;
+
+        RT_CACHE_STAT_INC(out_slow_tot);
+
+        if (flags & RTCF_LOCAL) {
+                rth->u.dst.input = ip_local_deliver;
+                rth->rt_spec_dst = fl->fl4_dst;
+        }
+        if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
+                rth->rt_spec_dst = fl->fl4_src;
+                if (flags & RTCF_LOCAL && 
+                    !(dev_out->flags & IFF_LOOPBACK)) {
+                        rth->u.dst.output = ip_mc_output;
+                        RT_CACHE_STAT_INC(out_slow_mc);
+                }
 #ifdef CONFIG_IP_MROUTE
-		if (res->type == RTN_MULTICAST) {
-			if (IN_DEV_MFORWARD(in_dev) &&
-			    !LOCAL_MCAST(oldflp->fl4_dst)) {
-				rth->u.dst.input = ip_mr_input;
-				rth->u.dst.output = ip_mc_output;
-			}
-		}
+                if (res->type == RTN_MULTICAST) {
+                        if (IN_DEV_MFORWARD(in_dev) &&
+                            !LOCAL_MCAST(oldflp->fl4_dst)) {
+                                rth->u.dst.input = ip_mr_input;
+                                rth->u.dst.output = ip_mc_output;
+                        }
+                }
 #endif
-	}
+        }
 
-	rt_set_nexthop(rth, res, 0);
+        rt_set_nexthop(rth, res, 0);
 
-	rth->rt_flags = flags;
+        rth->rt_flags = flags;
 
-	*result = rth;
+        *result = rth;
  cleanup:
-	/* release work reference to inet device */
-	in_dev_put(in_dev);
+        /* release work reference to inet device */
+        in_dev_put(in_dev);
 
-	return err;
+        return err;
 }
 
 static inline int ip_mkroute_output_def(struct rtable **rp,
-					struct fib_result* res,
-					const struct flowi *fl,
-					const struct flowi *oldflp,
-					struct net_device *dev_out,
-					unsigned flags)
-{
-	struct rtable *rth = NULL;
-	int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
-	unsigned hash;
-	if (err == 0) {
-		hash = rt_hash_code(oldflp->fl4_dst, 
-				    oldflp->fl4_src ^ (oldflp->oif << 5));
-		err = rt_intern_hash(hash, rth, rp);
-	}
-	
-	return err;
+                                        struct fib_result* res,
+                                        const struct flowi *fl,
+                                        const struct flowi *oldflp,
+                                        struct net_device *dev_out,
+                                        unsigned flags)
+{
+        struct rtable *rth = NULL;
+        int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
+        unsigned hash;
+        if (err == 0) {
+                hash = rt_hash_code(oldflp->fl4_dst, 
+                                    oldflp->fl4_src ^ (oldflp->oif << 5));
+                err = rt_intern_hash(hash, rth, rp);
+        }
+        
+        return err;
 }
 
 static inline int ip_mkroute_output(struct rtable** rp,
-				    struct fib_result* res,
-				    const struct flowi *fl,
-				    const struct flowi *oldflp,
-				    struct net_device *dev_out,
-				    unsigned flags)
+                                    struct fib_result* res,
+                                    const struct flowi *fl,
+                                    const struct flowi *oldflp,
+                                    struct net_device *dev_out,
+                                    unsigned flags)
 {
 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
-	unsigned char hop;
-	unsigned hash;
-	int err = -EINVAL;
-	struct rtable *rth = NULL;
-
-	if (res->fi && res->fi->fib_nhs > 1) {
-		unsigned char hopcount = res->fi->fib_nhs;
-
-		for (hop = 0; hop < hopcount; hop++) {
-			struct net_device *dev2nexthop;
-
-			res->nh_sel = hop;
-
-			/* hold a work reference to the output device */
-			dev2nexthop = FIB_RES_DEV(*res);
-			dev_hold(dev2nexthop);
-
-			/* put reference to previous result */
-			if (hop)
-				ip_rt_put(*rp);
-
-			err = __mkroute_output(&rth, res, fl, oldflp,
-					       dev2nexthop, flags);
-
-			if (err != 0)
-				goto cleanup;
-
-			hash = rt_hash_code(oldflp->fl4_dst, 
-					    oldflp->fl4_src ^
-					    (oldflp->oif << 5));
-			err = rt_intern_hash(hash, rth, rp);
-
-			/* forward hop information to multipath impl. */
-			multipath_set_nhinfo(rth,
-					     FIB_RES_NETWORK(*res),
-					     FIB_RES_NETMASK(*res),
-					     res->prefixlen,
-					     &FIB_RES_NH(*res));
-		cleanup:
-			/* release work reference to output device */
-			dev_put(dev2nexthop);
-
-			if (err != 0)
-				return err;
-		}
-		return err;
-	} else {
-		return ip_mkroute_output_def(rp, res, fl, oldflp, dev_out,
-					     flags);
-	}
+        unsigned char hop;
+        unsigned hash;
+        int err = -EINVAL;
+        struct rtable *rth = NULL;
+
+        if (res->fi && res->fi->fib_nhs > 1) {
+                unsigned char hopcount = res->fi->fib_nhs;
+
+                for (hop = 0; hop < hopcount; hop++) {
+                        struct net_device *dev2nexthop;
+
+                        res->nh_sel = hop;
+
+                        /* hold a work reference to the output device */
+                        dev2nexthop = FIB_RES_DEV(*res);
+                        dev_hold(dev2nexthop);
+
+                        /* put reference to previous result */
+                        if (hop)
+                                ip_rt_put(*rp);
+
+                        err = __mkroute_output(&rth, res, fl, oldflp,
+                                               dev2nexthop, flags);
+
+                        if (err != 0)
+                                goto cleanup;
+
+                        hash = rt_hash_code(oldflp->fl4_dst, 
+                                            oldflp->fl4_src ^
+                                            (oldflp->oif << 5));
+                        err = rt_intern_hash(hash, rth, rp);
+
+                        /* forward hop information to multipath impl. */
+                        multipath_set_nhinfo(rth,
+                                             FIB_RES_NETWORK(*res),
+                                             FIB_RES_NETMASK(*res),
+                                             res->prefixlen,
+                                             &FIB_RES_NH(*res));
+                cleanup:
+                        /* release work reference to output device */
+                        dev_put(dev2nexthop);
+
+                        if (err != 0)
+                                return err;
+                }
+                return err;
+        } else {
+                return ip_mkroute_output_def(rp, res, fl, oldflp, dev_out,
+                                             flags);
+        }
 #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
-	return ip_mkroute_output_def(rp, res, fl, oldflp, dev_out, flags);
+        return ip_mkroute_output_def(rp, res, fl, oldflp, dev_out, flags);
 #endif
 }
 
@@ -2370,694 +2375,694 @@
 
 static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
 {
-	u32 tos	= RT_FL_TOS(oldflp);
-	struct flowi fl = { .nl_u = { .ip4_u =
-				      { .daddr = oldflp->fl4_dst,
-					.saddr = oldflp->fl4_src,
-					.tos = tos & IPTOS_RT_MASK,
-					.scope = ((tos & RTO_ONLINK) ?
-						  RT_SCOPE_LINK :
-						  RT_SCOPE_UNIVERSE),
+        u32 tos = RT_FL_TOS(oldflp);
+        struct flowi fl = { .nl_u = { .ip4_u =
+                                      { .daddr = oldflp->fl4_dst,
+                                        .saddr = oldflp->fl4_src,
+                                        .tos = tos & IPTOS_RT_MASK,
+                                        .scope = ((tos & RTO_ONLINK) ?
+                                                  RT_SCOPE_LINK :
+                                                  RT_SCOPE_UNIVERSE),
 #ifdef CONFIG_IP_ROUTE_FWMARK
-					.fwmark = oldflp->fl4_fwmark
+                                        .fwmark = oldflp->fl4_fwmark
 #endif
-				      } },
-			    .iif = loopback_dev.ifindex,
-			    .oif = oldflp->oif };
-	struct fib_result res;
-	unsigned flags = 0;
-	struct net_device *dev_out = NULL;
-	int free_res = 0;
-	int err;
+                                      } },
+                            .iif = loopback_dev.ifindex,
+                            .oif = oldflp->oif };
+        struct fib_result res;
+        unsigned flags = 0;
+        struct net_device *dev_out = NULL;
+        int free_res = 0;
+        int err;
 
 
-	res.fi		= NULL;
+        res.fi          = NULL;
 #ifdef CONFIG_IP_MULTIPLE_TABLES
-	res.r		= NULL;
+        res.r           = NULL;
 #endif
 
-	if (oldflp->fl4_src) {
-		err = -EINVAL;
-		if (MULTICAST(oldflp->fl4_src) ||
-		    BADCLASS(oldflp->fl4_src) ||
-		    ZERONET(oldflp->fl4_src))
-			goto out;
-
-		/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
-		dev_out = ip_dev_find(oldflp->fl4_src);
-		if (dev_out == NULL)
-			goto out;
-
-		/* I removed check for oif == dev_out->oif here.
-		   It was wrong for two reasons:
-		   1. ip_dev_find(saddr) can return wrong iface, if saddr is
-		      assigned to multiple interfaces.
-		   2. Moreover, we are allowed to send packets with saddr
-		      of another iface. --ANK
-		 */
-
-		if (oldflp->oif == 0
-		    && (MULTICAST(oldflp->fl4_dst) || oldflp->fl4_dst == 0xFFFFFFFF)) {
-			/* Special hack: user can direct multicasts
-			   and limited broadcast via necessary interface
-			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
-			   This hack is not just for fun, it allows
-			   vic,vat and friends to work.
-			   They bind socket to loopback, set ttl to zero
-			   and expect that it will work.
-			   From the viewpoint of routing cache they are broken,
-			   because we are not allowed to build multicast path
-			   with loopback source addr (look, routing cache
-			   cannot know, that ttl is zero, so that packet
-			   will not leave this host and route is valid).
-			   Luckily, this hack is good workaround.
-			 */
-
-			fl.oif = dev_out->ifindex;
-			goto make_route;
-		}
-		if (dev_out)
-			dev_put(dev_out);
-		dev_out = NULL;
-	}
-
-
-	if (oldflp->oif) {
-		dev_out = dev_get_by_index(oldflp->oif);
-		err = -ENODEV;
-		if (dev_out == NULL)
-			goto out;
-
-		/* RACE: Check return value of inet_select_addr instead. */
-		if (__in_dev_get_rtnl(dev_out) == NULL) {
-			dev_put(dev_out);
-			goto out;	/* Wrong error code */
-		}
-
-		if (LOCAL_MCAST(oldflp->fl4_dst) || oldflp->fl4_dst == 0xFFFFFFFF) {
-			if (!fl.fl4_src)
-				fl.fl4_src = inet_select_addr(dev_out, 0,
-							      RT_SCOPE_LINK);
-			goto make_route;
-		}
-		if (!fl.fl4_src) {
-			if (MULTICAST(oldflp->fl4_dst))
-				fl.fl4_src = inet_select_addr(dev_out, 0,
-							      fl.fl4_scope);
-			else if (!oldflp->fl4_dst)
-				fl.fl4_src = inet_select_addr(dev_out, 0,
-							      RT_SCOPE_HOST);
-		}
-	}
-
-	if (!fl.fl4_dst) {
-		fl.fl4_dst = fl.fl4_src;
-		if (!fl.fl4_dst)
-			fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
-		if (dev_out)
-			dev_put(dev_out);
-		dev_out = &loopback_dev;
-		dev_hold(dev_out);
-		fl.oif = loopback_dev.ifindex;
-		res.type = RTN_LOCAL;
-		flags |= RTCF_LOCAL;
-		goto make_route;
-	}
-
-	if (fib_lookup(&fl, &res)) {
-		res.fi = NULL;
-		if (oldflp->oif) {
-			/* Apparently, routing tables are wrong. Assume,
-			   that the destination is on link.
-
-			   WHY? DW.
-			   Because we are allowed to send to iface
-			   even if it has NO routes and NO assigned
-			   addresses. When oif is specified, routing
-			   tables are looked up with only one purpose:
-			   to catch if destination is gatewayed, rather than
-			   direct. Moreover, if MSG_DONTROUTE is set,
-			   we send packet, ignoring both routing tables
-			   and ifaddr state. --ANK
-
-
-			   We could make it even if oif is unknown,
-			   likely IPv6, but we do not.
-			 */
-
-			if (fl.fl4_src == 0)
-				fl.fl4_src = inet_select_addr(dev_out, 0,
-							      RT_SCOPE_LINK);
-			res.type = RTN_UNICAST;
-			goto make_route;
-		}
-		if (dev_out)
-			dev_put(dev_out);
-		err = -ENETUNREACH;
-		goto out;
-	}
-	free_res = 1;
-
-	if (res.type == RTN_LOCAL) {
-		if (!fl.fl4_src)
-			fl.fl4_src = fl.fl4_dst;
-		if (dev_out)
-			dev_put(dev_out);
-		dev_out = &loopback_dev;
-		dev_hold(dev_out);
-		fl.oif = dev_out->ifindex;
-		if (res.fi)
-			fib_info_put(res.fi);
-		res.fi = NULL;
-		flags |= RTCF_LOCAL;
-		goto make_route;
-	}
+        if (oldflp->fl4_src) {
+                err = -EINVAL;
+                if (MULTICAST(oldflp->fl4_src) ||
+                    BADCLASS(oldflp->fl4_src) ||
+                    ZERONET(oldflp->fl4_src))
+                        goto out;
+
+                /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
+                dev_out = ip_dev_find(oldflp->fl4_src);
+                if (dev_out == NULL)
+                        goto out;
+
+                /* I removed check for oif == dev_out->oif here.
+                   It was wrong for two reasons:
+                   1. ip_dev_find(saddr) can return wrong iface, if saddr is
+                      assigned to multiple interfaces.
+                   2. Moreover, we are allowed to send packets with saddr
+                      of another iface. --ANK
+                 */
+
+                if (oldflp->oif == 0
+                    && (MULTICAST(oldflp->fl4_dst) || oldflp->fl4_dst == 0xFFFFFFFF)) {
+                        /* Special hack: user can direct multicasts
+                           and limited broadcast via necessary interface
+                           without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
+                           This hack is not just for fun, it allows
+                           vic,vat and friends to work.
+                           They bind socket to loopback, set ttl to zero
+                           and expect that it will work.
+                           From the viewpoint of routing cache they are broken,
+                           because we are not allowed to build multicast path
+                           with loopback source addr (look, routing cache
+                           cannot know, that ttl is zero, so that packet
+                           will not leave this host and route is valid).
+                           Luckily, this hack is good workaround.
+                         */
+
+                        fl.oif = dev_out->ifindex;
+                        goto make_route;
+                }
+                if (dev_out)
+                        dev_put(dev_out);
+                dev_out = NULL;
+        }
+
+
+        if (oldflp->oif) {
+                dev_out = dev_get_by_index(oldflp->oif);
+                err = -ENODEV;
+                if (dev_out == NULL)
+                        goto out;
+
+                /* RACE: Check return value of inet_select_addr instead. */
+                if (__in_dev_get_rtnl(dev_out) == NULL) {
+                        dev_put(dev_out);
+                        goto out;       /* Wrong error code */
+                }
+
+                if (LOCAL_MCAST(oldflp->fl4_dst) || oldflp->fl4_dst == 0xFFFFFFFF) {
+                        if (!fl.fl4_src)
+                                fl.fl4_src = inet_select_addr(dev_out, 0,
+                                                              RT_SCOPE_LINK);
+                        goto make_route;
+                }
+                if (!fl.fl4_src) {
+                        if (MULTICAST(oldflp->fl4_dst))
+                                fl.fl4_src = inet_select_addr(dev_out, 0,
+                                                              fl.fl4_scope);
+                        else if (!oldflp->fl4_dst)
+                                fl.fl4_src = inet_select_addr(dev_out, 0,
+                                                              RT_SCOPE_HOST);
+                }
+        }
+
+        if (!fl.fl4_dst) {
+                fl.fl4_dst = fl.fl4_src;
+                if (!fl.fl4_dst)
+                        fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
+                if (dev_out)
+                        dev_put(dev_out);
+                dev_out = &loopback_dev;
+                dev_hold(dev_out);
+                fl.oif = loopback_dev.ifindex;
+                res.type = RTN_LOCAL;
+                flags |= RTCF_LOCAL;
+                goto make_route;
+        }
+
+        if (fib_lookup(&fl, &res)) {
+                res.fi = NULL;
+                if (oldflp->oif) {
+                        /* Apparently, routing tables are wrong. Assume,
+                           that the destination is on link.
+
+                           WHY? DW.
+                           Because we are allowed to send to iface
+                           even if it has NO routes and NO assigned
+                           addresses. When oif is specified, routing
+                           tables are looked up with only one purpose:
+                           to catch if destination is gatewayed, rather than
+                           direct. Moreover, if MSG_DONTROUTE is set,
+                           we send packet, ignoring both routing tables
+                           and ifaddr state. --ANK
+
+
+                           We could make it even if oif is unknown,
+                           likely IPv6, but we do not.
+                         */
+
+                        if (fl.fl4_src == 0)
+                                fl.fl4_src = inet_select_addr(dev_out, 0,
+                                                              RT_SCOPE_LINK);
+                        res.type = RTN_UNICAST;
+                        goto make_route;
+                }
+                if (dev_out)
+                        dev_put(dev_out);
+                err = -ENETUNREACH;
+                goto out;
+        }
+        free_res = 1;
+
+        if (res.type == RTN_LOCAL) {
+                if (!fl.fl4_src)
+                        fl.fl4_src = fl.fl4_dst;
+                if (dev_out)
+                        dev_put(dev_out);
+                dev_out = &loopback_dev;
+                dev_hold(dev_out);
+                fl.oif = dev_out->ifindex;
+                if (res.fi)
+                        fib_info_put(res.fi);
+                res.fi = NULL;
+                flags |= RTCF_LOCAL;
+                goto make_route;
+        }
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-	if (res.fi->fib_nhs > 1 && fl.oif == 0)
-		fib_select_multipath(&fl, &res);
-	else
-#endif
-	if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
-		fib_select_default(&fl, &res);
-
-	if (!fl.fl4_src)
-		fl.fl4_src = FIB_RES_PREFSRC(res);
-
-	if (dev_out)
-		dev_put(dev_out);
-	dev_out = FIB_RES_DEV(res);
-	dev_hold(dev_out);
-	fl.oif = dev_out->ifindex;
+        if (res.fi->fib_nhs > 1 && fl.oif == 0)
+                fib_select_multipath(&fl, &res);
+        else
+#endif
+        if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
+                fib_select_default(&fl, &res);
+
+        if (!fl.fl4_src)
+                fl.fl4_src = FIB_RES_PREFSRC(res);
+
+        if (dev_out)
+                dev_put(dev_out);
+        dev_out = FIB_RES_DEV(res);
+        dev_hold(dev_out);
+        fl.oif = dev_out->ifindex;
 
 
 make_route:
-	err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);
+        err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);
 
 
-	if (free_res)
-		fib_res_put(&res);
-	if (dev_out)
-		dev_put(dev_out);
-out:	return err;
+        if (free_res)
+                fib_res_put(&res);
+        if (dev_out)
+                dev_put(dev_out);
+out:    return err;
 }
 
 int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
 {
-	unsigned hash;
-	struct rtable *rth;
+        unsigned hash;
+        struct rtable *rth;
 
-	hash = rt_hash_code(flp->fl4_dst, flp->fl4_src ^ (flp->oif << 5));
+        hash = rt_hash_code(flp->fl4_dst, flp->fl4_src ^ (flp->oif << 5));
 
-	rcu_read_lock_bh();
-	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-		rth = rcu_dereference(rth->u.rt_next)) {
-		if (rth->fl.fl4_dst == flp->fl4_dst &&
-		    rth->fl.fl4_src == flp->fl4_src &&
-		    rth->fl.iif == 0 &&
-		    rth->fl.oif == flp->oif &&
+        rcu_read_lock_bh();
+        for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
+                rth = rcu_dereference(rth->u.rt_next)) {
+                if (rth->fl.fl4_dst == flp->fl4_dst &&
+                    rth->fl.fl4_src == flp->fl4_src &&
+                    rth->fl.iif == 0 &&
+                    rth->fl.oif == flp->oif &&
 #ifdef CONFIG_IP_ROUTE_FWMARK
-		    rth->fl.fl4_fwmark == flp->fl4_fwmark &&
+                    rth->fl.fl4_fwmark == flp->fl4_fwmark &&
 #endif
-		    !((rth->fl.fl4_tos ^ flp->fl4_tos) &
-			    (IPTOS_RT_MASK | RTO_ONLINK))) {
+                    !((rth->fl.fl4_tos ^ flp->fl4_tos) &
+                            (IPTOS_RT_MASK | RTO_ONLINK))) {
 
-			/* check for multipath routes and choose one if
-			 * necessary
-			 */
-			if (multipath_select_route(flp, rth, rp)) {
-				dst_hold(&(*rp)->u.dst);
-				RT_CACHE_STAT_INC(out_hit);
-				rcu_read_unlock_bh();
-				return 0;
-			}
-
-			rth->u.dst.lastuse = jiffies;
-			dst_hold(&rth->u.dst);
-			rth->u.dst.__use++;
-			RT_CACHE_STAT_INC(out_hit);
-			rcu_read_unlock_bh();
-			*rp = rth;
-			return 0;
-		}
-		RT_CACHE_STAT_INC(out_hlist_search);
-	}
-	rcu_read_unlock_bh();
+                        /* check for multipath routes and choose one if
+                         * necessary
+                         */
+                        if (multipath_select_route(flp, rth, rp)) {
+                                dst_hold(&(*rp)->u.dst);
+                                RT_CACHE_STAT_INC(out_hit);
+                                rcu_read_unlock_bh();
+                                return 0;
+                        }
+
+                        rth->u.dst.lastuse = jiffies;
+                        dst_hold(&rth->u.dst);
+                        rth->u.dst.__use++;
+                        RT_CACHE_STAT_INC(out_hit);
+                        rcu_read_unlock_bh();
+                        *rp = rth;
+                        return 0;
+                }
+                RT_CACHE_STAT_INC(out_hlist_search);
+        }
+        rcu_read_unlock_bh();
 
-	return ip_route_output_slow(rp, flp);
+        return ip_route_output_slow(rp, flp);
 }
 
 EXPORT_SYMBOL_GPL(__ip_route_output_key);
 
 int ip_route_output_flow(struct rtable **rp, struct flowi *flp, struct sock *sk, int flags)
 {
-	int err;
+        int err;
 
-	if ((err = __ip_route_output_key(rp, flp)) != 0)
-		return err;
+        if ((err = __ip_route_output_key(rp, flp)) != 0)
+                return err;
 
-	if (flp->proto) {
-		if (!flp->fl4_src)
-			flp->fl4_src = (*rp)->rt_src;
-		if (!flp->fl4_dst)
-			flp->fl4_dst = (*rp)->rt_dst;
-		return xfrm_lookup((struct dst_entry **)rp, flp, sk, flags);
-	}
+        if (flp->proto) {
+                if (!flp->fl4_src)
+                        flp->fl4_src = (*rp)->rt_src;
+                if (!flp->fl4_dst)
+                        flp->fl4_dst = (*rp)->rt_dst;
+                return xfrm_lookup((struct dst_entry **)rp, flp, sk, flags);
+        }
 
-	return 0;
+        return 0;
 }
 
 EXPORT_SYMBOL_GPL(ip_route_output_flow);
 
 int ip_route_output_key(struct rtable **rp, struct flowi *flp)
 {
-	return ip_route_output_flow(rp, flp, NULL, 0);
+        return ip_route_output_flow(rp, flp, NULL, 0);
 }
 
 static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
-			int nowait, unsigned int flags)
+                        int nowait, unsigned int flags)
 {
-	struct rtable *rt = (struct rtable*)skb->dst;
-	struct rtmsg *r;
-	struct nlmsghdr  *nlh;
-	unsigned char	 *b = skb->tail;
-	struct rta_cacheinfo ci;
+        struct rtable *rt = (struct rtable*)skb->dst;
+        struct rtmsg *r;
+        struct nlmsghdr  *nlh;
+        unsigned char    *b = skb->tail;
+        struct rta_cacheinfo ci;
 #ifdef CONFIG_IP_MROUTE
-	struct rtattr *eptr;
+        struct rtattr *eptr;
 #endif
-	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
-	r = NLMSG_DATA(nlh);
-	r->rtm_family	 = AF_INET;
-	r->rtm_dst_len	= 32;
-	r->rtm_src_len	= 0;
-	r->rtm_tos	= rt->fl.fl4_tos;
-	r->rtm_table	= RT_TABLE_MAIN;
-	r->rtm_type	= rt->rt_type;
-	r->rtm_scope	= RT_SCOPE_UNIVERSE;
-	r->rtm_protocol = RTPROT_UNSPEC;
-	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
-	if (rt->rt_flags & RTCF_NOTIFY)
-		r->rtm_flags |= RTM_F_NOTIFY;
-	RTA_PUT(skb, RTA_DST, 4, &rt->rt_dst);
-	if (rt->fl.fl4_src) {
-		r->rtm_src_len = 32;
-		RTA_PUT(skb, RTA_SRC, 4, &rt->fl.fl4_src);
-	}
-	if (rt->u.dst.dev)
-		RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->u.dst.dev->ifindex);
+        nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
+        r = NLMSG_DATA(nlh);
+        r->rtm_family    = AF_INET;
+        r->rtm_dst_len  = 32;
+        r->rtm_src_len  = 0;
+        r->rtm_tos      = rt->fl.fl4_tos;
+        r->rtm_table    = RT_TABLE_MAIN;
+        r->rtm_type     = rt->rt_type;
+        r->rtm_scope    = RT_SCOPE_UNIVERSE;
+        r->rtm_protocol = RTPROT_UNSPEC;
+        r->rtm_flags    = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
+        if (rt->rt_flags & RTCF_NOTIFY)
+                r->rtm_flags |= RTM_F_NOTIFY;
+        RTA_PUT(skb, RTA_DST, 4, &rt->rt_dst);
+        if (rt->fl.fl4_src) {
+                r->rtm_src_len = 32;
+                RTA_PUT(skb, RTA_SRC, 4, &rt->fl.fl4_src);
+        }
+        if (rt->u.dst.dev)
+                RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->u.dst.dev->ifindex);
 #ifdef CONFIG_NET_CLS_ROUTE
-	if (rt->u.dst.tclassid)
-		RTA_PUT(skb, RTA_FLOW, 4, &rt->u.dst.tclassid);
+        if (rt->u.dst.tclassid)
+                RTA_PUT(skb, RTA_FLOW, 4, &rt->u.dst.tclassid);
 #endif
 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
-	if (rt->rt_multipath_alg != IP_MP_ALG_NONE) {
-		__u32 alg = rt->rt_multipath_alg;
+        if (rt->rt_multipath_alg != IP_MP_ALG_NONE) {
+                __u32 alg = rt->rt_multipath_alg;
 
-		RTA_PUT(skb, RTA_MP_ALGO, 4, &alg);
-	}
+                RTA_PUT(skb, RTA_MP_ALGO, 4, &alg);
+        }
 #endif
-	if (rt->fl.iif)
-		RTA_PUT(skb, RTA_PREFSRC, 4, &rt->rt_spec_dst);
-	else if (rt->rt_src != rt->fl.fl4_src)
-		RTA_PUT(skb, RTA_PREFSRC, 4, &rt->rt_src);
-	if (rt->rt_dst != rt->rt_gateway)
-		RTA_PUT(skb, RTA_GATEWAY, 4, &rt->rt_gateway);
-	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
-		goto rtattr_failure;
-	ci.rta_lastuse	= jiffies_to_clock_t(jiffies - rt->u.dst.lastuse);
-	ci.rta_used	= rt->u.dst.__use;
-	ci.rta_clntref	= atomic_read(&rt->u.dst.__refcnt);
-	if (rt->u.dst.expires)
-		ci.rta_expires = jiffies_to_clock_t(rt->u.dst.expires - jiffies);
-	else
-		ci.rta_expires = 0;
-	ci.rta_error	= rt->u.dst.error;
-	ci.rta_id	= ci.rta_ts = ci.rta_tsage = 0;
-	if (rt->peer) {
-		ci.rta_id = rt->peer->ip_id_count;
-		if (rt->peer->tcp_ts_stamp) {
-			ci.rta_ts = rt->peer->tcp_ts;
-			ci.rta_tsage = xtime.tv_sec - rt->peer->tcp_ts_stamp;
-		}
-	}
+        if (rt->fl.iif)
+                RTA_PUT(skb, RTA_PREFSRC, 4, &rt->rt_spec_dst);
+        else if (rt->rt_src != rt->fl.fl4_src)
+                RTA_PUT(skb, RTA_PREFSRC, 4, &rt->rt_src);
+        if (rt->rt_dst != rt->rt_gateway)
+                RTA_PUT(skb, RTA_GATEWAY, 4, &rt->rt_gateway);
+        if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
+                goto rtattr_failure;
+        ci.rta_lastuse  = jiffies_to_clock_t(jiffies - rt->u.dst.lastuse);
+        ci.rta_used     = rt->u.dst.__use;
+        ci.rta_clntref  = atomic_read(&rt->u.dst.__refcnt);
+        if (rt->u.dst.expires)
+                ci.rta_expires = jiffies_to_clock_t(rt->u.dst.expires - jiffies);
+        else
+                ci.rta_expires = 0;
+        ci.rta_error    = rt->u.dst.error;
+        ci.rta_id       = ci.rta_ts = ci.rta_tsage = 0;
+        if (rt->peer) {
+                ci.rta_id = rt->peer->ip_id_count;
+                if (rt->peer->tcp_ts_stamp) {
+                        ci.rta_ts = rt->peer->tcp_ts;
+                        ci.rta_tsage = xtime.tv_sec - rt->peer->tcp_ts_stamp;
+                }
+        }
 #ifdef CONFIG_IP_MROUTE
-	eptr = (struct rtattr*)skb->tail;
+        eptr = (struct rtattr*)skb->tail;
 #endif
-	RTA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
-	if (rt->fl.iif) {
+        RTA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
+        if (rt->fl.iif) {
 #ifdef CONFIG_IP_MROUTE
-		u32 dst = rt->rt_dst;
+                u32 dst = rt->rt_dst;
 
-		if (MULTICAST(dst) && !LOCAL_MCAST(dst) &&
-		    ipv4_devconf.mc_forwarding) {
-			int err = ipmr_get_route(skb, r, nowait);
-			if (err <= 0) {
-				if (!nowait) {
-					if (err == 0)
-						return 0;
-					goto nlmsg_failure;
-				} else {
-					if (err == -EMSGSIZE)
-						goto nlmsg_failure;
-					((struct rta_cacheinfo*)RTA_DATA(eptr))->rta_error = err;
-				}
-			}
-		} else
+                if (MULTICAST(dst) && !LOCAL_MCAST(dst) &&
+                    ipv4_devconf.mc_forwarding) {
+                        int err = ipmr_get_route(skb, r, nowait);
+                        if (err <= 0) {
+                                if (!nowait) {
+                                        if (err == 0)
+                                                return 0;
+                                        goto nlmsg_failure;
+                                } else {
+                                        if (err == -EMSGSIZE)
+                                                goto nlmsg_failure;
+                                        ((struct rta_cacheinfo*)RTA_DATA(eptr))->rta_error = err;
+                                }
+                        }
+                } else
 #endif
-			RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
-	}
+                        RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
+        }
 
-	nlh->nlmsg_len = skb->tail - b;
-	return skb->len;
+        nlh->nlmsg_len = skb->tail - b;
+        return skb->len;
 
 nlmsg_failure:
 rtattr_failure:
-	skb_trim(skb, b - skb->data);
-	return -1;
+        skb_trim(skb, b - skb->data);
+        return -1;
 }
 
 int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
 {
-	struct rtattr **rta = arg;
-	struct rtmsg *rtm = NLMSG_DATA(nlh);
-	struct rtable *rt = NULL;
-	u32 dst = 0;
-	u32 src = 0;
-	int iif = 0;
-	int err = -ENOBUFS;
-	struct sk_buff *skb;
-
-	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
-	if (!skb)
-		goto out;
-
-	/* Reserve room for dummy headers, this skb can pass
-	   through good chunk of routing engine.
-	 */
-	skb->mac.raw = skb->nh.raw = skb->data;
-
-	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
-	skb->nh.iph->protocol = IPPROTO_ICMP;
-	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
-
-	if (rta[RTA_SRC - 1])
-		memcpy(&src, RTA_DATA(rta[RTA_SRC - 1]), 4);
-	if (rta[RTA_DST - 1])
-		memcpy(&dst, RTA_DATA(rta[RTA_DST - 1]), 4);
-	if (rta[RTA_IIF - 1])
-		memcpy(&iif, RTA_DATA(rta[RTA_IIF - 1]), sizeof(int));
-
-	if (iif) {
-		struct net_device *dev = __dev_get_by_index(iif);
-		err = -ENODEV;
-		if (!dev)
-			goto out_free;
-		skb->protocol	= htons(ETH_P_IP);
-		skb->dev	= dev;
-		local_bh_disable();
-		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
-		local_bh_enable();
-		rt = (struct rtable*)skb->dst;
-		if (!err && rt->u.dst.error)
-			err = -rt->u.dst.error;
-	} else {
-		struct flowi fl = { .nl_u = { .ip4_u = { .daddr = dst,
-							 .saddr = src,
-							 .tos = rtm->rtm_tos } } };
-		int oif = 0;
-		if (rta[RTA_OIF - 1])
-			memcpy(&oif, RTA_DATA(rta[RTA_OIF - 1]), sizeof(int));
-		fl.oif = oif;
-		err = ip_route_output_key(&rt, &fl);
-	}
-	if (err)
-		goto out_free;
-
-	skb->dst = &rt->u.dst;
-	if (rtm->rtm_flags & RTM_F_NOTIFY)
-		rt->rt_flags |= RTCF_NOTIFY;
-
-	NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
-
-	err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
-				RTM_NEWROUTE, 0, 0);
-	if (!err)
-		goto out_free;
-	if (err < 0) {
-		err = -EMSGSIZE;
-		goto out_free;
-	}
-
-	err = netlink_unicast(rtnl, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
-	if (err > 0)
-		err = 0;
-out:	return err;
+        struct rtattr **rta = arg;
+        struct rtmsg *rtm = NLMSG_DATA(nlh);
+        struct rtable *rt = NULL;
+        u32 dst = 0;
+        u32 src = 0;
+        int iif = 0;
+        int err = -ENOBUFS;
+        struct sk_buff *skb;
+
+        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+        if (!skb)
+                goto out;
+
+        /* Reserve room for dummy headers, this skb can pass
+           through good chunk of routing engine.
+         */
+        skb->mac.raw = skb->nh.raw = skb->data;
+
+        /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
+        skb->nh.iph->protocol = IPPROTO_ICMP;
+        skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
+
+        if (rta[RTA_SRC - 1])
+                memcpy(&src, RTA_DATA(rta[RTA_SRC - 1]), 4);
+        if (rta[RTA_DST - 1])
+                memcpy(&dst, RTA_DATA(rta[RTA_DST - 1]), 4);
+        if (rta[RTA_IIF - 1])
+                memcpy(&iif, RTA_DATA(rta[RTA_IIF - 1]), sizeof(int));
+
+        if (iif) {
+                struct net_device *dev = __dev_get_by_index(iif);
+                err = -ENODEV;
+                if (!dev)
+                        goto out_free;
+                skb->protocol   = htons(ETH_P_IP);
+                skb->dev        = dev;
+                local_bh_disable();
+                err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
+                local_bh_enable();
+                rt = (struct rtable*)skb->dst;
+                if (!err && rt->u.dst.error)
+                        err = -rt->u.dst.error;
+        } else {
+                struct flowi fl = { .nl_u = { .ip4_u = { .daddr = dst,
+                                                         .saddr = src,
+                                                         .tos = rtm->rtm_tos } } };
+                int oif = 0;
+                if (rta[RTA_OIF - 1])
+                        memcpy(&oif, RTA_DATA(rta[RTA_OIF - 1]), sizeof(int));
+                fl.oif = oif;
+                err = ip_route_output_key(&rt, &fl);
+        }
+        if (err)
+                goto out_free;
+
+        skb->dst = &rt->u.dst;
+        if (rtm->rtm_flags & RTM_F_NOTIFY)
+                rt->rt_flags |= RTCF_NOTIFY;
+
+        NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
+
+        err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
+                                RTM_NEWROUTE, 0, 0);
+        if (!err)
+                goto out_free;
+        if (err < 0) {
+                err = -EMSGSIZE;
+                goto out_free;
+        }
+
+        err = netlink_unicast(rtnl, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
+        if (err > 0)
+                err = 0;
+out:    return err;
 
 out_free:
-	kfree_skb(skb);
-	goto out;
+        kfree_skb(skb);
+        goto out;
 }
 
 int ip_rt_dump(struct sk_buff *skb,  struct netlink_callback *cb)
 {
-	struct rtable *rt;
-	int h, s_h;
-	int idx, s_idx;
-
-	s_h = cb->args[0];
-	s_idx = idx = cb->args[1];
-	for (h = 0; h <= rt_hash_mask; h++) {
-		if (h < s_h) continue;
-		if (h > s_h)
-			s_idx = 0;
-		rcu_read_lock_bh();
-		for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
-		     rt = rcu_dereference(rt->u.rt_next), idx++) {
-			if (idx < s_idx)
-				continue;
-			skb->dst = dst_clone(&rt->u.dst);
-			if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
-					 cb->nlh->nlmsg_seq, RTM_NEWROUTE, 
-					 1, NLM_F_MULTI) <= 0) {
-				dst_release(xchg(&skb->dst, NULL));
-				rcu_read_unlock_bh();
-				goto done;
-			}
-			dst_release(xchg(&skb->dst, NULL));
-		}
-		rcu_read_unlock_bh();
-	}
+        struct rtable *rt;
+        int h, s_h;
+        int idx, s_idx;
+
+        s_h = cb->args[0];
+        s_idx = idx = cb->args[1];
+        for (h = 0; h <= rt_hash_mask; h++) {
+                if (h < s_h) continue;
+                if (h > s_h)
+                        s_idx = 0;
+                rcu_read_lock_bh();
+                for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
+                     rt = rcu_dereference(rt->u.rt_next), idx++) {
+                        if (idx < s_idx)
+                                continue;
+                        skb->dst = dst_clone(&rt->u.dst);
+                        if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
+                                         cb->nlh->nlmsg_seq, RTM_NEWROUTE,
+                                         1, NLM_F_MULTI) <= 0) {
+                                dst_release(xchg(&skb->dst, NULL));
+                                rcu_read_unlock_bh();
+                                goto done;
+                        }
+                        dst_release(xchg(&skb->dst, NULL));
+                }
+                rcu_read_unlock_bh();
+        }
 
 done:
-	cb->args[0] = h;
-	cb->args[1] = idx;
-	return skb->len;
+        cb->args[0] = h;
+        cb->args[1] = idx;
+        return skb->len;
 }
 
 void ip_rt_multicast_event(struct in_device *in_dev)
 {
-	rt_cache_flush(0);
+        rt_cache_flush(0);
 }
 
 #ifdef CONFIG_SYSCTL
 static int flush_delay;
 
 static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write,
-					struct file *filp, void __user *buffer,
-					size_t *lenp, loff_t *ppos)
+                                        struct file *filp, void __user *buffer,
+                                        size_t *lenp, loff_t *ppos)
 {
-	if (write) {
-		proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
-		rt_cache_flush(flush_delay);
-		return 0;
-	} 
+        if (write) {
+                proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+                rt_cache_flush(flush_delay);
+                return 0;
+        }
 
-	return -EINVAL;
+        return -EINVAL;
 }
 
 static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
-						int __user *name,
-						int nlen,
-						void __user *oldval,
-						size_t __user *oldlenp,
-						void __user *newval,
-						size_t newlen,
-						void **context)
-{
-	int delay;
-	if (newlen != sizeof(int))
-		return -EINVAL;
-	if (get_user(delay, (int __user *)newval))
-		return -EFAULT; 
-	rt_cache_flush(delay); 
-	return 0;
+                                                int __user *name,
+                                                int nlen,
+                                                void __user *oldval,
+                                                size_t __user *oldlenp,
+                                                void __user *newval,
+                                                size_t newlen,
+                                                void **context)
+{
+        int delay;
+        if (newlen != sizeof(int))
+                return -EINVAL;
+        if (get_user(delay, (int __user *)newval))
+                return -EFAULT;
+        rt_cache_flush(delay);
+        return 0;
 }
 
 ctl_table ipv4_route_table[] = {
         {
-		.ctl_name 	= NET_IPV4_ROUTE_FLUSH,
-		.procname	= "flush",
-		.data		= &flush_delay,
-		.maxlen		= sizeof(int),
-		.mode		= 0200,
-		.proc_handler	= &ipv4_sysctl_rtcache_flush,
-		.strategy	= &ipv4_sysctl_rtcache_flush_strategy,
-	},
-	{
-		.ctl_name	= NET_IPV4_ROUTE_MIN_DELAY,
-		.procname	= "min_delay",
-		.data		= &ip_rt_min_delay,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
-		.strategy	= &sysctl_jiffies,
-	},
-	{
-		.ctl_name	= NET_IPV4_ROUTE_MAX_DELAY,
-		.procname	= "max_delay",
-		.data		= &ip_rt_max_delay,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
-		.strategy	= &sysctl_jiffies,
-	},
-	{
-		.ctl_name	= NET_IPV4_ROUTE_GC_THRESH,
-		.procname	= "gc_thresh",
-		.data		= &ipv4_dst_ops.gc_thresh,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec,
-	},
-	{
-		.ctl_name	= NET_IPV4_ROUTE_MAX_SIZE,
-		.procname	= "max_size",
-		.data		= &ip_rt_max_size,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec,
-	},
-	{
-		/*  Deprecated. Use gc_min_interval_ms */
+                .ctl_name       = NET_IPV4_ROUTE_FLUSH,
+                .procname       = "flush",
+                .data           = &flush_delay,
+                .maxlen         = sizeof(int),
+                .mode           = 0200,
+                .proc_handler   = &ipv4_sysctl_rtcache_flush,
+                .strategy       = &ipv4_sysctl_rtcache_flush_strategy,
+        },
+        {
+                .ctl_name       = NET_IPV4_ROUTE_MIN_DELAY,
+                .procname       = "min_delay",
+                .data           = &ip_rt_min_delay,
+                .maxlen         = sizeof(int),
+                .mode           = 0644,
+                .proc_handler   = &proc_dointvec_jiffies,
+                .strategy       = &sysctl_jiffies,
+        },
+        {
+                .ctl_name       = NET_IPV4_ROUTE_MAX_DELAY,
+                .procname       = "max_delay",
+                .data           = &ip_rt_max_delay,
+                .maxlen         = sizeof(int),
+                .mode           = 0644,
+                .proc_handler   = &proc_dointvec_jiffies,
+                .strategy       = &sysctl_jiffies,
+        },
+        {
+                .ctl_name       = NET_IPV4_ROUTE_GC_THRESH,
+                .procname       = "gc_thresh",
+                .data           = &ipv4_dst_ops.gc_thresh,
+                .maxlen         = sizeof(int),
+                .mode           = 0644,
+                .proc_handler   = &proc_dointvec,
+        },
+        {
+                .ctl_name       = NET_IPV4_ROUTE_MAX_SIZE,
+                .procname       = "max_size",
+                .data           = &ip_rt_max_size,
+                .maxlen         = sizeof(int),
+                .mode           = 0644,
+                .proc_handler   = &proc_dointvec,
+        },
+        {
+                /*  Deprecated. Use gc_min_interval_ms */
  
-		.ctl_name	= NET_IPV4_ROUTE_GC_MIN_INTERVAL,
-		.procname	= "gc_min_interval",
-		.data		= &ip_rt_gc_min_interval,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
-		.strategy	= &sysctl_jiffies,
-	},
-	{
-		.ctl_name	= NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS,
-		.procname	= "gc_min_interval_ms",
-		.data		= &ip_rt_gc_min_interval,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_ms_jiffies,
-		.strategy	= &sysctl_ms_jiffies,
-	},
-	{
-		.ctl_name	= NET_IPV4_ROUTE_GC_TIMEOUT,
-		.procname	= "gc_timeout",
-		.data		= &ip_rt_gc_timeout,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
-		.strategy	= &sysctl_jiffies,
-	},
-	{
-		.ctl_name	= NET_IPV4_ROUTE_GC_INTERVAL,
-		.procname	= "gc_interval",
-		.data		= &ip_rt_gc_interval,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
-		.strategy	= &sysctl_jiffies,
-	},
-	{
-		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_LOAD,
-		.procname	= "redirect_load",
-		.data		= &ip_rt_redirect_load,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec,
-	},
-	{
-		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_NUMBER,
-		.procname	= "redirect_number",
-		.data		= &ip_rt_redirect_number,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec,
-	},
-	{
-		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_SILENCE,
-		.procname	= "redirect_silence",
-		.data		= &ip_rt_redirect_silence,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec,
-	},
-	{
-		.ctl_name	= NET_IPV4_ROUTE_ERROR_COST,
-		.procname	= "error_cost",
-		.data		= &ip_rt_error_cost,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec,
-	},
-	{
-		.ctl_name	= NET_IPV4_ROUTE_ERROR_BURST,
-		.procname	= "error_burst",
-		.data		= &ip_rt_error_burst,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec,
-	},
-	{
-		.ctl_name	= NET_IPV4_ROUTE_GC_ELASTICITY,
-		.procname	= "gc_elasticity",
-		.data		= &ip_rt_gc_elasticity,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec,
-	},
-	{
-		.ctl_name	= NET_IPV4_ROUTE_MTU_EXPIRES,
-		.procname	= "mtu_expires",
-		.data		= &ip_rt_mtu_expires,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
-		.strategy	= &sysctl_jiffies,
-	},
-	{
-		.ctl_name	= NET_IPV4_ROUTE_MIN_PMTU,
-		.procname	= "min_pmtu",
-		.data		= &ip_rt_min_pmtu,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec,
-	},
-	{
-		.ctl_name	= NET_IPV4_ROUTE_MIN_ADVMSS,
-		.procname	= "min_adv_mss",
-		.data		= &ip_rt_min_advmss,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec,
-	},
-	{
-		.ctl_name	= NET_IPV4_ROUTE_SECRET_INTERVAL,
-		.procname	= "secret_interval",
-		.data		= &ip_rt_secret_interval,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
-		.strategy	= &sysctl_jiffies,
-	},
-	{ .ctl_name = 0 }
+                .ctl_name       = NET_IPV4_ROUTE_GC_MIN_INTERVAL,
+                .procname       = "gc_min_interval",
+                .data           = &ip_rt_gc_min_interval,
+                .maxlen         = sizeof(int),
+                .mode           = 0644,
+                .proc_handler   = &proc_dointvec_jiffies,
+                .strategy       = &sysctl_jiffies,
+        },
+        {
+                .ctl_name       = NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS,
+                .procname       = "gc_min_interval_ms",
+                .data           = &ip_rt_gc_min_interval,
+                .maxlen         = sizeof(int),
+                .mode           = 0644,
+                .proc_handler   = &proc_dointvec_ms_jiffies,
+                .strategy       = &sysctl_ms_jiffies,
+        },
+        {
+                .ctl_name       = NET_IPV4_ROUTE_GC_TIMEOUT,
+                .procname       = "gc_timeout",
+                .data           = &ip_rt_gc_timeout,
+                .maxlen         = sizeof(int),
+                .mode           = 0644,
+                .proc_handler   = &proc_dointvec_jiffies,
+                .strategy       = &sysctl_jiffies,
+        },
+        {
+                .ctl_name       = NET_IPV4_ROUTE_GC_INTERVAL,
+                .procname       = "gc_interval",
+                .data           = &ip_rt_gc_interval,
+                .maxlen         = sizeof(int),
+                .mode           = 0644,
+                .proc_handler   = &proc_dointvec_jiffies,
+                .strategy       = &sysctl_jiffies,
+        },
+        {
+                .ctl_name       = NET_IPV4_ROUTE_REDIRECT_LOAD,
+                .procname       = "redirect_load",
+                .data           = &ip_rt_redirect_load,
+                .maxlen         = sizeof(int),
+                .mode           = 0644,
+                .proc_handler   = &proc_dointvec,
+        },
+        {
+                .ctl_name       = NET_IPV4_ROUTE_REDIRECT_NUMBER,
+                .procname       = "redirect_number",
+                .data           = &ip_rt_redirect_number,
+                .maxlen         = sizeof(int),
+                .mode           = 0644,
+                .proc_handler   = &proc_dointvec,
+        },
+        {
+                .ctl_name       = NET_IPV4_ROUTE_REDIRECT_SILENCE,
+                .procname       = "redirect_silence",
+                .data           = &ip_rt_redirect_silence,
+                .maxlen         = sizeof(int),
+                .mode           = 0644,
+                .proc_handler   = &proc_dointvec,
+        },
+        {
+                .ctl_name       = NET_IPV4_ROUTE_ERROR_COST,
+                .procname       = "error_cost",
+                .data           = &ip_rt_error_cost,
+                .maxlen         = sizeof(int),
+                .mode           = 0644,
+                .proc_handler   = &proc_dointvec,
+        },
+        {
+                .ctl_name       = NET_IPV4_ROUTE_ERROR_BURST,
+                .procname       = "error_burst",
+                .data           = &ip_rt_error_burst,
+                .maxlen         = sizeof(int),
+                .mode           = 0644,
+                .proc_handler   = &proc_dointvec,
+        },
+        {
+                .ctl_name       = NET_IPV4_ROUTE_GC_ELASTICITY,
+                .procname       = "gc_elasticity",
+                .data           = &ip_rt_gc_elasticity,
+                .maxlen         = sizeof(int),
+                .mode           = 0644,
+                .proc_handler   = &proc_dointvec,
+        },
+        {
+                .ctl_name       = NET_IPV4_ROUTE_MTU_EXPIRES,
+                .procname       = "mtu_expires",
+                .data           = &ip_rt_mtu_expires,
+                .maxlen         = sizeof(int),
+                .mode           = 0644,
+                .proc_handler   = &proc_dointvec_jiffies,
+                .strategy       = &sysctl_jiffies,
+        },
+        {
+                .ctl_name       = NET_IPV4_ROUTE_MIN_PMTU,
+                .procname       = "min_pmtu",
+                .data           = &ip_rt_min_pmtu,
+                .maxlen         = sizeof(int),
+                .mode           = 0644,
+                .proc_handler   = &proc_dointvec,
+        },
+        {
+                .ctl_name       = NET_IPV4_ROUTE_MIN_ADVMSS,
+                .procname       = "min_adv_mss",
+                .data           = &ip_rt_min_advmss,
+                .maxlen         = sizeof(int),
+                .mode           = 0644,
+                .proc_handler   = &proc_dointvec,
+        },
+        {
+                .ctl_name       = NET_IPV4_ROUTE_SECRET_INTERVAL,
+                .procname       = "secret_interval",
+                .data           = &ip_rt_secret_interval,
+                .maxlen         = sizeof(int),
+                .mode           = 0644,
+                .proc_handler   = &proc_dointvec_jiffies,
+                .strategy       = &sysctl_jiffies,
+        },
+        { .ctl_name = 0 }
 };
 #endif
 
@@ -3071,44 +3076,44 @@
 
 #ifdef CONFIG_PROC_FS
 static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
-			   int length, int *eof, void *data)
+                           int length, int *eof, void *data)
 {
-	unsigned int i;
+        unsigned int i;
 
-	if ((offset & 3) || (length & 3))
-		return -EIO;
+        if ((offset & 3) || (length & 3))
+                return -EIO;
 
-	if (offset >= sizeof(struct ip_rt_acct) * 256) {
-		*eof = 1;
-		return 0;
-	}
+        if (offset >= sizeof(struct ip_rt_acct) * 256) {
+                *eof = 1;
+                return 0;
+        }
 
-	if (offset + length >= sizeof(struct ip_rt_acct) * 256) {
-		length = sizeof(struct ip_rt_acct) * 256 - offset;
-		*eof = 1;
-	}
+        if (offset + length >= sizeof(struct ip_rt_acct) * 256) {
+                length = sizeof(struct ip_rt_acct) * 256 - offset;
+                *eof = 1;
+        }
 
-	offset /= sizeof(u32);
+        offset /= sizeof(u32);
 
-	if (length > 0) {
-		u32 *src = ((u32 *) IP_RT_ACCT_CPU(0)) + offset;
-		u32 *dst = (u32 *) buffer;
+        if (length > 0) {
+                u32 *src = ((u32 *) IP_RT_ACCT_CPU(0)) + offset;
+                u32 *dst = (u32 *) buffer;
 
-		/* Copy first cpu. */
-		*start = buffer;
-		memcpy(dst, src, length);
+                /* Copy first cpu. */
+                *start = buffer;
+                memcpy(dst, src, length);
 
-		/* Add the other cpus in, one int at a time */
-		for_each_possible_cpu(i) {
-			unsigned int j;
+                /* Add the other cpus in, one int at a time */
+                for_each_possible_cpu(i) {
+                        unsigned int j;
 
-			src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;
+                        src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;
 
-			for (j = 0; j < length/4; j++)
-				dst[j] += src[j];
-		}
-	}
-	return length;
+                        for (j = 0; j < length/4; j++)
+                                dst[j] += src[j];
+                }
+        }
+        return length;
 }
 #endif /* CONFIG_PROC_FS */
 #endif /* CONFIG_NET_CLS_ROUTE */
@@ -3116,97 +3121,97 @@
 static __initdata unsigned long rhash_entries;
 static int __init set_rhash_entries(char *str)
 {
-	if (!str)
-		return 0;
-	rhash_entries = simple_strtoul(str, &str, 0);
-	return 1;
+        if (!str)
+                return 0;
+        rhash_entries = simple_strtoul(str, &str, 0);
+        return 1;
 }
 __setup("rhash_entries=", set_rhash_entries);
 
 int __init ip_rt_init(void)
 {
-	int rc = 0;
+        int rc = 0;
 
-	rt_hash_rnd = (int) ((num_physpages ^ (num_physpages>>8)) ^
-			     (jiffies ^ (jiffies >> 7)));
+        rt_hash_rnd = (int) ((num_physpages ^ (num_physpages>>8)) ^
+                             (jiffies ^ (jiffies >> 7)));
 
 #ifdef CONFIG_NET_CLS_ROUTE
-	{
-	int order;
-	for (order = 0;
-	     (PAGE_SIZE << order) < 256 * sizeof(struct ip_rt_acct) * NR_CPUS; order++)
-		/* NOTHING */;
-	ip_rt_acct = (struct ip_rt_acct *)__get_free_pages(GFP_KERNEL, order);
-	if (!ip_rt_acct)
-		panic("IP: failed to allocate ip_rt_acct\n");
-	memset(ip_rt_acct, 0, PAGE_SIZE << order);
-	}
-#endif
-
-	ipv4_dst_ops.kmem_cachep = kmem_cache_create("ip_dst_cache",
-						     sizeof(struct rtable),
-						     0, SLAB_HWCACHE_ALIGN,
-						     NULL, NULL);
-
-	if (!ipv4_dst_ops.kmem_cachep)
-		panic("IP: failed to allocate ip_dst_cache\n");
-
-	rt_hash_table = (struct rt_hash_bucket *)
-		alloc_large_system_hash("IP route cache",
-					sizeof(struct rt_hash_bucket),
-					rhash_entries,
-					(num_physpages >= 128 * 1024) ?
-					15 : 17,
-					0,
-					&rt_hash_log,
-					&rt_hash_mask,
-					0);
-	memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
-	rt_hash_lock_init();
-
-	ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
-	ip_rt_max_size = (rt_hash_mask + 1) * 16;
-
-	devinet_init();
-	ip_fib_init();
-
-	init_timer(&rt_flush_timer);
-	rt_flush_timer.function = rt_run_flush;
-	init_timer(&rt_periodic_timer);
-	rt_periodic_timer.function = rt_check_expire;
-	init_timer(&rt_secret_timer);
-	rt_secret_timer.function = rt_secret_rebuild;
-
-	/* All the timers, started at system startup tend
-	   to synchronize. Perturb it a bit.
-	 */
-	rt_periodic_timer.expires = jiffies + net_random() % ip_rt_gc_interval +
-					ip_rt_gc_interval;
-	add_timer(&rt_periodic_timer);
-
-	rt_secret_timer.expires = jiffies + net_random() % ip_rt_secret_interval +
-		ip_rt_secret_interval;
-	add_timer(&rt_secret_timer);
+        {
+        int order;
+        for (order = 0;
+             (PAGE_SIZE << order) < 256 * sizeof(struct ip_rt_acct) * NR_CPUS; order++)
+                /* NOTHING */;
+        ip_rt_acct = (struct ip_rt_acct *)__get_free_pages(GFP_KERNEL, order);
+        if (!ip_rt_acct)
+                panic("IP: failed to allocate ip_rt_acct\n");
+        memset(ip_rt_acct, 0, PAGE_SIZE << order);
+        }
+#endif
+
+        ipv4_dst_ops.kmem_cachep = kmem_cache_create("ip_dst_cache",
+                                                     sizeof(struct rtable),
+                                                     0, SLAB_HWCACHE_ALIGN,
+                                                     NULL, NULL);
+
+        if (!ipv4_dst_ops.kmem_cachep)
+                panic("IP: failed to allocate ip_dst_cache\n");
+
+        rt_hash_table = (struct rt_hash_bucket *)
+                alloc_large_system_hash("IP route cache",
+                                        sizeof(struct rt_hash_bucket),
+                                        rhash_entries,
+                                        (num_physpages >= 128 * 1024) ?
+                                        15 : 17,
+                                        0,
+                                        &rt_hash_log,
+                                        &rt_hash_mask,
+                                        0);
+        memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
+        rt_hash_lock_init();
+
+        ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
+        ip_rt_max_size = (rt_hash_mask + 1) * 16;
+
+        devinet_init();
+        ip_fib_init();
+
+        init_timer(&rt_flush_timer);
+        rt_flush_timer.function = rt_run_flush;
+        init_timer(&rt_periodic_timer);
+        rt_periodic_timer.function = rt_check_expire;
+        init_timer(&rt_secret_timer);
+        rt_secret_timer.function = rt_secret_rebuild;
+
+        /* All the timers, started at system startup tend
+           to synchronize. Perturb it a bit.
+         */
+        rt_periodic_timer.expires = jiffies + net_random() % ip_rt_gc_interval +
+                                        ip_rt_gc_interval;
+        add_timer(&rt_periodic_timer);
+
+        rt_secret_timer.expires = jiffies + net_random() % ip_rt_secret_interval +
+                ip_rt_secret_interval;
+        add_timer(&rt_secret_timer);
 
 #ifdef CONFIG_PROC_FS
-	{
-	struct proc_dir_entry *rtstat_pde = NULL; /* keep gcc happy */
-	if (!proc_net_fops_create("rt_cache", S_IRUGO, &rt_cache_seq_fops) ||
-	    !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO, 
-			    		     proc_net_stat))) {
-		return -ENOMEM;
-	}
-	rtstat_pde->proc_fops = &rt_cpu_seq_fops;
-	}
+        {
+        struct proc_dir_entry *rtstat_pde = NULL; /* keep gcc happy */
+        if (!proc_net_fops_create("rt_cache", S_IRUGO, &rt_cache_seq_fops) ||
+            !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO, 
+                                             proc_net_stat))) {
+                return -ENOMEM;
+        }
+        rtstat_pde->proc_fops = &rt_cpu_seq_fops;
+        }
 #ifdef CONFIG_NET_CLS_ROUTE
-	create_proc_read_entry("rt_acct", 0, proc_net, ip_rt_acct_read, NULL);
+        create_proc_read_entry("rt_acct", 0, proc_net, ip_rt_acct_read, NULL);
 #endif
 #endif
 #ifdef CONFIG_XFRM
-	xfrm_init();
-	xfrm4_init();
+        xfrm_init();
+        xfrm4_init();
 #endif
-	return rc;
+        return rc;
 }
 
 EXPORT_SYMBOL(__ip_select_ident);
diff -u --recursive linux-source-2.6.18/net/ipv6/addrconf.c linux-2.6.18-ghostification-host/net/ipv6/addrconf.c
--- linux-source-2.6.18/net/ipv6/addrconf.c	2007-08-29 10:12:39.000000000 +0200
+++ linux-2.6.18-ghostification-host/net/ipv6/addrconf.c	2007-10-04 01:42:04.000000000 +0200
@@ -38,6 +38,7 @@
  *	YOSHIFUJI Hideaki @USAGI	:	improved source address
  *						selection; consider scope,
  *						status etc.
+ *      Luca Saiu <positron@gnu.org>    :       ghostification support
  */
 
 #include <linux/errno.h>
@@ -340,7 +341,7 @@
 
 	ASSERT_RTNL();
 
-	if (dev->mtu < IPV6_MIN_MTU)
+        if (dev->mtu < IPV6_MIN_MTU)
 		return NULL;
 
  	ndev = kzalloc(sizeof(struct inet6_dev), GFP_KERNEL);
@@ -442,6 +443,77 @@
 	return idev;
 }
 
+/* Utility procedure, needed for
+   {show,hide}_proc_net_dev_snmp6_DEVICE_if_needed(). Return a pointer
+   to a valid inet6_dev structure on success, NULL on failure: */
+static struct inet6_dev* lookup_snmp6_device(const char *interface_name){
+  struct net_device *device;
+  struct inet6_dev *idev;
+  
+  /* Lookup the device by name, obtaining an inet6_dev structure: */
+  device = dev_get_by_name(interface_name);
+  if(device == NULL)
+    return NULL;
+  rtnl_lock();
+  idev = ipv6_find_idev(device);
+  rtnl_unlock();
+  return idev;
+}
+
+/* These are defined in net/ipv6/proc.c: */
+extern struct proc_dir_entry *proc_net_devsnmp6;
+extern struct file_operations snmp6_seq_fops;
+/* Remove the virtual file /proc/net/dev_snmp6/DEVICE, unless it's
+   already hidden. Return 0 on success, nonzero on error: */
+int hide_proc_net_dev_snmp6_DEVICE_if_needed(const char *interface_name){
+  struct inet6_dev *idev = lookup_snmp6_device(interface_name);
+  printk(KERN_DEBUG "Hiding /proc/net/dev_snmp6/%s...\n",
+         interface_name);
+  if(idev == NULL) // lookup failed
+    return -EINVAL;
+
+  /* Remove the proc/ entry, if any. If there was no entry then
+     remove_proc_entry() will fail, but it's ok for us: */
+#ifdef CONFIG_PROC_FS
+  if (!proc_net_devsnmp6)
+    return -ENOENT;
+  if (idev->stats.proc_dir_entry == NULL)
+    return -EINVAL;
+  remove_proc_entry(interface_name,
+                    proc_net_devsnmp6);
+#endif // #ifdef CONFIG_PROC_FS
+  return 0;
+  //  return snmp6_unregister_dev(idev);
+}
+
+/* Create the virtual file /proc/net/dev_snmp6/DEVICE, unless it's
+   already shown. Return 0 on success, nonzero on error: */
+int show_proc_net_dev_snmp6_DEVICE_if_needed(const char *interface_name){
+  struct inet6_dev *idev = lookup_snmp6_device(interface_name);
+  struct proc_dir_entry *proc_directory_entry;
+  printk(KERN_DEBUG "Showing /proc/net/dev_snmp6/%s...\n",
+         interface_name);
+  if(idev == NULL) // lookup failed
+    return -EINVAL;
+  if(idev->dev == NULL) // I doubt this may happen...
+    return -EINVAL;
+#ifdef CONFIG_PROC_FS
+  if(!proc_net_devsnmp6) // there isn't any /proc/net/dev_snmp6
+    return -ENOENT;
+  if((proc_directory_entry =
+      create_proc_entry(interface_name, S_IRUGO, proc_net_devsnmp6))
+     == NULL)
+    return -ENOMEM;
+  proc_directory_entry->data = idev;
+  proc_directory_entry->proc_fops = &snmp6_seq_fops;
+  idev->stats.proc_dir_entry = proc_directory_entry;
+#endif // #ifdef CONFIG_PROC_FS
+  return 0;
+  //  return snmp6_register_dev(idev);
+}
+EXPORT_SYMBOL(show_proc_net_dev_snmp6_DEVICE_if_needed);
+EXPORT_SYMBOL(hide_proc_net_dev_snmp6_DEVICE_if_needed);
+
 #ifdef CONFIG_SYSCTL
 static void dev_forward_change(struct inet6_dev *idev)
 {
@@ -2704,6 +2776,8 @@
 static int if6_seq_show(struct seq_file *seq, void *v)
 {
 	struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
+        /* Don't show information about ghost interfaces: */
+        if(! is_a_ghost_interface_name(ifp->idev->dev->name))
 	seq_printf(seq,
 		   NIP6_SEQFMT " %02x %02x %02x %02x %8s\n",
 		   NIP6(ifp->addr),
diff -u --recursive linux-source-2.6.18/net/ipv6/mcast.c linux-2.6.18-ghostification-host/net/ipv6/mcast.c
--- linux-source-2.6.18/net/ipv6/mcast.c	2007-08-29 10:12:38.000000000 +0200
+++ linux-2.6.18-ghostification-host/net/ipv6/mcast.c	2007-10-04 01:42:17.000000000 +0200
@@ -26,6 +26,8 @@
  *		- MLD for link-local addresses.
  *	David L Stevens <dlstevens@us.ibm.com>:
  *		- MLDv2 support
+ *      Luca Saiu <positron@gnu.org>:
+ *              - trivial changes for ghostification support
  */
 
 #include <linux/module.h>
@@ -2404,6 +2406,8 @@
 	struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
 	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
 
+        /* Don't show information about ghost interfaces: */
+        if(! is_a_ghost_interface_name(state->dev->name))
 	seq_printf(seq,
 		   "%-4d %-15s " NIP6_SEQFMT " %5d %08X %ld\n", 
 		   state->dev->ifindex, state->dev->name,
diff -u --recursive linux-source-2.6.18/net/ipv6/proc.c linux-2.6.18-ghostification-host/net/ipv6/proc.c
--- linux-source-2.6.18/net/ipv6/proc.c	2006-09-20 05:42:06.000000000 +0200
+++ linux-2.6.18-ghostification-host/net/ipv6/proc.c	2007-10-04 01:42:24.000000000 +0200
@@ -11,6 +11,8 @@
  *
  * Authors:	David S. Miller (davem@caip.rutgers.edu)
  * 		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
+ *              Luca Saiu <positron@gnu.org> (trivial changes
+ *                                            for ghostification support)
  *
  *		This program is free software; you can redistribute it and/or
  *		modify it under the terms of the GNU General Public License
@@ -30,7 +32,11 @@
 #include <net/ipv6.h>
 
 #ifdef CONFIG_PROC_FS
-static struct proc_dir_entry *proc_net_devsnmp6;
+
+/* We don't want this to be static, as it has to be read at ghostifying
+   and unghostifying time: */
+struct proc_dir_entry *proc_net_devsnmp6;
+EXPORT_SYMBOL(proc_net_devsnmp6);
 
 static int fold_prot_inuse(struct proto *proto)
 {
@@ -188,13 +194,16 @@
 	return single_open(file, snmp6_seq_show, PDE(inode)->data);
 }
 
-static struct file_operations snmp6_seq_fops = {
+/* This was originally static, but we need to make it
+   visible: */
+struct file_operations snmp6_seq_fops = {
 	.owner	 = THIS_MODULE,
 	.open	 = snmp6_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
 	.release = single_release,
 };
+EXPORT_SYMBOL(snmp6_seq_fops);
 
 int snmp6_register_dev(struct inet6_dev *idev)
 {
diff -u --recursive linux-source-2.6.18/net/ipv6/route.c linux-2.6.18-ghostification-host/net/ipv6/route.c
--- linux-source-2.6.18/net/ipv6/route.c	2006-09-20 05:42:06.000000000 +0200
+++ linux-2.6.18-ghostification-host/net/ipv6/route.c	2007-10-04 01:42:34.000000000 +0200
@@ -22,6 +22,8 @@
  *		routers in REACHABLE, STALE, DELAY or PROBE states).
  *		- always select the same router if it is (probably)
  *		reachable.  otherwise, round-robin the list.
+ *      Luca Saiu <positron@gnu.org>
+ *              trivial changes for ghostification support
  */
 
 #include <linux/capability.h>
@@ -2045,7 +2047,12 @@
 	struct rt6_proc_arg *arg = (struct rt6_proc_arg *) p_arg;
 	int i;
 
-	if (arg->skip < arg->offset / RT6_INFO_LEN) {
+        /* Do nothing if this route involves a ghost interface: */
+        if(rt->rt6i_dev != NULL) // NULL-check kept as a separate nested if for clarity
+          if(is_a_ghost_interface_name(rt->rt6i_dev->name))
+            return 0;
+        
+        if (arg->skip < arg->offset / RT6_INFO_LEN) {
 		arg->skip++;
 		return 0;
 	}
diff -u --recursive linux-source-2.6.18/net/packet/af_packet.c linux-2.6.18-ghostification-host/net/packet/af_packet.c
--- linux-source-2.6.18/net/packet/af_packet.c	2006-09-20 05:42:06.000000000 +0200
+++ linux-2.6.18-ghostification-host/net/packet/af_packet.c	2007-10-04 01:42:42.000000000 +0200
@@ -41,6 +41,8 @@
  *					will simply extend the hardware address
  *					byte arrays at the end of sockaddr_ll 
  *					and packet_mreq.
+ *      Luca Saiu <positron@gnu.org>:   Trivial changes for ghostification
+ *                                      support
  *
  *		This program is free software; you can redistribute it and/or
  *		modify it under the terms of the GNU General Public License
@@ -467,7 +469,13 @@
 
 	if (skb->pkt_type == PACKET_LOOPBACK)
 		goto drop;
-
+        
+        /* Drop packets involving ghost interfaces: we don't want the user
+           to be able to sniff them: */
+        if(is_a_ghost_interface_name(orig_dev->name) ||
+           is_a_ghost_interface_name(dev->name))
+          goto drop;
+        
 	sk = pt->af_packet_priv;
 	po = pkt_sk(sk);
 
@@ -576,6 +584,11 @@
 
 	if (skb->pkt_type == PACKET_LOOPBACK)
 		goto drop;
+        /* Drop packets involving ghost interfaces: we don't want the user
+           to be able to sniff them: */
+        if(is_a_ghost_interface_name(orig_dev->name) ||
+           is_a_ghost_interface_name(dev->name))
+          goto drop;
 
 	sk = pt->af_packet_priv;
 	po = pkt_sk(sk);
@@ -1875,6 +1888,9 @@
 		struct sock *s = v;
 		const struct packet_sock *po = pkt_sk(s);
 
+                /* Don't show packets involving ghost devices: */
+                struct net_device *net_device = dev_get_by_index(po->ifindex);
+                if(! is_a_ghost_interface_name(net_device->name))
 		seq_printf(seq,
 			   "%p %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
 			   s,
