--- /dev/null
+#
+# Copyright 2004-2008 University of Zagreb, Croatia.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# This work was supported in part by the Croatian Ministry of Science
+# and Technology through the research contract #IP-2003-143.
+#
+
+# $Id: exec.tcl,v 1.73 2008/05/14 13:51:08 miljenko Exp $
+
+
+#****f* exec.tcl/nexec
+# NAME
+# nexec -- execute program
+# SYNOPSIS
+# set result [nexec $args]
+# FUNCTION
+# Executes the command string given in the args variable. The string is
+# not executed if IMUNES is running in editor-only mode. Execution can be
+# local or remote. If a socket cannot be opened while attempting remote
+# execution, the mode is switched to editor-only mode.
+# INPUTS
+# * args -- the string that should be executed locally or remotely.
+# RESULT
+# * result -- the standard output of the executed string.
+#****
+proc nexec { args } {
+ global remote_exec editor_only
+ global execSock
+
+ if { $remote_exec } {
+ if { ![info exists execSock] || $execSock == "" } {
+ remoteStart
+ }
+ }
+
+ if { $editor_only } {
+ tk_messageBox -title "Editor only" \
+ -message "Running in editor only mode." \
+ -type ok
+ return
+ }
+
+ if { $remote_exec } {
+ rexec $execSock $args
+ } else {
+ puts "-- running: $args"
+ eval exec $args
+ }
+}
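+
+# A minimal usage sketch (illustrative, not part of the original code;
+# assumes the globals above are initialized by the startup code):
+#   set uid [nexec id -u]    ;# runs "id -u" locally, or via rexec when remote
+#   if { $uid != 0 } { puts "warning: not running as root" }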
+
+
+#****f* exec.tcl/setOperMode
+# NAME
+# setOperMode -- set operating mode
+# SYNOPSIS
+# setOperMode $mode
+# FUNCTION
+# Sets the imunes operating mode to the value of the mode parameter.
+# The mode can be set only to edit or exec.
+# When changing the mode to exec, all the emulation interfaces are
+# checked (if any of them is nonexistent a message is displayed and
+# the mode is not changed), all the required buttons are disabled
+# (except the simulation/Terminate button, which is enabled) and
+# the deployCfg procedure is called.
+# The mode cannot be changed to exec if imunes operates in
+# editor-only mode (the editor_only variable is set).
+# When changing the mode to edit, all required buttons are enabled
+# (except the simulation/Terminate button, which is disabled) and
+# the vimageCleanup procedure is called.
+# INPUTS
+# * mode -- the new operating mode. Can be edit or exec.
+#****
+proc setOperMode { mode } {
+ upvar 0 ::cf::[set ::curcfg]::node_list node_list
+ upvar 0 ::cf::[set ::curcfg]::undolevel undolevel
+ upvar 0 ::cf::[set ::curcfg]::redolevel redolevel
+ upvar 0 ::cf::[set ::curcfg]::oper_mode oper_mode
+ upvar 0 ::cf::[set ::curcfg]::eid eid
+ global activetool
+ global editor_only remote_exec execSock
+
+ puts "setOperMode $mode"
+
+    if { $mode == "exec" } { ;# try running something, so the sockets get opened
+ nexec id -u
+ if { $editor_only } { ;# if set in nexec or open_exec_sockets
+ .menubar.experiment entryconfigure "Execute" -state disabled
+ return
+ }
+ }
+
+ # Verify that links to external interfaces are properly configured
+ if { $mode == "exec" } {
+	# space-separated list of interfaces (BSD: ifconfig -l)
+ set extifcs [nexec ./ifconfig-l]
+	# remove the loopback interface (BSD: lo) from the list
+ set extifcs \
+ [lreplace $extifcs [lsearch $extifcs lo] [lsearch $extifcs lo]]
+ foreach node $node_list {
+ if { [nodeType $node] == "rj45" } {
+ set i [lsearch $extifcs [getNodeName $node]]
+ if { $i < 0 } {
+ after idle {.dialog1.msg configure -wraplength 4i}
+ tk_dialog .dialog1 "IMUNES error" \
+		    "Error: external interface \"[getNodeName $node]\" non-existent" \
+ info 0 Dismiss
+ return
+ }
+ }
+ }
+ }
+
+ foreach b {link router hub lanswitch host pc rj45} {
+ if { "$mode" == "exec" } {
+ .left.$b configure -state disabled
+ } else {
+ .left.$b configure -state normal
+ }
+ }
+ .bottom.oper_mode configure -text "$mode mode"
+ set activetool select
+ .left.select configure -state active
+ if { "$mode" == "exec" && [nexec id -u] == 0} {
+ global autorearrange_enabled
+ set autorearrange_enabled 0
+ .menubar.tools entryconfigure "Auto rearrange all" -state disabled
+ .menubar.tools entryconfigure "Auto rearrange selected" -state disabled
+ .menubar.experiment entryconfigure "Execute" -state disabled
+ .menubar.experiment entryconfigure "Terminate" -state normal
+ .menubar.edit entryconfigure "Undo" -state disabled
+ .menubar.edit entryconfigure "Redo" -state disabled
+ set oper_mode exec
+ if { $remote_exec } {
+ nexec create_conf_file config.imn
+ dumpCfg file $execSock
+ nexec close_conf_file
+ nexec imunes -b config.imn
+ nexec rm config.imn
+ } else {
+ deployCfg
+ }
+ } else {
+ if {$oper_mode != "edit"} {
+ vimageCleanup $eid
+ .menubar.tools entryconfigure "Auto rearrange all" -state normal
+ .menubar.tools entryconfigure "Auto rearrange selected" -state normal
+ }
+ if { $editor_only } {
+ .menubar.experiment entryconfigure "Execute" -state disabled
+ } else {
+ .menubar.experiment entryconfigure "Execute" -state normal
+ }
+ .menubar.experiment entryconfigure "Terminate" -state disabled
+ if { $undolevel > 0 } {
+ .menubar.edit entryconfigure "Undo" -state normal
+ } else {
+ .menubar.edit entryconfigure "Undo" -state disabled
+ }
+ if { $redolevel > $undolevel } {
+ .menubar.edit entryconfigure "Redo" -state normal
+ } else {
+ .menubar.edit entryconfigure "Redo" -state disabled
+ }
+ set oper_mode edit
+ remoteClose
+ }
+ .c config -cursor left_ptr
+}
+
+
+#****f* exec.tcl/statline
+# NAME
+# statline -- status line
+# SYNOPSIS
+# statline $line
+# FUNCTION
+# Sets the text of the status line. If the execution mode is
+# set to batch, the line is just printed to the standard output.
+# INPUTS
+# * line -- line to be displayed
+#****
+proc statline {line} {
+ global execMode
+
+ if {$execMode == "batch"} {
+ puts $line
+ } else {
+ .bottom.textbox config -text "$line"
+ animateCursor
+ }
+}
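+
+# Example: in batch mode the line goes to stdout, otherwise it is shown
+# in the GUI status bar:
+#   statline "Creating node n0"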
+
+
+#****f* exec.tcl/createIfc
+# NAME
+# createIfc -- create interface
+# SYNOPSIS
+# set name [createIfc $eid $type $hook]
+# FUNCTION
+# Creates a new netgraph interface of the given type and
+# returns the name of the newly created interface.
+# INPUTS
+# * type -- new interface type. Only the eiface and iface
+# types are used in imunes. Additional details on these
+# types can be found in the manual pages of the netgraph nodes.
+# * hook -- parameter specific to each netgraph node type.
+# For the iface type the hook is inet, and for the eiface
+# type the hook is ether.
+# RESULT
+# * name -- the name of the new interface
+#****
+proc createIfc { eid type hook } {
+    puts "entered createIfc $eid $type $hook"
+
+# FreeBSD-specific:
+# puts " we would run: exec printf mkpeer $type $hook $hook \n show .$hook | vimage $eid ngctl -f -"
+# puts " and take the second element of its output"
+# return fake_if_id
+# catch { exec printf "mkpeer $type $hook $hook \n show .$hook" | vimage $eid ngctl -f - } nglist
+# return [lindex $nglist 1]
+}
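+
+# Illustrative note on the disabled FreeBSD variant above (an assumption
+# based on the usual ngctl "show" output, e.g. "Name: ngeth0 Type: eiface
+# ..."): [lindex $nglist 1] would pick out the new interface name, so
+#   set ifid [createIfc $eid eiface ether]
+# would return something like "ngeth0" once that code is re-enabled.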
+
+
+#****f* exec.tcl/l3node.instantiate
+# NAME
+# l3node.instantiate -- layer 3 node instantiate
+# SYNOPSIS
+# l3node.instantiate $eid $node
+# FUNCTION
+# Instantiates the specified node: creates a new vimage node and
+# all the required interfaces (for a serial interface a new
+# netgraph interface of type iface, for ethernet one of type
+# eiface, using the createIfc procedure) including the loopback
+# interface, applies the CPU configuration and sets the kernel
+# variables.
+# INPUTS
+# * eid -- experiment id
+# * node -- node id
+#****
+proc l3node.instantiate { eid node } {
+ upvar 0 ::cf::[set ::curcfg]::ngnodemap ngnodemap
+ global mac_byte4 mac_byte5
+
+ puts "l3node.instantiate $eid $node"
+
+ set node_id "$eid\.$node"
+# an OpenVZ VE ID can only be numeric, while our node_id contains
+# a dot delimiter and the letter n, so those characters must be stripped
+ set openvz_node_id $eid[string range $node 1 end]
+
+# we could create the OpenVZ conf file for this node here, but we
+# let vzctl create generate it automatically from the vps-basic
+# sample file in /etc/vz/conf/
+
+# puts " nexec vimage -c $node_id "
+ nexec vzctl create $openvz_node_id --ostemplate imunes-openvz
+
+# Assign our short node id to its numeric VE ID using the --name option.
+# TODO: if support for multiple parallel experiments is needed, the VE
+# name will have to be prefixed with $eid (and likewise in vimageCleanup)
+ nexec vzctl set $openvz_node_id --name $node --save
+
+# puts " nexec vimage $node_id hostname [getNodeName $node] "
+ nexec vzctl set $node --hostname [getNodeName $node] --save
+
+# FreeBSD-only feature (expanding @ in symlinks to the node id)
+# puts " nexec vimage $node_id sysctl vfs.morphing_symlinks=1 "
+
+ nexec vzctl start $node
+
+ foreach ifc [ifcList $node] {
+ switch -exact [string range $ifc 0 2] {
+ eth {
+# FreeBSD-specific:
+# set ifid [createIfc $eid eiface ether]
+# puts " nexec vimage $eid vimage -i $node $ifid $ifc "
+
+# on Linux we don't need to run createIfc; we only set up this standard
+# format which is passed to vzctl --netif_add
+ set ifid "to_${node}_${ifc}"
+
+ set peer [peerByIfc $node $ifc]
+
+		puts "node: $node ifc: $ifc peer: $peer"
+
+ if { [nodeType $peer] == "rj45" } {
+# TODO: RJ-45 nodes are not supported yet
+# set peerifc [getNodeName $peer]
+# set ether [nexec ifconfig $peerifc | fgrep "ether " | cut -c8-]
+ } else {
+		    # vzctl rejects the MAC address unless it is properly padded
+ set ether 40:00:aa:aa:[format %02x $mac_byte4]:[format %02x $mac_byte5]
+ set host_ether 40:00:aa:ff:[format %02x $mac_byte4]:[format %02x $mac_byte5]
+ incr mac_byte5
+ if { $mac_byte5 >= 100 } {
+ set mac_byte5 0
+ incr mac_byte4
+ }
+ }
+# puts " nexec vimage $node_id ifconfig $ifc link $ether "
+ nexec vzctl {set} $node --netif_add $ifc,$ether,$ifid,$host_ether
+ set ngnodemap($ifc@$node_id) $ifid
+ }
+ ser {
+# TODO: serial interfaces are not supported yet
+# set ifnum [string range $ifc 3 end]
+# set ifid [createIfc $eid iface inet]
+# nexec vimage $eid ngctl mkpeer $ifid: cisco inet inet
+# nexec vimage $eid ngctl connect $ifid: $ifid:inet inet6 inet6
+# nexec vimage $eid ngctl msg $ifid: broadcast
+# nexec vimage $eid ngctl name $ifid:inet hdlc$ifnum\@$node
+# nexec vimage $eid vimage -i $node $ifid $ifc
+# set ngnodemap(hdlc$ifnum@$node_id) hdlc$ifnum\@$node
+ }
+ }
+ }
+
+# TODO: OpenVZ CPU tuning
+# set cpuconf [getNodeCPUConf $node]
+# set cpumin [lindex [lsearch -inline $cpuconf {min *}] 1]
+# set cpumax [lindex [lsearch -inline $cpuconf {max *}] 1]
+# set cpuweight [lindex [lsearch -inline $cpuconf {weight *}] 1]
+# if { $cpumin != "" } {
+# puts " nexec vimage -m $node_id cpumin $cpumin "
+# }
+# if { $cpumax != "" } {
+# puts " nexec vimage -m $node_id cpumax $cpumax "
+# }
+# if { $cpuweight != "" } {
+# puts " nexec vimage -m $node_id cpuweight $cpuweight "
+# }
+
+# FreeBSD-only
+# puts "
+# nexec vimage $node_id sysctl net.inet.icmp.bmcastecho=1
+# nexec vimage $node_id sysctl net.inet.icmp.icmplim=0
+# nexec vimage $node_id ifconfig lo0 inet localhost
+# nexec vimage $node_id route add 224.0.0.0/4 localhost
+# "
+}
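+
+# Worked example of the interface setup above (hypothetical node "n1" with
+# interface "eth0"): with mac_byte4 = 0 and mac_byte5 = 0 the generated
+# addresses are ether 40:00:aa:aa:00:00 and host_ether 40:00:aa:ff:00:00,
+# so the resulting call is:
+#   vzctl set n1 --netif_add eth0,40:00:aa:aa:00:00,to_n1_eth0,40:00:aa:ff:00:00
+# mac_byte5 counts up to 99 and then wraps back to 0, bumping mac_byte4.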
+
+
+#****f* exec.tcl/l3node.nghook
+# NAME
+# l3node.nghook -- layer 3 node netgraph hook
+# SYNOPSIS
+# l3node.nghook $eid $node $ifc
+# FUNCTION
+# Returns the netgraph node name and the hook name for
+# a given experiment id, node id, and interface name.
+# INPUTS
+# * eid -- experiment id
+# * node -- node id
+# * ifc -- interface name
+# RESULT
+# * list -- list in the form of {netgraph_node_name hook}
+#****
+proc l3node.nghook { eid node ifc } {
+ set ifnum [string range $ifc 3 end]
+ set node_id "$eid\.$node"
+ switch -exact [string range $ifc 0 2] {
+ eth {
+ return [list $ifc@$node_id ether]
+ }
+ ser {
+ return [list hdlc$ifnum@$node_id downstream]
+ }
+ }
+}
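+
+# Example return values (hypothetical eid "e0" and node "n1"):
+#   l3node.nghook e0 n1 eth0    ;# -> {eth0@e0.n1 ether}
+#   l3node.nghook e0 n1 ser0    ;# -> {hdlc0@e0.n1 downstream}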
+
+#****f* exec.tcl/l3node.start
+# NAME
+# l3node.start -- layer 3 node start
+# SYNOPSIS
+# l3node.start $eid $node
+# FUNCTION
+# Starts a new layer 3 node (pc, host or router). The node can be
+# started only if it has been instantiated.
+# Simulates the boot process of a node: starts all the services
+# and assigns the IP addresses to the interfaces.
+# INPUTS
+# * eid -- experiment id
+# * node -- node id
+#****
+proc l3node.start { eid node } {
+ global remote_exec execSock
+ global viewcustomid
+
+ puts "l3node.start $eid $node"
+
+ set node_id "$eid\.$node"
+
+# on FreeBSD vimages share the host filesystem;
+# on Linux VEs do not
+# set node_dir "/tmp/$eid/$node"
+
+# puts " would recreate node_dir (/tmp/$eid/$node)"
+# nexec rm -fr $node_dir
+# nexec mkdir $node_dir
+# nexec chmod 1777 $node_dir
+
+# TODO: MTU configuration is not supported yet (nor are other L2 parameters)
+# foreach ifc [ifcList $node] {
+# set mtu [getIfcMTU $node $ifc]
+# puts " nexec vimage $node_id ifconfig $ifc mtu $mtu "
+# }
+
+    if { ![info exists viewcustomid] } {
+	set viewcustomid generic
+    }
+ if { [getCustomEnabled $node] == true } {
+ set customCfg ""
+ set customCfgList ""
+ set customCfgList [getCustomConfig $node]
+ foreach element $customCfgList {
+ set cid [lindex [lsearch -inline $element "custom-config-id *"] 1]
+ if { $cid == $viewcustomid } {
+ set customCfg $element
+ }
+ }
+ if { $customCfg == "" } {
+ set customCfg [lindex $customCfgList 0]
+ }
+
+ set bootcmd [getConfig $customCfg "custom-command"]
+ set bootcfg [getConfig $customCfg "config"]
+ } else {
+ set bootcmd ""
+ set bootcfg ""
+ }
+
+ set ipsecCfg ""
+ if { [getIpsecEnabled $node] == true } {
+# TODO: ipsec is not supported yet
+ puts " would do the ipsecenabled stuff"
+#
+# set setkeycfg [ipsecCfggen $node]
+# set setkeyFileId [open $node_dir/setkey.conf w+]
+# foreach line $setkeycfg {
+# puts $setkeyFileId $line
+# }
+# close $setkeyFileId
+
+# set errorstr ""
+# set error [catch "nexec vimage $node_id setkey -f \
+# $node_dir/setkey.conf" errorstr]
+# if { $error == "1" } {
+# setkeyError $node_id $errorstr
+# }
+ }
+
+    if { $bootcmd == "" || $bootcfg == "" } {
+ set bootcfg [[typemodel $node].cfggen $node]
+ set bootcmd [[typemodel $node].bootcmd $node]
+ }
+    puts " typemodel=[typemodel $node]"
+# puts "bootcfg=$bootcfg and bootcmd=$bootcmd"
+ if { ! $remote_exec } {
+# puts " would write node_dir/boot.conf"
+# set fileId [open $node_dir/boot.conf w]
+# foreach line $bootcfg {
+# puts $fileId $line
+# }
+# close $fileId
+    } else {
+	# $node_dir is otherwise only defined in the commented-out block
+	# above, so set it here for the remote config file path
+	set node_dir "/tmp/$eid/$node"
+	nexec create_conf_file $node_dir/boot.conf
+ foreach line $bootcfg {
+ puts $execSock $line
+ }
+ nexec close_conf_file
+ }
+
+# puts " would execute bootcmd boot.conf in $node_id"
+# catch "nexec vimage $node_id $bootcmd $node_dir/boot.conf >& $node_dir/out.log &"
+
+ foreach line $bootcfg {
+ puts " executing on $node: $line"
+ set command $line
+ if { "$command" != "" } {
+	    # catch returns a nonzero status on failure and stores the
+	    # output or error message in ret
+	    if { [catch { nexec vzctl exec $node $command } ret] } {
+		puts " $ret"
+		# TODO: proper GUI message box, plus batch mode handling
+	    }
+ }
+ }
+
+}
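+
+# Sketch of what the boot loop above ends up running for a typical node
+# (illustrative lines only; the real ones come from [typemodel $node].cfggen):
+#   vzctl exec n1 ifconfig eth0 inet 10.0.0.1/24
+#   vzctl exec n1 route -q add -inet 0.0.0.0/0 10.0.0.2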
+
+#****f* exec.tcl/l3node.shutdown
+# NAME
+# l3node.shutdown -- layer 3 node shutdown
+# SYNOPSIS
+# l3node.shutdown $eid $node
+# FUNCTION
+# Shuts down a layer 3 node (pc, host or router).
+# Simulates the shutdown process of a node: kills all the services
+# and deletes the IP addresses of all interfaces.
+# INPUTS
+# * eid -- experiment id
+# * node -- node id
+#****
+proc l3node.shutdown { eid node } {
+ set node_id "$eid\.$node"
+ puts "nexec vimage $node_id kill -9 -1 2> /dev/null"
+ foreach ifc [ifcList $node] {
+ foreach ipv4 [getIfcIPv4addr $node $ifc] {
+ puts "nexec vimage $node_id ifconfig $ifc $ipv4 -alias"
+ }
+ foreach ipv6 [getIfcIPv6addr $node $ifc] {
+ puts "nexec vimage $node_id ifconfig $ifc inet6 $ipv6 -alias"
+ }
+ }
+}
+
+
+#****f* exec.tcl/l3node.destroy
+# NAME
+# l3node.destroy -- layer 3 node destroy
+# SYNOPSIS
+# l3node.destroy $eid $node
+# FUNCTION
+# Destroys a layer 3 node (pc, host or router).
+# Destroys all the interfaces of the node by sending a shutdown
+# message to the netgraph nodes, and finally destroys the vimage itself.
+# INPUTS
+# * eid -- experiment id
+# * node -- node id
+#****
+proc l3node.destroy { eid node } {
+ set node_id $node
+ foreach ifc [ifcList $node] {
+ puts " nexec vimage $eid ngctl msg $ifc@$node_id: shutdown "
+
+ set ifnum [string range $ifc 3 end]
+ set ifname [string range $ifc 0 2]
+ if { $ifname == "ser" } {
+ puts " nexec vimage $eid ngctl msg hdlc$ifnum@$node_id: shutdown "
+ }
+ }
+ puts "nexec vimage -d $node_id"
+ nexec vzctl stop $node
+ nexec vzctl destroy $node
+
+# FreeBSD only node_dir:
+# set node_dir "/tmp/$eid/$node"
+# nexec rm -fr $node_dir
+}
+
+
+#****f* exec.tcl/deployCfg
+# NAME
+# deployCfg -- deploy working configuration
+# SYNOPSIS
+# deployCfg
+# FUNCTION
+# Deploys the current working configuration. It creates all the
+# nodes and links as defined in the configuration file or in the
+# imunes GUI. Before deploying a new configuration the old one is
+# removed (vimageCleanup procedure).
+#****
+proc deployCfg {} {
+ upvar 0 ::cf::[set ::curcfg]::node_list node_list
+ upvar 0 ::cf::[set ::curcfg]::link_list link_list
+ upvar 0 ::cf::[set ::curcfg]::ngnodemap ngnodemap
+ upvar 0 ::cf::[set ::curcfg]::eid eid
+ global supp_router_models
+ global mac_byte4 mac_byte5
+ global remote_exec
+ global eid_base
+
+ puts "deployCfg"
+
+ set mac_byte4 0
+ set mac_byte5 0
+
+ set eid ${eid_base}[string range $::curcfg 1 end]
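+# e.g. (hypothetical names): with eid_base "i" and ::curcfg "c0" the
+# experiment id becomes "i0" -- the first character of the config name
+# is stripped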
+
+ set t_start [clock seconds]
+
+# on FreeBSD, create one base vimage that will hold the network
+# peerings between the nodes
+# on Linux we don't do this, because we cannot touch the network
+# links of one VE from another, so it all stays on the parent host
+# puts " nexec vimage -c $eid "
+# puts " nexec rm -fr /tmp/$eid "
+# puts " nexec mkdir /tmp/$eid "
+
+# puts { nexec kldload ng_ether }
+# puts { nexec kldload ng_iface }
+# puts { nexec kldload ng_eiface }
+# puts { nexec kldload ng_cisco }
+# puts { nexec kldload ng_pipe }
+# puts { nexec kldload ng_hub }
+# puts { nexec kldload ng_bridge }
+# puts { nexec kldload ng_socket }
+
+ foreach node $node_list {
+	# OpenVZ VE names are numeric (no dots)
+ set node_id "$eid\.$node"
+ set type [nodeType $node]
+ set name [getNodeName $node]
+ if { $type != "pseudo" } {
+ statline "Creating node $name"
+ [typemodel $node].instantiate $eid $node
+ }
+ }
+
+## TODO: setting the L2 link parameters is not supported
+
+# for { set pending_links $link_list } { $pending_links != "" } {} {
+# set link [lindex $pending_links 0]
+# set i [lsearch -exact $pending_links $link]
+# set pending_links [lreplace $pending_links $i $i]
+
+# set lnode1 [lindex [linkPeers $link] 0]
+# set lnode2 [lindex [linkPeers $link] 1]
+# set ifname1 [ifcByPeer $lnode1 $lnode2]
+# set ifname2 [ifcByPeer $lnode2 $lnode1]
+
+# if { [getLinkMirror $link] != "" } {
+# set mirror_link [getLinkMirror $link]
+# set i [lsearch -exact $pending_links $mirror_link]
+# set pending_links [lreplace $pending_links $i $i]
+
+# statline "Creating link $link/$mirror_link"
+
+# set p_lnode2 $lnode2
+# set lnode2 [lindex [linkPeers $mirror_link] 0]
+# set ifname2 [ifcByPeer $lnode2 [getNodeMirror $p_lnode2]]
+# } else {
+# statline "Creating link $link"
+# }
+
+# set lname $lnode1-$lnode2
+# set bandwidth [expr [getLinkBandwidth $link] + 0]
+# set delay [expr [getLinkDelay $link] + 0]
+# set ber [expr [getLinkBER $link] + 0]
+# set dup [expr [getLinkDup $link] + 0]
+
+# set peer1 \
+# [lindex [[typemodel $lnode1].nghook $eid $lnode1 $ifname1] 0]
+# set peer2 \
+# [lindex [[typemodel $lnode2].nghook $eid $lnode2 $ifname2] 0]
+# set ngpeer1 $ngnodemap($peer1)
+# set ngpeer2 $ngnodemap($peer2)
+# set nghook1 \
+# [lindex [[typemodel $lnode1].nghook $eid $lnode1 $ifname1] 1]
+# set nghook2 \
+# [lindex [[typemodel $lnode2].nghook $eid $lnode2 $ifname2] 1]
+
+# puts " nexec vimage $eid ngctl mkpeer $ngpeer1: pipe $nghook1 upper "
+# puts " nexec vimage $eid ngctl name $ngpeer1:$nghook1 $lname "
+# puts " nexec vimage $eid ngctl connect $lname: $ngpeer2: lower $nghook2 "
+
+# FreeBSD-specific
+# # Ethernet frame has a 14-byte header - this is a temp. hack!!!
+# puts " nexec vimage $eid ngctl msg $lname: setcfg { header_offset=14 } "
+
+# # Link parameters
+# puts " nexec vimage $eid ngctl msg $lname: setcfg
+# { bandwidth=$bandwidth delay=$delay
+# upstream={ BER=$ber duplicate=$dup }
+# downstream={ BER=$ber duplicate=$dup } } "
+
+# # Queues
+# foreach node [list $lnode1 $lnode2] {
+# if { $node == $lnode1 } {
+# set ifc $ifname1
+# } else {
+# set ifc $ifname2
+# }
+
+# if { [nodeType $lnode1] != "rj45" && \
+# [nodeType $lnode2] != "rj45" } {
+# execSetIfcQDisc $eid $node $ifc [getIfcQDisc $node $ifc]
+# execSetIfcQDrop $eid $node $ifc [getIfcQDrop $node $ifc]
+# execSetIfcQLen $eid $node $ifc [getIfcQLen $node $ifc]
+# }
+# }
+# }
+
+# # XXX move those to some quagga specific script
+# # XXX race if starting two imunes instances simultaneously
+# nexec rm -fr /var/run/quagga
+# nexec rm -f /usr/local/etc/quagga/Quagga.conf
+# puts { nexec ln -s /tmp/$eid/@ /var/run/quagga }
+# puts { nexec ln -s /tmp/$eid/@/boot.conf /usr/local/etc/quagga/Quagga.conf }
+# foreach file { bgpd ospfd ospf6d ripd ripngd vtysh zebra } {
+# puts { nexec cp /dev/null /usr/local/etc/quagga/$file.conf }
+# }
+
+ foreach node $node_list {
+ upvar 0 ::cf::[set ::curcfg]::$node $node
+ set type [nodeType $node]
+ if { $type == "pseudo" } {
+ continue
+ }
+ statline "Configuring node [getNodeName $node]"
+ [typemodel $node].start $eid $node
+ }
+
+ statline "Network topology instantiated in [expr [clock seconds] - $t_start] seconds ([llength $node_list] nodes and [llength $link_list] links)."
+}
+
+
+#****f* exec.tcl/vimageCleanup
+# NAME
+# vimageCleanup -- vimage cleanup
+# SYNOPSIS
+# vimageCleanup $eid
+# FUNCTION
+# Called in special circumstances only. It cleans all
+# the imunes objects (vimages and netgraph nodes) from the kernel.
+#****
+proc vimageCleanup { eid } {
+ global .c
+ global execMode
+
+ puts "vimageCleanup $eid"
+
+ set t_start [clock seconds]
+
+## FreeBSD-only bug workaround:
+# puts " would check vimage $eid vimage -lr \| fgrep -v \" \" \| cut -d: -f 1"
+# if { [ catch {nexec vimage $eid vimage -lr | fgrep -v " " | cut -d: -f 1} res] != 0 } {
+# return
+# }
+# set vimages [join $res]
+# set defindex [lsearch $vimages .]
+# set vimages [lreplace $vimages $defindex $defindex]
+
+# # Wait for TIME_WAIT timeout in all vimages
+# set vrti 1
+# set sec 60
+
+# if { $execMode == "batch" } {
+# puts "\nDue to the known bug we must wait for TIME_WAIT expiration on virtual nodes (up to 60 sec). "
+# puts "Please don't try killing the process."
+# } else {
+# # wm title $wi "Please wait ..."
+# set w .timewait
+# puts {destroy $w}
+# toplevel $w -takefocus 1
+# wm geometry $w +150+150
+# wm title $w "Please wait ..."
+# message $w.msg -justify left -aspect 1200 \
+# -text "\nDue to the known bug we must wait for TIME_WAIT expiration on virtual nodes (up to 60 sec).
+#Please don't try killing the process.
+#(countdown on status line)\n"
+# pack $w.msg
+# update
+# grab $w
+# }
+# while { $vrti == 1 } {
+# set vrti 0
+# foreach vimage $vimages {
+# puts "vimage $vimage...\n"
+# puts "checking netstat -an -f inet | fgrep WAIT..."
+# while { [catch {nexec vimage $eid.$vimage netstat -an -f inet | fgrep "WAIT"} odg] == 0} {
+# set vrti 1
+# # puts "vimage $vimage: \n$odg\n"
+# after 2000
+# set sec [expr $sec - 2]
+# if { $execMode == "batch" } {
+# puts -nonewline "."
+# flush stdout
+# } else {
+# statline "~ $sec sec ..."
+# pack $w.msg
+# update
+# }
+# }
+# }
+# }
+# if { $execMode == "batch" } {
+# puts ""
+# } else {
+# destroy .timewait
+# }
+
+# FreeBSD vimage only
+# # Kill all processes in all vimages
+# foreach vimage $vimages {
+# set stattxt "Terminating processes in vimage $vimage"
+# statline $stattxt
+# puts "nexec vimage $eid.$vimage kill -9 -1 2> /dev/null"
+# }
+
+# FreeBSD netgraph only
+# # Detach / destroy / reassign interfaces pipe, eiface, iface, bridge
+# puts "running vimage $eid ngctl l | tail -n +3"
+# set ngnodes [split [nexec vimage $eid ngctl l | tail -n +3] "
+#"]
+# foreach ngline $ngnodes {
+# set node [lindex [eval list $ngline] 1]
+# statline "Shutting down netgraph node $node"
+# puts "nexec vimage $eid ngctl msg $node: shutdown"
+# }
+
+ # Shut down all vimages
+
+    # OpenVZ - we only need the list of nodes to shut down
+ upvar 0 ::cf::[set ::curcfg]::node_list node_list
+ foreach node $node_list {
+ set stattxt "Stopping VE $node"
+ statline $stattxt
+ nexec vzctl stop $node
+ set stattxt "Destroying VE $node"
+ statline $stattxt
+ nexec vzctl destroy $node
+ }
+
+ puts "*** missing bridge cleanup in main host"
+
+# FreeBSD method:
+# foreach vimage $vimages {
+# set stattxt "Shutting down vimage $vimage"
+# statline $stattxt
+# puts "nexec vimage $eid.$vimage kill -9 -1 2> /dev/null"
+# while { [catch {nexec vimage -d $eid.$vimage}] } {
+# set stattxt $stattxt.
+# statline $stattxt
+# puts "nexec vimage $eid.$vimage kill -9 -1 2> /dev/null"
+# }
+# }
+# puts " nexec vimage -d $eid "
+
+# puts " nexec rm -f /usr/local/etc/quagga/Quagga.conf "
+
+# FreeBSD-only cleanup of the eid tmp dir
+# nexec rm -fr "/tmp/$eid"
+ statline "Cleanup completed in [expr [clock seconds] - $t_start] seconds."
+}
+
+
+#****f* exec.tcl/execSetIfcQDisc
+# NAME
+# execSetIfcQDisc -- in exec mode set interface queuing discipline
+# SYNOPSIS
+# execSetIfcQDisc $eid $node $ifc $qdisc
+# FUNCTION
+# Sets the queuing discipline during the simulation.
+# The new queuing discipline is given in the qdisc parameter
+# and can be set to fifo, wfq or drr.
+# INPUTS
+# eid -- experiment id
+# node -- node id
+# ifc -- interface name
+# qdisc -- queuing discipline
+#****
+proc execSetIfcQDisc { eid node ifc qdisc } {
+
+ puts "execSetIfcQDisc $eid $node $ifc $qdisc"
+ return
+
+ set target [linkByIfc $node $ifc]
+ set peers [linkPeers [lindex $target 0]]
+ set dir [lindex $target 1]
+ set lnode1 [lindex $peers 0]
+ set lnode2 [lindex $peers 1]
+ if { [nodeType $lnode2] == "pseudo" } {
+ set mirror_link [getLinkMirror [lindex $target 0]]
+ set lnode2 [lindex [linkPeers $mirror_link] 0]
+ }
+ switch -exact $qdisc {
+ FIFO { set qdisc fifo }
+ WFQ { set qdisc wfq }
+ DRR { set qdisc drr }
+ }
+ set ngnode "$lnode1-$lnode2"
+ if { [catch { nexec vimage $eid ngctl msg $ngnode: setcfg "{ $dir={ $qdisc=1 } }" }] } {
+ set ngnode "$lnode2-$lnode1"
+ nexec vimage $eid ngctl msg $ngnode: setcfg "{ $dir={ $qdisc=1 } }"
+ }
+}
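+
+# Hypothetical call (note that the netgraph code after the early return
+# is the disabled FreeBSD variant):
+#   execSetIfcQDisc $eid n0 eth0 WFQ    ;# would send setcfg "{ $dir={ wfq=1 } }"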
+
+
+#****f* exec.tcl/execSetIfcQDrop
+# NAME
+# execSetIfcQDrop -- in exec mode set interface queue drop
+# SYNOPSIS
+# execSetIfcQDrop $eid $node $ifc $qdrop
+# FUNCTION
+# Sets the queue dropping policy during the simulation.
+# The new queue dropping policy is given in the qdrop parameter
+# and can be set to drop-head or drop-tail.
+# INPUTS
+# eid -- experiment id
+# node -- node id
+# ifc -- interface name
+# qdrop -- queue dropping policy
+#****
+proc execSetIfcQDrop { eid node ifc qdrop } {
+
+ puts "execSetIfcQDrop $eid $node $ifc $qdrop"
+ return
+
+ set target [linkByIfc $node $ifc]
+ set peers [linkPeers [lindex $target 0]]
+ set dir [lindex $target 1]
+ set lnode1 [lindex $peers 0]
+ set lnode2 [lindex $peers 1]
+ if { [nodeType $lnode2] == "pseudo" } {
+ set mirror_link [getLinkMirror [lindex $target 0]]
+ set lnode2 [lindex [linkPeers $mirror_link] 0]
+ }
+ switch -exact $qdrop {
+ drop-head { set qdrop drophead }
+ drop-tail { set qdrop droptail }
+ }
+ set ngnode "$lnode1-$lnode2"
+ if { [catch { nexec vimage $eid ngctl msg $ngnode: setcfg "{ $dir={ $qdrop=1 } }" }] } {
+ # XXX dir should be reversed!
+ set ngnode "$lnode2-$lnode1"
+ nexec vimage $eid ngctl msg $ngnode: setcfg "{ $dir={ $qdrop=1 } }"
+ }
+}
+
+
+#****f* exec.tcl/execSetIfcQLen
+# NAME
+# execSetIfcQLen -- in exec mode set interface queue length
+# SYNOPSIS
+# execSetIfcQLen $eid $node $ifc $qlen
+# FUNCTION
+# Sets the queue length during the simulation.
+# The new queue length is given in the qlen parameter.
+# INPUTS
+# eid -- experiment id
+# node -- node id
+# ifc -- interface name
+# qlen -- new queue length
+#****
+proc execSetIfcQLen { eid node ifc qlen } {
+
+ puts "execSetIfcQLen $eid $node $ifc $qlen"
+ return
+
+ set target [linkByIfc $node $ifc]
+ set peers [linkPeers [lindex $target 0]]
+ set dir [lindex $target 1]
+ set lnode1 [lindex $peers 0]
+ set lnode2 [lindex $peers 1]
+ if { [nodeType $lnode2] == "pseudo" } {
+ set mirror_link [getLinkMirror [lindex $target 0]]
+ set lnode2 [lindex [linkPeers $mirror_link] 0]
+ }
+ set ngnode "$lnode1-$lnode2"
+ if { $qlen == 0 } {
+ set qlen -1
+ }
+ if { [catch { nexec vimage $eid ngctl msg $ngnode: setcfg "{ $dir={ queuelen=$qlen } }" }] } {
+ set ngnode "$lnode2-$lnode1"
+ nexec vimage $eid ngctl msg $ngnode: setcfg "{ $dir={ queuelen=$qlen } }"
+ }
+}
+
+
+#****f* exec.tcl/execSetLinkParams
+# NAME
+# execSetLinkParams -- in exec mode set link parameters
+# SYNOPSIS
+# execSetLinkParams $eid $link
+# FUNCTION
+# Sets the link parameters during the simulation.
+# All the parameters are set at the same time.
+# INPUTS
+# eid -- experiment id
+# link -- link id
+#****
+proc execSetLinkParams { eid link } {
+
+ puts "execSetLinkParams $eid $link"
+ return
+
+ set lnode1 [lindex [linkPeers $link] 0]
+ set lnode2 [lindex [linkPeers $link] 1]
+ set lname $lnode1-$lnode2
+
+ set bandwidth [expr [getLinkBandwidth $link] + 0]
+ set delay [expr [getLinkDelay $link] + 0]
+ set ber [expr [getLinkBER $link] + 0]
+ set dup [expr [getLinkDup $link] + 0]
+
+ if { $bandwidth == 0 } {
+ set bandwidth -1
+ }
+ if { $delay == 0 } {
+ set delay -1
+ }
+ if { $ber == 0 } {
+ set ber -1
+ }
+ if { $dup == 0 } {
+ set dup -1
+ }
+
+ nexec vimage $eid ngctl msg $lname: setcfg \
+ "{ bandwidth=$bandwidth delay=$delay \
+ upstream={ BER=$ber duplicate=$dup } \
+ downstream={ BER=$ber duplicate=$dup } }"
+}
+
+
+#****f* exec.tcl/openFwrd
+# NAME
+# openFwrd -- open port forwarding
+# SYNOPSIS
+# set result [openFwrd lPort rPort rHost]
+# FUNCTION
+# Called upon starting remote execution with ssh port forwarding.
+# Works only on unix hosts; opens a secure connection from the local
+# host to the remote host, using key authentication. Returns the
+# process id of the ssh port forwarding.
+# INPUTS
+# lPort -- local port
+# rPort -- remote port
+# rHost -- remote host in the form: userName@hostName
+# RESULT
+# * result -- process id of the ssh port forwarding
+#****
+proc openFwrd { lPort rPort rHost } {
+ global tcl_platform platform
+ if { $tcl_platform(platform) == "unix" } {
+ set pid [exec ssh -N -L $lPort:localhost:$rPort $rHost &]
+ return $pid
+ }
+}
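+
+# Usage sketch (hypothetical host and ports): forward local port 2331 to
+# port 2321 on the remote host, keeping the ssh pid so the tunnel can be
+# closed later with closeFwrd:
+#   set pid [openFwrd 2331 2321 imunes@remote.example.org]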
+
+#****f* exec.tcl/closeFwrd
+# NAME
+# closeFwrd -- close port forwarding
+# SYNOPSIS
+# set result [closeFwrd pid]
+# FUNCTION
+# Called upon ending remote execution with ssh port forwarding.
+# Works only on unix hosts; closes the secure connection from the
+# local host to the remote host.
+# INPUTS
+# pid -- process id
+#****
+proc closeFwrd { pid } {
+ global tcl_platform platform
+ if { $tcl_platform(platform) == "unix" } {
+ puts {eval exec kill $pid}
+ return ""
+ }
+}
+
+#****f* exec.tcl/open_sock
+# NAME
+# open_sock -- open socket
+# SYNOPSIS
+# set sock [open_sock rHost rPort]
+# FUNCTION
+# Called upon starting remote execution. Opens a socket.
+# If ssh encryption is used a socket to the local port is opened,
+# otherwise a socket to the remote host is used. If the creation of
+# the socket fails, an empty string is returned.
+# INPUTS
+# rHost -- remote host in the form: hostName
+# rPort -- remote port
+# RESULT
+# * sock -- socket
+#****
+proc open_sock { rHost rPort } {
+ global ssh
+
+ if { $ssh } {
+ catch { set sock [socket localhost [expr $rPort + 10]] }
+ } else {
+ catch { set sock [socket $rHost $rPort] }
+ }
+ if { [info exists sock] } {
+ return $sock
+ } else {
+ return ""
+ }
+}
+
+#****f* exec.tcl/close_sock
+# NAME
+# close_sock -- close socket
+# SYNOPSIS
+# set sock [close_sock sock]
+# FUNCTION
+# Called upon ending remote execution. Closes a socket and returns an empty string.
+# INPUTS
+# sock -- socket that is being closed
+# RESULT
+# * sock -- an empty string
+#****
+proc close_sock { sock } {
+ catch { close $sock }
+ return ""
+}
+
+#****f* exec.tcl/rexec
+# NAME
+# rexec -- remote exec
+# SYNOPSIS
+# rexec io command
+# FUNCTION
+# Called upon remote execution of a command. Writes the command to the
+# io channel and reads the response until the terminator string "Kraj"
+# is read. Returns all the lines received before "Kraj".
+# INPUTS
+# io -- socket on which the command is passed
+# command -- command that is remotely executed
+# RESULT
+# * response -- the command output
+#****
+proc rexec { io command } {
+ set response ""
+
+ puts $io $command
+ flush $io
+
+    gets $io line
+    if { $line == "Kraj rada" } {
+	close $io
+	return $response
+    }
+ if {$line != "Kraj" } {
+ set response $line
+ if { [string match "imunes -b *" $command] } {
+ statline $line
+ }
+ gets $io line
+ }
+ while { $line != "Kraj" } {
+ append response "\n" $line
+ if { [string match "imunes -b *" $command] } {
+ statline $line
+ }
+ gets $io line
+ }
+ return $response
+}
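+
+# Wire-protocol sketch, as implied by the code above: every response ends
+# with a line "Kraj" (Croatian for "end"), while "Kraj rada" ("end of
+# work") closes the session. A hypothetical exchange:
+#   -> id -u
+#   <- 0
+#   <- Kraj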
+
+#****f* exec.tcl/remoteStart
+# NAME
+# remoteStart -- remoteStart
+# SYNOPSIS
+# remoteStart
+# FUNCTION
+# Starts the remote execution. Reads the parameters from the GUI and
+# decides whether to open an encrypted channel or a regular socket for
+# remote execution. If the sockets cannot be opened, a message box is
+# displayed.
+#****
+proc remoteStart {} {
+ global monSock execSock mpid epid lmPort lePort editor_only ssh
+ global remote_exec exec_hosts exec_host
+
+ if { ! $remote_exec } { return }
+
+ set remote_exec false
+ set n [llength $exec_hosts]
+ for { set i 0 } { $i < $n } { incr i } {
+ if { [lindex [lindex $exec_hosts $i] 3] } {
+ set rHost [lindex [lindex $exec_hosts $i] 0]
+ set rePort [lindex [lindex $exec_hosts $i] 1]
+ set rmPort [lindex [lindex $exec_hosts $i] 2]
+ set ssh [lindex [lindex $exec_hosts $i] 4]
+ set userName [lindex [lindex $exec_hosts $i] 5]
+ set remote_exec true
+ set exec_host $rHost
+ break
+ }
+ }
+
+ if { ! $remote_exec } { return }
+
+
+ if { (![info exists mpid] || $mpid == "") && $ssh } {
+ set mpid [openFwrd [expr $rmPort + 10] $rmPort $userName@$rHost]
+ }
+ if { (![info exists epid] || $epid == "") && $ssh } {
+ set epid [openFwrd [expr $rePort + 10] $rePort $userName@$rHost]
+ }
+ after 500 { set t 1 }
+ vwait t
+ if { ![info exists monSock] || $monSock == "" } {
+ set monSock [open_sock $rHost $rmPort]
+ }
+ if { ![info exists execSock] || $execSock == "" } {
+ set execSock [open_sock $rHost $rePort]
+ }
+ if { $monSock == "" || $execSock == "" } {
+ set sel [tk_dialog .box "Socket problems" \
+ "Cannot open sockets" \
+ "" 0 "Retry" "Configure remote hosts" "Editor only mode" ]
+ switch $sel {
+ 0 {
+ remoteStart
+ }
+ 1 {
+ configRemoteHosts
+ }
+ 2 {
+ set editor_only true
+ }
+
+ }
+ }
+}
+
+#****f* exec.tcl/remoteClose
+# NAME
+# remoteClose -- close the remote execution
+# SYNOPSIS
+# remoteClose
+# FUNCTION
+# Closes the remote execution of the experiment. Closes all the sockets.
+#****
+proc remoteClose { } {
+ global execSock monSock remote_exec mpid epid
+
+ if { $remote_exec && [info exists execSock] } {
+ set execSock [close_sock $execSock]
+ set monSock [close_sock $monSock]
+ if { [info exists mpid] && ($mpid != "") } {
+ set mpid [closeFwrd $mpid]
+ }
+ if { [info exists epid] && ($epid != "") } {
+ set epid [closeFwrd $epid]
+ }
+ }
+ return
+}
--- /dev/null
+#
+# Copyright 2005-2008 University of Zagreb, Croatia.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# This work was supported in part by the Croatian Ministry of Science
+# and Technology through the research contract #IP-2003-143.
+#
+
+# $Id: host.tcl,v 1.19 2008/01/02 12:08:46 marko Exp $
+
+
+#****h* imunes/host.tcl
+# NAME
+# host.tcl -- defines host specific procedures
+# FUNCTION
+# This module is used to define all the host-specific procedures.
+# NOTES
+# Procedures in this module start with the keyword host and
+# end with a function-specific part that is the same for all the
+# node types that work on the same layer.
+#****
+
+
+
+set MODULE host
+
+#****f* host.tcl/host.layer
+# NAME
+# host.layer
+# SYNOPSIS
+# set layer [host.layer]
+# FUNCTION
+# Returns the layer on which the host operates
+# i.e. returns NETWORK.
+# RESULT
+# * layer -- set to NETWORK
+#****
+
+proc $MODULE.layer {} {
+ return NETWORK
+}
+
+#****f* host.tcl/host.cfggen
+# NAME
+# host.cfggen
+# SYNOPSIS
+# set config [host.cfggen $node_id]
+# FUNCTION
+# Returns the generated configuration. This configuration is loaded
+# at boot time of the virtual node and is closely related to the
+# host.bootcmd procedure.
+# For each interface in the node's interface list the IP address is
+# configured, and each static route from the simulator is added.
+# (The rpcbind and inetd startup lines are currently commented out.)
+# INPUTS
+# * node_id - id of the node (type of the node is host)
+# RESULT
+# * config -- generated configuration
+#****
+
+proc $MODULE.cfggen { node } {
+ upvar 0 ::cf::[set ::curcfg]::$node $node
+
+ set cfg {}
+
+ foreach ifc [ifcList $node] {
+ set addr [getIfcIPv4addr $node $ifc]
+ if { $addr != "" } {
+ lappend cfg "ifconfig $ifc inet $addr"
+ }
+ set addr [getIfcIPv6addr $node $ifc]
+ if { $addr != "" } {
+ lappend cfg "ifconfig $ifc inet6 $addr"
+ }
+ }
+ lappend cfg ""
+
+ foreach statrte [getStatIPv4routes $node] {
+ lappend cfg "route -q add -inet $statrte"
+ }
+ foreach statrte [getStatIPv6routes $node] {
+ lappend cfg "route -q add -inet6 $statrte"
+ }
+
+    # Temporary fix to enable multiple inetd and rpcbind processes
+    # (in different vimages). Should be removed once each vimage runs
+    # in a separate file system.
+
+ # In Linux/OpenVZ each node is indeed running in a separate file system.
+
+ #
+ # Before startup ...
+# lappend cfg "rm -f /var/run/inetd.pid"
+# lappend cfg "rm -f /var/run/rpcbind.lock"
+
+# lappend cfg "rpcbind"
+# lappend cfg "inetd"
+
+ # ... and after startup.
+# lappend cfg "rm -f /var/run/inetd.pid"
+# lappend cfg "rm -f /var/run/rpcbind.lock"
+
+ return $cfg
+}
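+
+# Example of a generated configuration (hypothetical addresses and routes):
+#   ifconfig eth0 inet 10.0.0.10/24
+#   ifconfig eth0 inet6 fc00::10/64
+#
+#   route -q add -inet 0.0.0.0/0 10.0.0.1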
+
+#****f* host.tcl/host.bootcmd
+# NAME
+# host.bootcmd
+# SYNOPSIS
+# set appl [host.bootcmd $node_id]
+# FUNCTION
+# Procedure bootcmd returns the application that reads and
+# applies the configuration generated in host.cfggen.
+# In this case (procedure host.bootcmd) the application
+# is /bin/sh.
+# INPUTS
+# * node_id - id of the node (type of the node is host)
+# RESULT
+# * appl -- application that reads the configuration (/bin/sh)
+#****
+
+proc $MODULE.bootcmd { node } {
+ return "/bin/sh"
+}
+
+#****f* host.tcl/host.shellcmd
+# NAME
+# host.shellcmd
+# SYNOPSIS
+# set shell [host.shellcmd $node_id]
+# FUNCTION
+# Procedure shellcmd returns the shell that will be opened
+# as the default shell for the system.
+# Procedure host.shellcmd searches the system for the bash shell,
+# then for tcsh; if neither is found, /bin/sh is used instead.
+# INPUTS
+# * node_id - id of the node (type of the node is host)
+# RESULT
+# * shell -- default shell for the host
+#****
+
+proc $MODULE.shellcmd { node } {
+ set ret [nexec whereis -b bash]
+ if { [llength $ret] == 2 } {
+ return [lindex $ret 1]
+ } else {
+ set ret [nexec whereis -b tcsh]
+ if { [llength $ret] == 2 } {
+ return [lindex $ret 1]
+ } else {
+ return "/bin/sh"
+ }
+ }
+}
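+
+# The check above assumes "whereis -b" output of the form "bash: /bin/bash"
+# (two list elements, the second being the path), e.g.:
+#   host.shellcmd n1    ;# -> "/bin/bash" where bash is installed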
+
+#****f* host.tcl/host.instantiate
+# NAME
+# host.instantiate
+# SYNOPSIS
+# host.instantiate $eid $node_id
+# FUNCTION
+# Procedure instantiate creates a new virtual node
+# for a given node in imunes.
+# Procedure host.instantiate creates a new virtual node
+# with all the interfaces and CPU parameters as defined
+# in imunes.
+# INPUTS
+# * eid - experiment id
+# * node_id - id of the node (type of the node is host)
+#****
+proc $MODULE.instantiate { eid node } {
+ l3node.instantiate $eid $node
+}
+
+
+#****f* host.tcl/host.start
+# NAME
+# host.start
+# SYNOPSIS
+# host.start $eid $node_id
+# FUNCTION
+# Starts a new host. The node can be started only if it has been
+# instantiated. Simulates the boot process of a host by calling
+# the l3node.start procedure.
+# INPUTS
+# * eid - experiment id
+# * node_id - id of the node (type of the node is host)
+#****
+proc $MODULE.start { eid node } {
+ l3node.start $eid $node
+}
+
+
+#****f* host.tcl/host.shutdown
+# NAME
+# host.shutdown
+# SYNOPSIS
+# host.shutdown $eid $node_id
+# FUNCTION
+# Shuts down a host. Simulates the shutdown process of a host
+# by calling the l3node.shutdown procedure.
+# INPUTS
+# * eid - experiment id
+# * node_id - id of the node (type of the node is host)
+#****
+proc $MODULE.shutdown { eid node } {
+ l3node.shutdown $eid $node
+}
+
+
+#****f* host.tcl/host.destroy
+# NAME
+# host.destroy
+# SYNOPSIS
+# host.destroy $eid $node_id
+# FUNCTION
+# Destroys a host. Destroys all the interfaces of the host
+# and the vimage itself by calling l3node.destroy procedure.
+# INPUTS
+# * eid - experiment id
+# * node_id - id of the node (type of the node is host)
+#****
+proc $MODULE.destroy { eid node } {
+ l3node.destroy $eid $node
+}
+
+
+#****f* host.tcl/host.nghook
+# NAME
+# host.nghook
+# SYNOPSIS
+# host.nghook $eid $node_id $ifc
+# FUNCTION
+# Returns the id of the netgraph node and the name of the
+# netgraph hook which is used for connecting two netgraph
+# nodes. This procedure calls the l3node.nghook procedure and
+# passes on its result.
+# INPUTS
+# * eid - experiment id
+# * node_id - node id
+# * ifc - interface name
+# RESULT
+# * nghook - the list containing netgraph node id and the
+# netgraph hook (ngNode ngHook).
+#****
+
+proc $MODULE.nghook { eid node ifc } {
+ return [l3node.nghook $eid $node $ifc]
+}
+