Showing posts with label AI. Show all posts
Showing posts with label AI. Show all posts

Tuesday, July 29, 2014

Configuration profile - DNS

This is an example of a DNS client configuration profile.
This is useful to streamline installations:
  
Assume all DNS services prerequisites and assumptions stay the same.
Also check the on-line documentation Managing DNS (Tasks) for details.

The following are the necessary customizations:

<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
<service_bundle type="profile" name="sysconfig">
  <service version="1" type="service" name="system/config-user">
    <instance enabled="true" name="default">
      <property_group type="application" name="root_account">
        <propval type="astring" name="login" value="root"/>
        <propval type="astring" name="password" value="$5$..."/>
        <propval type="astring" name="type" value="role"/>
      </property_group>
      <property_group type="application" name="user_account">
        <propval type="astring" name="login" value="..."/>
        <propval type="astring" name="password" value="$5$..."/>
        <propval type="astring" name="type" value="normal"/>
        <propval type="astring" name="description" value="Primary Administrator"/>
        <propval type="count" name="gid" value="10"/>
        <propval type="astring" name="shell" value="/usr/bin/bash"/>
        <propval type="astring" name="roles" value="root"/>
        <propval type="astring" name="profiles" value="System Administrator"/>
        <propval type="astring" name="sudoers" value="ALL=(ALL) ALL"/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="system/timezone">
    <instance enabled="true" name="default">
      <property_group type="application" name="timezone">
        <propval type="astring" name="localtime" value="..."/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="system/environment">
    <instance enabled="true" name="init">
      <property_group type="application" name="environment">
        <propval type="astring" name="LANG" value="en_US.UTF-8"/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="system/identity">
    <instance enabled="true" name="node">
      <property_group type="application" name="config">
        <propval type="astring" name="nodename" value="zone-1"/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="system/keymap">
    <instance enabled="true" name="default">
      <property_group type="system" name="keymap">
        <propval type="astring" name="layout" value="US-English"/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="system/console-login">
    <instance enabled="true" name="default">
      <property_group type="application" name="ttymon">
        <propval type="astring" name="terminal_type" value="sun-color"/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="network/physical">
    <instance enabled="true" name="default">
      <property_group type="application" name="netcfg">
        <propval type="astring" name="active_ncp" value="DefaultFixed"/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="network/install">
    <instance enabled="true" name="default">
      <property_group type="application" name="install_ipv4_interface">
        <propval type="astring" name="address_type" value="static"/>
        <propval type="net_address_v4" name="static_address" value="192.168.0.91/24"/>
        <propval type="astring" name="name" value="net11/v4"/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="system/name-service/switch">
    <property_group type="application" name="config">
      <propval type="astring" name="default" value="files"/>
      <propval type="astring" name="host" value="files dns"/>
      <propval type="astring" name="printer" value="user files"/>
    </property_group>
    <instance enabled="true" name="default"/>
  </service>
  

  <service version="1" type="service" name="system/name-service/cache">
    <instance enabled="true" name="default"/>
  </service>
  <service version="1" type="service" name="network/dns/client">
    <property_group type="application" name="config">
      <property type="net_address" name="nameserver">
        <net_address_list>
          <value_node value="10.0.1.10"/>
          <value_node value="10.0.1.20"/>
          <value_node value="10.0.1.30"/>
        </net_address_list>
      </property>
      <property type="astring" name="search">
        <astring_list>
          <value_node value="business.corp"/>
        </astring_list>
      </property>
    </property_group>
    <instance enabled="true" name="default"/>
  </service>
  

  <service version="1" type="service" name="system/ocm">
    <instance enabled="true" name="default">
      <property_group type="application" name="reg">
        <propval type="astring" name="user" value=""/>
        <propval type="astring" name="password" value=""/>
        <propval type="astring" name="key" value=""/>
        <propval type="astring" name="cipher" value=""/>
        <propval type="astring" name="proxy_host" value=""/>
        <propval type="astring" name="proxy_user" value=""/>
        <propval type="astring" name="proxy_password" value=""/>
        <propval type="astring" name="config_hub" value=""/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="system/fm/asr-notify">
    <instance enabled="true" name="default">
      <property_group type="application" name="autoreg">
        <propval type="astring" name="user" value=""/>
        <propval type="astring" name="password" value=""/>
        <propval type="astring" name="index" value=""/>
        <propval type="astring" name="private-key" value=""/>
        <propval type="astring" name="public-key" value=""/>
        <propval type="astring" name="client-id" value=""/>
        <propval type="astring" name="timestamp" value=""/>
        <propval type="astring" name="proxy-host" value=""/>
        <propval type="astring" name="proxy-user" value=""/>
        <propval type="astring" name="proxy-password" value=""/>
        <propval type="astring" name="hub-endpoint" value=""/>
      </property_group>
    </instance>
  </service>
</service_bundle>

 
The trailing notices for Configuration profile - NIS still apply.
Of course, there's no need to declare the DNS servers on /etc/hosts.
     

Configuration profile - NIS

This is an example of a NIS client configuration profile.
This is useful to streamline installations:
  
Assume all initial prerequisites stay the same.
The following are the necessary customizations.
 
<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
<service_bundle type="profile" name="sysconfig">
  <service version="1" type="service" name="system/config-user">
    <instance enabled="true" name="default">
      <property_group type="application" name="root_account">
        <propval type="astring" name="login" value="root"/>
        <propval type="astring" name="password" value="$5$..."/>
        <propval type="astring" name="type" value="role"/>
      </property_group>
      <property_group type="application" name="user_account">
        <propval type="astring" name="login" value="..."/>
        <propval type="astring" name="password" value="$5$..."/>
        <propval type="astring" name="type" value="normal"/>
        <propval type="astring" name="description" value="Primary Administrator"/>
        <propval type="count" name="gid" value="10"/>
        <propval type="astring" name="shell" value="/usr/bin/bash"/>
        <propval type="astring" name="roles" value="root"/>
        <propval type="astring" name="profiles" value="System Administrator"/>
        <propval type="astring" name="sudoers" value="ALL=(ALL) ALL"/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="system/timezone">
    <instance enabled="true" name="default">
      <property_group type="application" name="timezone">
        <propval type="astring" name="localtime" value="..."/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="system/environment">
    <instance enabled="true" name="init">
      <property_group type="application" name="environment">
        <propval type="astring" name="LANG" value="en_US.UTF-8"/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="system/identity">
    <instance enabled="true" name="node">
      <property_group type="application" name="config">
        <propval type="astring" name="nodename" value="zone-1"/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="system/keymap">
    <instance enabled="true" name="default">
      <property_group type="system" name="keymap">
        <propval type="astring" name="layout" value="US-English"/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="system/console-login">
    <instance enabled="true" name="default">
      <property_group type="application" name="ttymon">
        <propval type="astring" name="terminal_type" value="sun-color"/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="network/physical">
    <instance enabled="true" name="default">
      <property_group type="application" name="netcfg">
        <propval type="astring" name="active_ncp" value="DefaultFixed"/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="network/install">
    <instance enabled="true" name="default">
      <property_group type="application" name="install_ipv4_interface">
        <propval type="astring" name="address_type" value="static"/>
        <propval type="net_address_v4" name="static_address" value="192.168.0.84/24"/>
        <propval type="astring" name="name" value="net9/v4"/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="system/name-service/switch">
    <property_group type="application" name="config">
      <propval type="astring" name="default" value="files nis"/>
      <propval type="astring" name="printers" value="user files nis"/>
      <propval type="astring" name="netgroup" value="nis"/>
    </property_group>
    <instance enabled="true" name="default"/>
  </service>
  <service version="1" type="service" name="network/nis/domain">
    <property_group type="application" name="config">
      <propval type="hostname" name="domainname" value="business.corp"/>
      <property type="host" name="ypservers">
        <host_list>
          <value_node value="nis-2"/>
          <value_node value="nis-3"/>
        </host_list>
      </property>
    </property_group>
    <instance enabled="true" name="default"/>
  </service>
  <service version="1" type="service" name="network/nis/client">
    <instance enabled="true" name="default"/>
  </service>

  <service version="1" type="service" name="system/name-service/cache">
    <instance enabled="true" name="default"/>
  </service>
  <service version="1" type="service" name="network/dns/client">
    <instance enabled="false" name="default"/>
  </service>
  <service version="1" type="service" name="system/ocm">
    <instance enabled="true" name="default">
      <property_group type="application" name="reg">
        <propval type="astring" name="user" value=""/>
        <propval type="astring" name="password" value=""/>
        <propval type="astring" name="key" value=""/>
        <propval type="astring" name="cipher" value=""/>
        <propval type="astring" name="proxy_host" value=""/>
        <propval type="astring" name="proxy_user" value=""/>
        <propval type="astring" name="proxy_password" value=""/>
        <propval type="astring" name="config_hub" value=""/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="system/fm/asr-notify">
    <instance enabled="true" name="default">
      <property_group type="application" name="autoreg">
        <propval type="astring" name="user" value=""/>
        <propval type="astring" name="password" value=""/>
        <propval type="astring" name="index" value=""/>
        <propval type="astring" name="private-key" value=""/>
        <propval type="astring" name="public-key" value=""/>
        <propval type="astring" name="client-id" value=""/>
        <propval type="astring" name="timestamp" value=""/>
        <propval type="astring" name="proxy-host" value=""/>
        <propval type="astring" name="proxy-user" value=""/>
        <propval type="astring" name="proxy-password" value=""/>
        <propval type="astring" name="hub-endpoint" value=""/>
      </property_group>
    </instance>
  </service>
</service_bundle>

   
Note that as the zone configuration (shown below) is using a net resource, the network/install service must refer to the corresponding name (net9), otherwise error or warning messages will appear during installation. The same goes for the IP address, which must respect the value of allowed-address.

# zonecfg -z zone-1 info
zonename: zone-1
zonepath: /zone/zone-1
brand: solaris
autoboot: false
bootargs:
file-mac-profile: fixed-configuration
pool:
limitpriv:
scheduling-class:
ip-type: exclusive
hostid:
fs-allowed:
net:
    address not specified
    allowed-address: 192.168.0.84/24
    configure-allowed-address: true
    physical: net9
    defrouter not specified
attr:
    name: description
    type: string
    value: "zone-1"


Before the 1st boot it's recommended to update the zone's /etc/hosts.
In fact, for NIS services this is a critical step before the 1st boot:

# cat /zone/zone-1/root/etc/hosts
#
# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
# Use is subject to license terms.
#
# Internet host table
#
::1             localhost
127.0.0.1       localhost                loghost
#

192.168.0.84    zone-1.business.corp     zone-1
#
192.168.0.202       nis-2.business.corp  nis-2
192.168.0.203       nis-3.business.corp  nis-3
   
Note that this is an immutable zone.
An immutable zone installation behavior has been already documented.
  

Configuration profile

A system configuration profile is to avoid interactions during installations.
solaris(5) describes its usage as the -c option to subcommands.
They are roughly equivalent to Solaris 10 sysidcfg files.

The main benefits are:
  • Consistency;
  • Simplicity;
  • Speed;
 
They can be used during bare-metal system installations but also during zone installations and even a combination of both. In any case, the benefits are immense and it's worthwhile to take some time to learn how to deal with system configuration profiles.

A system configuration profile is a somewhat complex XML file.
Instead of building it from scratch, the following approach seems best:
  1. Generate a baseline by using sysconfig create-profile;
  2. Manually edit the baseline accordingly.
    
The 1st step is rather easy.
Simply do:

$ sysconfig create-profile -o <output_xml_file>

The 2nd step may be much harder at first.
That is, you have to research which excerpts have to be inserted.
The ultimate help are the on-line manuals and some SMF info extraction.

NOTE
A configuration profile is focused on a client-side configuration.
It can't configure for instance a DNS server.
That's another story.
See sysconfig(1M).
I have already given examples on applying a system configuration profile.
Please, refer to the following other posts:

Examples of system configuration profiles:
   

Monday, July 28, 2014

Immutable zone installation

This post is a kind of wrap up of a few others, such as:

I will just show how an immutable zone gets installed.
On this example the zone won't have any specific services.
Well, at a minimum, for convenience, I chose to make it a NIS client.

On a more real scenario, I would further refine the configuration profile.
For instance, I could add other pre-configured SMF services.

I assume all the premises of the aforementioned posts.
The immutable zone configuration and configuration profile are ready.

In fact, there is more than one installation method.
It can happen through:
  • Automated Installer (AI); not shown on this post;
  • From scratch;
  • Cloning;
   
There's nothing really special about installing "from scratch":

# zoneadm -z zone-1 install -c /tmp/zone-1.xml
...

I like the cloning method because it's faster and tends to save space:

# zoneadm -z zone-1 clone -c /tmp/zone-1.xml template-zone
...

NOTE
The argument to the -c option must be an absolute path.
template-zone must not be an immutable zone already.
Here's the zone-1 zone's console on the 1st boot:

# zlogin -C zone-1
[Connected to zone 'zone-1' console]

 
From another terminal just boot the zone:
 
# zoneadm -z zone-1 boot 

Now go back to the zone's console and watch:
 
[NOTICE: Read-only zone booting up read-write]
 

SunOS Release 5.11 Version 11.1 64-bit
Copyright (c) 1983, 2012, Oracle and/or its affiliates...
Hostname: unknown
Hostname: zone-1


[NOTICE: This read-only system transiently booted read/write]
[NOTICE: Now that self assembly has been completed, the system is rebooting]

[NOTICE: Zone rebooting]
 

SunOS Release 5.11 Version 11.1 64-bit
Copyright (c) 1983, 2012, Oracle and/or its affiliates...
Hostname: zone-1

zone-1 console login:


It's amazing how the system detects I'm installing an immutable zone and then upon installation boots the zone in read-write mode and after installation finishes, the zone is automatically rebooted to assume its immutability state. This saves administrators some work and makes sure no interactions are required.
   

Friday, July 25, 2014

SMF info extraction

A few months ago I started an SMF overview.
That post also led to a few others enough to a very brief introduction.
In fact SMF is much much bigger and powerful than just that.

This time I'll touch on a useful feature related to configuration profiles.
For instance, it's very useful to:

Despite the existing on-line documentation, at this moment, there may be a bug, an undocumented behavior, or a misunderstanding of mine regarding how the extraction process is to be performed for a specific and customized FMRI because, at least in my opinion, it doesn't seem to work as expected.

In the face of the difficulty presented in the previous paragraph, I devised what can most probably be considered a work-around, though it is not yet fully acknowledged as bullet-proof. Nevertheless, I'm rather confident it's OK.

The idea is based on the fact that SMF information is organized into four layers, two of which are vendor-provided, on top of which two more are administratively customizable. The layers are arranged as follows:
  • admin          ➜ administrative customizable
  • site-profile   ➜ administrative customizable (↑)
  • system-profile ➜ vendor-provided (↑)
  • manifest       ➜ vendor-provided (↑)

The additional key information one needs to know is that the current (or all) layer designation means the combined result of all four layers previously described.

Thus to know (in XML format) just what has been administratively customized it suffices to compute the difference in relation to the vendor-provided configuration information. Now it's clear, simple and obvious, isn't it?

Let's see an example about obtaining certain naming services configuration.
I'll refer to another post about zone cloning involving NIS services.

# svccfg extract -l manifest,system-profile \
  network/nis/domain > /tmp/nis-domain-vendor

# svccfg extract -l all \
  network/nis/domain > /tmp/nis-domain-custom

# diff /tmp/nis-domain-{vendor,custom}
...
<       <propval name='domainname' type='hostname' value=''/>
---
>       <propval name='domainname' type='hostname' value='business.corp'/>
>       <property name='ypservers' type='host'>
>         <host_list>
>           <value_node value='nis-2'/>
>           <value_node value='nis-3'/>
>         </host_list>
>       </property>


# svccfg extract -l manifest,system-profile \
  system/name-service/switch > /tmp/nss-vendor

# svccfg extract -l all \
  system/name-service/switch > /tmp/nss-custom

# diff /tmp/nss-{vendor,custom}
...
<       <propval name='default' type='astring' value='files'/>
---
>       <propval name='default' type='astring' value='files nis'/>

>       <propval name='printers' type='astring' value='user files nis'/>
>       <propval name='netgroup' type='astring' value='nis'/>

And so on... (see the full list for NIS in Zone cloning)

For a certain technology or functionality it's necessary to refer to the on-line manuals and references in order to know the associated services. Of course this has already been done at this point, as I assume there exist customizations to each of those services.
   

Wednesday, January 8, 2014

DNS services

DNS (Domain Name System) is a hierarchical and distributed database for hosts name and addresses relationships as well as hosts related information such as: mail exchange routing, location data and available services.

Due to its characteristics, DNS is vital.
Any kind of Internet presence or access requires it.
Many other infrastructure services take advantage of it.
It doesn't replace NIS, but supersedes the hosts map.

NIS has a known DNS-forwarding mode (see nsswitch.conf(4)) where it forwards host names and addresses lookup requests to DNS if it doesn't have the information on its own databases.  This possible integration, together with the further variations on the hosts database source list of the Name Services Switch, can lead to unexpected resolutions and subtle issues. Hence, except for specific cases, it may indeed be better to adopt the following host database source list:
hosts: files dns nis
Nowadays we also have the alternative of multicast DNS and the now reserved .local pseudo-TLD name is used for it. The on-line documentation also talks about it as well as man pages mdnsd(1M) and dns-sd(1M). It has to do with zero-configuration networking and Apple's Bonjour implementation, whose open source framework and tools are present in Solaris. But being restricted to local area networks, at least for now, I won't enable it. One notable exception is when setting up the Automated Installer Framework, which requires enabling it.

# svcs dns/multicast
STATE          STIME    FMRI
disabled       Jan_07   svc:/network/dns/multicast:default


This DNS series of blog posts will primarily cover traditional DNS, a.k.a. unicast DNS. I do not yet know how deep I'll go with my descriptions and examples, but I intend to visit the basics alongside the benefits and advantages of implementing it under Solaris.

So these are the main posts:


For further detail I'd point to:
 
     

Friday, December 27, 2013

Zone cloning

On this post I intend to exemplify cloning a non-global zone (NGZ).
In the end it shall be quite obvious why cloning is so powerful and desirable.
In this context I understand cloning as a duplication within the same host.
An identical NGZ on another host is another topic related to migration.
The underlying support for cloning is ultimately provided by ZFS.

I make the following assumptions:
  • The system is a Solaris 11 or higher.
  • There is a dedicated ZFS pool for NGZs paths.
  • There is an accessible IPS local repository.
  • There's no DNS service implemented yet.
  • There is an available (unused) network interface.

$ pkg info entire | grep Version
       Version: 0.5.11 (Oracle Solaris 11.1.13.6.0)


$ zpool list zone
NAME    SIZE  ALLOC   FREE  CAP  DEDUP  HEALTH  ALTROOT
zone   15.9G   622M  15.3G   3%  1.00x  ONLINE  -


$ zfs list -r -d 1 zone
NAME             USED  AVAIL  REFER  MOUNTPOINT
zone             622M  15.0G    35K  /zone
zone/server-1a   479M  15.0G    33K  /zone/server-1a
zone/server-1b  70.8M  15.0G    34K  /zone/server-1b
zone/server-1c  70.7M  15.0G    34K  /zone/server-1c


$ pkg publisher
PUBLISHER        TYPE     STATUS P LOCATION
solaris          origin   online F http://192.168.0.100/


$ svcs '*dns*'
STATE          STIME    FMRI
disabled        9:17:59 svc:/network/dns/client:default
disabled        9:18:02 svc:/network/dns/multicast:default
disabled        9:18:10 svc:/network/dns/server:default


# dladm show-phys -o link,state,speed,duplex,device
LINK              STATE      SPEED  DUPLEX    DEVICE
net0              up         1000   full      e1000g4
net3              up         1000   full      e1000g7
server-1c/net3    up         1000   full      e1000g7
net2              up         1000   full      e1000g6
server-1b/net2    up         1000   full      e1000g6
net1              up         1000   full      e1000g5
server-1a/net1    up         1000   full      e1000g5
net4              unknown    0      unknown   e1000g8
net7              unknown    0      unknown   e1000g11
net6              unknown    0      unknown   e1000g10
net5              unknown    0      unknown   e1000g9


Let's create another NGZ (server-1d) as a clone of server-1a.
Note from the previous output that server-1b and server-1c are clones.
More clearly:

$ zfs list -t all -r -d 2 -o name,used zone/server-1a
NAME                                   USED
zone/server-1a                         479M
zone/server-1a/rpool                   479M
zone/server-1a/rpool@server-1c_snap00     0
zone/server-1a/rpool@server-1b_snap00     0

zone/server-1a/rpool/ROOT              479M
zone/server-1a/rpool/VARSHARE           39K
zone/server-1a/rpool/export            134K


$ zfs get -o value origin zone/server-{1b,1c}/rpool
VALUE
zone/server-1a/rpool@server-1b_snap00

zone/server-1a/rpool@server-1c_snap00

Extract the source NGZ (server-1a) configuration:

# zonecfg -z server-1a export -f /tmp/server-1a.cfg

# cat /tmp/server-1a.cfg
create -b
set brand=solaris
set zonepath=/zone/server-1a
set autoboot=true
set ip-type=exclusive
add net
set allowed-address=192.168.0.11/24
set configure-allowed-address=true
set physical=net1
end

add attr
set name=description
set type=string
set value=Template
end


Edit the target NGZ (server-1d) configuration accordingly:
(attention: if net4 is already a vnic, then use net instead of anet)

# cp /tmp/server-{1a,1d}.cfg

# cat /tmp/server-1d.cfg
create -b
set brand=solaris
set zonepath=/zone/server-1d
set autoboot=true
set ip-type=exclusive
add net
set allowed-address=192.168.0.14/24
set configure-allowed-address=true
set physical=net4
end

add attr
set name=description
set type=string
set value="NIS server"
end


Import the target NGZ (server-1d) configuration:

# zonecfg -z server-1d -f /tmp/server-1d.cfg

# zonecfg -z server-1d info
zonename: server-1d
zonepath: /zone/server-1d
brand: solaris
autoboot: true
bootargs:
file-mac-profile:
pool:
limitpriv:
scheduling-class:
ip-type: exclusive
hostid:
fs-allowed:
net:
    address not specified
    allowed-address: 192.168.0.14/24
    configure-allowed-address: true
    physical: net4
    defrouter not specified

attr:
    name: description
    type: string
    value: "NIS server"


# zoneadm list -cv
  ID NAME      STATUS     PATH             BRAND    IP   
   0 global    running    /                solaris  shared
   1 server-1c running    /zone/server-1c  solaris  excl 
   2 server-1b running    /zone/server-1b  solaris  excl 
   3 server-1a running    /zone/server-1a  solaris  excl 
   - server-1d configured /zone/server-1d  solaris  excl


Create a configuration profile to help streamline this and future cloning.

NOTE
During the creation of the configuration profile, selecting None for the networking connection configuration may avoid mistakes, but it's probably better to specify the correct settings. It doesn't seem a good idea to include the name services configuration while operating the sysconfig create-profile utility. The results seem rather terse or minimalist. I would rather manually edit the configuration profile subsequently (using SMF info extraction from other golden or template systems) as later exemplified for the case of enabling NIS services right from the start. Furthermore, there may be complaints about IPv6, hence I prefer to edit out its default configuration. If using the anet zone configuration, net0 is probably the correct choice; but if a net physical interface is being referenced in the zone configuration, then choose the corresponding interface.

An interesting alternative, is to copy from a configuration profile template initially generated by sysconfig create-profile and then manually adjust accordingly.
 
In other words my advice is:
  • Specify the correct network settings, using net0 for vnics (anets) and the matching physical interface in the zone configuration. The IP address must respect the eventual  allowed-address zone configuration clause. Example: Configuration profile - NIS client
  • Do not specify any name services configurations when initially generating the profile via sysconfig create-profile. Manually edit the initially generated profile and add name services and any other thing that makes sense to a particular purpose. Example: Configuration profile - NIS client
  • Remove altogether the IPv6 configuration section if you'll use just IPv4. That is, remove the following lines from the configuration profile:
     
    <property_group type="application" name="install_ipv6_interface">
      <propval type="astring" name="stateful" value="yes"/>
      <propval type="astring" name="address_type" value="addrconf"/>
      <propval type="astring" name="name" value="net10/v6"/>
    </property_group>
Taking into consideration the above advice, create the very first (initial) configuration profile to be customized and subsequently used as a baseline for similar installations:

# sysconfig create-profile -o /tmp/server-1d.xml
SC profile successfully generated.
Exiting System Configuration Tool. Log is available at:
/system/volatile/sysconfig/sysconfig.log.6643


If a baseline configuration profile already existed, then adjust accordingly. In general, the following fields will be updated (beyond the deletion of the aforementioned IPv6 section). Here's an unrelated/independent example:

# diff /tmp/dns-1.xml /tmp/dns-2.xml
40c40
<         <propval type="astring" name="nodename" value="dns-1"/>
---
>         <propval type="astring" name="nodename" value="dns-2"/>
69,70c69,70
<         <propval type="net_address_v4" name="static_address" value="192.168.0.84/24"/>
<         <propval type="astring" name="name" value="net9/v4"/>
---
>         <propval type="net_address_v4" name="static_address" value="192.168.0.87/24"/>
>         <propval type="astring" name="name" value="net10/v4"/>

Shutdown the source NGZ (server-1a) for performing the cloning.
In general, there should be a golden template NGZ ready to be cloned.

# zoneadm -z server-1a shutdown

# zoneadm list -cv
  ID NAME      STATUS     PATH             BRAND    IP   
   0 global    running    /                solaris  shared
   1 server-1c running    /zone/server-1c  solaris  excl 
   2 server-1b running    /zone/server-1b  solaris  excl 
   - server-1a installed  /zone/server-1a  solaris  excl 
   - server-1d configured /zone/server-1d  solaris  excl 


# zoneadm -z server-1d clone -c /tmp/server-1d.xml server-1a
The following ZFS file system(s) have been created:
    zone/server-1d
Progress being logged to ...
Log saved in non-global zone as ...


# zoneadm list -cv
  ID NAME      STATUS     PATH             BRAND    IP   
   0 global    running    /                solaris  shared
   1 server-1c running    /zone/server-1c  solaris  excl 
   2 server-1b running    /zone/server-1b  solaris  excl 
   - server-1a installed  /zone/server-1a  solaris  excl 
   - server-1d installed  /zone/server-1d  solaris  excl
 


Resume the source NGZ (server-1a) to its fully operational state.
As previously noted, this isn't needed in case a golden template is being used.

# zoneadm -z server-1a boot

Before booting the cloned NGZ (server-1d) for the 1st time, make minor adjustments such as manually editing /zone/server-1d/root/etc/hosts. If much more elaborate measures are needed, then there's a chance that cloning may not be the best solution. Of course, it all depends on a case-by-case analysis.

# cat /zone/server-1d/root/etc/hosts
#
# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
# Use is subject to license terms.
#
# Internet host table
#
::1             localhost
127.0.0.1       localhost                loghost
#
192.168.0.14    server-1d.business.corp  server-1d


The above /etc/hosts example may not be adequate to NIS services, unless the even more insecure local network dynamic discovery is used. For NIS services direct mode, typically and in addition, it's also required to add at least two NIS servers, such as:

# cat /zone/server-1d/root/etc/hosts
...

192.168.0.14    server-1d.business.corp  server-1d
#
192.168.0.202       nis-2.business.corp  nis-2
192.168.0.203       nis-3.business.corp  nis-3
   
For NIS services, the relevant part of the configuration profile changes from:
  
<service version="1" type="service" name="system/name-service/switch">
 
<property_group type="application" name="config">
    <propval type="astring" name="default" value="files"/>
   
<propval type="astring" name="printer" value="user files"/>
 
</property_group>
 
<instance enabled="true" name="default"/> 
</service>

To:

<service version="1" type="service" name="system/name-service/switch">
   
<property_group type="application" name="config">
     
<propval type="astring" name="default" value="files nis"/>
     
<propval type="astring" name="printers" value="user files nis"/>
     
<propval type="astring" name="netgroup" value="nis"/>
 
</property_group>
 
<instance enabled="true" name="default"/> 
</service>

<service version="1" type="service" name="network/nis/domain">
 
<property_group type="application" name="config">
   
<propval type="hostname" name="domainname" value="business.corp"/>
   
<property type="host" name="ypservers">
     
<host_list>
       
<value_node value="nis-2"/>
       
<value_node value="nis-3"/>
     
</host_list>
   
</property>
 
</property_group>
 
<instance enabled="true" name="default"/> 
</service>

<service version="1" type="service" name="network/nis/client">
 
<instance enabled="true" name="default"/> 
</service>

One might well be wondering how I found out what to substitute in the above XML excerpt. For more detail on how to obtain the above changes, please read my other posts about SMF info extraction and NIS &amp; NSS. Of course, I found out which services to inspect based on the on-line manuals and references.

For the final step it's advisable to use two terminals: one for monitoring the console during the 1st boot, and the other for issuing the zone boot command. Depending on the existing configuration in the source NGZ, it will take a little while for the system to apply the inherent changes to the newly cloned NGZ.

# zlogin -C server-1d
[Connected to zone 'server-1d' console]
 
# zoneadm -z server-1d boot  (from another terminal)
[NOTICE: Zone booting up]

SunOS Release 5.11 Version 11.1 64-bit
Copyright (c) 1983, 2012, Oracle ... All rights reserved.
Hostname: unknown
Hostname: server-1d

server-1d console login:


Hit ~. (or ~~. if nested twice, and so on...) and watch the results:

# zfs list -r -t all -d 1 zone
NAME             USED  AVAIL  REFER  MOUNTPOINT
zone             669M  15.0G    36K  /zone
zone/server-1a   487M  15.0G    33K  /zone/server-1a
zone/server-1b  70.9M  15.0G    34K  /zone/server-1b
zone/server-1c  70.8M  15.0G    34K  /zone/server-1c
zone/server-1d  38.1M  15.0G    34K  /zone/server-1d


Thanks to ZFS the cloning is naturally fast and extremely space efficient.
We were able to quickly get a new fully functional OS instance with just around 40 MB! In addition to the near zero virtualization overhead, this is a unique advantage of Solaris. 
   
There is one caveat when it comes to updating a system with multiple cloned zones. As updates are applied, they will be duplicated on each and every cloned zone, thus lessening the space savings benefits (zone server-1f was cloned from server-1a after an update process).

# zfs list -r -d 1 zone
NAME             USED  AVAIL  REFER  MOUNTPOINT
zone            1.85G  13.8G    38K  /zone
zone/server-1a   187M  13.8G    33K  /zone/server-1a
zone/server-1b   304M  13.8G    34K  /zone/server-1b
zone/server-1c   301M  13.8G    34K  /zone/server-1c
zone/server-1d   301M  13.8G    34K  /zone/server-1d
zone/server-1e   739M  13.8G    35K  /zone/server-1e
zone/server-1f  59.7M  13.8G    34K  /zone/server-1f

 
To mitigate the problem, the update plan must take into consideration the redeployment of cloned zones from updated golden templates. This implies a best practice:
Keep actual configuration and installation scripts synchronized.
One might wonder whether deduplication would help here; I'm not convinced it would be effective.
  

Wednesday, July 18, 2012

AI derived manifest sample

I'm assuming the setup previously described.
A derived manifest example may well be the best way to explain it:
 
      : official documentation showing how to mirror the rpool during the installation.
      : what's not shown or what I have changed based on the official documentation.
      : official documentation showing the structure of a derived manifest.
 
$ cat /export/auto_install/files/derived.sh
#!/bin/bash -
#
# AI derived manifest script: fetch a customized base manifest and,
# when the install client has two or more disks, reconfigure the
# root pool as a 2-way mirror.
#
# Relies on the AI-provided environment variables:
#   SI_INSTALL_SERVICE, SI_NUMDISKS, SI_DISKNAME_<n>
# (presumably set by the installer before this script runs — see the
# Oracle Solaris AI documentation).

readonly SCRIPT_SUCCESS=0
readonly SCRIPT_FAILURE=1

function error_handler
{
    # Any failing command (via the ERR trap) aborts the derivation.
    exit $SCRIPT_FAILURE
}

trap error_handler ERR

# Define the location of the custom base manifest.
readonly AI_SERVER=192.168.0.50:5555
readonly AI_PATH="export/auto_install/${SI_INSTALL_SERVICE}/auto_install"
readonly AI_BASE_MANIFEST=base.xml

# NOTE: aliases are NOT expanded in non-interactive shells, so the
# original 'alias wget=/usr/bin/wget' lines were silently ignored.
# Pin the tool paths in plain variables instead.
readonly WGET=/usr/bin/wget
readonly AIMANIFEST=/usr/bin/aimanifest

# Load a base, customized, manifest to dynamically adjust.
"$WGET" -P /tmp "http://${AI_SERVER}/${AI_PATH}/${AI_BASE_MANIFEST}"
"$AIMANIFEST" load "/tmp/${AI_BASE_MANIFEST}"

# Use the default (single-disk) layout if there is only one disk.
if (( SI_NUMDISKS >= 2 ))
then
    # Turn mirroring on.

    # Assumes a root zpool is already set up. The path is quoted so
    # the '[@name=rpool]' selector is never treated as a shell glob.
    vdev=$("$AIMANIFEST" add -r 'target/logical/zpool[@name=rpool]/vdev@name' mirror-0)
    "$AIMANIFEST" set "${vdev}@redundancy" mirror

    # A 2-way mirror is enough.
    typeset -i disk_num
    for ((disk_num = 1; disk_num <= 2; disk_num++))
    do
        # Indirect expansion (${!var}) replaces the fragile 'eval'
        # construct used to read SI_DISKNAME_1, SI_DISKNAME_2, ...
        disk_var="SI_DISKNAME_${disk_num}"
        curr_disk=${!disk_var}

        disk=$("$AIMANIFEST" add -r target/disk@in_vdev mirror-0)
        "$AIMANIFEST" set "${disk}@in_zpool" rpool
        "$AIMANIFEST" set "${disk}@whole_disk" true

        disk_name=$("$AIMANIFEST" add -r "${disk}/disk_name@name" "$curr_disk")
        "$AIMANIFEST" set "${disk_name}@name_type" ctd
    done
fi

exit $SCRIPT_SUCCESS