Background: #fff
Foreground: #000
PrimaryPale: #8cf
PrimaryLight: #18f
PrimaryMid: #04b
PrimaryDark: #014
SecondaryPale: #ffc
SecondaryLight: #fe8
SecondaryMid: #db4
SecondaryDark: #841
TertiaryPale: #eee
TertiaryLight: #ccc
TertiaryMid: #999
TertiaryDark: #666
Error: #f88
<!--{{{-->
<div class='toolbar' macro='toolbar [[ToolbarCommands::EditToolbar]]'></div>
<div class='title' macro='view title'></div>
<div class='editor' macro='edit title'></div>
<div macro='annotations'></div>
<div class='editor' macro='edit text'></div>
<div class='editor' macro='edit tags'></div><div class='editorFooter'><span macro='message views.editor.tagPrompt'></span><span macro='tagChooser excludeLists'></span></div>
<!--}}}-->
To get started with this blank [[TiddlyWiki]], you'll need to modify the following tiddlers:
* [[SiteTitle]] & [[SiteSubtitle]]: The title and subtitle of the site, as shown above (after saving, they will also appear in the browser title bar)
* [[MainMenu]]: The menu (usually on the left)
* [[DefaultTiddlers]]: Contains the names of the tiddlers that you want to appear when the TiddlyWiki is opened
You'll also need to enter your username for signing your edits: <<option txtUserName>>
<<importTiddlers>>
<!--{{{-->
<link rel='alternate' type='application/rss+xml' title='RSS' href='index.xml' />
<!--}}}-->
These [[InterfaceOptions]] for customising [[TiddlyWiki]] are saved in your browser

Your username for signing your edits. Write it as a [[WikiWord]] (eg [[JoeBloggs]])

<<option txtUserName>>
<<option chkSaveBackups>> [[SaveBackups]]
<<option chkAutoSave>> [[AutoSave]]
<<option chkRegExpSearch>> [[RegExpSearch]]
<<option chkCaseSensitiveSearch>> [[CaseSensitiveSearch]]
<<option chkAnimate>> [[EnableAnimations]]

----
Also see [[AdvancedOptions]]
<!--{{{-->
<div class='header' role='banner' macro='gradient vert [[ColorPalette::PrimaryLight]] [[ColorPalette::PrimaryMid]]'>
<div class='headerShadow'>
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>
<div class='headerForeground'>
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>
</div>
<div id='mainMenu' role='navigation' refresh='content' tiddler='MainMenu'></div>
<div id='sidebar'>
<div id='sidebarOptions' role='navigation' refresh='content' tiddler='SideBarOptions'></div>
<div id='sidebarTabs' role='complementary' refresh='content' force='true' tiddler='SideBarTabs'></div>
</div>
<div id='displayArea' role='main'>
<div id='messageArea'></div>
<div id='tiddlerDisplay'></div>
</div>
<!--}}}-->
/*{{{*/
body {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}

a {color:[[ColorPalette::PrimaryMid]];}
a:hover {background-color:[[ColorPalette::PrimaryMid]]; color:[[ColorPalette::Background]];}
a img {border:0;}

h1,h2,h3,h4,h5,h6 {color:[[ColorPalette::SecondaryDark]]; background:transparent;}
h1 {border-bottom:2px solid [[ColorPalette::TertiaryLight]];}
h2,h3 {border-bottom:1px solid [[ColorPalette::TertiaryLight]];}

.button {color:[[ColorPalette::PrimaryDark]]; border:1px solid [[ColorPalette::Background]];}
.button:hover {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::SecondaryLight]]; border-color:[[ColorPalette::SecondaryMid]];}
.button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::SecondaryDark]];}

.header {background:[[ColorPalette::PrimaryMid]];}
.headerShadow {color:[[ColorPalette::Foreground]];}
.headerShadow a {font-weight:normal; color:[[ColorPalette::Foreground]];}
.headerForeground {color:[[ColorPalette::Background]];}
.headerForeground a {font-weight:normal; color:[[ColorPalette::PrimaryPale]];}

.tabSelected {color:[[ColorPalette::PrimaryDark]];
	background:[[ColorPalette::TertiaryPale]];
	border-left:1px solid [[ColorPalette::TertiaryLight]];
	border-top:1px solid [[ColorPalette::TertiaryLight]];
	border-right:1px solid [[ColorPalette::TertiaryLight]];
}
.tabUnselected {color:[[ColorPalette::Background]]; background:[[ColorPalette::TertiaryMid]];}
.tabContents {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::TertiaryPale]]; border:1px solid [[ColorPalette::TertiaryLight]];}
.tabContents .button {border:0;}

#sidebar {}
#sidebarOptions input {border:1px solid [[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel {background:[[ColorPalette::PrimaryPale]];}
#sidebarOptions .sliderPanel a {border:none;color:[[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel a:hover {color:[[ColorPalette::Background]]; background:[[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel a:active {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::Background]];}

.wizard {background:[[ColorPalette::PrimaryPale]]; border:1px solid [[ColorPalette::PrimaryMid]];}
.wizard h1 {color:[[ColorPalette::PrimaryDark]]; border:none;}
.wizard h2 {color:[[ColorPalette::Foreground]]; border:none;}
.wizardStep {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];
	border:1px solid [[ColorPalette::PrimaryMid]];}
.wizardStep.wizardStepDone {background:[[ColorPalette::TertiaryLight]];}
.wizardFooter {background:[[ColorPalette::PrimaryPale]];}
.wizardFooter .status {background:[[ColorPalette::PrimaryDark]]; color:[[ColorPalette::Background]];}
.wizard .button {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryLight]]; border: 1px solid;
	border-color:[[ColorPalette::SecondaryPale]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryPale]];}
.wizard .button:hover {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Background]];}
.wizard .button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::Foreground]]; border: 1px solid;
	border-color:[[ColorPalette::PrimaryDark]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryDark]];}

.wizard .notChanged {background:transparent;}
.wizard .changedLocally {background:#80ff80;}
.wizard .changedServer {background:#8080ff;}
.wizard .changedBoth {background:#ff8080;}
.wizard .notFound {background:#ffff80;}
.wizard .putToServer {background:#ff80ff;}
.wizard .gotFromServer {background:#80ffff;}

#messageArea {border:1px solid [[ColorPalette::SecondaryMid]]; background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]];}
#messageArea .button {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::SecondaryPale]]; border:none;}

.popupTiddler {background:[[ColorPalette::TertiaryPale]]; border:2px solid [[ColorPalette::TertiaryMid]];}

.popup {background:[[ColorPalette::TertiaryPale]]; color:[[ColorPalette::TertiaryDark]]; border-left:1px solid [[ColorPalette::TertiaryMid]]; border-top:1px solid [[ColorPalette::TertiaryMid]]; border-right:2px solid [[ColorPalette::TertiaryDark]]; border-bottom:2px solid [[ColorPalette::TertiaryDark]];}
.popup hr {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::PrimaryDark]]; border-bottom:1px;}
.popup li.disabled {color:[[ColorPalette::TertiaryMid]];}
.popup li a, .popup li a:visited {color:[[ColorPalette::Foreground]]; border: none;}
.popup li a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border: none;}
.popup li a:active {background:[[ColorPalette::SecondaryPale]]; color:[[ColorPalette::Foreground]]; border: none;}
.popupHighlight {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
.listBreak div {border-bottom:1px solid [[ColorPalette::TertiaryDark]];}

.tiddler .defaultCommand {font-weight:bold;}

.shadow .title {color:[[ColorPalette::TertiaryDark]];}

.title {color:[[ColorPalette::SecondaryDark]];}
.subtitle {color:[[ColorPalette::TertiaryDark]];}

.toolbar {color:[[ColorPalette::PrimaryMid]];}
.toolbar a {color:[[ColorPalette::TertiaryLight]];}
.selected .toolbar a {color:[[ColorPalette::TertiaryMid]];}
.selected .toolbar a:hover {color:[[ColorPalette::Foreground]];}

.tagging, .tagged {border:1px solid [[ColorPalette::TertiaryPale]]; background-color:[[ColorPalette::TertiaryPale]];}
.selected .tagging, .selected .tagged {background-color:[[ColorPalette::TertiaryLight]]; border:1px solid [[ColorPalette::TertiaryMid]];}
.tagging .listTitle, .tagged .listTitle {color:[[ColorPalette::PrimaryDark]];}
.tagging .button, .tagged .button {border:none;}

.footer {color:[[ColorPalette::TertiaryLight]];}
.selected .footer {color:[[ColorPalette::TertiaryMid]];}

.error, .errorButton {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Error]];}
.warning {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryPale]];}
.lowlight {background:[[ColorPalette::TertiaryLight]];}

.zoomer {background:none; color:[[ColorPalette::TertiaryMid]]; border:3px solid [[ColorPalette::TertiaryMid]];}

.imageLink, #displayArea .imageLink {background:transparent;}

.annotation {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border:2px solid [[ColorPalette::SecondaryMid]];}

.viewer .listTitle {list-style-type:none; margin-left:-2em;}
.viewer .button {border:1px solid [[ColorPalette::SecondaryMid]];}
.viewer blockquote {border-left:3px solid [[ColorPalette::TertiaryDark]];}

.viewer table, table.twtable {border:2px solid [[ColorPalette::TertiaryDark]];}
.viewer th, .viewer thead td, .twtable th, .twtable thead td {background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::Background]];}
.viewer td, .viewer tr, .twtable td, .twtable tr {border:1px solid [[ColorPalette::TertiaryDark]];}

.viewer pre {border:1px solid [[ColorPalette::SecondaryLight]]; background:[[ColorPalette::SecondaryPale]];}
.viewer code {color:[[ColorPalette::SecondaryDark]];}
.viewer hr {border:0; border-top:dashed 1px [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::TertiaryDark]];}

.highlight, .marked {background:[[ColorPalette::SecondaryLight]];}

.editor input {border:1px solid [[ColorPalette::PrimaryMid]];}
.editor textarea {border:1px solid [[ColorPalette::PrimaryMid]]; width:100%;}
.editorFooter {color:[[ColorPalette::TertiaryMid]];}
.readOnly {background:[[ColorPalette::TertiaryPale]];}

#backstageArea {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::TertiaryMid]];}
#backstageArea a {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
#backstageArea a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; }
#backstageArea a.backstageSelTab {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
#backstageButton a {background:none; color:[[ColorPalette::Background]]; border:none;}
#backstageButton a:hover {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
#backstagePanel {background:[[ColorPalette::Background]]; border-color: [[ColorPalette::Background]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]];}
.backstagePanelFooter .button {border:none; color:[[ColorPalette::Background]];}
.backstagePanelFooter .button:hover {color:[[ColorPalette::Foreground]];}
#backstageCloak {background:[[ColorPalette::Foreground]]; opacity:0.6; filter:alpha(opacity=60);}
/*}}}*/
/*{{{*/
* html .tiddler {height:1%;}

body {font-size:.75em; font-family:arial,helvetica; margin:0; padding:0;}

h1,h2,h3,h4,h5,h6 {font-weight:bold; text-decoration:none;}
h1,h2,h3 {padding-bottom:1px; margin-top:1.2em;margin-bottom:0.3em;}
h4,h5,h6 {margin-top:1em;}
h1 {font-size:1.35em;}
h2 {font-size:1.25em;}
h3 {font-size:1.1em;}
h4 {font-size:1em;}
h5 {font-size:.9em;}

hr {height:1px;}

a {text-decoration:none;}

dt {font-weight:bold;}

ol {list-style-type:decimal;}
ol ol {list-style-type:lower-alpha;}
ol ol ol {list-style-type:lower-roman;}
ol ol ol ol {list-style-type:decimal;}
ol ol ol ol ol {list-style-type:lower-alpha;}
ol ol ol ol ol ol {list-style-type:lower-roman;}
ol ol ol ol ol ol ol {list-style-type:decimal;}

.txtOptionInput {width:11em;}

#contentWrapper .chkOptionInput {border:0;}

.externalLink {text-decoration:underline;}

.indent {margin-left:3em;}
.outdent {margin-left:3em; text-indent:-3em;}
code.escaped {white-space:nowrap;}

.tiddlyLinkExisting {font-weight:bold;}
.tiddlyLinkNonExisting {font-style:italic;}

/* the 'a' is required for IE, otherwise it renders the whole tiddler in bold */
a.tiddlyLinkNonExisting.shadow {font-weight:bold;}

#mainMenu .tiddlyLinkExisting,
	#mainMenu .tiddlyLinkNonExisting,
	#sidebarTabs .tiddlyLinkNonExisting {font-weight:normal; font-style:normal;}
#sidebarTabs .tiddlyLinkExisting {font-weight:bold; font-style:normal;}

.header {position:relative;}
.header a:hover {background:transparent;}
.headerShadow {position:relative; padding:4.5em 0 1em 1em; left:-1px; top:-1px;}
.headerForeground {position:absolute; padding:4.5em 0 1em 1em; left:0; top:0;}

.siteTitle {font-size:3em;}
.siteSubtitle {font-size:1.2em;}

#mainMenu {position:absolute; left:0; width:10em; text-align:right; line-height:1.6em; padding:1.5em 0.5em 0.5em 0.5em; font-size:1.1em;}

#sidebar {position:absolute; right:3px; width:16em; font-size:.9em;}
#sidebarOptions {padding-top:0.3em;}
#sidebarOptions a {margin:0 0.2em; padding:0.2em 0.3em; display:block;}
#sidebarOptions input {margin:0.4em 0.5em;}
#sidebarOptions .sliderPanel {margin-left:1em; padding:0.5em; font-size:.85em;}
#sidebarOptions .sliderPanel a {font-weight:bold; display:inline; padding:0;}
#sidebarOptions .sliderPanel input {margin:0 0 0.3em 0;}
#sidebarTabs .tabContents {width:15em; overflow:hidden;}

.wizard {padding:0.1em 1em 0 2em;}
.wizard h1 {font-size:2em; font-weight:bold; background:none; padding:0; margin:0.4em 0 0.2em;}
.wizard h2 {font-size:1.2em; font-weight:bold; background:none; padding:0; margin:0.4em 0 0.2em;}
.wizardStep {padding:1em 1em 1em 1em;}
.wizard .button {margin:0.5em 0 0; font-size:1.2em;}
.wizardFooter {padding:0.8em 0.4em 0.8em 0;}
.wizardFooter .status {padding:0 0.4em; margin-left:1em;}
.wizard .button {padding:0.1em 0.2em;}

#messageArea {position:fixed; top:2em; right:0; margin:0.5em; padding:0.5em; z-index:2000; _position:absolute;}
.messageToolbar {display:block; text-align:right; padding:0.2em;}
#messageArea a {text-decoration:underline;}

.tiddlerPopupButton {padding:0.2em;}
.popupTiddler {position: absolute; z-index:300; padding:1em; margin:0;}

.popup {position:absolute; z-index:300; font-size:.9em; padding:0; list-style:none; margin:0;}
.popup .popupMessage {padding:0.4em;}
.popup hr {display:block; height:1px; width:auto; padding:0; margin:0.2em 0;}
.popup li.disabled {padding:0.4em;}
.popup li a {display:block; padding:0.4em; font-weight:normal; cursor:pointer;}
.listBreak {font-size:1px; line-height:1px;}
.listBreak div {margin:2px 0;}

.tabset {padding:1em 0 0 0.5em;}
.tab {margin:0 0 0 0.25em; padding:2px;}
.tabContents {padding:0.5em;}
.tabContents ul, .tabContents ol {margin:0; padding:0;}
.txtMainTab .tabContents li {list-style:none;}
.tabContents li.listLink { margin-left:.75em;}

#contentWrapper {display:block;}
#splashScreen {display:none;}

#displayArea {margin:1em 17em 0 14em;}

.toolbar {text-align:right; font-size:.9em;}

.tiddler {padding:1em 1em 0;}

.missing .viewer,.missing .title {font-style:italic;}

.title {font-size:1.6em; font-weight:bold;}

.missing .subtitle {display:none;}
.subtitle {font-size:1.1em;}

.tiddler .button {padding:0.2em 0.4em;}

.tagging {margin:0.5em 0.5em 0.5em 0; float:left; display:none;}
.isTag .tagging {display:block;}
.tagged {margin:0.5em; float:right;}
.tagging, .tagged {font-size:0.9em; padding:0.25em;}
.tagging ul, .tagged ul {list-style:none; margin:0.25em; padding:0;}
.tagClear {clear:both;}

.footer {font-size:.9em;}
.footer li {display:inline;}

.annotation {padding:0.5em; margin:0.5em;}

* html .viewer pre {width:99%; padding:0 0 1em 0;}
.viewer {line-height:1.4em; padding-top:0.5em;}
.viewer .button {margin:0 0.25em; padding:0 0.25em;}
.viewer blockquote {line-height:1.5em; padding-left:0.8em;margin-left:2.5em;}
.viewer ul, .viewer ol {margin-left:0.5em; padding-left:1.5em;}

.viewer table, table.twtable {border-collapse:collapse; margin:0.8em 1.0em;}
.viewer th, .viewer td, .viewer tr,.viewer caption,.twtable th, .twtable td, .twtable tr,.twtable caption {padding:3px;}
table.listView {font-size:0.85em; margin:0.8em 1.0em;}
table.listView th, table.listView td, table.listView tr {padding:0 3px 0 3px;}

.viewer pre {padding:0.5em; margin-left:0.5em; font-size:1.2em; line-height:1.4em; overflow:auto;}
.viewer code {font-size:1.2em; line-height:1.4em;}

.editor {font-size:1.1em;}
.editor input, .editor textarea {display:block; width:100%; font:inherit;}
.editorFooter {padding:0.25em 0; font-size:.9em;}
.editorFooter .button {padding-top:0; padding-bottom:0;}

.fieldsetFix {border:0; padding:0; margin:1px 0px;}

.zoomer {font-size:1.1em; position:absolute; overflow:hidden;}
.zoomer div {padding:1em;}

* html #backstage {width:99%;}
* html #backstageArea {width:99%;}
#backstageArea {display:none; position:relative; overflow: hidden; z-index:150; padding:0.3em 0.5em;}
#backstageToolbar {position:relative;}
#backstageArea a {font-weight:bold; margin-left:0.5em; padding:0.3em 0.5em;}
#backstageButton {display:none; position:absolute; z-index:175; top:0; right:0;}
#backstageButton a {padding:0.1em 0.4em; margin:0.1em;}
#backstage {position:relative; width:100%; z-index:50;}
#backstagePanel {display:none; z-index:100; position:absolute; width:90%; margin-left:3em; padding:1em;}
.backstagePanelFooter {padding-top:0.2em; float:right;}
.backstagePanelFooter a {padding:0.2em 0.4em;}
#backstageCloak {display:none; z-index:20; position:absolute; width:100%; height:100px;}

.whenBackstage {display:none;}
.backstageVisible .whenBackstage {display:block;}
/*}}}*/
/***
StyleSheet for use when a translation requires any css style changes.
This StyleSheet can be used directly by languages such as Chinese, Japanese and Korean which need larger font sizes.
***/
/*{{{*/
body {font-size:0.8em;}
#sidebarOptions {font-size:1.05em;}
#sidebarOptions a {font-style:normal;}
#sidebarOptions .sliderPanel {font-size:0.95em;}
.subtitle {font-size:0.8em;}
.viewer table.listView {font-size:0.95em;}
/*}}}*/
/*{{{*/
@media print {
#mainMenu, #sidebar, #messageArea, .toolbar, #backstageButton, #backstageArea {display: none !important;}
#displayArea {margin: 1em 1em 0em;}
noscript {display:none;} /* Fixes a feature in Firefox 1.5.0.2 where print preview displays the noscript content */
}
/*}}}*/
<!--{{{-->
<div class='toolbar' role='navigation' macro='toolbar [[ToolbarCommands::ViewToolbar]]'></div>
<div class='title' macro='view title'></div>
<div class='subtitle'><span macro='view modifier link'></span>, <span macro='view modified date'></span> (<span macro='message views.wikified.createdPrompt'></span> <span macro='view created date'></span>)</div>
<div class='tagging' macro='tagging'></div>
<div class='tagged' macro='tags'></div>
<div class='viewer' macro='view text wikified'></div>
<div class='tagClear'></div>
<!--}}}-->
<html>
<center>
  <video id="my-video" class="video-js" controls preload="auto" width="700" height="460" poster="" data-setup="{}">
    <source src="video/putty.mp4" type='video/mp4'>
    <p class="vjs-no-js">
      To view this video please enable JavaScript, and consider upgrading to a web browser that
      <a href="http://videojs.com/html5-video-support/" target="_blank">supports HTML5 video</a>
    </p>
  </video>

  <script src="https://vjs.zencdn.net/7.8.2/video.min.js"></script>
</center>
</html>

{{Note{''Note:''  Be sure to use the appropriate shell server host name in PuTTY:  ''lab.ci233.net''}}}
<html>
<table border=0>
<tr border=0><TD align=center border=0>
<iframe src="https://calendar.google.com/calendar/embed?src=jrj1sasujp1l1ab5r4ejqbbet4%40group.calendar.google.com&ctz=America/New_York" style="border: 0" width="900" height="600" frameborder="0" scrolling="no"></iframe>
<BR><B>This calendar is authoritative should there be due date conflicts with other pages on the site</B>
</TD></TR>
</table>
</html>
A traditional class meeting on campus naturally allows for regular communication.  This helps students better understand the material and allows the instructor to more easily gauge how everyone is doing in the class.

I would like to ensure hosting this course online does not deprive us of regular communication.  Class participation will be worth 10% of your overall grade.  Each week's class participation will be worth ''50 points'' total. Multiple quality posts each week will be necessary to receive full credit.

Posts for each week must be made by Sunday, 11:55 p.m. (EST) the following week in order to receive full credit.  This allows one week to post questions about outstanding assignments and one week to post questions about labs after they have been returned.  Please post lab questions on the discussion board for the week they were assigned.

Joining group Zoom meetings will also earn class participation credit.  


!! Participation:

You may work collaboratively on assignments and provide assistance to one another in the Blackboard discussion boards. You can also provide ideas or helpful resources that assisted you on your assignments.  Credit may also be received for joining or participating in either regularly scheduled or ad-hoc group Zoom meetings.

!! Rubric for weekly class participation:

* 25 points - Actively participate in a group Zoom meeting
* 10-20 points - High quality posts which contain well-developed questions, actionable suggestions, or recommendations
* 15 points - Attend a group Zoom meeting
* 5-10 points - General comments regarding the assignments with no specific insights directly related to the problem, or responses to questions which are not actionable.


!! Quality of Remarks:

You will be evaluated based on the quality of your participation by making recommendations, answering questions and asking questions related to the problems, and making pertinent comments.

The discussion forum and Zoom meetings are a valuable component of learning since they allow you to see a variety of solutions and ideas just like you would in a classroom.

Generally, please do not post direct solutions to lab questions, especially unsolicited, before their due date.  Doing so will not earn participation points.  If someone is genuinely stuck on a problem and you'd like to help, guidance toward the solution is always a more beneficial place to start rather than just posting the answer.  If you just post the answer, I cannot tell whether someone understands the problem or simply copied your solution.

Please be sure to check out the [[Using Blackboard]] and [[Using Discord]] pages to see more useful information.


!! Adding New Threads

Create new threads in the weekly discussion board forum in which the material was assigned.  When naming your threads, use something descriptive and not just the lab and question number.  The highlighted thread is a model to follow; descriptive titles make things easier to find as the number of posts grows.  Be sure to scan for an existing thread relating to your topic before creating a new one.  Usability is an important consideration in what you do.  ''Not using descriptive thread titles is detrimental to usability, so that post will not receive full credit.''

[img[img/discussionBoards.png]]



/***
!! CollapseTiddlersPlugin
^^Author: Bradley Meck^^
^^Source: http://gensoft.revhost.net/Collapse.html^^

|ELS 2/24/2006: added fallback to "CollapsedTemplate" if "WebCollapsedTemplate" is not found |
|ELS 2/6/2006: added check for 'readOnly' flag to use alternative "WebCollapsedTemplate" |

***/

config.commands.collapseTiddler = {
text: "fold",
tooltip: "Collapse this tiddler",
handler: function(event,src,title)
{
var e = story.findContainingTiddler(src);
if(e.getAttribute("template") != config.tiddlerTemplates[DEFAULT_EDIT_TEMPLATE]){
var t = (readOnly&&store.tiddlerExists("WebCollapsedTemplate"))?"WebCollapsedTemplate":"CollapsedTemplate";
if (!store.tiddlerExists(t)) { alert("Can't find 'CollapsedTemplate'"); return; }
if(e.getAttribute("template") != t ){
e.setAttribute("oldTemplate",e.getAttribute("template"));
story.displayTiddler(null,title,t);
}
}
}
}

config.commands.expandTiddler = {
text: "unfold",
tooltip: "Expand this tiddler",
handler: function(event,src,title)
{
var e = story.findContainingTiddler(src);
story.displayTiddler(null,title,e.getAttribute("oldTemplate"));
}
}

config.macros.collapseAll = {
handler: function(place,macroName,params,wikifier,paramString,tiddler){
createTiddlyButton(place,"collapse all","",function(){
story.forEachTiddler(function(title,tiddler){
if(tiddler.getAttribute("template") != config.tiddlerTemplates[DEFAULT_EDIT_TEMPLATE]){
var t = (readOnly&&store.tiddlerExists("WebCollapsedTemplate"))?"WebCollapsedTemplate":"CollapsedTemplate";
if (!store.tiddlerExists(t)) { alert("Can't find 'CollapsedTemplate'"); return; }
story.displayTiddler(null,title,t);
}
})})
}
}

config.macros.expandAll = {
handler: function(place,macroName,params,wikifier,paramString,tiddler){
createTiddlyButton(place,"expand all","",function(){
story.forEachTiddler(function(title,tiddler){
var t = (readOnly&&store.tiddlerExists("WebCollapsedTemplate"))?"WebCollapsedTemplate":"CollapsedTemplate";
if (!store.tiddlerExists(t)) { alert("Can't find 'CollapsedTemplate'"); return; }
if(tiddler.getAttribute("template") == t) story.displayTiddler(null,title,tiddler.getAttribute("oldTemplate"));
})})
}
}

config.commands.collapseOthers = {
text: "focus",
tooltip: "Expand this tiddler and collapse all others",
handler: function(event,src,title)
{
var e = story.findContainingTiddler(src);
story.forEachTiddler(function(title,tiddler){
if(tiddler.getAttribute("template") != config.tiddlerTemplates[DEFAULT_EDIT_TEMPLATE]){
var t = (readOnly&&store.tiddlerExists("WebCollapsedTemplate"))?"WebCollapsedTemplate":"CollapsedTemplate";
if (!store.tiddlerExists(t)) { alert("Can't find 'CollapsedTemplate'"); return; }
if (e==tiddler) t=e.getAttribute("oldTemplate");
//////////
// ELS 2006.02.22 - removed this line. if t==null, then the *current* view template, not the default "ViewTemplate", will be used.
// if (!t||!t.length) t=!readOnly?"ViewTemplate":"WebViewTemplate";
//////////
story.displayTiddler(null,title,t);
}
})
}
}
<div><div class='toolbar' macro='toolbar -closeTiddler closeOthers +editTiddler  permalink references jump newHere expandTiddler collapseOthers'></div>
<div class='title' macro='view title'></div></div>
[[Home]]
<!--{{{-->
<div class='toolbar' macro='toolbar [[ToolbarCommands::EditToolbar]]'></div>
<div class='title' macro='view title'></div>
<div class="editor">Title</div><div class='editor' macro='edit title'></div>
<div class="editor">Tags</div><div class='editor' macro='edit tags'></div><div class='editorFooter'><span macro='message views.editor.tagPrompt'></span><span macro='tagChooser'></span></div>
<div macro='annotations'></div>
<div class='editor' macro='edit text'></div>
<!--}}}-->
! Material

!! Read:
* Read Chapter 20, pp 299-307 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]].  
** Stop after the grey box at the top of page 307.
** The book uses advanced regular expressions for some of its sed examples.  Don't worry about understanding what the regular expressions do.  Stick with the simple examples like the ones from page 301 through the top of page 303.
*** The first regular expression is at the top of page 305 and looks like this:  @@s/\([0-9]\{2\}\)\/\([0-9]\{2\}\)\/\([0-9]\{4\}\)$/\3-\1-\2/@@

!! Watch:
* {{Command{tr}}}:
** How to Use tr, sed, and aspell: Linux Terminal 201 - https://www.youtube.com/watch?v=F7Brrn-L1Zg
** Mostly for tr, but there's some talk about sed too
** Hak5 has a lot of great content.  Check out their other videos.
* {{Command{awk}}}: 
** Learning awk - https://www.youtube.com/watch?v=9YOZmI-zWok
** This video goes into advanced usage at the 11:30 mark that we won't be covering in this class.  You can stop at that point if you'd like.
* {{Command{sed}}}:
** SED Tutorial Basic Substitution - https://www.youtube.com/watch?v=32waL1Z9XK0&list=PLcUid3OP_4OW-rwv_mBHzx9MmE5TxvvcQ&index=1
** SED Substitute Beginning and End of Line: https://www.youtube.com/watch?v=8T5azKqYAjc&list=PLcUid3OP_4OW-rwv_mBHzx9MmE5TxvvcQ&index=2
** SED Remove Lines When Match is Found: https://www.youtube.com/watch?v=37r5Ykdnlkk&list=PLcUid3OP_4OW-rwv_mBHzx9MmE5TxvvcQ&index=13
** The entire sed series has great content, but those three are the highlights


! Notes

!! More complex filters

The {{Command{tr}}}, {{Command{awk}}}, and {{Command{sed}}} commands are a little more complex than the others we've introduced, but all three are important tools to have in your toolbox.  {{Command{awk}}} is easier to work with than cut for extracting columns of text.  {{Command{sed}}} is especially useful for search and replace operations and extracting particular rows of text from a file.

* {{Command{tr}}} - Translate characters
**Works only on stdin, does not alter the file, only the data stream
**Two arguments for translating characters  (set1/from)  (set2/to)
**Input characters in ''//set1//'' are mapped to corresponding characters in ''//set2//''.
**If the lengths of the two sets are unequal:
***''//set1//'' larger than ''//set2//'': ''//set2//'' is extended to the length of ''//set1//'' by repeating ''//set2//'''s last character as necessary.
***''//set2//'' larger than ''//set1//'': Excess characters in ''//set2//'' are ignored.
**Options:
*** {{Monospaced{-d}}} : delete  (one argument for which characters to delete)
*** {{Monospaced{-s}}} : Squeeze multiple consecutive occurrences of a character down into a single instance.
** Character classes:
*** Another way to identify groups of characters
*** Page 260 & 261 in //The Linux Command Line// 
*** {{Monospaced{[:digit:]}}}
*** {{Monospaced{ [:alpha:] [:lower:] [:upper:] }}}
*** {{Monospaced{[:space:] [:blank:]}}}
**Examples:
*** {{Command{tr '[:upper:]' '[:lower:]' < /etc/printcap}}}
*** {{Command{tr '[:upper:]' '*' < /etc/printcap}}}
*** {{Command{tr -s '[:upper:]' '*' < /etc/printcap}}}
** Special characters
*** On the Unix command line, {{Monospaced{''\t''}}} will represent a tab and {{Monospaced{''\n''}}} will represent a newline.  {{Command{tr}}} supports using these for substitutions.
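
For instance, a small illustration (not from the book) of translating tabs in a stream into newlines:
{{{
$ printf 'one\ttwo\tthree\n' | tr '\t' '\n'
one
two
three
}}}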

{{Warning{''Important note:''  Most students have trouble with the {{Command{tr}}} command and interpret its actions incorrectly.  {{Command{tr}}} stands for translate, and as such it translates ''characters'' individually.  It does ''NOT'' translate strings.  There is a big difference between the two.

{{Command{tr}}} will individually translate the characters from the first argument into the characters in the second argument according to their placement.  The first character in argument 1 will be translated into the first character in argument 2.  The translation will proceed for each character in the first argument to the corresponding position in the second argument.

{{Command{cat data.txt | tr one two}}}  does not convert the string //one// to the string //two// in the output of {{File{data.txt}}}.  It converts each ''o'' to ''t'', each ''n'' to ''w'', and each ''e'' to ''o''.  Each of those characters in the output of {{File{data.txt}}} is changed individually.

When completing labs involving {{Command{tr}}}, it is important that your responses indicate these translations are happening //character by character//.  Additionally, the translation does not occur in the //file// {{File{data.txt}}}.  Our source files are not modified by the filters.  It is important to indicate the translation is occurring in the //output// of the file {{File{data.txt}}}.

Providing a response which is less than clear on these important points will be considered incorrect.}}}
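
A quick demonstration of that character-by-character behavior (a made-up one-liner, purely for illustration):
{{{
$ echo "none" | tr one two
wtwo
}}}
The string //one// was not replaced with //two//; each ''o'' became ''t'', each ''n'' became ''w'', and each ''e'' became ''o''.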

!! {{Command{''sed''}}} & {{Command{''awk''}}}

I like using {{Command{''awk''}}} instead of {{Command{''cut''}}}.  Everything {{Command{''cut''}}} can do, {{Command{''awk''}}} can do better.  Often our delimiters are variable lengths of whitespace, such as several spaces or several tabs.  {{Command{''cut''}}} can only delimit on a single character, but {{Command{''awk''}}}'s default delimiter is whitespace, regardless of how long it is.  {{Command{''awk''}}} can also use multiple characters as a delimiter at the same time.

There's an [[Oreilly book|https://www.oreilly.com/library/view/sed-awk/1565922255/]] for just these two commands.  They're pretty powerful, but we're only going to scratch the surface.  We'll mostly work with {{Command{''awk''}}} but {{Command{''sed''}}} is good to know too.  Both come in very handy.


* {{Command{awk}}}
** {{Command{awk}}} is a fully functional programming language written for processing text and numbers.
** {{Command{tr}}} works byte by byte (1 character at a time)
** {{Command{grep}}} works line by line
** {{Command{awk}}} works field by field
** Terminology:
*** Record = line of input
*** Field = a column of data, separated by a delimiter
** basic usage:  {{Command{awk [-F//delim//] '{ action ; action ; action }' }}}
*** default action is to print the entire record
*** {{Monospaced{ ''-F'' }}} = specify alternate field separator (default is whitespace)
*** Multiple delimiters can be used.  For example, the option {{Monospaced{ ''-F'[-:]' '' }}} will set the delimiter to be either a colon or a dash.
*** ''Note:'' {{Command{cut}}} uses a single character for a delimiter where {{Command{awk}}}'s default is any amount of whitespace.  This is especially handy if a sequence of spaces or tabs is used between columns, such as in the output of the {{Command{w}}} command.
** advanced usage:  {{Command{ awk [-F//delim//] [ -v var=value ] '//pattern// { action ; action ; action }' }}}
*** //pattern// is an optional way to specify which lines to operate on
*** {{Monospaced{ ''-v'' }}} = define a variable and its value to be used within awk.  ex:  {{Monospaced{ ''-v start=10'' }}}
** Useful awk variables:
*** {{Monospaced{ ''$0'' }}} - The entire line of text (the whole record)
*** {{Monospaced{ ''$//n//'' }}} - The //n//^^th^^ data field of the record
*** {{Monospaced{ ''NR'' }}} - The current record (line) number
** Patterns can be (advanced use only, I will not give problems in labs or tests that require this):
*** Relational expressions  ( {{Monospaced{ ''<=, <, >, >=, ==, !='' }}} )
**** ex:  {{Monospaced{ ''$1 == $2'' }}}
*** Regular expressions /regex/
**** Must be enclosed in {{Monospaced{ ''/ /'' }}}
**** When specified, the regex must match somewhere on the line.  example: {{Monospaced{ ''/[0-9]+/'' }}}
**** Or use a pattern matching expression ( {{Monospaced{ '' ~, !~'' }}} ) to match regex to a specific field.  example:  {{Monospaced{ ''$1 ~ /^string/'' }}}
** Examples:
*** Show only the username and tty from the output of the {{Command{w}}} command: {{Command{w | awk '{print $1 " " $2}' }}}
**** Same output, but skip the first two header lines:  {{Command{w | awk ' NR > 2 {print $1 " " $2}' }}}
*** Set the delimiter to be the string {{Monospaced{ ''", "''}}} (comma then space), then invert the first and last names: {{Command{awk -F", " '{print $2, $1}' names }}}
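
Putting those examples together on a hypothetical {{File{names}}} file (the contents below are invented for illustration):
{{{
$ cat names
Aiello, Tony
Baker, Mary
$ awk -F", " '{print $2, $1}' names
Tony Aiello
Mary Baker
$ awk -F", " 'NR > 1 {print $2, $1}' names
Mary Baker
}}}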


* {{Command{sed}}}:  Stream editor  //commands// //file(s)//
**Works mainly on streams, but can also be used to modify files in place when used with the {{Monospaced{ -i }}} option.
*** Be sure you are clear about this in your labs.  A response that indicates a change or deletion is occurring in the file will not be correct.  By default, changes are happening to the output of the file.
**We use {{Command{sed}}} to change the text in a stream.
**For each line in the //file//, check to see if it is addressed. If so, perform //command//
**[address1[,address2]] command [options]
***Addresses can be line numbers:  start[,stop]
***simple patterns:  {{Monospaced{ /pattern/ }}}
***The pattern can contain our ^ and $ anchors
***or regular expressions:  {{Monospaced{ /regex/ }}}
***Defaults to all lines if none are addressed
**Most used sed commands
*** {{Monospaced{s}}} - substitute - {{Monospaced{s/find/replace/flags}}}
**** flags:
***** {{Monospaced{g}}} - all instances on the line
***** {{Monospaced{p}}} - print lines containing a substitution
*** {{Monospaced{d}}} - delete line
*** {{Monospaced{p}}} - print line
*** {{Monospaced{y}}} - translate characters on the line (similar to {{Command{tr}}} command)
**Options:
*** {{Monospaced{-n}}} : suppress normal behavior and only show lines addressed and given {{Monospaced{p}}} command.
**sed examples:
*** {{Command{sed '7p' file1}}} - print line 7 twice (notice absence of {{Monospaced{-n}}} option)
*** {{Command{sed '7d' file1}}} - delete line 7 from the output
*** {{Command{sed '/pattern/d' file1}}} - delete all lines containing //pattern// from the output
****Pattern can contain ^ and $ anchors and [sets]
****[sets] examples:  [abc]  [aeiou]  [~A-Z]  [a-z]  [A-z]   [0-9]
*** {{Command{sed -n '1,6p' file1}}} - only print lines 1 through 6 (notice the inclusion of the {{Monospaced{-n}}} option)
*** {{Command{sed 's/Sam/Bob/' file1}}}  -  All lines with Sam changed to Bob  (just once)
*** {{Command{sed 's/Sam/Bob/g' file1}}}  -  All lines with Sam changed to Bob  (all matches on the line)
*** {{Command{sed 's/Sam/Bob/gp' file1}}}  -  All lines with Sam changed to Bob  (all matches on the line).  Notice the lack of the {{Monospaced{-n}}} option.
*** {{Command{sed -n 's/Sam/Bob/gp' file1}}}  -  All lines with Sam changed to Bob  (all matches), only printing lines where the substitution occurred
*** For addressing lines, {{Monospaced{$}}} = last line in the output

{{Note{''Note:'' Always put your awk & sed commands (the first argument), within single quotes, for example:  {{Command{sed -n '4,6p' file1.txt}}} }}}
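
Here's a short worked example using a made-up {{File{file1.txt}}} (the file contents are invented purely for illustration):
{{{
$ cat file1.txt
Sam went to the store.
Sam saw Sam's friend.
The end.
$ sed 's/Sam/Bob/' file1.txt
Bob went to the store.
Bob saw Sam's friend.
The end.
$ sed -n 's/Sam/Bob/gp' file1.txt
Bob went to the store.
Bob saw Bob's friend.
}}}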

<html>
<center>
  <video id="my-video" class="video-js" controls preload="auto" width="984" height="768" poster="" data-setup="{}">
    <source src="video/FoxyProxy.mp4" type='video/mp4'>
    <p class="vjs-no-js">
      To view this video please enable JavaScript, and consider upgrading to a web browser that
      <a href="http://videojs.com/html5-video-support/" target="_blank">supports HTML5 video</a>
    </p>
  </video>

  <script src="https://vjs.zencdn.net/7.8.2/video.min.js"></script>
</center>
</html>
!! Problem Reports:

If you have a problem, please send me a report I can work with.  I need details of the problem, what you tried, steps you took to diagnose it, screenshots, etc.  If you send me something basic like "X doesn't work" with no supporting details, there may not be much I can do for you; I will wait for you to follow up with meaningful details.


!! Time management & workload expectations:

MVCC, like most other institutions, [[requires 42.5 hours of work per credit hour|https://www.suny.edu/sunypp/documents.cfm?doc_id=168]].  A three-credit course will thus require about 128 hours over our 16-week term, or 8 hours per week.  Going to college full time is effectively a full-time job.  I will be expecting that time commitment each week.

Waiting until the last minute to complete (or, even worse, to begin) the lab assignments is not a recipe for success.  Review the tasks early so you have plenty of time to research the problems, seek help in the discussion boards, and get up to speed if you are behind on any prerequisite material.


!! Grading:

All course deliverables will be collected as PDF documents.  Graded copies of these PDF documents will be returned to you containing my annotations.  If you have questions regarding your grade or my comments, please contact me via email.

My grading is more traditional.  Meeting the bare minimum does not yield an A.  A high grade will require intellectual curiosity, problem-solving abilities, and thorough responses.


Letter grades will be assigned as follows:

| !Percent | !Grade |
| ≥ 90% | A |
| ≥ 80% | B |
| ≥ 70% | C |
| ≥ 65% | D |
| < 65% | F |


!CI 233 Course Notes

[[Getting Started|Week 0]] - Administrative Tasks & course intro

Aug 26 [[Week 1]] - Course intro & Linux Review 
Sept 2 [[Week 2]] - Linux Review 
Sept 9 [[Week 3, Part 1]] - File Permissions
Sept 11 [[Week 3, Part 2]] - Process management & Job control
Sept 16 [[Week 4, Part 1]] - Review past labs & catch up
Sept 18 [[Week 4, Part 2]] - I/O practice & Quoting
Sept 23 [[Week 5, Part 1]] - Substitution
Sept 25 [[Week 5, Part 2]] - Text Editors & Shell Scripting Intro
Sept 30 [[Week 6, Part 1]] - Shell Scripting
Oct 2 [[Week 6, Part 2]] - Version control with git
Oct 7 [[Week 7, Part 1]] - Shell scripting 2
Oct 9 Week 7, Part 2 - Catch up and review or push ahead
Oct 14 [[Week 8, Part 1]] - The Environment

Oct 21 [[Week 9, Part 1]] - Basic networking & SSH
Oct 23 [[Week 9, Part 2]] - System Basics - Starting and Stopping, init & run levels, layout of the operating system, system configuration (/etc/ files)
Important background material - [[Working more efficiently with GNU screen & SSH keys]] and [[Tunnels & Proxies with SSH]]
Oct 28 [[Week 10, Part 1]] - Expanding our systems: Working with rpm & yum, installing software from package and source
Oct 30 [[Week 10, Part 2]] - Web services and proxies
Nov 11 [[Week 12, Part 1]] - Time & Logging
Nov 18 [[Week 13, Part 1]] - Access control and user management
Nov 25 [[Week 14, Part 1]] - Linux Firewalls 
Dec 2 [[Week 15, Part 1]] - Storage systems  
Dec 4 [[Week 15, Part 2]] - Logical Volume Manager
/%
Nov 20 Week 13, Part 2 -  Catch up and review or push ahead
Nov 25 [[Week 14, Part 1]] - Containerization with Docker
Nov 27 [[Week 14, Part 2]] - Thanksgiving Break!
Dec 2 [[Week 15, Part 1]] - Server Orchestration
Dec 4 [[Week 15, Part 2]] - 

Dec 9 [[Week 16]] - Finals Week
%/

!!!&nbsp;Agendas for days in italics are tentative
/%
Nov 21 Week 13, Part 1 - Catch up and review or push ahead
/%

Extra Credit Material:
&nbsp;&nbsp; - We don't have time for this, but it's good stuff to know:
* [[Week B]] - Regular Expressions
/%
Other topics to work in:
* process isolation & containers
* System Integrity Monitoring
** vulnerability monitoring
** file integrity monitoring
** automated defense with fail2ban
* central authentication
%/

<html>
<font size="-2">Last Updated: 241204 23:21</font>
</html>
/***
To use, add {{{[[Styles HorizontalMainMenu]]}}} to your StyleSheet tiddler, or you can just paste the CSS in directly. See also HorizontalMainMenu and PageTemplate.
***/
/*{{{*/

#topMenu br {display:none; }
#topMenu { background: #39a; }
#topMenu { float: left; }
#topMenu { width: 90%; }
#topMenu { padding: 2px 0 2px 0; }
#topMenu .button,  #topMenu .tiddlyLink { padding-left:1em; padding-right:1em; color:white; font-size:115%;}
#displayArea { margin: 1em 15.7em 0em 1em; }


#rightMenu {
   float: right;
   background: #39a;
   width: 10%;
   padding: 2px 0 2px 0;
}
#rightMenu .button,  #rightMenu .tiddlyLink { padding-left:1em; padding-right:1em; color:white; font-size:115%;}

/* just in case want some QuickOpenTags in your topMenu */
#topMenu .quickopentag { padding:0px; margin:0px; border:0px; }
#topMenu .quickopentag .tiddlyLink { padding-right:1px; }
#topMenu .quickopentag .button { padding-left:1px; border:0px; }


/*}}}*/
!![[Lab 41 - Bring test and www online]]
Assigned [[Week 9, Part 1]]

* Set a root password for your test and www ~VMs so you are able to log into them via SSH.
* Ensure your test and www ~VMs are online and joined to the lab network.  
** The notes above will help you configure networking
*** Use the hostname {{Monospaced{''www.//username//.ci233.net''}}} and the second IP address in your range for your web server VM.
** [[Virtual Machines]] - VM information (Linked on the top menu bar)
** Also complete and submit the [[Lab 41|labs/lab41.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated.
* Connect to your ~VMs via SSH from the class shell server

The Proxmox virtual console is a means to access the ~VMs for initial troubleshooting and in case something goes wrong.  Once your ~VMs are online, all work should be done via SSH login.  

You cannot log into these ~VMs directly since they are on private IP addresses (192.168.13.x) behind the class router.  They can only be accessed by first connecting to the class shell server from home.  Use PuTTY (or another SSH client) to connect to the class shell server and then use the {{Command{ssh}}} command to connect to your ~VMs.
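
A rough sketch of the two-hop connection (the username and final octet are placeholders; substitute your own values):
{{{
# From home, connect to the class shell server:
ssh username@lab.ci233.net

# From the shell server, hop to your VM on the lab network:
ssh root@192.168.13.X
}}}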

{{Warning{Be sure to keep your ~VMs online and do not power them down, else it'll look like the work hasn't been completed when it comes time for grading.}}}
!![[Lab 42 - VM updates & software installation]]
Assigned [[Week 10, Part 1]]

!!! On both ~VMs:
* Update the OS and currently installed software
* Install the following packages via {{Command{ yum}}}:  man wget nc telnet bind-utils openssh-clients rsync bzip2

* Also complete and submit the [[Lab 42|labs/lab42.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated.

These packages will also need to be installed on all future ~VMs.  Make a note of it in your documentation.
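
One possible command sequence for the above, as a sketch (run as root on each VM; {{Monospaced{-y}}} simply answers the confirmation prompts):
{{{
yum update -y
yum install -y man wget nc telnet bind-utils openssh-clients rsync bzip2
}}}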
!![[Lab 43 - Storage Expansion]]
Assigned [[Week 13, Part 1]]

Some systems need additional storage beyond what was initially provisioned.  Here, we have a file server VM that was created with an additional disk.  We now need to make that additional disk available to the operating system for storing additional data.

Perform the following steps on your files VM.

!!! Observe available storage devices

The {{Command{lsblk}}} command is a quick way to visualize all storage devices available to a system.  Here, we can see that there are two unallocated drives - {{File{vdb}}} and {{File{vdc}}}.  We'll use {{File{vdb}}} for this lab and leave {{File{vdc}}} alone.
{{{
[root@files ~]# lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0              11:0    1 1024M  0 rom
vda             252:0    0   16G  0 disk
├─vda1          252:1    0    1G  0 part /boot
└─vda2          252:2    0   15G  0 part
  ├─centos-root 253:0    0 13.4G  0 lvm  /
  └─centos-swap 253:1    0  1.6G  0 lvm  [SWAP]
vdb             252:16   0    2G  0 disk
vdc             252:32   0    2G  0 disk
}}}

!!! Create vdb1 Partition

It's generally preferred to create partitions on a drive instead of using the bare device.  Partitions are logical divisions of the physical disk that will then hold the filesystem.  Here, we're going to devote the entire disk to a single partition and a single filesystem.  Creating multiple partitions on a disk allows it to hold separate filesystems.  In some instances, physically dividing groups of files into separate filesystems is preferred.  One example is logs.  If you have a system, such as a web server, that may generate a lot of logs, it's wise to store those logs on their own filesystem.  If everything is stored on the same filesystem, excessive logs could fill the disk and interfere with the rest of the system's ability to store new data.

Refer to the //Storage Layers// diagram.  We'll be following the path on the left from Storage Devices to Partitions to Filesystems.
[img[img/storage-layers.jpg]]

Duplicate this interaction with the {{Command{parted}}} command to create a new disk label and new partition.  The first {{Command{print}}} command shows the disk is currently bare.
{{{
[root@files ~]# parted /dev/vdb
GNU Parted 3.1
Using /dev/vdb
Welcome to GNU Parted! Type 'help' to view a list of commands.
(parted) print
Error: /dev/vdb: unrecognised disk label
Model: Virtio Block Device (virtblk)
Disk /dev/vdb: 2147MB
Sector size (logical/physical): 512B/512B
Partition Table: unknown
Disk Flags:
(parted)
}}}
{{{
(parted) mklabel gpt
(parted) mkpart
Partition name?  []? storage
File system type?  [ext2]? xfs
Start? 1
End? 100%
(parted) quit
Information: You may need to update /etc/fstab.

[root@files ~]#
}}}


Now run {{Command{ lsblk }}} to verify the new partition was created.  It's always wise to add verification steps as you proceed instead of just blindly assuming everything is working as it should.  If you compare this output to the one above, you'll see that the {{File{ vdb1 }}} partition has been created.
{{{
[root@files ~]# lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0              11:0    1 1024M  0 rom
vda             252:0    0   16G  0 disk
├─vda1          252:1    0    1G  0 part /boot
└─vda2          252:2    0   15G  0 part
  ├─centos-root 253:0    0 13.4G  0 lvm  /
  └─centos-swap 253:1    0  1.6G  0 lvm  [SWAP]
vdb             252:16   0    2G  0 disk
└─vdb1          252:17   0    2G  0 part
vdc             252:32   0    2G  0 disk
}}}


!!! Create the filesystem

We can see from the {{Command{ lsblk }}} command that the new partition, {{File{vdb1}}}, has been successfully created.  Now we must put a filesystem on it.  Partitions are the physical divisions of a disk.  Filesystems are the data structures the operating system interacts with in order to store files.
{{{
[root@files ~]# mkfs.xfs /dev/vdb1
meta-data=/dev/vdb1              isize=512    agcount=4, agsize=130944 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=523776, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
}}}


!!! Create the mount point

A mount point is a representation of the filesystem that we can interact with.  On Windows systems, mount points are generally drive letters, like C:\ or D:\.  In the Unix/Linux world, everything is one big filesystem tree.  Linux mount points are directories on that tree.  We identify a directory to mount our new filesystem to, and then any interaction with that directory and all items within it will be directed to our new disk volume.  Here, we want to make our new disk available to the system at the directory {{File{ /opt/storage/ }}}.

We first need to ensure the new mount point exists:
{{{
[root@files ~]# mkdir /opt/storage
}}}


!!! Edit the filesystem table

The ''f''ile''s''ystem ''tab''le, {{File{/etc/fstab}}}, is the configuration file which specifies which disk volumes are mounted at system startup.  Add your new disk volume to the file so it is mounted on boot.

Here's a copy of my {{File{ /etc/fstab }}} file.  The last line is the one you need to copy to yours.  Each line contains:
* the physical volume, {{File{ /dev/vdb1 }}}
* the mount point, {{File{/opt/storage }}}
* the filesystem type, {{Monospaced{ xfs }}}
* any special mount options.  Here, just the {{Monospaced{ defaults }}}
* a binary value ({{Monospaced{0}}} or {{Monospaced{1}}}) to indicate whether the filesystem should be backed up.  This is largely deprecated.
* the order in which filesystem checks ({{Command{fsck}}} command) should be run.  A value of {{Monospaced{ 0 }}} disables these checks
{{{
[root@files ~]# cat /etc/fstab

#
# /etc/fstab
# Created by anaconda on Fri Mar 13 00:03:20 2020
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos-root /                       xfs     defaults        0 0
UUID=f68b9069-7271-48de-b968-00d62e825144 /boot                   xfs     defaults        0 0
/dev/mapper/centos-swap swap                    swap    defaults        0 0

/dev/vdb1               /opt/storage      xfs   defaults        0 0
}}}


!!! Mount the new filesystem

Changes to the {{File{/etc/fstab}}} file should be tested and filesystems mounted with the {{Command{ mount -a }}} command.  This will catch any errors in the file.  If there is an error mounting a filesystem on system startup, the OS will not fully load and your only option will be to fix the problem on console.  This can be a nasty surprise if you don't have easy access to console.

The {{Command{df -h}}} command adds a verification step that the filesystem is fully mounted and accessible.  The old proverb //trust, but verify// must apply to everything you do.
{{{
[root@files ~]# mount -a

[root@files ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
devtmpfs                 232M     0  232M   0% /dev
tmpfs                    244M  120K  244M   1% /dev/shm
tmpfs                    244M   29M  215M  12% /run
tmpfs                    244M     0  244M   0% /sys/fs/cgroup
/dev/mapper/centos-root   14G  2.4G   12G  18% /
/dev/vda1               1014M  228M  787M  23% /boot
tmpfs                     49M     0   49M   0% /run/user/7289
/dev/vdb1                2.0G   33M  2.0G   2% /opt/storage
}}}


!!! Verification worksheet

You should now have successfully added a new storage volume to your files server VM.  Complete and submit the [[Lab 43|labs/lab43.pdf]] verification worksheet when you are ready for review.
!! [[Lab 43 - VM Lockdown - Secure your VMs]]
Assigned [[Week 12, Part 1]]

!!! Add user accounts to all ~VMs

Add two local user accounts to your ~VMs
* First account - set the user name to your campus username
** UID = 1000
** GID = 100
** Set a valid password
** Create a home directory within {{File{/home/}}}
** Copy the environment configuration files from {{File{/etc/skel/}}} to the new home directory
* Second account - username = nmerante
** UID = 7289
** GID = 100
** Create a home directory within {{File{/home/}}}
** Copy the environment configuration files from {{File{/etc/skel/}}} to the new home directory
** Copy my SSH public key (see below) to its {{File{~/.ssh/authorized_keys}}} file
** Use this password hash:  
{{{
$6$hiaEgh6A$cEew6uUV8v5IBrwIMRahAyoOlgnKOaonnFx4sCzW4bu6mr17/2LcSdKknVa0GuytKqby391Z3p03FNelrNGD2.
}}}

* Verify permissions:
** Both user's home directories and all files below them must be owned by the user and GID 100
** The user's home directory must have proper directory permissions - it must not be writable by the group or others for proper SSH function.
* Verify ~SELinux
** ~SELinux must be disabled for SSH public key authentication to function properly
** Edit {{File{/etc/selinux/config}}} and change enforcing to disabled on line #7 to disable ~SELinux on system startup
** Execute {{Command{setenforce 0}}} to disable ~SELinux for the current boot

My SSH public key
{{{
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBEMi6EGHiuie983J5ASnf+eY7Bqf50rDNKItlZPWtL/ nmerante@ci233
}}}
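
One possible way to carry out the account setup, shown only as a sketch (replace {{Monospaced{jdoe}}} with your own campus username; the password hash and public key are the ones provided above):
{{{
useradd -u 1000 -g 100 -m jdoe        # -m creates /home/jdoe and copies /etc/skel into it
passwd jdoe                           # set a valid password interactively

useradd -u 7289 -g 100 -m nmerante
usermod -p '<password hash from above>' nmerante

# Install the provided SSH public key with proper ownership and permissions:
mkdir -p /home/nmerante/.ssh
echo '<public key from above>' >> /home/nmerante/.ssh/authorized_keys
chown -R nmerante:100 /home/nmerante/.ssh
chmod 700 /home/nmerante/.ssh
chmod 600 /home/nmerante/.ssh/authorized_keys
}}}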


!!! Disable direct root login via SSH on all ~VMs

* Adjust the sshd configuration to disable direct root logins.  All users must first log in as a regular account and then elevate privileges.  
** Look for the ~PermitRootLogin configuration option in /etc/ssh/sshd_config
* Adjust PAM to require wheel group membership in order to su to root
** Look in /etc/pam.d/su
* Don't forget to add both user accounts to the wheel group
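
A hedged sketch of how these pieces typically fit together on CentOS (verify the exact option names in your own config files; {{Monospaced{jdoe}}} is a placeholder):
{{{
# /etc/ssh/sshd_config -- change the PermitRootLogin line to:
#     PermitRootLogin no
systemctl restart sshd        # apply the change

# /etc/pam.d/su -- uncomment the line that requires wheel membership:
#     auth           required        pam_wheel.so use_uid

# Add both accounts to the wheel group:
usermod -aG wheel jdoe
usermod -aG wheel nmerante
}}}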
!![[Lab 43 - Web Server]]
Assigned [[Week 10, Part 1]]

!!! Lab Tasks
<<<
1. Install apache and PHP on your web server
* Directions for this are above

2. Become familiar with apache and its configuration.  
* Check out the config files within {{File{/etc/httpd/conf/}}}
* The file {{File{/etc/httpd/conf/httpd.conf}}} is the main configuration file
*      The Linux Bible Chapter 17 (Configuring a Web Server) will be a useful resource

3. Change the Apache {{Monospaced{''~DocumentRoot''}}} directory to {{File{/opt/work/htdocs}}}
* Create the directory {{File{/opt/work/htdocs}}} on your web server VM
* Make a backup of your Apache configuration file
** Always take a backup of a configuration file before making changes.  See the note below.  This way you'll have a known-good copy to refer to if there's any problems.
* Update the apache configuration lines necessary to make this change 
** (you may need to change this path in more than one location within the Apache config)
* Don't forget to restart Apache after changing its configuration file

4. Download the new {{File{index.html}}} file from my web server at {{Monospaced{''192.168.13.25''}}} to your new Apache {{Monospaced{''~DocumentRoot''}}} directory
* The file {{File{index.html}}} is the default web page delivered to a client (eg: your web browser).  This file must exist in the correct location with correct permissions so your web server can provide content.

5. Ensure your web server is providing the correct website.  The new site should be 6 lines long and include ''Welcome to ~CI233!'' in the body.
<<<
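
A rough sketch of steps 3 through 5, assuming the stock httpd layout on our ~VMs (the exact configuration lines to change are for you to find):
{{{
mkdir -p /opt/work/htdocs

# back up the config before editing (see the warning below)
cd /etc/httpd/conf
cp httpd.conf httpd.conf.210322-1522.bak

# point DocumentRoot (and the matching <Directory> block) at /opt/work/htdocs,
# then restart Apache to apply the change
vi httpd.conf
systemctl restart httpd

# fetch the new index.html into the new DocumentRoot and verify it is served
curl -o /opt/work/htdocs/index.html http://192.168.13.25/index.html
curl http://localhost/
}}}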

{{Warning{''Warning:'' It's always wise to make a backup of a configuration file before making changes.  The easiest way to do so is to copy the file with a timestamp appended to the new file name, for example:  {{Command{cp httpd.conf httpd.conf.210322-1522.bak}}}.  This captures the date & time in a way that's easily sortable.  The {{Command{diff}}} command can compare the config file to a backup, showing lines which differ between the two.  Example:  {{Command{diff httpd.conf httpd.conf.210322-1522.bak}}}
}}}

!!! Lab Deliverable

* Also complete and submit the [[Lab 43|labs/lab43.pdf]] worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated.
!![[Lab 44 - Set up MediaWiki]]
Assigned [[Week 10, Part 2]]

Complete the steps on this page to install and configure ~MediaWiki

Install [[MediaWiki|http://www.mediawiki.org/wiki/MediaWiki]] and customize it to your tastes.
* Install ~MariaDB
** Add a wiki user and database
* Download the ~MediaWiki source tarball
** Extract its contents to {{File{/opt/work/htdocs/}}}
** Rename the extracted directory to ''wiki''
* Update php and install dependencies
* Set up a tunnel or proxy to access your wiki
** You can access it by IP address until DNS is online:  http://your_IP/wiki/
** Be sure to replace //your_IP// with proper values.
* Configure ~MediaWiki to fully bring it online

* Be sure you can view the wiki after uploading the {{File{~LocalSettings.php}}} file.  It should look something like this:
[img[img/wiki.png]]


* Also complete and submit the [[Lab 44|labs/lab44.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated.

!! [[Lab 45 - Bring core VM online]]
Assigned [[Week 12, Part 1]]

<<<
Bring new core VM online:
* Hostname = {{Monospaced{core.//username//.ci233.net}}}
* Use the third IP address in your range
* Apply outstanding updates and ensure your VM is running the latest available kernel
** A system reboot may be necessary if the kernel was also updated
* Also complete and submit the [[Lab 45|labs/lab45.pdf]] verification worksheet.  This submitted worksheet will indicate your VM is online and ready to be evaluated for this lab.

Install additional software:
* Standard packages, as previously discussed and recorded in your class notes
* Time packages:  {{Monospaced{chrony ntpstat}}}
<<<
!! [[Lab 46 - Time]]
Assigned [[Week 12, Part 1]]


!!! Modify Hosts file:

Add a record similar to the following to the {{File{/etc/hosts}}} file on all of your ~VMs:
<<<
192.168.13.26    core core.nmerante.ci233.net ntp.nmerante.ci233.net loghost ntp
<<<
* The IP address should be the address for your core VM
* Replace my username with yours
* These steps won't work if that line is missing or incorrect. 
* Read about the {{File{/etc/hosts}}} file on page 356 in the Linux Administration textbook.  This file is necessary because we don't have DNS running yet.

/%
!!! Remove chronyd

{{Warning{The chronyd NTP service is already installed and running.  This service will prevent the steps laid out in this lab from successfully completing.  Stop and disable the {{Monospaced{chronyd}}} service before proceeding with this lab.}}}
%/

!!! Install NTP services and synchronize time:

Install {{Monospaced{chrony}}} and {{Monospaced{ntpstat}}} on all ~VMs

core VM:  Configure {{Monospaced{chrony}}} ({{File{/etc/chrony.conf}}}) as a time server:
* Synchronize time from the lab ntp server instead of the pool servers - use {{Monospaced{ntp.ci233.net}}}
* Allow your block of 8 IP addresses to communicate with the NTP service running on your core VM
* Add an allow directive to grant the naemon server full access:
** {{Monospaced{allow 192.168.12.15}}}
** See my config below for examples

test & www VM (and future ~VMs):  Configure {{Monospaced{chrony}}} ({{File{/etc/chrony.conf}}}) as a client:
* Synchronize time from the ntp service on your core VM instead of the pool NTP servers
** Use the hostname {{Monospaced{ntp.//username//.ci233.net}}} instead of IP addresses
** This hostname should resolve due to the entry you just added to the {{File{/etc/hosts}}} file.  Test it with {{Command{ping}}}.

My Configs for reference, with comments removed (click the yellow box to expand them):
* Be sure to change host names and IP addresses appropriately:
* +++[My NTP Server] 
{{Monospaced{core# }}} {{Command{ grep '^[a-z]' /etc/chrony.conf }}}
{{{
pool ntp.ci233.net iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
allow 192.168.13.24/29
allow 192.168.12.15
keyfile /etc/chrony.keys
leapsectz right/UTC
logdir /var/log/chrony
}}}
===

* +++[My NTP Clients]
{{Monospaced{www# }}} {{Command{ grep '^[a-z]' /etc/chrony.conf }}}
{{{
pool ntp.merantn.ci233.net iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
allow 192.168.13.24/29
allow 192.168.12.15
keyfile /etc/chrony.keys
leapsectz right/UTC
logdir /var/log/chrony
}}}
===


!!! Start & enable Chrony
All ~VMs (current and future):
* Set chronyd to start on boot on all ~VMs
* Start the chronyd service now on all ~VMs
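
For example, using the standard {{Command{systemctl}}} invocations on our ~VMs:
{{{
systemctl enable chronyd    # start on boot
systemctl start chronyd     # start now
systemctl status chronyd    # verify it is running
}}}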


!!!! Verify it is working and time is being synchronized properly:

My core VM, an NTP server:
{{{
[root@core ~]# ntpstat
synchronised to NTP server (192.168.12.15) at stratum 3
   time correct to within 56 ms
   polling server every 1024 s

[root@core ~]# chronyc sources
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^* 192.168.12.15                 2   6    17     1  +5071ns[  +25us] +/- 5196us
}}}

My www server, an NTP client:

It took a few minutes after starting the services for the clock to synchronize:
{{{
[root@www ~]# ntpstat
unsynchronised
  time server re-starting
   polling server every 8 s
}}}

Eventually it did and I saw this:
{{{
[root@www ~]# ntpstat
synchronized to NTP server (192.168.13.26) at stratum 4
   time correct to within 232 ms
   polling server every 64 s

[root@www ~]# chronyc sources
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^* core                          3   6    17     8  +1844ns[  +17us] +/- 5296us
}}}


* Also complete and submit the [[Lab 46|labs/lab46.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated for this lab.


!!! Troubleshooting

Time synchronization with chrony doesn't happen immediately.  The service needs some time to build trust in its upstream time provider before it will use it as a time source.  Allow at least 30 minutes after starting the services for this trust to be established before reporting issues.

If you are having difficulty getting time to synchronize, the following commands may help direct you to the root cause:

* {{Command{systemctl status -l chronyd}}}
* {{Command{ntpstat}}}
* {{Command{chronyc sources}}}
* {{Command{cat /etc/hosts}}}
* {{Command{ps aux | grep chronyd}}}

Any requests for help in the discussion boards should include output from the above commands for both your NTP server and any impacted NTP clients, along with their chrony configuration files.  A copy/paste of the text into the discussion boards is easier to work with and highlights issues better than screenshots.  Be sure to include the shell prompt and command with any output.  ''Do not'' post output without the shell prompt and command which produced it.
!! [[Lab 47 - Logging]]
Assigned [[Week 12, Part 1]]

{{Note{''Note:'' This lab is due Saturday}}}

!!! Modify Hosts file:

Be sure a record similar to the following exists in the file {{File{/etc/hosts}}} file on all of your ~VMs.  This should have been completed in the previous lab.
<<<
{{Monospaced{192.168.13.26    core core.nmerante.ci233.net ntp.nmerante.ci233.net loghost ntp}}}
<<<
* The IP address should be the address for your core VM
* Replace my username with yours
* These steps won't work if that line is missing or incorrect. 

!!! syslog:

!!!! core VM:  
* configure syslog to receive log information from other hosts

On your core VM, find these lines at the top of the file {{File{/etc/rsyslog.conf}}}:
{{{
# Provides UDP syslog reception
# for parameters see http://www.rsyslog.com/doc/imudp.html
# module(load="imudp") # needs to be done just once
# input(type="imudp" port="514")
}}}

Remove the comment markers from the bottom two lines (the first two lines are actual comments and should remain as they are).
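
Once uncommented, the two directives should look like this (a sketch; leave the rest of the file as-is):
{{{
module(load="imudp") # needs to be done just once
input(type="imudp" port="514")
}}}
Then restart the service on the core VM so it begins listening on UDP port 514: {{Command{systemctl restart rsyslog}}}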

!!!! www VM:
* configure syslog to also send log information to the core VM

On your www VM, append these lines at the bottom of the file {{File{/etc/rsyslog.conf}}}:

{{{
action(type="omfwd"
Target="loghost" Port="514" Protocol="udp")
}}}
* ''loghost'' is an alias for our core VM defined in the file {{File{/etc/hosts}}}
** It's handy to use aliases like this in case we need to move our log destination; we can then simply point the alias at a different system.  That isn't very convenient while our systems are defined in the {{File{/etc/hosts}}} file, but it becomes easy once DNS is in place.

Experiment with logging.  Investigate the logging commands and the log files within the directory {{File{/var/log/}}}.
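
For example, one quick way to confirm forwarding is working (assuming you restarted rsyslog on the www VM after appending the lines above; the message text is just a placeholder):
{{{
# on the www VM, write a test entry to syslog
logger "test message from www"

# on the core VM, the forwarded entry should appear in the local log
tail /var/log/messages
}}}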


!!! Also complete and submit the [[Lab 47|labs/lab47.pdf]] verification worksheet.  This submitted worksheet will indicate your VM is online and ready to be evaluated for this lab.
!![[Lab 48 - Monitoring disk usage with Nagios]]
Assigned [[Week 12, Part 2]]

Naemon infrastructure monitoring checks are performed through a series of plugins.  Naemon plugins are shell scripts or binary executables which perform their specific check and return an exit status and their results to the Naemon service.

Currently the majority of our naemon checks are for services running on our ~VMs.  The Naemon plugins interact with these services in a normal fashion and record their findings for display on the web interface.  Occasionally it is necessary to examine a system internally, beyond the reach of our standard external plugins.  The Nagios Remote Plugin Executor (NRPE) is a service which runs on a monitored system to give a central Naemon server the ability to execute internal plugins and examine the target system from the inside.  The Naemon server communicates with NRPE and requests that it run a local plugin residing on the target system.  NRPE then returns the results of that plugin to the Naemon monitoring service.

Certain checks, such as those examining the amount of free space available on a system, can only be performed via NRPE and a plugin residing on the system being checked.  Monitoring the amount of free space in your log and home directory partitions is of special importance in order to prevent log information loss or user data loss.

Install NRPE and the necessary naemon plugin on each of your production ~VMs.  Configure NRPE to grant our Naemon server permission to access it and configure the disk check plugin to monitor the /var/log and /home filesystems.


!!! 1. Install the EPEL repository

Install the Extra Packages for Enterprise Linux (EPEL) yum repository.  The extra Nagios utilities and plugins are not available via the standard yum repo that ships with ~CentOS.

{{Command{yum install epel-release}}}


!!! 2. Install the nrpe service and nagios disk check plugin via yum:

{{Command{yum install nrpe nagios-plugins-disk}}}


!!! 3. Edit the {{File{/etc/nagios/nrpe.cfg}}} config file:

Search for the ''allowed_hosts'' configuration directive and grant the local system and the Naemon server permission to access it:
{{{
allowed_hosts=127.0.0.1,10.1.22.3
}}}

Add these three lines to the end of the command definitions (towards the bottom), creating disk check commands for /, /var/log, and /home.  The Naemon server will execute these configured commands via the NRPE service.
{{{
command[check_disk_root]=/usr/lib64/nagios/plugins/check_disk -w 15% -c 8% -p /
command[check_disk_log]=/usr/lib64/nagios/plugins/check_disk -w 15% -c 8% -p /var/log
command[check_disk_home]=/usr/lib64/nagios/plugins/check_disk -w 15% -c 8% -p /home
}}}

The disk check command for /home is only necessary on the files VM, though it may be included in the config file on all ~VMs for consistency.  


!!! 4. Start the NRPE service:

Start on boot: {{Command{ systemctl enable nrpe }}}
Start now:  {{Command{ systemctl start nrpe }}}
Verify: {{Command{ systemctl status nrpe }}}


!!! 5. Install the NRPE Nagios plugin

This is the plugin used by the Nagios server to call remote commands via NRPE.  Normally this plugin is only installed on the Nagios server.  We're installing it on our ~VMs for testing.

{{Command{ yum install nagios-plugins-nrpe }}}


!!! 6.  Test

Execute the nagios plugin to test your NRPE instance.  The string returned is what would be reported back to Nagios and what will be displayed on the Nagios web interface.

{{Command{ /usr/lib64/nagios/plugins/check_nrpe  -H 127.0.0.1 -c check_disk_root }}}
{{Command{ /usr/lib64/nagios/plugins/check_nrpe  -H 127.0.0.1 -c check_disk_log }}}
{{Command{ /usr/lib64/nagios/plugins/check_nrpe  -H 127.0.0.1 -c check_disk_home }}}
!! [[Lab 48 - VM Lockdown - Secure your VMs]]
Assigned [[Week 13, Part 1]]

!!! Add user accounts to all ~VMs

Add two local user accounts to your ~VMs
* First account - set the user name to your campus username
** UID = 1000
** GID = 100
** Set a valid password
** Create a home directory within {{File{/home/}}}
** Copy the environment configuration files from {{File{/etc/skel/}}} to the new home directory
* Second account - username = nmerante
** UID = 7289
** GID = 100
** Create a home directory within {{File{/home/}}}
** Copy the environment configuration files from {{File{/etc/skel/}}} to the new home directory
** Copy my SSH public key (see below) to the user's {{File{~/.ssh/authorized_keys}}} file
*** You will likely need to create the directory and file
*** Be sure you understand how SSH key-based authentication works.
** Use this password hash:  
{{{
$6$hiaEgh6A$cEew6uUV8v5IBrwIMRahAyoOlgnKOaonnFx4sCzW4bu6mr17/2LcSdKknVa0GuytKqby391Z3p03FNelrNGD2.
}}}

* Verify permissions:
** Both users' home directories and all files below them must be owned by the user and GID 100
** Each user's home directory must have proper directory permissions - for SSH to function, it must not be writable by the group or others.
* Verify ~SELinux
** ~SELinux must be disabled for SSH public key authentication to function properly
** Edit {{File{/etc/selinux/config}}} and change {{Monospaced{enforcing}}} to {{Monospaced{disabled}}} on line #7 to disable ~SELinux on system startup
** Execute {{Command{setenforce 0}}} to disable ~SELinux for the current boot
** This may already have been completed.

My SSH public key
{{{
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBVLQcFklXcim/xylMML4QnLy4iuzrdgOUWivktOAlNX nmerante@shell.ci233.net
}}}

{{Note{''Note:'' You can test logins to your ~VMs using my user account by creating your own SSH keypair and adding your SSH public key to the {{File{~/.ssh/authorized_keys}}} file in my home directory on your VM.  See the directions in [[Working more efficiently with GNU screen & SSH keys]] for how to create an SSH keypair.  The {{File{authorized_keys}}} file can contain multiple public keys.  Any of the corresponding private keys will be accepted for login.}}}


!!! Disable direct root login via SSH on all ~VMs

# Adjust the sshd configuration to disable direct root logins.  All users must first log in with a regular account and then elevate privileges.
** Look for the {{Monospaced{~PermitRootLogin}}} configuration option in {{File{/etc/ssh/sshd_config}}}
# Adjust PAM to require wheel group membership in order to su to root
** Look in {{File{/etc/pam.d/su}}}
# Don't forget to add both user accounts to the wheel group
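
A minimal sketch of the pieces involved, assuming the stock file locations on our ~VMs (the exact lines are for you to locate and understand):
{{{
# /etc/ssh/sshd_config -- disable direct root logins, then restart sshd
PermitRootLogin no

# /etc/pam.d/su -- uncomment this line to require wheel membership for su
auth           required        pam_wheel.so use_uid

# add both user accounts to the wheel group
usermod -aG wheel your_username     # replace with your campus username
usermod -aG wheel nmerante
}}}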

{{Warning{''Warning:'' When messing with authentication, it's always wise to verify everything works before logging out.  Open a new putty window, ssh in, and elevate up to a root prompt before disconnecting from your original putty session.  Otherwise, if you log out and something is broken, you may have difficulty accessing the system.}}}

!!! Verification Worksheet

Also complete and submit the [[Lab 48|labs/lab48.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated for this lab.
!![[Lab 49 - sudo]]:  apache configuration access
Assigned [[Week 13, Part 1]]

We would like to provide the webmaster the ability to update the Apache configuration and restart the service on the web server virtual machine without granting full root level access.  The {{Command{sudo}}} and {{Command{sudoedit}}} utilities can be used to accomplish this.

!!! Create a webmaster user on your web server VM
* username = wes
* uid = 2000
* gid = 100
* Fully configure the environment for this user

!!! Create a new group for the webmasters
* group name = webmaster
* gid = 1000
* add wes to this group

!!! Configure {{Command{sudo}}} / {{Command{sudoedit}}} to:
# Grant the user ''wes'' the ability to edit the primary apache configuration file
# Grant the user ''wes'' the ability to execute the {{Command{apachectl}}} command as root.
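
A sketch of what the {{File{/etc/sudoers}}} entries might look like, edited with {{Command{visudo}}} (the {{Command{apachectl}}} path is an assumption based on the stock httpd package; verify it on your VM):
{{{
# allow wes to edit the main Apache configuration with sudoedit
wes   ALL = sudoedit /etc/httpd/conf/httpd.conf

# allow wes to run apachectl as root
wes   ALL = (root) /usr/sbin/apachectl
}}}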

{{Warning{''Warning:'' Be sure you understand why {{Command{sudoedit}}} is used for modifying root-owned files instead of just {{Command{sudo}}} followed by an editor, eg: {{Command{sudo vi /etc/httpd/conf/httpd.conf}}}.}}}


!!! Verification Worksheet

Also complete and submit the [[Lab 49|labs/lab49.pdf]] verification worksheet.  This submitted worksheet will indicate your VM is ready to be evaluated for this lab.
!! [[Lab 50 - Enable Two-Factor Authentication]]
Assigned [[Week 13, Part 1]]

Passwords have increasingly proven insufficient as the sole means of authentication.  They are too easily phished, captured via shoulder surfing or key loggers, or stolen in data breaches.  We also generally do a poor job of selecting them: password effectiveness is greatly reduced by reuse across multiple sites and by the choice of weak, low-quality passwords.  Strong, secure passwords should be unique and contain a minimum of 12 random characters across the full alphabetic, numeric, and symbol character space, which in turn makes them difficult to remember.

These shortcomings can be mitigated with the use of multifactor authentication.  Utilizing a hardware token is ideal.  Google recently [[made the news|https://krebsonsecurity.com/2018/07/google-security-keys-neutralized-employee-phishing/]] for introducing hardware tokens for their employees to access corporate resources with great success.  The Google-made [[Titan Security Key|https://cloud.google.com/titan-security-key/]] is now available for general purchase.  [[YubiKeys|https://www.yubico.com/store/]] are another popular alternative for general use.  Such keys can easily be used to add multi-factor authentication to operating system logins, services, or web sites after these systems are enabled to support hardware tokens.

Soft tokens are available as a free alternative to hardware tokens.  A soft token is a desktop or mobile application which generates a one-time PIN that can be entered along with a password to prove identity.  Instead of a token on your keychain, your desktop or phone becomes "something you have".  Multi-factor authentication should be used for any services where a higher level of security is warranted due to an increased exposure to attack.

Google Authenticator ([[Android|https://play.google.com/store/apps/details?id=com.google.android.apps.authenticator2&hl=en_US]] or [[Apple|https://itunes.apple.com/us/app/google-authenticator/id388497605?mt=8]]) is a popular soft token with wide support.  The Google Authenticator can be used as a second factor for ssh authentication to Linux servers.  

If not for our class's virtual lab infrastructure shielding us from the outside world, our class ~VMs would be exposed to the internet and open to attack.  Any externally accessible server with that level of exposure warrants the deployment of multi-factor authentication.

!!! We will now implement two-factor authentication using Google Authenticator for access to our core VM.

{{Note{''Note:''  Two-factor authentication with the Google Authenticator will be set up for your regular user, not the root account.}}}

* Ensure your user account exists on your core VM and you are able to authenticate with a password.
** None of this will work if your user account is not fully functional
* Ensure ~SELinux was properly disabled.
** This was a required step during a previous lab.
* Get started by installing the Google Authenticator app on your phone.  
* We must next generate a barcode or key to add to the Google Authenticator App.  
** Log in to your core VM via SSH and elevate to root
*** Ensure the {{Monospaced{epel-release}}} package is installed on your core VM
*** Install the {{Monospaced{google-authenticator}}} and {{Monospaced{qrencode-libs}}} packages on your core VM
** Exit the root login and log in to your core VM as a regular user
*** Run the command {{Command{google-authenticator}}} to initialize the token
*** Answer ''y'' to the question: ''Do you want authentication tokens to be time-based (y/n)''

You will be presented with a QR code to scan from the Google Authenticator app on your phone along with a secret key and a series of emergency scratch codes.  The secret key can be used to add this account to the Google Authenticator in case you are unable to scan the barcode.  Emergency scratch codes should be stored somewhere safe and are used to authenticate in case you lose your phone.
* Save the secret key.  We'll need it later.

Next, on your phone, launch the Google Authenticator app and choose the option to scan a barcode or enter a key and provide the appropriate input.

[img[img/googleauth1.png]]

Return to your VM and answer the remaining questions:

{{Monospaced{Do you want me to update your "/home/nmerante/.google_authenticator" file? (y/n) ''y''}}}

{{Monospaced{Do you want to disallow multiple uses of the same authentication token? This restricts you to one login about every 30s, but it increases your chances to notice or even prevent man-in-the-middle attacks (y/n) ''y''}}}

{{Monospaced{By default, a new token is generated every 30 seconds by the mobile app. In order to compensate for possible time-skew between the client and the server, we allow an extra token before and after the current time. This allows for a time skew of up to 30 seconds between authentication server and client. If you experience problems with poor time synchronization, you can increase the window from its default size of 3 permitted codes (one previous code, the current code, the next code) to 17 permitted codes (the 8 previous codes, the current
code, and the 8 next codes). This will permit for a time skew of up to 4 minutes between client and server. Do you want to do so? (y/n) ''n''}}}

{{Monospaced{If the computer that you are logging into isn't hardened against brute-force login attempts, you can enable rate-limiting for the authentication module. By default, this limits attackers to no more than 3 login attempts every 30s. Do you want to enable rate-limiting? (y/n) ''n''}}}

{{Warning{''Warning:'' Answering no to the last question is a poor security choice.  If we were implementing this in a production environment we would answer yes to enable rate-limiting.  We are only answering no because we are testing something new and do not want to lock ourselves out in the process.}}}

The file {{File{~/.google_authenticator}}} will contain your 2FA configuration.

You should now have the Google Authenticator app installed on your phone and an account configured for use.  Next we must configure the operating system to require this second form of authentication for SSH logins.  We will not modify the configuration for Console logins, so if things go wrong we can always log in through the Proxmox console to fix it.

!!! Configure the core server to require two-factor authentication

* Escalate to root privileges

* Edit the file {{File{/etc/pam.d/sshd}}}  and add the following line to the bottom:

{{{
auth required pam_google_authenticator.so nullok
}}}

* Edit the file {{File{/etc/ssh/sshd_config}}} and search for //~ChallengeResponseAuthentication// .  Ensure the value is set to ''yes'':

{{{
ChallengeResponseAuthentication yes
}}}

* Save and close the file, then restart the sshd service:
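
For example, using the standard service name on our ~VMs:
{{{
systemctl restart sshd
}}}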


Finally, ''without logging out'', attempt to log in to your core VM from itself.  Launch the Google Authenticator App to generate a new token.  When making changes to remote connection services, we do not want to log out until we can verify those changes are functioning properly.  If we disconnect and something went wrong, we might end up locked out!

[img[img/googleauth2.jpg]]

Logging in with two-factor authentication:
{{{
[nmerante@core ~]$ ssh localhost -l nmerante
Password:
Verification code:
Last login: Sun Apr 19 00:22:40 2020 from localhost
[nmerante@core ~]$
}}}

With the Google Authenticator changes in place, I'm prompted for my password as usual along with the verification code from the Authenticator App.  ''Note:'' Each code is valid only once to prevent replay attacks.  Once you log in, you may need to wait up to 30 seconds for a new code to be generated before you can log in again.


!!! Verification Worksheet

Also complete and submit the [[Lab 50|labs/lab50.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated for this lab.
!! [[Lab 51 - Host-based Firewalls]]
Assigned [[Week 14, Part 1]]

!!! Implement a host-based firewall on your ~VMs

* Complete [[Lab 51|labs/lab51.pdf]] and upload it to the class shell server
* Take note of the state of your services in Naemon.  It's always a good idea to have a known baseline of what things look like before making network changes.  Taking a screenshot may be helpful.
** If something is down after you make changes and you don't know what things looked like before, you won't know if your change was the reason for the outage.
* Enable the firewalld service so it starts on boot and start the service now
* Request a scan of your services on Naemon.  Take note of any changes to the alarms.
* Use the {{Command{firewall-cmd}}} command to add the firewall rules you identified in the Lab 51 PDF.
* Recheck your services in Naemon and ensure all new alarms have cleared.
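
A minimal sketch of the workflow, assuming HTTP is one of the services you identified (the actual services and ports come from your Lab 51 answers):
{{{
# start the firewall now and on every boot
systemctl enable firewalld
systemctl start firewalld

# permanently allow a service, then reload the running firewall
firewall-cmd --permanent --add-service=http
firewall-cmd --reload

# review the active rules
firewall-cmd --list-all
}}}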

Instructions for accessing Naemon can be found on the [[Tunnels & Proxies with SSH]] page.
!![[Lab 52 - Bring Files VM online]]
Assigned [[Week 15, Part 1]]

Bring your files VM online:
* A new VM was added for you
* Assign it the 4th IP address in your range
* Add the hostname {{Monospaced{''files.//username//.ci233.net''}}} to the file {{File{/etc/hostname}}}
* Reboot the VM to ensure all network settings were properly applied
* Install the standard software packages
* Apply any outstanding updates
* Configure NTP to synchronize time against your core VM and ensure time is fully synchronized
* Apply the steps in [[Lab 48 - VM Lockdown - Secure your VMs]] to harden this VM.
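
For example, one way to set the hostname ({{Command{hostnamectl}}} writes the file {{File{/etc/hostname}}} for you):
{{{
hostnamectl set-hostname files.username.ci233.net   # replace username with your campus username
cat /etc/hostname                                   # verify the file was updated
}}}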


!!! Verification Worksheet

Also complete and submit the [[Lab 52|labs/lab52.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated for this lab.
!![[Lab 53 - Storage Expansion]]
Assigned [[Week 15, Part 1]]

Some systems need additional storage beyond what was initially provisioned.  Here, we have a file server VM that was created with an additional disk.  We now need to make that additional disk available to the operating system for storing additional data.

Perform the following steps on your files VM.

!!! Observe available storage devices

The {{Command{lsblk}}} command is a quick way to visualize all storage devices available to a system.  Here, we can see that there are three unallocated drives - {{File{sdb}}}, {{File{sdc}}}, and {{File{sdd}}}.  We'll use {{File{sdb}}} for this lab and leave {{File{sdc}}} and {{File{sdd}}} alone for now.
{{{
[root@files ~]# ''lsblk''
NAME               MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda                  8:0    0   16G  0 disk 
|-sda1               8:1    0    1G  0 part /boot
`-sda2               8:2    0   15G  0 part 
  |-almalinux-root 253:0    0 13.4G  0 lvm  /
  `-almalinux-swap 253:1    0  1.6G  0 lvm  [SWAP]
sdb                  8:16   0    2G  0 disk 
sdc                  8:32   0    2G  0 disk 
sdd                  8:48   0    2G  0 disk 
sr0                 11:0    1 1024M  0 rom  
}}}

!!! Create sdb1 Partition

It's generally preferred to create partitions on the drives instead of using the bare device.  Partitions are logical divisions of the physical disk that will then hold the filesystem.  Here, we're going to devote the entire disk to a single partition and a single filesystem.  Creating multiple partitions on a disk allows it to hold separate filesystems.  In some instances, physically dividing groups of files into separate filesystems is preferred.  One example is logs.  If you have a system, such as a webserver, that may generate a lot of logs, it's wise to store those logs on their own filesystem.  If everything is stored on the same filesystem, excessive logs could fill the disk and interfere with other services' ability to store new data.

Refer to the //Storage Layers// diagram.  We'll be following the path on the left from Storage Devices to Partitions to Filesystems.
[img[img/storage-layers.jpg]]

Duplicate this interaction with the {{Command{parted}}} command to create a new disk label and new partition.  The first {{Command{print}}} command shows the disk is currently bare.
{{{
[root@files ~]# parted /dev/sdb
GNU Parted 3.1
Using /dev/sdb
Welcome to GNU Parted! Type 'help' to view a list of commands.
(parted) print                                                            
Error: /dev/sdb: unrecognised disk label
Model: QEMU QEMU HARDDISK (scsi)                                          
Disk /dev/sdb: 2147MB
Sector size (logical/physical): 512B/512B
Partition Table: unknown
Disk Flags: 
(parted)                                               
}}}
{{{
(parted) mklabel gpt                                                      
(parted) mkpart                                                           
Partition name?  []? storage                                              
File system type?  [ext2]? xfs                                            
Start? 1                                                                  
End? 100%                                                                 
(parted) quit                                                             
Information: You may need to update /etc/fstab.

[root@files ~]#
}}}


Now run {{Command{ lsblk }}} to verify the new partition was created.  It's always wise to add verification steps as you proceed instead of just blindly assuming everything is working as it should.  If you compare this output to the one above, you'll see that the {{File{ sdb1 }}} partition has been created.
{{{
[root@files ~]# lsblk                                                     
NAME               MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda                  8:0    0   16G  0 disk 
|-sda1               8:1    0    1G  0 part /boot
`-sda2               8:2    0   15G  0 part 
  |-almalinux-root 253:0    0 13.4G  0 lvm  /
  `-almalinux-swap 253:1    0  1.6G  0 lvm  [SWAP]
sdb                  8:16   0    2G  0 disk 
`-sdb1               8:17   0    2G  0 part 
sdc                  8:32   0    2G  0 disk 
sdd                  8:48   0    2G  0 disk 
sr0                 11:0    1 1024M  0 rom  
}}}


!!! Create the filesystem

We can see from the {{Command{ lsblk }}} command that the new partition, {{File{sdb1}}}, has been successfully created.  Now we must put a filesystem on it.  Partitions are the physical divisions of a disk.  Filesystems are the data structures the operating system interacts with in order to store files.
{{{
[root@files ~]# mkfs.xfs /dev/sdb1
meta-data=/dev/sdb1              isize=512    agcount=4, agsize=130944 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=523776, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
}}}


!!! Create the mount point

A mount point is a representation of the filesystem that we can interact with.  On Windows systems, mount points are generally drive letters, like C:\ or D:\.  In the Unix/Linux world, everything is one big filesystem tree.  Linux mount points are directories on that tree.  We identify a directory to mount our new filesystem to, and then any interaction with that directory and all items within it will be directed to our new disk volume.  Here, we want to make our new disk available to the system at the directory {{File{ /opt/storage/ }}}.

We first need to ensure the new mount point exists:
{{{
[root@files ~]# mkdir /opt/storage
}}}


!!! Edit the filesystem table

The ''f''ile''s''ystem ''tab''le, {{File{/etc/fstab}}}, is the configuration file which specifies which disk volumes are mounted at system startup.  Add your new disk volume to the file so it is mounted on boot.

Here's a copy of my {{File{ /etc/fstab }}} file.  The last line is the one you need to copy to yours.  Each line contains:
* the physical volume, {{File{ /dev/sdb1 }}}
* the mount point, {{File{/opt/storage }}}
* the filesystem type, {{Monospaced{ xfs }}}
* any special mount options.  Here, just the {{Monospaced{ defaults }}}
* a binary value ({{Monospaced{0}}} or {{Monospaced{1}}}) to indicate whether the filesystem should be backed up.  This is largely deprecated.
* the order in which filesystem checks ({{Command{fsck}}} command) should be run.  A value of {{Monospaced{ 0 }}} disables these checks
{{{
[root@files ~]# cat /etc/fstab

#
# /etc/fstab
# Created by anaconda on Fri Mar 13 00:03:20 2020
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos-root /                       xfs     defaults        0 0
UUID=f68b9069-7271-48de-b968-00d62e825144 /boot                   xfs     defaults        0 0
/dev/mapper/centos-swap swap                    swap    defaults        0 0

/dev/sdb1               /opt/storage      xfs   defaults        0 0
}}}


!!! Mount the new filesystem

Changes to the {{File{/etc/fstab}}} file should be tested and filesystems mounted with the {{Command{ mount -a }}} command.  This will catch any errors in the file.  If there is an error mounting a filesystem on system startup, the OS will not fully load and your only option will be to fix the problem on console.  This can be a nasty surprise if you're remote and don't have easy access to console.

The {{Command{df -h}}} command adds a verification step that the filesystem is fully mounted and accessible.  The old proverb //trust, but verify// must apply to everything you do.
{{{
[root@files ~]# mount -a

[root@files ~]# df -h
Filesystem                  Size  Used Avail Use% Mounted on
devtmpfs                    366M     0  366M   0% /dev
tmpfs                       386M  8.0K  386M   1% /dev/shm
tmpfs                       386M  5.4M  381M   2% /run
tmpfs                       386M     0  386M   0% /sys/fs/cgroup
/dev/mapper/almalinux-root   14G  3.0G   11G  23% /
/dev/sda1                  1014M  374M  641M  37% /boot
tmpfs                        78M     0   78M   0% /run/user/0
/dev/sdb1                   2.0G   47M  2.0G   3% /opt/storage
}}}


!!! Verification worksheet

You should now have successfully added a new storage volume to your files server VM.  Complete and submit the [[Lab 53|labs/lab53.pdf]] verification worksheet when you are ready for review.
!![[Lab 54 - Logical Volume Manager]]
Assigned [[Week 12, Part 2]]

Complete the steps in the [[Lab 54 Worksheet|labs/lab54-instructions.pdf]] on the files VM to become familiar with the Linux logical volume manager.

Add additional filesystems to your auth VM
* See the last page in the [[Lab 54 Worksheet|labs/lab54-instructions.pdf]]
* Complete the [[Lab 54 Deliverable|labs/lab54.pdf]] and submit this PDF to {{File{/opt/pub/ci233/submit/}}} on the class shell server
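
For reference, a generic sketch of the LVM layering with hypothetical device and volume names; the worksheet contains the authoritative steps:
{{{
pvcreate /dev/sdc                   # initialize a disk as a physical volume
vgcreate vg_data /dev/sdc           # create a volume group from it
lvcreate -n lv_logs -L 1G vg_data   # carve a logical volume out of the group
mkfs.xfs /dev/vg_data/lv_logs       # put a filesystem on the logical volume
}}}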

/% This lab will involve restarting your file server VM.  Be sure the necessary services are configured to start on boot. %/
!![[Lab 55 - Containerization with Docker]]
Assigned [[Week 14, Part 1]]

Defining infrastructure as code is another advantage of Docker containerization.  With this concept, the services we need to run on our server are all clearly defined in a single configuration file.  Once defined, these services can be brought online with just a few commands.  Defining our infrastructure in this fashion makes it much easier to document our server configuration, track our infrastructure changes over time, and replicate our configuration elsewhere.  

Recall the work we previously did to install the database and wiki.  Several commands were executed to install and start the services, configure Apache, create the database accounts, download & unpack the wiki, and bring everything online for use.  Now, a single infrastructure definition file will contain our requirements to run the wiki and database in separate Docker containers and make them available for use.

Complete the following steps on your www VM:


!!! 1. Create our working directory

Create the directory {{File{/opt/docker/}}} to use as the storage location for our files.  All work will be performed within this directory.


!!! 2. Define the infrastructure

A {{File{docker-compose.yml}}} file contains the definition for our services and is another way to launch and maintain Docker containers.  This file also fully documents the requirements and changes being made to support these services.  Complex {{File{docker-compose.yml}}} files can be used to define an entire server running several different services within containers.  Should we need to reinstall or rebuild the underlying OS, our containers and all dependencies are fully documented.  It would just be a matter of copying our {{File{docker-compose.yml}}} file and all required data volumes to the new system in order to quickly replicate it.

Create the {{File{/opt/docker/docker-compose.yml}}} file containing the following contents.  Adjust the file to set your own database username and password.

{{{
[root@www docker]# cat docker-compose.yml
version: '3'

networks:
  wiki_network:

services:
  mediawiki:
    image: mediawiki
    container_name: mediawiki
    ports:
      - 8080:80
    links:
      - mariadb
    networks:
      - wiki_network
    volumes:
      - /var/www/html/images
      # After initial setup, download LocalSettings.php to the wiki directory, remove the below comment, and restart the container
      # - ./wiki/LocalSettings.php:/var/www/html/LocalSettings.php
    restart: always

  mariadb:
    image: mariadb
    container_name: mariadb
    networks:
      - wiki_network
    volumes:
      - ./mariadb:/var/lib/mysql
    environment:
      # See https://phabricator.wikimedia.org/source/mediawiki/browse/master/includes/DefaultSettings.php for other possible settings
      MYSQL_DATABASE: mediawiki
      MYSQL_USER: wikiuser
      MYSQL_PASSWORD: example_password
      MYSQL_RANDOM_ROOT_PASSWORD: 'yes'
    restart: always
}}}

{{Warning{''Warning:'' Indentation within the docker-compose file is very specific.  Like Python, indentation is used to establish nesting of configuration items.  The indentation levels of this file must be properly preserved.}}}


!!! 3. Create database directory

As you can see from the definition above, the database files are stored in a local directory that is easily accessible to us.  This also makes it easier to back up just the database files.

Create the directory {{File{/opt/docker/mariadb/}}}.  Any storage must be saved outside of the container, either to a local directory like this one or to a Docker volume.  Storing files which change, like database files, outside of the container ensures the data survives if the container is recreated.  If the database files were stored within the container, all data would be lost if the container were rebuilt or upgraded to a newer version.

Local storage is defined with the volumes tag, as seen in the example above.


!!! 4. Start containers 

Run the command {{Command{docker-compose up -d}}} to bring our containers online.  Be sure you're currently in the {{File{/opt/docker/}}} directory.


!!! 5. Validate images

The images should have been downloaded from Docker Hub and should now be visible:

{{{
[root@www docker]# docker images
REPOSITORY   TAG       IMAGE ID       CREATED       SIZE
mariadb      latest    1de5905a6164   4 days ago    410MB
mediawiki    latest    c8ce33ea98e9   2 weeks ago   809MB
}}}


!!! 6. Validate running containers

We should also see the new containers fully online.

{{{
[root@www docker]# docker-compose ps
  Name                 Command               State                                                Ports
-----------------------------------------------------------------------------------------------------------------------------------------------------
mariadb     docker-entrypoint.sh mariadbd    Up      3306/tcp
mediawiki   docker-php-entrypoint apac ...   Up      0.0.0.0:8080->80/tcp,:::8080->80/tcp
}}}


!!! 7. Observe database files:

Run the command {{Command{ls -l mariadb}}} to view the database files.


!!! 8. Obtain the generated root password

Our database service is configured to generate a unique root password the first time the container starts up.  Check the Docker logs to find the new root password and save this value in case we need it later.

This command demonstrates finding my database root password.  Yours will be different:

{{{
[root@www docker]# docker logs mariadb 2>&1 | grep ROOT
2022-05-01 01:45:43+00:00 [Note] [Entrypoint]: GENERATED ROOT PASSWORD: izp#st9p7`a_+Y<@:xIc&v=lEF`NG~%G
}}}


!!! 9. Validate database connection

Before proceeding, it would be prudent to ensure you are able to access the ~MariaDB database running inside the container.  Execute the following command to log in to the database.
* {{Monospaced{ ''-u'' }}} refers to the value set in the {{Monospaced{~MYSQL_USER}}} environment variable in your {{File{docker-compose.yml}}} file.
* The final argument, {{Monospaced{//mediawiki//}}}, is the name of the database you specified in the {{Monospaced{~MYSQL_DATABASE}}} variable.
* The password you are prompted to enter will be the value set in the {{Monospaced{~MYSQL_PASSWORD}}} variable.

{{{
[root@www docker]# docker exec -ti mariadb mariadb -u wikiuser -p mediawiki
Enter password:
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 3
Server version: 10.7.3-MariaDB-1:10.7.3+maria~focal mariadb.org binary distribution

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [mediawiki]> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mediawiki          |
+--------------------+
2 rows in set (0.002 sec)

MariaDB [mediawiki]> exit;
}}}


!!! 10. Configure wiki

Your services should now be online with a copy of the ~MediaWiki and required database running in containers.  Access your wiki and configure it, similar to how we did it last time.  The URL {{Monospaced{htt&#112;:&#47;/10.1.26.''X'':8080}}} will load your new containerized wiki.  Replace the ''X'' with the last octet for your web server VM.

Once the site is loaded, go through the wiki configuration as you did last time and generate the {{File{~LocalSettings.php}}} file.  
* Be sure to set {{{Database Host}}} to {{{mariadb}}} when prompted for the database information


!!! 11. Add {{File{~LocalSettings.php}}} configuration file

a. Create the directory {{File{/opt/docker/wiki/}}} on your www VM and upload your new {{File{~LocalSettings.php}}} file to it.
b. Uncomment the following line in your new {{File{docker-compose.yml}}} file:

This will make the wiki configuration file available within the container:

{{{
      # - ./wiki/LocalSettings.php:/var/www/html/LocalSettings.php
}}}

c. Restart the wiki container to activate the volume change.  This command must be executed from within the {{File{/opt/docker/}}} directory.

{{Command{docker-compose up -d mediawiki}}}


!!! 12. Validate your new, containerized wiki 

Access your wiki again through the tunnel to ensure everything is set up correctly and it is online for use.


!!! 13. Submit the deliverable

* Complete [[Lab 55|labs/lab55.pdf]] and submit this PDF to {{File{/opt/pub/ci233/submit/}}} on the class shell server
** This lab is due Saturday
Mastery of this subject material will only come with practice. To that end, this will be a very hands-on and practical course. Expect graded lab assignments regularly to provide ample practice with the assigned material.  Properly completing lab assignments on time is necessary to receive a good grade for this course. Not completing lab assignments at all will likely result in a failing grade.

Any deliverables will be collected for grading on their due date. Late assignments may be accepted, subject to a time-dependent grade penalty of up to 50%. The presentation of submitted assignments will also impact your grade.

{{Note{''Note:'' It is much better to have correct work submitted late than obviously wrong or incomplete work submitted on time.  If you're having trouble with some of the material and need more time, please let me know and we can discuss adjusting due dates.  Submitting poor quality work to meet a due date is not a wise professional strategy.}}}


!! Submitting Homework Assignments
Homework assignments are to be uploaded to the class shell server using a file transfer program like ~WinSCP and saved to the directory {{File{/opt/pub/ci233/submit/}}}. I will then grade/annotate your work and return the files to you for review. Most homework assignments will be PDF forms to complete. Download the lab PDF and open it in [[Acrobat Reader|https://get.adobe.com/reader/]].  ''Do not use the PDF viewer in your web browser''.  It will not properly save the file and you will upload a blank document.  Grades will be posted to Blackboard.

After downloading the PDF assignment and opening the file in [[Acrobat Reader|https://get.adobe.com/reader/]], add your name to the top, fill in your responses, then save & close the file.  It would be wise to reopen the PDF in Acrobat Reader to make sure everything saved correctly before uploading to the server.

Files must be named appropriately so we don't have filename collisions among everyone's uploaded files. Rename your PDF document following this naming convention: ''ci233-lab#-username.pdf''
* replace # with the lab number
* replace username with your campus username

Uploaded labs ''must'' contain your name at the top of the document and their file names ''must'' follow this file name format __exactly__ in order to be graded. This includes case - all letters must be lowercase. The Unix operating systems are case sensitive, so {{File{~CI233-lab1-jdoe12.pdf}}} is a different file than {{File{ci233-lab1-jdoe12.pdf}}} and would not be accepted.  

Note: The Microsoft Windows operating system hides file extensions by default.  This is a terrible setting for a security practitioner and should be disabled.  A common mistake is to fail to take this into account and upload files with a double extension, such as {{File{ci233-lab1-jdoe12.pdf.pdf}}}.  This file would not be named correctly and thus not accepted for review.

!! How to upload your lab assignments:
--A video will be posted here demonstrating the process in the coming days.--  Please let me know if you have trouble figuring this out.

!! Late Penalties
Point penalties for late assignments will be assessed as follows:

|!Penalty|!Condition|
| 0 |Sneak it in past the due date but before I grade the labs|
| 10% |Submitted after the batch has been graded|
| 20% |Submitted after graded labs have been returned|
| 30% |Submitted after we've reviewed a lab|
| 40% |Submitted after I've posted a review video or we've held an online meeting to discuss a lab.|

{{Warning{''Note:'' Labs 1 through 25 will not be accepted after the last date to Withdraw from the course unless prior approval is obtained.}}}

!! The workflow
# You upload a completed lab PDF to {{File{/opt/pub/ci233/submit/}}}
# Every hour a script will collect new submissions which are properly named and copy them to the grading queue, {{File{/opt/pub/ci233/queue/}}}.
# The queue will be synchronized to my tablet for review. Only new files will be copied.
# Any annotations will be recorded and synchronized back to the shell server, saved to the directory {{File{/opt/pub/ci233/graded/}}}.
# Grades are entered to Blackboard.
# After grades are entered, the script will move graded labs ready to be returned to {{File{/opt/pub/ci233/returned/}}}. You may download them from this directory to see my annotations.
The directories {{File{/opt/pub/ci233/queue/}}} and {{File{/opt/pub/ci233/graded/}}} are staging directories in the workflow pipeline.  You can view the contents of these directories but cannot write to them.  Your access is only so you can have full visibility on where your labs reside in the workflow.

tl;dr: You upload new labs to {{File{/opt/pub/ci233/submit/}}} and retrieve graded copies from {{File{/opt/pub/ci233/returned/}}}.
! Material

* Links:
** Read in [[The Linux Command Line|http://linuxcommand.org/tlcl.php]]: 
*** Chapter 3, pp 23 & 24
*** Chapter 4, pp 33 & 34
** Watch:
*** Links - https://www.youtube.com/watch?v=lW_V8oFxQgA

* File Globbing:
** Read in [[The Linux Command Line|http://linuxcommand.org/tlcl.php]]
*** Chapter 4, pp 25-27 (Wildcards)
** Watch:
*** File Globbing - https://www.youtube.com/watch?v=QIysdjpiLcA


! Links & File Globbing

!! Links

There are two different types of links within the Unix environment:  ''Hard links'' and ''Symbolic links''.

Using the following directory listing as an example:

{{{
[nmerante@shell dict]$ pwd
/usr/share/dict

[nmerante@shell dict]$ ls -l
total 9680
-rw-r--r--. 2 root root 4953680 Jun 10  2014 linux.words
-rw-r--r--. 2 root root 4953680 Jun 10  2014 wordlist
lrwxrwxrwx. 1 root root      11 Feb  4 21:07 words -> linux.words
}}}

We can identify the file named {{File{words}}} as a ''symbolic link'' due to the ''{{Monospaced{l}}}'' character at the beginning of the line.  A symbolic link is a special type of file that only contains the path to the file it is pointing to.  Also note the small file size.  This is another hint it may be a symbolic link.

''Hard links'' are directory entries which point to the same inode.  An inode is a filesystem data structure which contains information about the file and where its blocks can be found on the underlying storage medium.  Thus, hard links point directly to the same place on the disk.  We can tell that the files {{File{linux.words}}} and {{File{wordlist}}} are hard links because of the number ''2'' in the second column.  This is the link count.  It will increase as more hard links are created.  A file isn't truly deleted until its link count reaches zero.

{{{
[nmerante@shell dict]$ ls -li
total 9680
289731 -rw-r--r--. 2 root root 4953680 Jun 10  2014 linux.words
289731 -rw-r--r--. 2 root root 4953680 Jun 10  2014 wordlist
719507 lrwxrwxrwx. 1 root root      11 Feb  4 21:07 words -> linux.words
}}}

In the above output, adding the ''{{Monospaced{-i}}}'' flag to the {{Command{ls}}} command shows the inode number for the file.  We can see the files {{File{linux.words}}} and {{File{wordlist}}} are both hard links pointing to the same place on the disk because they both are pointing to the same inode number.

The textbook pages listed at the top will contain more information about these two link types.
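
Both kinds of links are created with the {{Command{ln}}} command.  A short sketch with hypothetical file names (run it in a directory you can write to):
{{{
echo "example" > original.txt
ln -s original.txt words.symlink   # symbolic link: a small file holding the target path
ln original.txt words.hardlink     # hard link: a second name pointing at the same inode
ls -li                             # -i displays the inode numbers; the hard links share one
}}}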


!! File globbing & wildcards

So far, when working with files we've specified one filename at a time on the command line. Other shell metacharacters exist to identify files by patterns in their filenames and work with them as a group. Suppose we want to move all files that end in ''.jpg'' to a particular location, or delete all files that contain the string ''temp'' in their filename. If there are thousands of them, it's going to be very tedious to list each file individually. Instead, we can use special file wildcard metacharacters to concisely identify these groups of files by common characteristics. This is referred to as ''filename substitution'' or ''file globbing''.


!! Filename substitution

Metacharacters associated with file names: &nbsp; {{Monospaced{''~ * ? [ ] [^ ] { }''}}}

* {{Monospaced{''*''}}} = match any sequence of 0 or more characters
* {{Monospaced{''?''}}} = match any single character.
** It's important to note the ''?'' is a mandatory position which must be filled. It's not optional like the {{Monospaced{''*''}}} is. So if you type {{Command{ls /bin/d??}}}, you'll see a list of all files in {{File{/bin/}}} which begin with a ''d'' and are exactly three letters in length. You will not see files which are shorter than three characters, such as the {{Command{df}}} command, or longer than three characters, such as the {{Command{diff}}} command.  Both ''?'' positions must contain a character.
* {{Monospaced{''[ ]''}}} - match any of the enclosed characters in the set (eg: ''[abcd]''), or match a range (eg: ''[a-z] [~A-Z] [0-9] [e-q]'')
** The {{Monospaced{''[ ]''}}} brackets are similar to the ''?'' in that they specify a single, mandatory character. Where the ''?'' wildcard can represent any character, the brackets allow us to be a little more specific with what that single character may be.
** The {{Monospaced{''-''}}} within the {{Monospaced{''[ ]''}}} specifies the range of characters based on its position in the [[ascii chart|img/ascii-chart.gif]].  For example, {{Monospaced{''[4-6]''}}} or {{Monospaced{''[;-?]''}}} to match the characters {{Monospaced{''; < = > ?''}}} (ascii 59 to ascii 63).
*** Ranges and lists of characters can be combined.  The globbing pattern {{Monospaced{''[ac5-8()]''}}} will match the letters {{Monospaced{''a''}}} and {{Monospaced{''c''}}}, the numbers {{Monospaced{''5''}}} through {{Monospaced{''8''}}}, and the two parentheses.
** {{Monospaced{''[^ ]''}}} - match any character //not// enclosed in the set or range (eg: ''[^abcd]'' or ''[^a-z]'').  The notation ''[! ]'' is sometimes used but not universally recognized.  Use ''[^ ]'' instead.  The labs will all use ''[^ ]''.
* {{Monospaced{''{ }''}}} - Brace Expansion.  Expand comma separated strings to create multiple text strings from a pattern. Example: {{Command{mkdir -p {one,two,three}/examples}}} will create the directories {{File{one/examples}}}, {{File{two/examples}}}, and {{File{three/examples}}}.

{{Note{''Note:'' Negation should only be used when it is the best possible method for solving the problem, not as a way to be lazy.  If the question asks to list a particular set of files, try to find a way to target just those files.  Negation is ideal when the question includes a negation term, such as the wording //except// or //do not//.  When negation is abused, files are often matched which were not intended to be.  }}}
{{Warning{''Warning:'' Try to be as specific as possible when you are using wildcards.  It's best practice to type out the static text and only use wildcards for the dynamic part of what you are trying to match.  For example, if I am trying to match the files {{File{data1.txt}}}, {{File{data2.txt}}}, {{File{data3.txt}}}, and {{File{data4.txt}}}, the best file globbing pattern would be {{Command{data[1-4].txt}}}.  It is as specific as possible and includes the static portions of the filename.  Using {{Command{data?.txt}}} would inadvertently match {{File{data5.txt}}} and {{Command{*[1-4].txt}}} could match something else entirely.  Even if those files are not currently in the directory, they might be later.  Don't be lazy with your file globbing patterns!}}}

!!! Examples - Display all files whose names:

Begin with the letter f: {{Command{ls f*}}}
&nbsp;&nbsp;&nbsp;(read as: list files which begin with an ''f'' followed by ''0 or more characters'')
Contain a number: {{Command{ls *[0-9]*}}}
&nbsp;&nbsp;&nbsp;(read as: list all files which may begin with ''0 or more characters'', followed by ''any number'', and end with ''0 or more characters'')
begin with an uppercase letter: {{Command{ls [~A-Z]*}}}
begin with the letter a, b, or c: {{Command{ls [abc]*}}}
begin with the letter a, b, or c and is exactly two characters in length: {{Command{ls [abc]?}}}
do not begin with the letter a, b, or c: {{Command{ls [^abc]*}}}
end with a number from 2 to 9 or a letter from w to z: {{Command{ls *[2-9w-z]}}}
are exactly two characters long and begin with a lowercase letter: {{Command{ls [a-z]?}}}
begin with string one, end with string three, and contain string two somewhere in between: {{Command{ls one*two*three}}}


{{Warning{''Warning:'' Working on the command line requires an eye for detail. We're starting to get to the point where that detail really matters. There's a huge difference between the commands {{Command{rm *lab6*}}} and {{Command{rm * lab6*}}}. One stray space and you're going to be in for some missing labs. Take a second look at your commands before executing them and be very deliberate with what you're running. Remember - Working on the command line is precise. Every character matters and we must have an eye for detail!}}}


!! Substitutions

Through use of shell metacharacters, substitutions are transformations performed by the shell on command line input prior to executing a command string. File globbing is one of the 5 types of shell substitutions.

It's important to understand the order of operations here. In the math formula 5 + 6 * 7, our calculations are not automatically performed left to right. There is a set order of operations that calls for the multiplication to be performed first. The same idea applies to entering command line input. First, all substitutions are performed by the shell, then your command string is executed. 

Consider the command {{Command{ls *.jpg}}}

The shell recognizes that we're performing a substitution (eg: {{File{*.jpg}}}) and replaces {{File{*.jpg}}} in the command string with a list of all files that match the pattern.
Next, the {{Command{ls}}} command is executed with the list of files as arguments

A great way to preview the result of any substitutions is with the {{Command{echo}}} command. The {{Command{echo}}} command repeats back to the screen whatever you give it as an argument. For example:

{{{
[nmerante@shell ~]$ echo hello ci233
hello ci233
[nmerante@shell ~]$ cd /opt/pub/ci233/data/lab9
[nmerante@shell lab9]$ echo rm IMG_126?.jpg
rm IMG_1260.jpg IMG_1261.jpg IMG_1262.jpg IMG_1263.jpg IMG_1264.jpg IMG_1265.jpg IMG_1266.jpg IMG_1267.jpg IMG_1268.jpg IMG_1269.jpg
}}}

So if I have a complex or risky substitution, I may want to prefix the command string with the {{Command{echo}}} command to preview it before it's executed:
eg: Change to {{File{/opt/pub/ci233/submit/}}} and run: {{Command{echo ls *lab[1-3]*}}} to see what substitution is being performed and the actual command string about to be executed. Don't forget to prefix it with {{Command{echo}}}!

These file globbing substitution examples are pretty tame, but this trick with the {{Command{echo}}} command will come in very handy later on when we get to more complicated substitutions.


| !Character | !Shortcut | !Most Useful |
| ~CTRL-C |Send interrupt signal to a running command (abort)| * |
|~|Clear entered command line text| |
| ~CTRL-A |Move cursor to beginning of command line| * |
| ~CTRL-E |Move cursor to end of command line| * |
| ~CTRL-L |Clear screen; move cursor to top of screen| * |
| ~ALT-B |Move one word backward on command line| |
| ~ALT-F |Move one word forward on command line| |
| ~CTRL-U |Erase line to the left| |
| ~CTRL-K |Erase line to the right| |
| ~CTRL-W |Erase a word to the left on command line| * |
| ~ALT-D |Erase a word to the right on command line| |
| ~CTRL-Y |Paste previously erased text| |
| ~CTRL-D |Send EOF signal, ending input| * |
|~|Erase character under cursor| * |
|~|Log out (when no other text is on the command line)| * |
| ~Shift-INS |Paste clipboard at cursor| * |
| ~Shift-PgUp |Scroll window up| |
| ~Shift-PgDn |Scroll window down| |
| Tab |Auto-complete command or file name| * |
| Up Arrow |Previous Command| * |
| Down Arrow |Next command| * |
| Page Up |Previous command search| * |
| Page Down |Next command search| * |

{{Note{''Note:'' The above key sequences were listed with uppercase letters for clarity.  It is not necessary to also press the shift key.}}}

!! Tab Completion

The tab key will auto-complete commands or file names, pausing when it reaches a decision point.  

If I type the letters ''ad'' on the command line and press tab, the shell will autocomplete it to the string ''add'', where it reaches a decision point and cannot proceed without more input.  If I press tab twice it will then show me the options I have to complete the command:
<<<
[root@shell data]# add
addgnupghome  addpart       addr2line     adduser
<<<

If I press the letter p and then tab again, the shell will know which command I'm looking for and auto-complete the command ''addpart''.

The same auto-completion can be used for files.  The path to the networking configuration file on Linux systems is rather long.  Try this scenario on the class shell server:
* Type {{Command{cat /etc/sysco}}} and press ''tab''.  The shell should autocomplete that to {{Command{cat /etc/sysconfig/}}}.
* We're at a decision point since there are many different ways we could proceed.  Type: {{Command{netw}}} and press tab.  The shell will autocomplete that to {{Command{cat /etc/sysconfig/network}}}.
* Press the {{Command{-}}} key and press tab again.  The shell will autocomplete that to {{Command{cat /etc/sysconfig/network-scripts/}}}.
* Type {{Command{ifcfg-eth}}} and press tab twice.  We are presented with the available options.
* Type {{Command{0}}} and hit enter to view the network configuration file.

Using tab helped me identify the available files and reduced the number of letters I needed to type to view the file.  It's slow at first, but once you get used to it, it greatly improves the speed and efficiency of using the shell and reduces the amount of information you have to remember.


!! Command recall

The page up and page down keys can be used to scroll through the recently used commands.  This isn't universal; the shell needs to be configured to support it, but it's supported by most systems out of the box.

If you have a long command string that wasn't used very recently, rather than pressing the up arrow several times to find it, you can enter the first few letters of that command and then ~Page-Up.  The shell will cycle through your recent commands which began with those letters.

For example, a few days ago I ran the command {{Command{fail2ban-client status sshd-root}}} to see how many systems were trying to break into the class shell server.  Rather than typing out that entire command (or having to remember it), if I enter the first few letters {{Command{fai}}} and then press ~Page-Up, the shell will search backward in my command history and bring me right to it.  If I used the up arrow, I'd first have to scroll through the hundreds of commands I may have entered since then.


!! Copy/Paste

In ~PuTTY and most other terminal emulators, highlighting text with the mouse will copy it to the clipboard.  Clicking the right mouse button will paste text from the clipboard into the terminal at the position of the cursor.  If you are connecting from a Linux host like Kali instead of Windows, clicking the middle mouse button or scroll wheel will paste text to the terminal.  Shift-Insert will also paste text from the clipboard into the terminal.

// //''Name:'' Calendar plugin
// //''Version:'' 0.1.0
// //''Author:'' SteveRumsby

// //''Syntax:''
// //<< {{{listTags tag //sort// //prefix//}}} >>

// //''Description:''
// //Generate a list of tiddlers tagged with the given tag.
// //If both //sort// and //prefix// are omitted the list is sorted in increasing order of title, with one tiddler per line.
// //If //sort// is specified the list is sorted in increasing order of the given tiddler property. Possible properties are: title, modified, modifier.
// //If //prefix// is specified the given string is inserted before the tiddler title. The insertion happens before the text is wikified. This can be used to generate bulleted or numbered lists.

// //''Examples:''
// //<< {{{listTags usage}}} >> - generate a plain list of all tiddlers tagged with tag //usage//, sorted by title
// //<< {{{listTags usage modified}}} >> - the same list, with most recently modified tiddlers last
// //<< {{{listTags usage title #}}} >> - generate a numbered list of tiddlers tagged with //usage//, sorted by title

// //''Code section:''
version.extensions.listTags = {major: 0, minor: 1, revision: 0, date: new Date(2005, 6,16)};

config.macros.listTags = {
text: "Hello"
};

config.macros.listTags.handler = function(place,macroName,params)
{
 var tagged = store.getTaggedTiddlers(params[0], params[1]);
 var string = "";
 for(var r=0;r<tagged.length;r++)
 {
 if(params[2]) string = string + params[2] + " ";
 string = string + "[[" + tagged[r].title + "]]\n";
 }
 wikify(string, place, null, null);
}
&nbsp; <<defaultHome>>  [[Notebook]]  [[Virtual Machines]]  [[Outline]]  [[Calendar]]

! Misc Resources

[[DigitalOcean|http://www.digitalocean.com/]] - Good virtualization provider for running your own systems
<html>
<center>
  <video id="my-video" class="video-js" controls preload="auto" width="1572" height="724" poster="" data-setup="{}">
    <source src="video/naemon.mp4" type='video/mp4'>
    <p class="vjs-no-js">
      To view this video please enable JavaScript, and consider upgrading to a web browser that
      <a href="http://videojs.com/html5-video-support/" target="_blank">supports HTML5 video</a>
    </p>
  </video>

  <script src="https://vjs.zencdn.net/7.8.2/video.min.js"></script>
</center>
</html>
/***
''NestedSlidersPlugin for TiddlyWiki version 1.2.x and 2.0''
^^author: Eric Shulman
source: http://www.TiddlyTools.com/#NestedSlidersPlugin
license: [[Creative Commons Attribution-ShareAlike 2.5 License|http://creativecommons.org/licenses/by-sa/2.5/]]^^

Quickly make any tiddler content into an expandable 'slider' panel, without needing to create a separate tiddler to contain the slider content.  Optional syntax allows ''default to open'', ''custom button label/tooltip'' and ''automatic blockquote formatting.''

You can also 'nest' these sliders as deep as you like (see complex nesting example below), so that expandable 'tree-like' hierarchical displays can be created.  This is most useful when converting existing in-line text content to create in-line annotations, footnotes, context-sensitive help, or other subordinate information displays.

For more details, please click on a section headline below:
++++!!!!![Configuration]>
Debugging messages for 'lazy sliders' deferred rendering:
<<option chkDebugLazySliderDefer>> show debugging alert when deferring slider rendering
<<option chkDebugLazySliderRender>> show debugging alert when deferred slider is actually rendered
===
++++!!!!![Usage]>
When installed, this plugin adds new wiki syntax for embedding 'slider' panels directly into tiddler content.  Use {{{+++}}} and {{{===}}} to delimit the slider content.  Additional optional syntax elements let you specify
*default to open
*cookiename
*heading level
*floater (with optional CSS width value)
*mouse auto rollover
*custom label/tooltip/accesskey
*automatic blockquote
*deferred rendering
The complete syntax, using all options, is:
//{{{
++++(cookiename)!!!!!^width^*[label=key|tooltip]>...
content goes here
===
//}}}
where:
* {{{+++}}} (or {{{++++}}}) and {{{===}}}^^
marks the start and end of the slider definition, respectively.  When the extra {{{+}}} is used, the slider will be open when initially displayed.^^
* {{{(cookiename)}}}^^
saves the slider opened/closed state, and restores this state whenever the slider is re-rendered.^^
* {{{!}}} through {{{!!!!!}}}^^
displays the slider label using a formatted headline (Hn) style instead of a button/link style^^
* {{{^width^}}} (or just {{{^}}})^^
makes the slider 'float' on top of other content rather than shifting that content downward.  'width' must be a valid CSS value (e.g., "30em", "180px", "50%", etc.).  If omitted, the default width is "auto" (i.e., fit to content)^^
* {{{*}}}^^
automatically opens/closes slider on "rollover" as well as when clicked^^
* {{{[label=key|tooltip]}}}^^
uses custom label/tooltip/accesskey.  {{{=key}}} and {{{|tooltip}}} are optional.  'key' must be a ''single letter only''.  Default labels/tooltips are: ">" (more) and "<" (less), with no default access key assignment.^^
* {{{">"}}} //(without the quotes)//^^
automatically adds blockquote formatting to slider content^^
* {{{"..."}}} //(without the quotes)//^^
defers rendering of closed sliders until the first time they are opened.  //Note: deferred rendering may produce unexpected results in some cases.  Use with care.//^^

//Note: to make slider definitions easier to read and recognize when editing a tiddler, newlines immediately following the {{{+++}}} 'start slider' or preceding the {{{===}}} 'end slider' sequence are automatically suppressed so that excess whitespace is eliminated from the output.//
===
++++!!!!![Examples]>
simple in-line slider: 
{{{
+++
   content
===
}}}
+++
   content
===
----
use a custom label and tooltip: 
{{{
+++[label|tooltip]
   content
===
}}}
+++[label|tooltip]
   content
===
----
content automatically blockquoted: 
{{{
+++>
   content
===
}}}
+++>
   content
===
----
all options combined //(default open, cookie, heading, sized floater, rollover, label/tooltip/key, blockquoted, deferred)//
{{{
++++(testcookie)!!!^30em^*[label=Z|click or press Alt-Z to open]>...
   content
===
}}}
++++(testcookie)!!!^30em^*[label=Z|click or press Alt-Z to open]>...
   content
===
----
complex nesting example:
{{{
+++^[get info...=I|click for information or press Alt-I]
   put some general information here, plus a floating slider with more specific info:
   +++^10em^[view details...|click for details]
      put some detail here, which could include a rollover with a +++^25em^*[glossary definition]explaining technical terms===
   ===
===
}}}
+++^[get info...=I|click for information or press Alt-I]
   put some general information here, plus a floating slider with more specific info:
   +++^10em^[view details...|click for details]
      put some detail here, which could include a rollover with a +++^25em^*[glossary definition]explaining technical terms===
   ===
===
----
nested floaters
>menu: <<tiddler NestedSlidersExample>>
(see [[NestedSlidersExample]] for definition)
----
===
!!!!!Installation
<<<
import (or copy/paste) the following tiddlers into your document:
''NestedSlidersPlugin'' (tagged with <<tag systemConfig>>)
<<<
!!!!!Revision History
<<<
''2006.05.11 - 1.9.0'' added optional '^width^' syntax for floating sliders and '=key' syntax for setting an access key on a slider label
''2006.05.09 - 1.8.0'' in onClickNestedSlider(), when showing panel, set focus to first child input/textarea/select element
''2006.04.24 - 1.7.8'' in adjustSliderPos(), if floating panel is contained inside another floating panel, subtract offset of containing panel to find correct position
''2006.02.16 - 1.7.7'' corrected deferred rendering to account for use-case where show/hide state is tracked in a cookie
''2006.02.15 - 1.7.6'' in adjustSliderPos(), ensure that floating panel is positioned completely within the browser window (i.e., does not go beyond the right edge of the browser window)
''2006.02.04 - 1.7.5'' add 'var' to unintended global variable declarations to avoid FireFox 1.5.0.1 crash bug when assigning to globals
''2006.01.18 - 1.7.4'' only define adjustSliderPos() function if it has not already been provided by another plugin.  This lets other plugins 'hijack' the function even when they are loaded first.
''2006.01.16 - 1.7.3'' added adjustSliderPos(place,btn,panel,panelClass) function to permit specialized logic for placement of floating panels.  While it provides improved placement for many uses of floating panels, it exhibits a relative offset positioning error when used within *nested* floating panels.  Short-term workaround is to only adjust the position for 'top-level' floaters.
''2006.01.16 - 1.7.2'' added button property to slider panel elements so that slider panel can tell which button it belongs to.  Also, re-activated and corrected animation handling so that nested sliders aren't clipped by hijacking Slider.prototype.stop so that "overflow:hidden" can be reset to "overflow:visible" after animation ends
''2006.01.14 - 1.7.1'' added optional "^" syntax for floating panels.  Defines new CSS class, ".floatingPanel", as an alternative for standard in-line ".sliderPanel" styles.
''2006.01.14 - 1.7.0'' added optional "*" syntax for rollover handling to show/hide slider without requiring a click (Based on a suggestion by tw4efl)
''2006.01.03 - 1.6.2'' When using optional "!" heading style, instead of creating a clickable "Hn" element, create an "A" element inside the "Hn" element.  (allows click-through in SlideShowPlugin, which captures nearly all click events, except for hyperlinks)
''2005.12.15 - 1.6.1'' added optional "..." syntax to invoke deferred ('lazy') rendering for initially hidden sliders
removed checkbox option for 'global' application of lazy sliders
''2005.11.25 - 1.6.0'' added optional handling for 'lazy sliders' (deferred rendering for initially hidden sliders)
''2005.11.21 - 1.5.1'' revised regular expressions: if present, a single newline //preceding// and/or //following// a slider definition will be suppressed so start/end syntax can be place on separate lines in the tiddler 'source' for improved readability.  Similarly, any whitespace (newlines, tabs, spaces, etc.) trailing the 'start slider' syntax or preceding the 'end slider' syntax is also suppressed.
''2005.11.20 - 1.5.0'' added (cookiename) syntax for optional tracking and restoring of slider open/close state
''2005.11.11 - 1.4.0'' added !!!!! syntax to render slider label as a header (Hn) style instead of a button/link style
''2005.11.07 - 1.3.0'' removed alternative syntax {{{(((}}} and {{{)))}}} (so they can be used by other
formatting extensions) and simplified/improved regular expressions to trim multiple excess newlines
''2005.11.05 - 1.2.1'' changed name to NestedSlidersPlugin
more documentation
''2005.11.04 - 1.2.0'' added alternative character-mode syntax {{{(((}}} and {{{)))}}}
tweaked "eat newlines" logic for line-mode {{{+++}}} and {{{===}}} syntax
''2005.11.03 - 1.1.1'' fixed toggling of default tooltips ("more..." and "less...") when a non-default button label is used
code cleanup, added documentation
''2005.11.03 - 1.1.0'' changed delimiter syntax from {{{(((}}} and {{{)))}}} to {{{+++}}} and {{{===}}}
changed name to EasySlidersPlugin
''2005.11.03 - 1.0.0'' initial public release
<<<
!!!!!Credits
<<<
This feature was implemented by EricShulman from [[ELS Design Studios|http:/www.elsdesign.com]] with initial research and suggestions from RodneyGomes, GeoffSlocock, and PaulPetterson.
<<<
!!!!!Code
***/
//{{{
version.extensions.nestedSliders = {major: 1, minor: 9, revision: 0, date: new Date(2006,5,11)};
//}}}

//{{{
// options for deferred rendering of sliders that are not initially displayed
if (config.options.chkDebugLazySliderDefer==undefined) config.options.chkDebugLazySliderDefer=false;
if (config.options.chkDebugLazySliderRender==undefined) config.options.chkDebugLazySliderRender=false;

// default styles for 'floating' class
setStylesheet(".floatingPanel { position:absolute; z-index:10; padding:0.5em; margin:0em; \
	background-color:#eee; color:#000; border:1px solid #000; text-align:left; }","floatingPanelStylesheet");
//}}}

//{{{
config.formatters.push( {
	name: "nestedSliders",
	match: "\\n?\\+{3}",
	terminator: "\\s*\\={3}\\n?",
	lookahead: "\\n?\\+{3}(\\+)?(\\([^\\)]*\\))?(\\!*)?(\\^(?:[^\\^\\*\\[\\>]*\\^)?)?(\\*)?(\\[[^\\]]*\\])?(\\>)?(\\.\\.\\.)?\\s*",
	handler: function(w)
		{
			var lookaheadRegExp = new RegExp(this.lookahead,"mg");
			lookaheadRegExp.lastIndex = w.matchStart;
			var lookaheadMatch = lookaheadRegExp.exec(w.source)
			if(lookaheadMatch && lookaheadMatch.index == w.matchStart)
			{
				// location for rendering button and panel
				var place=w.output;

				// default to closed, no cookie, no accesskey
				var show="none"; var title=">"; var tooltip="show"; var cookie=""; var key="";

				// extra "+", default to open
				if (lookaheadMatch[1])
					{ show="block"; title="<"; tooltip="hide"; }

				// cookie, use saved open/closed state
				if (lookaheadMatch[2]) {
					cookie=lookaheadMatch[2].trim().slice(1,-1);
					cookie="chkSlider"+cookie;
					if (config.options[cookie]==undefined)
						{ config.options[cookie] = (show=="block") }
					if (config.options[cookie])
						{ show="block"; title="<"; tooltip="hide"; }
					else
						{ show="none"; title=">"; tooltip="show"; }
				}

				// parse custom label/tooltip/accesskey: [label=X|tooltip]
				if (lookaheadMatch[6]) {
					title = lookaheadMatch[6].trim().slice(1,-1);
					var pos=title.indexOf("|");
					if (pos!=-1) { tooltip = title.substr(pos+1,title.length); title=title.substr(0,pos); }
					if (title.substr(title.length-2,1)=="=") { key=title.substr(title.length-1,1); title=title.slice(0,-2); }
					if (pos==-1) tooltip += " "+title; // default tooltip: "show/hide <title>"
				}

				// create the button
				if (lookaheadMatch[3]) { // use "Hn" header format instead of button/link
					var lvl=(lookaheadMatch[3].length>6)?6:lookaheadMatch[3].length;
					var btn = createTiddlyElement(createTiddlyElement(place,"h"+lvl,null,null,null),"a",null,null,title);
					btn.onclick=onClickNestedSlider;
					btn.setAttribute("href","javascript:;");
					btn.setAttribute("title",tooltip);
				}
				else
					var btn = createTiddlyButton(place,title,tooltip,onClickNestedSlider);
				btn.sliderCookie = cookie; // save the cookiename (if any) in the button object
				btn.keyparam=key; // save the access key letter ("" if none)
				if (key.length) {
					btn.setAttribute("accessKey",key); // init access key
					btn.onfocus=function(){this.setAttribute("accessKey",this.keyparam);}; // **reclaim** access key on focus
				}

				// "non-click" MouseOver open/close slider
				if (lookaheadMatch[5]) btn.onmouseover=onClickNestedSlider;

				// create slider panel
				var panelClass=lookaheadMatch[4]?"floatingPanel":"sliderPanel";
				var panel=createTiddlyElement(place,"div",null,panelClass,null);
				panel.style.display = show;
				if (lookaheadMatch[4] && lookaheadMatch[4].length>2) panel.style.width=lookaheadMatch[4].slice(1,-1); // custom width
				panel.button = btn; // so the slider panel know which button it belongs to
				btn.sliderPanel=panel;

				// render slider (or defer until shown) 
				w.nextMatch = lookaheadMatch.index + lookaheadMatch[0].length;
				if ((show=="block")||!lookaheadMatch[8]) {
					// render now if panel is supposed to be shown or NOT deferred rendering
					w.subWikify(lookaheadMatch[7]?createTiddlyElement(panel,"blockquote"):panel,this.terminator);
					// align slider/floater position with button
					adjustSliderPos(place,btn,panel,panelClass);
				}
				else {
					var src = w.source.substr(w.nextMatch);
					var endpos=findMatchingDelimiter(src,"+++","===");
					panel.setAttribute("raw",src.substr(0,endpos));
					panel.setAttribute("blockquote",lookaheadMatch[7]?"true":"false");
					panel.setAttribute("rendered","false");
					w.nextMatch += endpos+3;
					if (w.source.substr(w.nextMatch,1)=="\n") w.nextMatch++;
					if (config.options.chkDebugLazySliderDefer) alert("deferred '"+title+"':\n\n"+panel.getAttribute("raw"));
				}
			}
		}
	}
)

// TBD: ignore 'quoted' delimiters (e.g., "{{{+++foo===}}}" isn't really a slider)
function findMatchingDelimiter(src,starttext,endtext) {
	var startpos = 0;
	var endpos = src.indexOf(endtext);
	// check for nested delimiters
	while (src.substring(startpos,endpos-1).indexOf(starttext)!=-1) {
		// count number of nested 'starts'
		var startcount=0;
		var temp = src.substring(startpos,endpos-1);
		var pos=temp.indexOf(starttext);
		while (pos!=-1)  { startcount++; pos=temp.indexOf(starttext,pos+starttext.length); }
		// set up to check for additional 'starts' after adjusting endpos
		startpos=endpos+endtext.length;
		// find endpos for corresponding number of matching 'ends'
		while (startcount && endpos!=-1) {
			endpos = src.indexOf(endtext,endpos+endtext.length);
			startcount--;
		}
	}
	return (endpos==-1)?src.length:endpos;
}
//}}}

//{{{
window.onClickNestedSlider=function(e)
{
	if (!e) var e = window.event;
	var theTarget = resolveTarget(e);
	var theLabel = theTarget.firstChild.data;
	var theSlider = theTarget.sliderPanel
	var isOpen = theSlider.style.display!="none";
	// if using default button labels, toggle labels
	if (theLabel==">") theTarget.firstChild.data = "<";
	else if (theLabel=="<") theTarget.firstChild.data = ">";
	// if using default tooltips, toggle tooltips
	if (theTarget.getAttribute("title")=="show")
		theTarget.setAttribute("title","hide");
	else if (theTarget.getAttribute("title")=="hide")
		theTarget.setAttribute("title","show");
	if (theTarget.getAttribute("title")=="show "+theLabel)
		theTarget.setAttribute("title","hide "+theLabel);
	else if (theTarget.getAttribute("title")=="hide "+theLabel)
		theTarget.setAttribute("title","show "+theLabel);
	// deferred rendering (if needed)
	if (theSlider.getAttribute("rendered")=="false") {
		if (config.options.chkDebugLazySliderRender)
			alert("rendering '"+theLabel+"':\n\n"+theSlider.getAttribute("raw"));
		var place=theSlider;
		if (theSlider.getAttribute("blockquote")=="true")
			place=createTiddlyElement(place,"blockquote");
		wikify(theSlider.getAttribute("raw"),place);
		theSlider.setAttribute("rendered","true");
	}
	// show/hide the slider
	if(config.options.chkAnimate)
		anim.startAnimating(new Slider(theSlider,!isOpen,e.shiftKey || e.altKey,"none"));
	else
		theSlider.style.display = isOpen ? "none" : "block";
	// if showing panel, set focus to first 'focus-able' element in panel
	if (theSlider.style.display!="none") {
		var ctrls=theSlider.getElementsByTagName("*");
		for (var c=0; c<ctrls.length; c++) {
			var t=ctrls[c].tagName.toLowerCase();
			if (t=="input" || t=="textarea" || t=="select")
				{ ctrls[c].focus(); break; }
		}
	}
	if (this.sliderCookie && this.sliderCookie.length)
		{ config.options[this.sliderCookie]=!isOpen; saveOptionCookie(this.sliderCookie); }
	// align slider/floater position with target button
	adjustSliderPos(theSlider.parentNode,theTarget,theSlider,theSlider.className);
	return false;
}

// hijack animation handler 'stop' handler so overflow is visible after animation has completed
Slider.prototype.coreStop = Slider.prototype.stop;
Slider.prototype.stop = function() { this.coreStop(); this.element.style.overflow = "visible"; }

// adjust panel position based on button position
if (window.adjustSliderPos==undefined) window.adjustSliderPos=function(place,btn,panel,panelClass) {
	if (panelClass=="floatingPanel") {
		var left=0;
		var top=btn.offsetHeight; 
		if (place.style.position!="relative") {
			var left=findPosX(btn);
			var top=findPosY(btn)+btn.offsetHeight;
			var p=place; while (p && p.className!='floatingPanel') p=p.parentNode;
			if (p) { left-=findPosX(p); top-=findPosY(p); }
		}
		if (left+panel.offsetWidth > getWindowWidth()) left=getWindowWidth()-panel.offsetWidth-10;
		panel.style.left=left+"px"; panel.style.top=top+"px";
	}
}

function getWindowWidth() {
	if(document.width!=undefined)
		return document.width; // moz (FF)
	if(document.documentElement && ( document.documentElement.clientWidth || document.documentElement.clientHeight ) )
		return document.documentElement.clientWidth; // IE6
	if(document.body && ( document.body.clientWidth || document.body.clientHeight ) )
		return document.body.clientWidth; // IE4
	if(window.innerWidth!=undefined)
		return window.innerWidth; // IE - general
	return 0; // unknown
}
//}}}
* [[Class Syllabus|syllabus/CI233Syllabus2409.pdf]]
* [[General SOPs]]
* [[Lab Assignments]]
* [[Class Participation]]
** [[Using Discord]]
* [[Shell script submission requirements]]
/% ** [[Using Blackboard]] %/

[[Material Sections]]

[[Working more efficiently with GNU screen & SSH keys]]
[[Tunnels & Proxies with SSH]]

[[Misc Resources]]

!!Handouts
[[Command line summary handout|handouts/UnixCommandSummary.pdf]]
[[Substitution Handout|handouts/SubstitutionHandout.pdf]] (from tcsh man page)
[[ASCII Chart|handouts/ascii-chart.gif]]
[[Metacharacter Handout|handouts/Metacharacters.pdf]] - Metacharacters and how they differ in the shell & regular expression contexts.
[[Regular expression metacharacters]]
[[vi diagram handout|handouts/viDiagram.pdf]]
[[awk handout|handouts/awkHandout.pdf]]
[[Working more efficiently with GNU screen & SSH keys]]

!!Reference Material
[[UNIX in a Nutshell|http://books.google.com/books?id=YkNiiLupct4C&dq=unix+in+a+nutshell&printsec=frontcover&source=bn&hl=en&ei=aKlWS43lJJCOlQeW3rSCBA&sa=X&oi=book_result&ct=result&resnum=5&ved=0CCIQ6AEwBA#v=onepage&q=&f=false]] - Google books
[[The Linux Command Line (No Starch Press)|http://www.merantn.net/reference/TLCL-19.01.pdf]]
[[UNIX Toolbox|http://www.cs.sunyit.edu/~merantn/docs/unixtoolbox.dognet.xhtml]]
[[Shell scripting notes]]
[[Table of Commands]]
[[Linux Shortcuts]]
/***
|Name|OpenTopPlugin|
|Created by|SaqImtiaz|
|Location|http://lewcid.googlepages.com/lewcid.html#OpenTopPlugin|
|Version|0.1|
|Requires|~TW2.x|
!!!Description:
Open new tiddlers at the top of the screen.

!!!Code
***/
//{{{
Story.prototype.coreLewcidDisplayTiddler=Story.prototype.displayTiddler ;
Story.prototype.displayTiddler =
function(srcElement,title,template,unused1,unused2,animate,slowly)
{
       var srcElement=null;
       if (document.getElementById(this.idPrefix + title))
          {story.closeTiddler(title);}
       this.coreLewcidDisplayTiddler(srcElement,title,template,unused1,unused2,animate,slowly);
       window.scrollTo(0,0);
}
//}}}
<<option chkSaveBackups>> SaveBackups
<<option chkAutoSave>> AutoSave
<<option chkRegExpSearch>> RegExpSearch
<<option chkCaseSensitiveSearch>> CaseSensitiveSearch
<<option chkAnimate>> EnableAnimations
----
Also see AdvancedOptions

File permissions are pre-req material from ~CI-132.  I expect everyone is already familiar with this, but I'm including it here anyway in case you'd like a refresher.  The assignments below are optional.  I will review and return them if you'd like to submit them, but grades will not be recorded in blackboard.  If you submit them, please use A1 and A2 for the lab numbers.


! Optional Assignment

* File Permissions:
** Read [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] Chapter 9, pp 90 - 100 (Stop at //Some Special Permissions//)
** Watch: File Permissions: https://www.youtube.com/watch?v=8SkN7UofOww
** Complete:  [[Lab A1|labs/labA1.pdf]] & [[Lab A2|labs/labA2.pdf]]


! Material


!! File Permissions

The Unix operating system has multiple levels of securing access to resources. We can restrict who can access the system through userids and login credentials, we can limit who can become the superuser and act as the administrator of the system, we can control who can access certain directories on the system, and we can control access to files. The first two are items for an administrator to configure, but the latter two regular users can control for files that they own. Being able to restrict access to certain files is a critical function of a multi-user system. For example, we restrict access to the lab assignments everyone is uploading so no one else peeks at your work. Certain sensitive system files are restricted to keep the system more secure.

Hopefully by now we're comfortable navigating the filesystem and identifying files by name, both individually and in groups. Next I'd like to examine how we can manipulate the file's permissions.

Permissions can be set based on three different tiers:

* User - the owner of the file
* Group - a group that has access to the file
* Others - everyone else on the system

And three different permissions can be set on each file

* Read - The ability to read a file or list the contents of a directory
* Write - The ability to modify content of a file or create files in a directory
* Execute - The ability to run a program or access a directory
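
Putting the tiers and the permissions together: the first column of {{Command{ls -l}}} output shows the file type followed by three sets of read/write/execute bits, one set for the user, one for the group, and one for others.  The listing below is a hypothetical example for illustration only; the file name, owner, and group are made up:
{{{
$ ls -l notes.txt
-rw-r--r--. 1 alice students 1024 Jan 15 10:30 notes.txt
^^  ^  ^
||  |  +--- others: r-- (read only)
||  +------ group:  r-- (read only)
|+--------- user:   rw- (read and write)
+---------- type:   -   (regular file)
}}}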

Chapter 9 in The Linux Command Line discusses permissions in detail.

This ~YouTube video is a good overview of permissions:  [[File Permissions|https://www.youtube.com/watch?v=8SkN7UofOww]]


!!! File & Directory Permissions

The following tables and graphics can serve as a quick reference:

|!Type|!File|!Directory|
| read (4) | read contents | List directory |
| write (2) | change / delete file | Add files |
| execute (1) | run executable | cd into |

!!!! chmod

The {{Command{chmod}}} command can be used to change permissions for existing files.
* using octal codes
** Read (4), Write (2), and Execute (1)
** Three positions:  user, group, and others
* using symbolic codes
** who:
*** u - user
*** g - group
*** o - others
*** a = all positions
** operator:
*** = explicitly set
*** + add permission
*** - remove permission
** permission:
*** r = read
*** w = write
*** x = execute

''Note:'' Use symbolic abbreviations when changing permissions relative to whatever is already set, eg: when adding or removing individual permissions.  Octal codes always reset all of the permission bits at once - they cannot add or remove an individual permission while leaving the rest untouched.

For example, suppose I only want to __add__ write permissions for the group. Without knowing what the permissions currently are, I have to use symbolic notation to modify the permissions on the file. In this case with {{Command{chmod g+w //file//}}}

If the lab question asks you to ''set'' permissions, use __octal codes__. If it asks you to ''add or remove'', use __symbolic__ abbreviations.
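
As a quick sketch of the difference (the file name here is just a placeholder): octal codes replace all of the permission bits at once, while symbolic notation touches only the positions you name.
{{{
# Set permissions explicitly: user=rw, group=r, others=r  (644)
chmod 644 myscript.sh

# Add execute for the user only; everything else is left alone
chmod u+x myscript.sh

# Add write for the group and remove all access for others
chmod g+w,o-rwx myscript.sh
}}}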

<html><center><img src="img/chmod1.png" alt=""><BR><BR><HR width="75%"><img src="img/chmod2.png" alt=""></center></html>


!!! umask

The {{Command{umask}}} command can be used to establish default permissions for all newly created files.

* umask - user mask - which permissions to restrict. (mask = remove)
* start with full permissions 777
* The umask value is which bits to remove.
* The execute bit (1) will automatically be subtracted from all positions for regular files
* Making a new regular text file executable must be a manual task

A mask refers to bits to be removed. If we do not want newly created files to have write permissions for the group or others, we need to mask 2 from the group and others positions, resulting in a umask of 22.

Examples:

A umask value of 22 will set default permission for new files to 644 (777 - 22 - 111) and directories to 755 (777 - 22)
A umask value of 77 will set default permission for new files to 600 (777 - 77 - 111) and directories to 700 (777 - 77)
''Note:'' Newly created files are not granted execute automatically despite the umask value.
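
A short demonstration of the arithmetic above, assuming a umask of 22; the file names are made up and the listing is illustrative, but the permission strings (644 for the file, 755 for the directory) are what you should expect to see:
{{{
$ umask 22
$ touch newfile.txt
$ mkdir newdir
$ ls -ld newfile.txt newdir
-rw-r--r--. 1 alice students    0 Jan 15 10:35 newfile.txt
drwxr-xr-x. 2 alice students 4096 Jan 15 10:35 newdir
}}}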


!! Misc:

The book creates empty files for its examples with {{Command{> foo.txt}}}.  For a file that does not already exist, this has the same effect as executing {{Command{touch foo.txt}}}: it creates a new, empty file.

<!--{{{-->
<div class='header' macro='gradient vert #000 #069'>
<div id='topTitle' class='headerShadow'>
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>
<div id='topTitle' class='headerForeground'>
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;&nbsp;&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>
<div id='topMenu' refresh='content' tiddler='MainMenu'></div>
<div id='rightMenu' refresh='content' tiddler='RightMenu'></div>
</div>
<div id='sidebar'>
<div id='sidebarOptions' refresh='content' tiddler='SideBarOptions'></div>
<div id='sidebarTabs' refresh='content' force='true' tiddler='SideBarTabs'></div>
</div>
<div id='displayArea'>
<div id='messageArea'></div>
<div id='tiddlerDisplay'></div>
</div>
<!--}}}-->
function onClickDefaultHome(e) {
story.closeAllTiddlers();
config.options.txtDefaultTiddlers = "";
saveOptionCookie('txtDefaultTiddlers');
var start = store.getTiddlerText("DefaultTiddlers");
if(start)
story.displayTiddlers(null,start.readBracketedList());
}

config.macros["defaultHome"] = {label: "Home", prompt: "Show the default tiddlers", title: "Home"};
config.macros.defaultHome.handler = function(place) {
createTiddlyButton(place,this.label,this.prompt,onClickDefaultHome);

}
config.macros.listTags = { text: "Hello" };
config.macros.listTags.handler = function(place,macroName,params)
{
 var tagged = store.getTaggedTiddlers(params[0]);
 var ul = createTiddlyElement(place,"ul",null,null,"");
 for(var r=0;r<tagged.length;r++)
 {
 var li = createTiddlyElement(ul,"li",null,null,"");
 createTiddlyLink(li,tagged[r].title,true);
 }
}
Type the text for 'Plugins'
<html>
<center>
  <video id="my-video" class="video-js" controls preload="auto" width="466" height="448" poster="" data-setup="{}">
    <source src="video/PuttyProxy.mp4" type='video/mp4'>
    <p class="vjs-no-js">
      To view this video please enable JavaScript, and consider upgrading to a web browser that
      <a href="http://videojs.com/html5-video-support/" target="_blank">supports HTML5 video</a>
    </p>
  </video>

  <script src="https://vjs.zencdn.net/7.8.2/video.min.js"></script>
</center>
</html>

{{Warning{''Note:'' This video was recorded for a different class, but all of the same steps apply.  Use the correct hostname for our class shell server.}}}
/***
|Name:|QuickOpenTagPlugin|
|Description:|Changes tag links to make it easier to open tags as tiddlers|
|Version:|3.0.1 ($Rev: 3861 $)|
|Date:|$Date: 2008-03-08 10:53:09 +1000 (Sat, 08 Mar 2008) $|
|Source:|http://mptw.tiddlyspot.com/#QuickOpenTagPlugin|
|Author:|Simon Baird <simon.baird@gmail.com>|
|License:|http://mptw.tiddlyspot.com/#TheBSDLicense|
***/
//{{{
config.quickOpenTag = {

	dropdownChar: (document.all ? "\u25bc" : "\u25be"), // the little one doesn't work in IE?

	createTagButton: function(place,tag,excludeTiddler) {
		// little hack so we can do this: <<tag PrettyTagName|RealTagName>>
		var splitTag = tag.split("|");
		var pretty = tag;
		if (splitTag.length == 2) {
			tag = splitTag[1];
			pretty = splitTag[0];
		}

		var sp = createTiddlyElement(place,"span",null,"quickopentag");
		createTiddlyText(createTiddlyLink(sp,tag,false),pretty);

		var theTag = createTiddlyButton(sp,config.quickOpenTag.dropdownChar,
                        config.views.wikified.tag.tooltip.format([tag]),onClickTag);
		theTag.setAttribute("tag",tag);
		if (excludeTiddler)
			theTag.setAttribute("tiddler",excludeTiddler);
    		return(theTag);
	},

	miniTagHandler: function(place,macroName,params,wikifier,paramString,tiddler) {
		var tagged = store.getTaggedTiddlers(tiddler.title);
		if (tagged.length > 0) {
			var theTag = createTiddlyButton(place,config.quickOpenTag.dropdownChar,
                        	config.views.wikified.tag.tooltip.format([tiddler.title]),onClickTag);
			theTag.setAttribute("tag",tiddler.title);
			theTag.className = "miniTag";
		}
	},

	allTagsHandler: function(place,macroName,params) {
		var tags = store.getTags(params[0]);
		var filter = params[1]; // new feature
		var ul = createTiddlyElement(place,"ul");
		if(tags.length == 0)
			createTiddlyElement(ul,"li",null,"listTitle",this.noTags);
		for(var t=0; t<tags.length; t++) {
			var title = tags[t][0];
			if (!filter || (title.match(new RegExp('^'+filter)))) {
				var info = getTiddlyLinkInfo(title);
				var theListItem =createTiddlyElement(ul,"li");
				var theLink = createTiddlyLink(theListItem,tags[t][0],true);
				var theCount = " (" + tags[t][1] + ")";
				theLink.appendChild(document.createTextNode(theCount));
				var theDropDownBtn = createTiddlyButton(theListItem," " +
					config.quickOpenTag.dropdownChar,this.tooltip.format([tags[t][0]]),onClickTag);
				theDropDownBtn.setAttribute("tag",tags[t][0]);
			}
		}
	},

	// todo fix these up a bit
	styles: [
"/*{{{*/",
"/* created by QuickOpenTagPlugin */",
".tagglyTagged .quickopentag, .tagged .quickopentag ",
"	{ margin-right:1.2em; border:1px solid #eee; padding:2px; padding-right:0px; padding-left:1px; }",
".quickopentag .tiddlyLink { padding:2px; padding-left:3px; }",
".quickopentag a.button { padding:1px; padding-left:2px; padding-right:2px;}",
"/* extra specificity to make it work right */",
"#displayArea .viewer .quickopentag a.button, ",
"#displayArea .viewer .quickopentag a.tiddyLink, ",
"#mainMenu .quickopentag a.tiddyLink, ",
"#mainMenu .quickopentag a.tiddyLink ",
"	{ border:0px solid black; }",
"#displayArea .viewer .quickopentag a.button, ",
"#mainMenu .quickopentag a.button ",
"	{ margin-left:0px; padding-left:2px; }",
"#displayArea .viewer .quickopentag a.tiddlyLink, ",
"#mainMenu .quickopentag a.tiddlyLink ",
"	{ margin-right:0px; padding-right:0px; padding-left:0px; margin-left:0px; }",
"a.miniTag {font-size:150%;} ",
"#mainMenu .quickopentag a.button ",
"	/* looks better in right justified main menus */",
"	{ margin-left:0px; padding-left:2px; margin-right:0px; padding-right:0px; }",
"#topMenu .quickopentag { padding:0px; margin:0px; border:0px; }",
"#topMenu .quickopentag .tiddlyLink { padding-right:1px; margin-right:0px; }",
"#topMenu .quickopentag .button { padding-left:1px; margin-left:0px; border:0px; }",
"/*}}}*/",
		""].join("\n"),

	init: function() {
		// we fully replace these builtins. can't hijack them easily
		window.createTagButton = this.createTagButton;
		config.macros.allTags.handler = this.allTagsHandler;
		config.macros.miniTag = { handler: this.miniTagHandler };
		config.shadowTiddlers["QuickOpenTagStyles"] = this.styles;
		store.addNotification("QuickOpenTagStyles",refreshStyles);
	}
}

config.quickOpenTag.init();

//}}}

! Material 

!! Quoting:
* Read Chapter 7, pp 75-79 (Quoting) in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]].
* Watch:  Linux shell quotes: https://www.youtube.com/watch?v=1Ukw0IjGKsI

! Notes:

!! Escaping & Quoting:

!!! Quoting - ' " \

Some special characters, such as the space and most symbols, have a special meaning to the shell. Occasionally we need to use those special characters literally without allowing the shell to interpret their special meanings.

Quoting allows us to protect these special characters from the shell. It is necessary whenever we want to use a metacharacter literally, disabling its special shell meaning.

For example, consider the scenario where you need to display the contents of a file which contains a space in the name.  The space has a special meaning to the shell; it is our argument separator.

If my file is named {{File{my notes.txt}}}, and I try to execute the command {{Command{cat my notes.txt}}} to display it, the space in the file name will cause cat to try to display the file {{File{my}}} and the file {{File{notes.txt}}}, neither of which actually exist.

I need to protect that special symbol, the space, from the shell to ensure the cat command receives the full file name as a single argument.  There are three ways I can do so:

* {{Command{cat "my notes.txt"}}}
* {{Command{cat 'my notes.txt'}}}
* {{Command{cat my\ notes.txt}}}

Each of these options works a little differently.  Knowing the differences allows you to choose the best method for the task.


Three ways to quote:

* Backslash (\) - Changes the interpretation of the character that follows
** \ is the escape character, disable special meaning of a shell special character.
** Converts special characters into literal characters and literal characters into special characters
** n vs \n
** printf "Home is %s\n" $HOME
** \ followed by return - suppress the special meaning of the return key
* Double Quote - remove special meaning of most metacharacters
** " quoting will evaluate variable, command, and history substitution.
* Single Quote
** ' is stronger than "
** ' quoting will only evaluate history substitution
* You can alternate quotes to include the other type: echo "Today's date is `date`"

You can read about them in Chapter 7, pp 75-79 (Quoting) in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] or watch this video:  [[Linux shell quotes|https://www.youtube.com/watch?v=1Ukw0IjGKsI]].
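
Here is a small sketch of the difference in strength, using {{Command{echo}}} and the HOME variable (the path shown is just an example; yours will differ):
{{{
$ echo "Home is $HOME"
Home is /home/alice
$ echo 'Home is $HOME'
Home is $HOME
$ echo Home is \$HOME
Home is $HOME
}}}
The double quotes still allow the variable substitution to happen, while the single quotes and the backslash protect the {{Monospaced{''$''}}} so it is passed to echo literally.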

| !Symbol | !Meaning | !Escape | !Not supported by |
| ^ |Start of line| | |
| $ |End of line| | |
| [ ] |Character Classes (match any one character listed) | | |
|~|Characters may be specified singly or in ranges| | |
| [^ ] |Negated character class (match any one character not listed)| | |
| ? |Optional item.  Match 0 or 1. | | sed |
| ( ) |Alternation (match any one of the sub-expressions)| | |
|~|Grouping| | |
|~|Capture a backreference.  Access with \//n//| * | |
| {{{|}}} |Or.  Match either expression it separates.  Use with ( )| | |
| . |Any single character| | |
| + |Repetition:  1 or more. | | sed |
| * |Repetition: 0 or more| | |
| { } |Defined range of matches (bounds) {//min//,//max//} or {//min//,} or {//exactly//}| * | |
| \ |Suppress normal behavior of a metacharacter| | |
|~|Access a backreference:  \//n//| | |
| \< |Match start of word.| * | bsd sed |
| \> |Match end of word.| * | bsd sed |


| !Symbol | !File Globbing   | !Regex | !Regex Equivalent |
| ? |Exactly 1|0 or 1| . |
| { } |Sets|# of matches| ( ) |
/***
|Name:|RenameTagsPlugin|
|Description:|Allows you to easily rename or delete tags across multiple tiddlers|
|Version:|3.0 ($Rev: 5501 $)|
|Date:|$Date: 2008-06-10 23:11:55 +1000 (Tue, 10 Jun 2008) $|
|Source:|http://mptw.tiddlyspot.com/#RenameTagsPlugin|
|Author:|Simon Baird <simon.baird@gmail.com>|
|License|http://mptw.tiddlyspot.com/#TheBSDLicense|
Rename a tag and you will be prompted to rename it in all its tagged tiddlers.
***/
//{{{
config.renameTags = {

	prompts: {
		rename: "Rename the tag '%0' to '%1' in %2 tidder%3?",
		remove: "Remove the tag '%0' from %1 tidder%2?"
	},

	removeTag: function(tag,tiddlers) {
		store.suspendNotifications();
		for (var i=0;i<tiddlers.length;i++) {
			store.setTiddlerTag(tiddlers[i].title,false,tag);
		}
		store.resumeNotifications();
		store.notifyAll();
	},

	renameTag: function(oldTag,newTag,tiddlers) {
		store.suspendNotifications();
		for (var i=0;i<tiddlers.length;i++) {
			store.setTiddlerTag(tiddlers[i].title,false,oldTag); // remove old
			store.setTiddlerTag(tiddlers[i].title,true,newTag);  // add new
		}
		store.resumeNotifications();
		store.notifyAll();
	},

	storeMethods: {

		saveTiddler_orig_renameTags: TiddlyWiki.prototype.saveTiddler,

		saveTiddler: function(title,newTitle,newBody,modifier,modified,tags,fields,clearChangeCount,created) {
			if (title != newTitle) {
				var tagged = this.getTaggedTiddlers(title);
				if (tagged.length > 0) {
					// then we are renaming a tag
					if (confirm(config.renameTags.prompts.rename.format([title,newTitle,tagged.length,tagged.length>1?"s":""])))
						config.renameTags.renameTag(title,newTitle,tagged);

					if (!this.tiddlerExists(title) && newBody == "")
						// dont create unwanted tiddler
						return null;
				}
			}
			return this.saveTiddler_orig_renameTags(title,newTitle,newBody,modifier,modified,tags,fields,clearChangeCount,created);
		},

		removeTiddler_orig_renameTags: TiddlyWiki.prototype.removeTiddler,

		removeTiddler: function(title) {
			var tagged = this.getTaggedTiddlers(title);
			if (tagged.length > 0)
				if (confirm(config.renameTags.prompts.remove.format([title,tagged.length,tagged.length>1?"s":""])))
					config.renameTags.removeTag(title,tagged);
			return this.removeTiddler_orig_renameTags(title);
		}

	},

	init: function() {
		merge(TiddlyWiki.prototype,this.storeMethods);
	}
}

config.renameTags.init();

//}}}

! Assignment

This should all be review material from CI132.  Please get up to speed if you need a refresher.  Understanding this will be required for moving forward with material in this course.

Play around with accessing single user mode.  It's a handy thing to know how to do.  The second half of this week is mostly background info that we'll need later.

Be sure you're comfortable using the {{Command{systemctl}}} command to start, stop, and restart services.

!! Read:
* Linux Administration Chapter 6 - Booting and Shutting Down
** You can skip over the old init way of doing things on pages 168 (starting at //rc Scripts//) through 178 (//Odds and Ends of Booting and Shutting Down//)
** Continue again at Odds and Ends of Booting and Shutting Down on 178
** Note: Booting into single-user mode is good to know but the book's instructions are old. Here's the new way for Redhat systems (which includes ~CentOS) - https://www.tecmint.com/boot-into-single-user-mode-in-centos-7/

* Linux Administration Chapter 8 - Core System Services
** You can skip the first few pages if you'd like and start at //systemd// on page 218
*** We're mainly interested in systemd for the systems we'll be working with.
** Stop at //xinetd and inetd// on page 222


! Material

!! General system startup

* The Boot process
** BIOS (Basic Input/Output System)
*** For motherboard and certain devices
** MBR (First block of the disk)
** Boot loader - Chooses the OS/Kernel and bootstraps the operating system
*** Grub - Grand Unified Boot loader - Standard Linux boot loader
*** Check out grub configs in /boot/grub
*** Use it to boot multiple kernels (such as after a kernel update) or multiple ~OSes
** kernel - /boot/vmlinuz* - loaded into memory and begins to execute
*** Press ESC to see boot messages while the system starts
*** device detection: probe system buses, inventory hardware, and load device driver modules
*** create kernel processes (those in brackets)
*** system becomes available for user processes once the kernel is loaded
** Initialization daemon - First user process, parent of all processes running on the system
*** init - old ~SystemV OSes
*** systemd - New method
*** executed by the kernel and responsible for starting other processes
** startup scripts - start system services
* Config files in {{File{/etc/}}}
** Most are single files for the service or resource
*** {{File{fstab}}} : tab = table - filesystem table
*** {{File{resolv.conf}}}
*** {{File{sysconfig}}} directory - extra system configuration files
** Some are multiple files
*** cron is a good example
*** {{File{crontab}}} - traditional cron config table
*** {{File{cron.d}}} - directory containing individual config files
*** {{File{ cron.{daily,monthly,weekly} }}}
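
For example, on a typical ~CentOS system the cron configuration is spread across several of these locations (the exact listing will vary from system to system):
{{{
$ ls -d /etc/crontab /etc/cron.d /etc/cron.daily /etc/cron.weekly /etc/cron.monthly
/etc/cron.d  /etc/cron.daily  /etc/cron.monthly  /etc/cron.weekly  /etc/crontab
}}}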


!! Systemd
* A new standard init system
* Backward compatible with ~SystemV init
* Can start services in parallel, reducing system start times
* Everything is broken down into units.  
** Two primary unit types to be concerned with
*** service units - Manage a single service
*** target units - manage groups of services
*** {{Command{ systemctl list-units | grep service }}}
*** {{Command{ systemctl list-units | grep target }}}
* Service and target configuration files are stored in {{File{ /{etc,lib}/systemd/system }}}
** Use the {{File{/etc/systemd/system}}} path for custom configs or to override existing
** Stock configs are in {{File{ /lib/systemd/system }}}
** View a list with current state: {{Command{ systemctl list-unit-files &#45;-type=service }}}

Everything is managed by symlinks:
* runlevel.? targets are symlinked to their systemd equivalents

<<<
[root@www system]# pwd
/lib/systemd/system
[root@www system]# ll runlevel*
lrwxrwxrwx. 1 root root 15 Oct 21 17:02 runlevel0.target -> poweroff.target
lrwxrwxrwx. 1 root root 13 Oct 21 17:02 runlevel1.target -> rescue.target
lrwxrwxrwx. 1 root root 17 Oct 21 17:02 runlevel2.target -> multi-user.target
lrwxrwxrwx. 1 root root 17 Oct 21 17:02 runlevel3.target -> multi-user.target
lrwxrwxrwx. 1 root root 17 Oct 21 17:02 runlevel4.target -> multi-user.target
lrwxrwxrwx. 1 root root 16 Oct 21 17:02 runlevel5.target -> graphical.target
lrwxrwxrwx. 1 root root 13 Oct 21 17:02 runlevel6.target -> reboot.target
<<<
* default.target symlinked to the desired default runlevel target
<<<
[root@www system]# ll default.target
lrwxrwxrwx. 1 root root 16 Oct 21 17:02 default.target -> graphical.target
<<<

| !~SysVinit Runlevel | !Systemd Target | !Description |
| 0 |runlevel0.target, poweroff.target|Halt the system|
| 1, s |runlevel1.target, rescue.target|Single user mode|
| 2, 4 |runlevel2.target, runlevel4.target, multi-user.target|User-defined/Site-specific runlevels. By default, identical to 3|
| 3 |runlevel3.target, multi-user.target|Multi-user, non-graphical. Users can usually login via multiple consoles or via the network|
| 5 |runlevel5.target, graphical.target|Multi-user, graphical. Usually has all the services of runlevel 3 plus a graphical login|
| 6 |runlevel6.target, reboot.target|Reboot|
| emergency |emergency.target|Emergency shell|


!!! Examining service configuration files

cat /lib/systemd/system/sshd.service
{{{
[Unit]
Description=OpenSSH server daemon
After=network.target sshd-keygen.service
Wants=sshd-keygen.service

[Service]
EnvironmentFile=/etc/sysconfig/sshd
ExecStart=/usr/sbin/sshd -D $OPTIONS
ExecReload=/bin/kill -HUP $MAINPID
KillMode=process
Restart=on-failure
RestartSec=42s

[Install]
WantedBy=multi-user.target
}}}
 - After: What this service depends on
 - Wants:  Additional units tied to this service
 - ~EnvironmentFile - Location to store environment variables or options to startup / shutdown commands
 - ~WantedBy: Runlevel target this service is associated with

Display services wanted by a runlevel target: {{Command{ systemctl show &#45;-property "Wants" multi-user.target }}}
Display services required by a runlevel target: {{Command{ systemctl show &#45;-property "Requires" multi-user.target }}}
Display services that want a particular child service: {{Command{ systemctl show &#45;-property "~WantedBy" sshd-keygen.service }}}

!!! Starting and Stopping

Example commands to start, stop, restart, and check the status of a service:

- {{Command{systemctl start firewalld.service}}}
- {{Command{systemctl stop firewalld.service}}}
- {{Command{systemctl status firewalld.service}}}
- {{Command{systemctl restart firewalld.service}}}

Conditional restart - only restart if it's already running:  {{Command{ systemctl condrestart firewalld.service }}}

Reload a service to re-read configuration files:  {{Command{ systemctl reload sshd.service }}}

Persistent services - Those to start on system boot:
Newly installed services will not be configured automatically to start on system boot.  You will have to start them manually and set them to start on boot.
- Enable a service to start on boot, eg: {{Command{systemctl enable firewalld.service}}}
- Stop a service from starting on boot, eg: {{Command{systemctl disable firewalld.service}}}


!!! Checking status

{{Command{systemctl list-unit-files &#45;-type=service}}}
{{Command{systemctl status firewalld.service}}}
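
You can also ask whether a single service is currently running or set to start on boot; {{Command{is-active}}} and {{Command{is-enabled}}} are standard {{Command{systemctl}}} subcommands (the output shown is illustrative):
{{{
$ systemctl is-active firewalld.service
active
$ systemctl is-enabled firewalld.service
enabled
}}}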


!!! Adding a new service

For example, adding a new service for Apache.  This is only necessary if you installed a service from source code instead of via package management.  If you install software from a package, that package will come with the necessary files for systemd to manage the service.  This is a good reference to see the internals in case something custom needs to be added or modified.

{{File{/etc/systemd/system/httpd.service}}} :
{{{
[Unit]
Description=Apache Webserver
After=network.target

[Service]
Type=forking
EnvironmentFile=/etc/sysconfig/httpd
ExecStart=/opt/work/apache/bin/httpd -k start $OPTIONS
ExecStop=/opt/work/apache/bin/httpd -k graceful-stop $OPTIONS
ExecReload=/opt/work/apache/bin/httpd -k graceful $OPTIONS

Restart=always

[Install]
WantedBy=multi-user.target
}}}
 - {{Command{man systemd.service}}} for more details.

* Create environment file:  {{Command{ touch /etc/sysconfig/httpd }}}
* Refresh service and target configuration files:  {{Command{ systemctl daemon-reload }}}
* Enable startup on boot:  {{Command{systemctl enable httpd.service}}}
** Symlink was created in multi-user.target.wants:  {{Command{ ll /etc/systemd/system/multi-user.target.wants/ }}}
* Start now: {{Command{systemctl start httpd.service}}}
** Review recent logs associated with the service:  {{Command{ journalctl -u httpd.service }}}


!! Single user mode
* How to access systems if problems occur during boot
** Boot from a live CD or recovery mode
** Single user mode
*** Change the grub timeout to 20 seconds so you'll have more time to catch it.  ~VMs often introduce delays accessing the console, so it can be difficult to catch the grub loader if it has a short timeout.
**** Edit the file {{File{/etc/default/grub}}} and add the line {{Monospaced{''~GRUB_TIMEOUT=20''}}} to the bottom of the file
**** Execute {{Command{grub2-mkconfig -o /boot/grub2/grub.cfg}}} to activate the changes
*** See https://www.tecmint.com/boot-into-single-user-mode-in-centos-7/ for instructions to boot in single user mode
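The two grub changes above can also be made from the command line.  A minimal sketch, assuming a RHEL/CentOS style system with grub2 installed in {{File{/boot/grub2/}}}:
{{{
# Append the longer timeout to the grub defaults file
echo 'GRUB_TIMEOUT=20' >> /etc/default/grub

# Regenerate the grub configuration so the change takes effect on the next boot
grub2-mkconfig -o /boot/grub2/grub.cfg
}}}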


Other useful commands:  
* {{Command{shutdown}}} - shutdown / power off the system with many options for doing so
* {{Command{halt}}} & {{Command{poweroff}}}
* {{Command{reboot}}}
<<toggleSideBar "" "Toggle Sidebar" hide>>
<html>
<center>
  <video id="my-video" class="video-js" controls preload="auto" width="858" height="480" poster="" data-setup="{}">
    <source src="video/ssh.mp4" type='video/mp4'>
    <p class="vjs-no-js">
      To view this video please enable JavaScript, and consider upgrading to a web browser that
      <a href="http://videojs.com/html5-video-support/" target="_blank">supports HTML5 video</a>
    </p>
  </video>

  <script src="https://vjs.zencdn.net/7.8.2/video.min.js"></script>
</center>
</html>
!! Requirements

The following procedure must be followed for submitting shell scripting labs. Improperly submitted scripting labs will not be accepted.  

The end goal of this process is to submit a single PDF containing these three components:
&nbsp;&nbsp;''a.'' Original lab assignment sheet as a cover page
&nbsp;&nbsp;''b.'' Your shell scripts
&nbsp;&nbsp;''c.'' A demonstration of your scripts


''1.'' Create the directory ~/bin/. Save all lab shell scripts in this directory with the naming convention ''ci233-lab//xx//-q//yy//.sh'' where ''//xx//'' is the lab number and ''//yy//'' is the question number. It would make things easier for you if you always use two digits for //xx// and //yy//.
I may refer to the script files if I need to execute/test any of your scripts.

''2.'' A proper shebang must be added as the first line of your shell scripts.
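For example, if your scripts are written for bash, the first line would be:
{{{
#!/bin/bash
}}}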

''3.'' The following header must be placed at the top of each script file, immediately after the shebang:
{{{
# File name: 
# Author:
# Date Written:
# Assignment:
# Purpose:
# Description:
#
#
#
}}}

The //Purpose// field should contain a brief, one-line summary of what your script is accomplishing.  The //Description// field should contain more detailed information regarding how it is accomplishing that goal, or any additional information helpful to understand the function of your script.


''4.'' Make use of comments throughout your script to document and convey what you're doing.
Long lines should be wrapped with carriage returns.  Cut long lines at about column 60; this makes them easier to read and print.
* You can escape the newline with a {{Command{''\''}}} to continue a long line of commands on the next line.  For example:
{{{
dig axfr ${user}.ci233.net @ns1.${user}.ci233.net | \
	grep -v ^\;  | sort | md5sum | cut -d " " -f 1
}}}

{{Note{''Note:'' The remaining two steps are for labs which are //only// scripts and do not contain input boxes}}}

''5.'' Use the {{Command{script}}} command to launch a recording shell, saving the output to {{File{~/bin/labxx.raw}}} where //xx// is the lab number. Demonstrate execution of your scripts within this recording shell.
* Execute {{Command{script ~/bin/labxx.raw}}} to start the recording shell, saving output to the filename specified as the first command line argument
* Run your scripts. Everything you type and all output will be recorded in the file {{File{~/bin/labxx.raw}}}.
* Be sure you do not have excessive errors in the recording.  Pressing the backspace key will be recorded as a separate keystroke and make your demonstration harder to read.
* Type {{Command{exit}}} to terminate the recording shell.
* If you examine {{File{~/bin/labxx.raw}}}, you will see that it contains a lot of control characters.  The {{Command{ ansifilter }}} command will remove them.
** {{Command{ ansifilter -o ~/bin/labxx.out ~/bin/labxx.raw }}}
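Putting step 5 together, a sketch of a complete recording session.  The //xx// and //yy// placeholders below still stand for the lab and question numbers:
{{{
script ~/bin/labxx.raw                           # start the recording shell
~/bin/ci233-labxx-qyy.sh                         # run each of your scripts to demonstrate them
exit                                             # terminate the recording shell
ansifilter -o ~/bin/labxx.out ~/bin/labxx.raw    # strip the control characters
}}}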

''6.'' Create a PDF of your scripts to save to the {{File{/opt/pub/ci233/submit/}}} directory:
* The comments below explain what's going on.  
* The first paragraph only explains the {{Command{enscript}}} command.  The second paragraph contains the two commands you'll need to execute to submit your lab.

{{{
# enscript is a great tool for formatting documents about to be printed or saved as a PDF.
# The following command string will gather your labs and the output from the demo of your scripts, apply some 
# document formatting, and display PostScript on STDOUT.
# The -o - option for enscript instructs the command to send its output to STDOUT instead of saving it to a file
enscript -2jr -C -o - ~/bin/ci233-labxx-q??.sh ~/bin/labxx.out

# PostScript is mostly the language of printers and isn't as useful on modern PCs. Instead of working with 
# native PostScript or displaying STDOUT to the screen, let's convert it to PDF and save to a file.
# Caution! Only run this command when you are ready to submit your scripts. 
# *** These are the commands you will execute to submit your scripting labs ***
enscript -2jr -C -o - ~/bin/ci233-labxx-q??.sh ~/bin/labxx.out | ps2pdf - ~/bin/ci233-labxx-username.pdf
# Note: The - in the above ps2pdf command instructs the command to obtain its input from STDIN.
# The next command will combine the lab assignment PDF as a cover page with the PDF you just created containing your scripts, 
# saving the output to the class submit directory.  This is the PDF you are submitting for my review.
cpdf /opt/pub/ci233/labs/labxx.pdf ~/bin/ci233-labxx-username.pdf -o /opt/pub/ci233/submit/ci233-labxx-username.pdf

# Be sure to follow the standard lab naming scheme and change the xx and username to proper values
# The nice thing about using standard naming conventions is it makes everything easy to script. 
# Rather than have to search for these commands for every scripting lab you need to submit, you might as well make a dynamic script out of it.
# (Hint: This will be a future assignment.  It'll be more useful to you if you start working on it now.)
}}}

''7.''  Preview your submitted PDF
> Download the PDF saved to the {{File{submit/}}} directory to check your work.  If you skip this important step and submit a PDF for review that does not contain your scripts, you will either receive no credit for the lab or a late penalty for resubmitting.

{{Note{''Note:'' The video below demonstrates the deprecated {{Command{a2ps}}} command.  The new process instead uses {{Command{enscript}}}, which is a drop-in replacement.  The video has not yet been updated to reflect this change.}}}

<html>
<center>
  <video id="my-video" class="video-js" controls preload="auto" width="1000" height="662" poster="" data-setup="{}">
    <source src="video/scripts.mp4" type='video/mp4'>
    <p class="vjs-no-js">
      To view this video please enable JavaScript, and consider upgrading to a web browser that
      <a href="http://videojs.com/html5-video-support/" target="_blank">supports HTML5 video</a>
    </p>
  </video>

  <script src="https://vjs.zencdn.net/7.8.2/video.min.js"></script>
</center>
</html>
*Shell scripting quick reference:  http://www.math.uga.edu/~caner/08reu/shell.html
*Awk one liners:  http://www.catonmat.net/blog/wp-content/uploads/2008/09/awk1line.txt
*Sed one liners:  http://www.catonmat.net/blog/wp-content/uploads/2008/09/sed1line.txt
<<search>><<closeAll>><<collapseAll>><<expandAll>><<permaview>><<newTiddler>><<saveChanges>><<slider chkSliderOptionsPanel OptionsPanel "options »" "Change TiddlyWiki advanced options">><<slider chkSliderContents [[TabContents]] 'contents »' 'contents'>>
/*{{{*/

#sidebar {
 color: #000;
 background: transparent;
}

#sidebarOptions {
 background: #fff;
}

#sidebarOptions .button {
 color: #999;
}

#sidebarOptions .button:hover {
 color: #000;
 background: #fff;
 border-color:white;
}

#sidebarOptions .button:active {
 color: #000;
 background: #fff;
}

#sidebarOptions .sliderPanel {
 background: transparent;
}

#sidebarOptions .sliderPanel A:hover {
 color: #000;
 background: #fff;
}

#sidebarOptions .sliderPanel A:active {
 color: #000;
 background: #fff;
}

.sidebarSubHeading {
 color: #000;
}

#sidebarOptions .sliderPanel .tabSelected{
  border: 1px solid #ccc;
  background-color: #fff;
  margin: 0px;
  padding-top: 5px;
  padding-bottom: 0px;
  padding-left: 2px;
  padding-right: 2px;
  -moz-border-radius-topleft: 1em;
  -moz-border-radius-topright: 1em;}

#sidebarOptions .sliderPanel .tabUnselected{
  border:    1px solid #ccc;
  background-color: #eee;
  margin: 0px;
  padding-top: 5px;
  padding-bottom: 0px;
  padding-left: 2px;
  padding-right: 2px;
  -moz-border-radius-topleft: 1em;
  -moz-border-radius-topright: 1em;}

#sidebarTabs .tabContents .tiddlyLink:hover {
 background: #fff;
 color: #000;
}

#sidebarTabs .tabContents {
 color: #000;
}

#sidebarTabs .button {
 color: #666;
  border-top:    1px solid #ccc;
  border-left:   1px solid #ccc;
  border-bottom: 2px solid #ccc;
  border-right:  2px solid #ccc;
}

#sidebarTabs .tabContents .button:hover {
 color: #000;
 background: #fff;
}

.tagging, .tagged {
  padding: 0.5em;
  background-color: #eee;
  border-top:    1px solid #ccc;
  border-left:   1px solid #ccc;
  border-bottom: 3px solid #ccc;
  border-right:  3px solid #ccc;
  -moz-border-radius: 1em; }


/*}}}*/

Fall 2024 Course Notes
UNIX Administration & Security - MVCC CI 233
! Material

* Read Chapter 6 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]].
** Watch Linux Sysadmin Basics 04 -- Shell Features -- Pipes and Redirection - https://www.youtube.com/watch?v=-Z5tCri-QlI

* Read Chapter 20 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]].  Stop with {{Command{tr}}} at the bottom of page 299.
** Focus on the filters listed below
** We won't be working with paste, join, patch or aspell.  You can skip over these commands if you'd like.
** Save tr and sed for later.  They're too complex for right now.


Please make use of the discussion boards if you run into any trouble with these commands, especially next week when we need to start combining them in more complex ways.

Most shell metacharacters (the symbol keys on your keyboard) have a special meaning.  It may be helpful to compile a list of them in this [[Shell Metacharacter Table|handouts/ShellMetacharacterTable.pdf]] as they are introduced, with an explanation and example usage for each.


! Notes

A lot of the power of the Unix environment comes from single purpose commands. The filter commands we are about to introduce are great examples. By combining these single-purpose commands we can build flexible and customized solutions to solve a wide range of problems.

By default, output from a command such as our text filters is displayed to the screen. By redirecting where that output is sent, we can chain commands together to creatively solve more complex problems.

Redirecting standard I/O is how we move data between filters and files. The following diagram illustrates our options:
[>img[img/stdout.png]]

This video explains the basics: https://www.youtube.com/watch?v=-Z5tCri-QlI

!! Standard input and standard output:
* Every filter should be able to accept input from any source and write output to any target
* Input can come from the keyboard, from another file, or from the output of another program
* Output can be displayed to the screen, can be saved to a file, or sent as input to another program
* This gives us great flexibility when combined with the simple approach to our tools

The standard source, Standard Input, is commonly abbreviated as STDIN.  The two output destinations, Standard Output and Standard Error, are commonly abbreviated as STDOUT and STDERR, respectively.  Collectively, all three are abbreviated as STDIO.

{{Warning{''Warning:'' Not every utility will accept input on STDIN, and not every utility will send output to STDOUT!  It is important to keep this in mind.  Generally, most system utilities such as ls, mkdir, and cp do not accept input on STDIN, and only some of them will send output to STDOUT.  All tools which manipulate text (text filters) will utilize both STDIN and STDOUT.}}}


!! Redirection - moving input or output between a command and a file

We have new shell metacharacters to assist with the management of input and output:
* {{Monospaced{>}}} : Redirect output - Send a command's output to a file, overwriting existing contents
** {{Command{ users > userlist }}}
** {{Command{ who > loggedin }}}
* {{Monospaced{>>}}} : Redirect output - Send a command's output to a file, appending to existing data
** {{Command{ who >> loggedin }}}
** {{Command{ (date ; who) >> loggedin }}}
* {{Monospaced{ < }}} : Redirect input - Take a command's input from a file
** {{Command{ tr  ' '  ,  <  userlist }}}
* Disable output by redirecting it to {{File{/dev/null}}}, the unix garbage can
** {{Command{ make > /dev/null }}}


!! Standard Error (STDERR)

Some commands use a separate data stream, STDERR, for displaying any error or diagnostic output.  Having this extra output on a separate stream allows us to handle it differently.  We can send STDOUT to one destination and STDERR to another. 

We can prefix the output redirection symbols (''>'' or ''>>'') with a ''2'' (the STDERR file descriptor) to send STDERR to a different destination.  To send STDERR down a pipe, merge it into STDOUT first with ''2>&1''.

For example, notice how the error message from the second command is discarded:

{{{
[root@shell ci233]# id username
id: username: no such user

[root@shell ci233]# id username 2> /dev/null

[root@shell ci233]# id nmerante 2> /dev/null
uid=7289(nmerante) gid=100(users) groups=100(users),233(ci233)

[root@shell ci233]# id nmerante 2> /dev/null 1> /dev/null
}}}
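Since STDOUT and STDERR are separate streams, they can be sent to separate destinations or merged back together.  A small sketch using {{Command{find}}}, which prints a permission error on STDERR whenever it cannot read a directory:
{{{
# Results go to one file, error messages go to another
find /etc -name '*.conf' > found.txt 2> errors.txt

# Merge STDERR into STDOUT so both streams end up in the same file
find /etc -name '*.conf' > all.txt 2>&1
}}}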


!! Command Execution
* Chaining commands (Pipelines):
** Workflows can be completed as a pipeline of simple tools
** Glue multiple commands together to perform complex tasks out of simple tools
** Send STDOUT of one command as STDIN to another with the  |  (pipe)  symbol
** First command must be able to send output to STDOUT and second command must be able to read input from STDIN
** Examples:
*** {{Command{ who | sort | less }}}
*** {{Command{ who | wc -l }}}
*** {{Command{ last | cut -d ' ' -f 1 | sort | uniq }}}
*** ''Does not work!  See yellow box above.'':  {{Command{ ls * | rm }}}
**** File manipulation utilities like rm do not work with STDIN and STDOUT
* Send to STDOUT and save to a file with the {{Command{tee}}} command
** {{Command{ df | grep mapper | tee fs }}}
** {{Command{ df | tee fs | grep mapper }}}
*Sequenced commands:  {{Command{ command1 ; command2 }}}
**No direct relationship between the commands
**Do not share input or output.  Simply combined together on the same line
** {{Command{ echo Today is `date` > Feb ; cal >> Feb }}}
*Grouped commands: {{Command{ (command1 ; command2) }}}
** {{Command{ (echo Today is `date` ; cal ) > Feb }}}
** Run in a sub-shell - Launch commands in a new shell (any new settings or shell variables are not sent back to parent shell)
*** Observe the current directory after running this command sequence: {{Command{ ( cd / ; ls ) ; pwd }}}


!! Chaining Commands with text filters:

Build flexible and customized solutions to solve a wide range of problems.
Unix filter tools are very useful for manipulating data.
Filter definition:  any command that takes input one line at a time from STDIN, manipulates the input, and sends the result to STDOUT.
To most effectively solve a problem, you must know the available tools.  Know the commands and be familiar with the options available.

When working with the filters to solve problems:
* Break the problem down into small parts
* Choose your tools
* Experiment
* Perfect and simplify your solution
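As a small worked example of this approach, suppose we want to know which login shells are most common on the system.  Break the problem into parts, pick a filter for each, and chain them together (any delimited text file would work the same way):
{{{
# Field 7 of /etc/passwd is the login shell
cut -d : -f 7 /etc/passwd | sort | uniq -c | sort -rn | head -5
}}}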


!!! Core Filters:
* {{Command{cat}}} - concatenate one or multiple files
** {{Monospaced{-n}}} option - numbered lines
** create text files by redirecting output to a file
* {{Command{head}}} - display lines from the beginning of a file
** {{Monospaced{-n}}} - display first //n// lines
* {{Command{tail}}} - display lines from the end of a file
** {{Monospaced{-n}}} - display last //n// lines
** {{Monospaced{+n}}} - Begin display at line //n// 
** {{Monospaced{-f}}}  - do not stop at EOF; continue displaying new lines as they are appended.
* {{Command{grep}}} - pattern matching : //pattern// //files(s)//
** {{Command{grep //pattern// file1 file2 file3}}}
*** Example: {{Command{grep dog //file(s)//}}}
*** {{Command{w | grep ^d}}}
** {{Command{//command1// | grep //pattern//}}}
** Anchors: 
*** {{Monospaced{^}}} = begins with
*** {{Monospaced{$}}} = end with
**Useful options:
*** {{Monospaced{-v}}} : Invert the match
*** {{Monospaced{-i}}} : Case insensitive
*** {{Monospaced{-l}}} : list only file names
*** {{Monospaced{-H}}} : list file name with matched pattern
**Examples:
*** {{Command{grep -v '^$' /etc/printcap}}}
*** {{Command{ls -l | grep ^d}}}
*** {{Command{grep init /etc/rc*}}}
*** {{Command{cp `grep -l init /etc/rc*` scripts/}}}
*** words containing the string //book//
*** lines containing dog at the end of the line
* {{Command{sort}}} - sort lines of text files
**sort passwd file
**Options:  
*** {{Monospaced{-n}}} : Numeric
*** {{Monospaced{-r}}} : Reverse
*** {{Monospaced{-k}}} : sort on field #
*** {{Monospaced{-t}}} : Specify delimiter (default whitespace)
** Examples:
*** {{Command{sort  /etc/passwd}}}
*** {{Command{sort -t : -k 5 /etc/passwd}}}
*** {{Command{sort -n -t : -k 3 /etc/passwd}}}
* {{Command{uniq}}}  - filter out repeated lines in a file
**Input must be sorted before uniq can filter out repeated values
**{{Monospaced{-c}}} : Count number of matches
* {{Command{wc}}} - word, line, character, and byte count
** {{Monospaced{-w}}} = word count
** {{Monospaced{-l}}} = line count
* {{Command{cut}}} - cut out selected portions of each line of a file, either range of characters or delimited columns
** Two main usage options: 
*** By delimited columns:
**** {{Monospaced{-d}}} : Specifies the delimiter (defaults to tab)
**** {{Monospaced{-f}}} : Specifies the field(s)
*** Range of characters:
**** {{Monospaced{-c}}} : Extract character ranges
** Examples: 
*** Extract fields 2 through 3 from the file data.txt, delimited by a semicolon: {{Command{cut -d ';' -f 2-3 data.txt}}}
*** Extract characters 65 through end of line from the ~Fail2Ban log:  {{Command{cut -c 65- fail2ban.log}}}
* {{Command{strings}}} - Searching for strings in binary files
*Compare files
** {{Command{cmp}}} - compare two files
** {{Command{diff}}} - compare files line by line
** {{Command{comm}}} - select or reject lines common to two files
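Once you are comfortable with the individual filters above, try combining a few of them.  Two small sketches (file1 and file2 are placeholders for any two text files):
{{{
# Count the non-comment, non-blank lines in /etc/services
grep -v '^#' /etc/services | grep -v '^$' | wc -l

# comm expects sorted input; show only the lines two files have in common
sort file1 > file1.sorted ; sort file2 > file2.sorted
comm -12 file1.sorted file2.sorted
}}}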
[[HorizontalMainMenuStyles]]
[[SideBarStyles]]
[[TagglyTaggingStyles]]

/*{{{*/

body {
  background: #eee; }

h1 {font-size:2.0em; }
h2 { color: #000; background: transparent; text-decoration: underline; }
h3 { margin: 0.0em; color: #000; background: transparent; }
h4,h5 { color: #000; background: transparent; }

h1 {
        margin: 4px 0 4px 0;
	padding: 5px;
	color: [[ColorPalette::PrimaryDark]];
	background: [[ColorPalette::PrimaryPale]];
}

ul {
	margin-top: 0;
	margin-bottom: 0;
}

.headerShadow {
  padding: 1.0em; }

.headerForeground {
  padding: 1.0em; }

.selected .tagging, .selected .tagged {
  padding: 0.5em;
  background-color: #eee;
  border-top:    1px solid #ccc;
  border-left:   1px solid #ccc;
  border-bottom: 3px solid #ccc;
  border-right:  3px solid #ccc;
  -moz-border-radius: 1em; }

.shadow .title {
  color: #999; }

.siteTitle {
  font-size: 2.5em; }

.siteSubtitle {
  font-size: 1.0em; }

.subtitle {
	font-size: 0.8em;
}

.tagging, .tagged {
  padding: 0.5em;
  background-color: #eee;
  border-top:    1px solid #ccc;
  border-left:   1px solid #ccc;
  border-bottom: 3px solid #ccc;
  border-right:  3px solid #ccc;
  -moz-border-radius: 1em; }

.tiddler {
  border-top:    1px solid #ccc;
  border-left:   1px solid #ccc;
  border-bottom: 3px solid #ccc;
  border-right:  3px solid #ccc;
  margin: 0.5em;
  background:#fff;
  padding: 0.5em;
  -moz-border-radius: 1em; }

.title {
  color:black;
  font-size: 1.5em; }


.tabSelected{
  padding-top: 0.0em;
  padding-left: 0.5em;
  padding-right: 0.5em;
  -moz-border-radius-topleft: 0.5em;
  -moz-border-radius-topright: 0.5em;}

.tabUnselected {
  padding-top: 0.0em;
  padding-left: 0.5em;
  padding-right: 0.5em;
  -moz-border-radius-topleft: 0.5em;
  -moz-border-radius-topright: 0.5em;}

.tabContents {
  margin: 0px;
  padding-top: 0px;
  padding-bottom: 0px;
  padding-left: 2px;
  padding-right: 2px;
  -moz-border-radius: 1em; }

.viewer .listTitle {
  list-style-type: none;
}

.viewer pre {
  background-color: #f8f8ff;
  border-color: #ddf; }

#messageArea { background-color:#bde; border-color:#8ab; border-width:4px; border-style:dotted; font-size:90%; }
#messageArea .button { text-decoration:none; font-weight:bold; background:transparent; border:0px; }
#messageArea .button:hover {background: #acd;}
/*}}}*/

/*{{{*/
.Command{color: fuchsia;font-size: 10pt;font-family: Courier, monospace;margin-left: 2px;margin-right: 2px;}
.Commandi{color: fuchsia;font-size: 10pt;font-family: Courier, monospace;margin-left: 20px;margin-right: 2px;}
.File{color: #4c7fbc;font-size: 10pt;font-family: Courier, monospace;margin-left: 2px;margin-right: 2px; font-weight:bold;}
.Remove{background-color: orange}
.Host{color: #0f9791;font-size: 10pt;font-family: Courier, monospace;margin-left: 2px;margin-right: 2px; font-weight:bold;}
.Note{display:block;background-color:#e9ffdb;border:1px solid darkgreen;margin: 0 2em 0 2em;padding:5px 5px 5px 5px;}
.Warning{display:block;background-color:#ffee88; border:2px solid darkorange;margin: 0 2em 0 2em;padding:5px 5px 5px 5px;}
.Monospaced{font-size: 10pt;font-family: Courier, monospace;margin-left: 2px;margin-right: 2px;}
/*}}}*/

 .HideSideBarButton {margin-left: 3em;}

.viewer div.centeredTable {
	text-align: center;
}

.viewer div.centeredTable table {
	margin: 0 auto;
	text-align: left;
}

.viewer table.borderless,
.viewer table.borderless * {
	border: 0;
}
/*{{{*/
body {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}

a {color:[[ColorPalette::PrimaryMid]];}
a:hover {background-color:[[ColorPalette::PrimaryMid]]; color:[[ColorPalette::Background]];}
a img {border:0;}

h1,h2,h3,h4,h5,h6 {color:[[ColorPalette::SecondaryDark]]; background:transparent;}
h1 {border-bottom:2px solid [[ColorPalette::PrimaryLight]];}
h2,h3 {border-bottom:1px solid [[ColorPalette::TertiaryLight]];}

.button {color:[[ColorPalette::PrimaryDark]]; border:1px solid [[ColorPalette::Background]];}
.button:hover {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::SecondaryLight]]; border-color:[[ColorPalette::SecondaryMid]];}
.button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::SecondaryDark]];}

.header {background:[[ColorPalette::PrimaryMid]];}
.headerShadow {color:[[ColorPalette::Foreground]];}
.headerShadow a {font-weight:normal; color:[[ColorPalette::Foreground]];}
.headerForeground {color:[[ColorPalette::Background]];}
.headerForeground a {font-weight:normal; color:[[ColorPalette::PrimaryPale]];}

.tabSelected{color:[[ColorPalette::PrimaryDark]];
	background:[[ColorPalette::TertiaryPale]];
	border-left:1px solid [[ColorPalette::TertiaryLight]];
	border-top:1px solid [[ColorPalette::TertiaryLight]];
	border-right:1px solid [[ColorPalette::TertiaryLight]];
}
.tabUnselected {color:[[ColorPalette::Background]]; background:[[ColorPalette::TertiaryMid]];}
.tabContents {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::TertiaryPale]]; border:1px solid [[ColorPalette::TertiaryLight]];}
.tabContents .button {border:0;}

#sidebar {}
#sidebarOptions input {border:1px solid [[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel {background:[[ColorPalette::PrimaryPale]];}
#sidebarOptions .sliderPanel a {border:none;color:[[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel a:hover {color:[[ColorPalette::Background]]; background:[[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel a:active {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::Background]];}

.wizard {background:[[ColorPalette::PrimaryPale]]; border:1px solid [[ColorPalette::PrimaryMid]];}
.wizard h1 {color:[[ColorPalette::PrimaryDark]]; border:none;}
.wizard h2 {color:[[ColorPalette::Foreground]]; border:none;}
.wizardStep {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];
	border:1px solid [[ColorPalette::PrimaryMid]];}
.wizardStep.wizardStepDone {background:[[ColorPalette::TertiaryLight]];}
.wizardFooter {background:[[ColorPalette::PrimaryPale]];}
.wizardFooter .status {background:[[ColorPalette::PrimaryDark]]; color:[[ColorPalette::Background]];}
.wizard .button {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryLight]]; border: 1px solid;
	border-color:[[ColorPalette::SecondaryPale]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryPale]];}
.wizard .button:hover {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Background]];}
.wizard .button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::Foreground]]; border: 1px solid;
	border-color:[[ColorPalette::PrimaryDark]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryDark]];}

.wizard .notChanged {background:transparent;}
.wizard .changedLocally {background:#80ff80;}
.wizard .changedServer {background:#8080ff;}
.wizard .changedBoth {background:#ff8080;}
.wizard .notFound {background:#ffff80;}
.wizard .putToServer {background:#ff80ff;}
.wizard .gotFromServer {background:#80ffff;}

#messageArea {border:1px solid [[ColorPalette::SecondaryMid]]; background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]];}
#messageArea .button {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::SecondaryPale]]; border:none;}

.popupTiddler {background:[[ColorPalette::TertiaryPale]]; border:2px solid [[ColorPalette::TertiaryMid]];}

.popup {background:[[ColorPalette::TertiaryPale]]; color:[[ColorPalette::TertiaryDark]]; border-left:1px solid [[ColorPalette::TertiaryMid]]; border-top:1px solid [[ColorPalette::TertiaryMid]]; border-right:2px solid [[ColorPalette::TertiaryDark]]; border-bottom:2px solid [[ColorPalette::TertiaryDark]];}
.popup hr {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::PrimaryDark]]; border-bottom:1px;}
.popup li.disabled {color:[[ColorPalette::TertiaryMid]];}
.popup li a, .popup li a:visited {color:[[ColorPalette::Foreground]]; border: none;}
.popup li a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border: none;}
.popup li a:active {background:[[ColorPalette::SecondaryPale]]; color:[[ColorPalette::Foreground]]; border: none;}
.popupHighlight {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
.listBreak div {border-bottom:1px solid [[ColorPalette::TertiaryDark]];}

.tiddler .defaultCommand {font-weight:bold;}

.shadow .title {color:[[ColorPalette::TertiaryDark]];}

.title {color:[[ColorPalette::SecondaryDark]];}
.subtitle {color:[[ColorPalette::TertiaryDark]];}

.toolbar {color:[[ColorPalette::PrimaryMid]];}
.toolbar a {color:[[ColorPalette::TertiaryLight]];}
.selected .toolbar a {color:[[ColorPalette::TertiaryMid]];}
.selected .toolbar a:hover {color:[[ColorPalette::Foreground]];}

.tagging, .tagged {border:1px solid [[ColorPalette::TertiaryPale]]; background-color:[[ColorPalette::TertiaryPale]];}
.selected .tagging, .selected .tagged {background-color:[[ColorPalette::TertiaryLight]]; border:1px solid [[ColorPalette::TertiaryMid]];}
.tagging .listTitle, .tagged .listTitle {color:[[ColorPalette::PrimaryDark]];}
.tagging .button, .tagged .button {border:none;}

.footer {color:[[ColorPalette::TertiaryLight]];}
.selected .footer {color:[[ColorPalette::TertiaryMid]];}

.sparkline {background:[[ColorPalette::PrimaryPale]]; border:0;}
.sparktick {background:[[ColorPalette::PrimaryDark]];}

.error, .errorButton {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Error]];}
.warning {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryPale]];}
.lowlight {background:[[ColorPalette::TertiaryLight]];}

.zoomer {background:none; color:[[ColorPalette::TertiaryMid]]; border:3px solid [[ColorPalette::TertiaryMid]];}

.imageLink, #displayArea .imageLink {background:transparent;}

.annotation {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border:2px solid [[ColorPalette::SecondaryMid]];}

.viewer .listTitle {list-style-type:none; margin-left:-2em;}
.viewer .button {border:1px solid [[ColorPalette::SecondaryMid]];}
.viewer blockquote {border-left:3px solid [[ColorPalette::TertiaryDark]];}

.viewer table, table.twtable {border:2px solid [[ColorPalette::TertiaryDark]];}
.viewer th, .viewer thead td, .twtable th, .twtable thead td {background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::Background]];}
.viewer td, .viewer tr, .twtable td, .twtable tr {border:1px solid [[ColorPalette::TertiaryDark]];}

.viewer pre {border:1px solid [[ColorPalette::SecondaryLight]]; background:[[ColorPalette::SecondaryPale]];}
.viewer code {color:[[ColorPalette::SecondaryDark]];}
.viewer hr {border:0; border-top:dashed 1px [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::TertiaryDark]];}

.highlight, .marked {background:[[ColorPalette::SecondaryLight]];}

.editor input {border:1px solid [[ColorPalette::PrimaryMid]];}
.editor textarea {border:1px solid [[ColorPalette::PrimaryMid]]; width:100%;}
.editorFooter {color:[[ColorPalette::TertiaryMid]];}

#backstageArea {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::TertiaryMid]];}
#backstageArea a {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
#backstageArea a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; }
#backstageArea a.backstageSelTab {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
#backstageButton a {background:none; color:[[ColorPalette::Background]]; border:none;}
#backstageButton a:hover {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
#backstagePanel {background:[[ColorPalette::Background]]; border-color: [[ColorPalette::Background]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]];}
.backstagePanelFooter .button {border:none; color:[[ColorPalette::Background]];}
.backstagePanelFooter .button:hover {color:[[ColorPalette::Foreground]];}
#backstageCloak {background:[[ColorPalette::Foreground]]; opacity:0.6; filter:'alpha(opacity:60)';}
/*}}}*/
/*{{{*/
body {
	background: [[ColorPalette::Background]];
	color: [[ColorPalette::Foreground]];
}

a{
	color: [[ColorPalette::PrimaryMid]];
}

a:hover{
	background: [[ColorPalette::PrimaryMid]];
	color: [[ColorPalette::Background]];
}

a img{
	border: 0;
}

h1,h2,h3,h4,h5 {
	color: [[ColorPalette::SecondaryDark]];
	background: [[ColorPalette::PrimaryPale]];
}

.button {
	color: [[ColorPalette::PrimaryDark]];
	border: 1px solid [[ColorPalette::Background]];
}

.button:hover {
	color: [[ColorPalette::PrimaryDark]];
	background: [[ColorPalette::SecondaryLight]];
	border-color: [[ColorPalette::SecondaryMid]];
}

.button:active {
	color: [[ColorPalette::Background]];
	background: [[ColorPalette::SecondaryMid]];
	border: 1px solid [[ColorPalette::SecondaryDark]];
}

.header {
	background: [[ColorPalette::PrimaryMid]];
}

.headerShadow {
	color: [[ColorPalette::Foreground]];
}

.headerShadow a {
	font-weight: normal;
	color: [[ColorPalette::Foreground]];
}

.headerForeground {
	color: [[ColorPalette::Background]];
}

.headerForeground a {
	font-weight: normal;
	color: [[ColorPalette::PrimaryPale]];
}

.tabSelected{
	color: [[ColorPalette::PrimaryDark]];
	background: [[ColorPalette::TertiaryPale]];
	border-left: 1px solid [[ColorPalette::TertiaryLight]];
	border-top: 1px solid [[ColorPalette::TertiaryLight]];
	border-right: 1px solid [[ColorPalette::TertiaryLight]];
}

.tabUnselected {
	color: [[ColorPalette::Background]];
	background: [[ColorPalette::TertiaryMid]];
}

.tabContents {
	color: [[ColorPalette::PrimaryDark]];
	background: [[ColorPalette::TertiaryPale]];
	border: 1px solid [[ColorPalette::TertiaryLight]];
}

.tabContents .button {
	 border: 0;}

#sidebar {
}

#sidebarOptions input {
	border: 1px solid [[ColorPalette::PrimaryMid]];
}

#sidebarOptions .sliderPanel {
	background: [[ColorPalette::PrimaryPale]];
}

#sidebarOptions .sliderPanel a {
	border: none;
	color: [[ColorPalette::PrimaryMid]];
}

#sidebarOptions .sliderPanel a:hover {
	color: [[ColorPalette::Background]];
	background: [[ColorPalette::PrimaryMid]];
}

#sidebarOptions .sliderPanel a:active {
	color: [[ColorPalette::PrimaryMid]];
	background: [[ColorPalette::Background]];
}

.wizard {
	background: [[ColorPalette::SecondaryLight]];
	border-top: 1px solid [[ColorPalette::SecondaryMid]];
	border-left: 1px solid [[ColorPalette::SecondaryMid]];
}

.wizard h1 {
	color: [[ColorPalette::SecondaryDark]];
}

.wizard h2 {
	color: [[ColorPalette::Foreground]];
}

.wizardStep {
	background: [[ColorPalette::Background]];
	border-top: 1px solid [[ColorPalette::SecondaryMid]];
	border-bottom: 1px solid [[ColorPalette::SecondaryMid]];
	border-left: 1px solid [[ColorPalette::SecondaryMid]];
}

.wizard .button {
	color: [[ColorPalette::Background]];
	background: [[ColorPalette::PrimaryMid]];
	border-top: 1px solid [[ColorPalette::PrimaryLight]];
	border-right: 1px solid [[ColorPalette::PrimaryDark]];
	border-bottom: 1px solid [[ColorPalette::PrimaryDark]];
	border-left: 1px solid [[ColorPalette::PrimaryLight]];
}

.wizard .button:hover {
	color: [[ColorPalette::PrimaryLight]];
	background: [[ColorPalette::PrimaryDark]];
	border-color: [[ColorPalette::PrimaryLight]];
}

.wizard .button:active {
	color: [[ColorPalette::Background]];
	background: [[ColorPalette::PrimaryMid]];
	border-top: 1px solid [[ColorPalette::PrimaryLight]];
	border-right: 1px solid [[ColorPalette::PrimaryDark]];
	border-bottom: 1px solid [[ColorPalette::PrimaryDark]];
	border-left: 1px solid [[ColorPalette::PrimaryLight]];
}

#messageArea {
	border: 1px solid [[ColorPalette::SecondaryDark]];
	background: [[ColorPalette::SecondaryMid]];
	color: [[ColorPalette::PrimaryDark]];
}

#messageArea .button {
	padding: 0.2em 0.2em 0.2em 0.2em;
	color: [[ColorPalette::PrimaryDark]];
	background: [[ColorPalette::Background]];
}

.popup {
	background: [[ColorPalette::PrimaryLight]];
	border: 1px solid [[ColorPalette::PrimaryMid]];
}

.popup hr {
	color: [[ColorPalette::PrimaryDark]];
	background: [[ColorPalette::PrimaryDark]];
	border-bottom: 1px;
}

.popup li.disabled {
	color: [[ColorPalette::PrimaryMid]];
}

.popup li a, .popup li a:visited {
	color: [[ColorPalette::TertiaryPale]];
	border: none;
}

.popup li a:hover {
	background: [[ColorPalette::PrimaryDark]];
	color: [[ColorPalette::Background]];
	border: none;
}

.tiddler .defaultCommand {
 font-weight: bold;
}

.shadow .title {
	color: [[ColorPalette::TertiaryDark]];
}

.title {
	color: [[ColorPalette::SecondaryDark]];
}

.subtitle {
	color: [[ColorPalette::TertiaryDark]];
}

.toolbar {
	color: [[ColorPalette::PrimaryMid]];
}

.tagging, .tagged {
	border: 1px solid [[ColorPalette::TertiaryPale]];
	background-color: [[ColorPalette::TertiaryPale]];
}

.selected .tagging, .selected .tagged {
	background-color: [[ColorPalette::TertiaryLight]];
	border: 1px solid [[ColorPalette::TertiaryMid]];
}

.tagging .listTitle, .tagged .listTitle {
	color: [[ColorPalette::PrimaryDark]];
}

.tagging .button, .tagged .button {
		border: none;
}

.footer {
	color: [[ColorPalette::TertiaryLight]];
}

.selected .footer {
	color: [[ColorPalette::TertiaryMid]];
}

.sparkline {
	background: [[ColorPalette::PrimaryPale]];
	border: 0;
}

.sparktick {
	background: [[ColorPalette::PrimaryDark]];
}

.error, .errorButton {
	color: [[ColorPalette::Foreground]];
	background: [[ColorPalette::Error]];
}

.warning {
	color: [[ColorPalette::Foreground]];
	background: [[ColorPalette::SecondaryPale]];
}

.cascade {
	background: [[ColorPalette::TertiaryPale]];
	color: [[ColorPalette::TertiaryMid]];
	border: 1px solid [[ColorPalette::TertiaryMid]];
}

.imageLink, #displayArea .imageLink {
	background: transparent;
}

.viewer .listTitle {list-style-type: none; margin-left: -2em;}

.viewer .button {
	border: 1px solid [[ColorPalette::SecondaryMid]];
}

.viewer blockquote {
	border-left: 3px solid [[ColorPalette::TertiaryDark]];
}

.viewer table {
	border: 2px solid [[ColorPalette::TertiaryDark]];
}

.viewer th, thead td {
	background: [[ColorPalette::SecondaryMid]];
	border: 1px solid [[ColorPalette::TertiaryDark]];
	color: [[ColorPalette::Background]];
}

.viewer td, .viewer tr {
	border: 1px solid [[ColorPalette::TertiaryDark]];
}

.viewer pre {
	border: 1px solid [[ColorPalette::SecondaryLight]];
	background: [[ColorPalette::SecondaryPale]];
}

.viewer code {
	color: [[ColorPalette::SecondaryDark]];
}

.viewer hr {
	border: 0;
	border-top: dashed 1px [[ColorPalette::TertiaryDark]];
	color: [[ColorPalette::TertiaryDark]];
}

.highlight, .marked {
	background: [[ColorPalette::SecondaryLight]];
}

.editor input {
	border: 1px solid [[ColorPalette::PrimaryMid]];
}

.editor textarea {
	border: 1px solid [[ColorPalette::PrimaryMid]];
	width: 100%;
}

.editorFooter {
	color: [[ColorPalette::TertiaryMid]];
}

/*}}}*/
/*{{{*/
* html .tiddler {height:1%;}

body {font-size:.75em; font-family:arial,helvetica; margin:0; padding:0;}

h1,h2,h3,h4,h5,h6 {font-weight:bold; text-decoration:none;}
h1,h2,h3 {padding-bottom:1px; margin-top:1.2em;margin-bottom:0.3em;}
h4,h5,h6 {margin-top:1em;}
h1 {font-size:1.35em;}
h2 {font-size:1.25em;}
h3 {font-size:1.1em;}
h4 {font-size:1em;}
h5 {font-size:.9em;}

hr {height:1px;}

a {text-decoration:none;}

dt {font-weight:bold;}

ol {list-style-type:decimal;}
ol ol {list-style-type:lower-alpha;}
ol ol ol {list-style-type:lower-roman;}
ol ol ol ol {list-style-type:decimal;}
ol ol ol ol ol {list-style-type:lower-alpha;}
ol ol ol ol ol ol {list-style-type:lower-roman;}
ol ol ol ol ol ol ol {list-style-type:decimal;}

.txtOptionInput {width:11em;}

#contentWrapper .chkOptionInput {border:0;}

.externalLink {text-decoration:underline;}

.indent {margin-left:3em;}
.outdent {margin-left:3em; text-indent:-3em;}
code.escaped {white-space:nowrap;}

.tiddlyLinkExisting {font-weight:bold;}
.tiddlyLinkNonExisting {font-style:italic;}

/* the 'a' is required for IE, otherwise it renders the whole tiddler in bold */
a.tiddlyLinkNonExisting.shadow {font-weight:bold;}

#mainMenu .tiddlyLinkExisting,
	#mainMenu .tiddlyLinkNonExisting,
	#sidebarTabs .tiddlyLinkNonExisting {font-weight:normal; font-style:normal;}
#sidebarTabs .tiddlyLinkExisting {font-weight:bold; font-style:normal;}

.header {position:relative;}
.header a:hover {background:transparent;}
.headerShadow {position:relative; padding:4.5em 0em 1em 1em; left:-1px; top:-1px;}
.headerForeground {position:absolute; padding:4.5em 0em 1em 1em; left:0px; top:0px;}

.siteTitle {font-size:3em;}
.siteSubtitle {font-size:1.2em;}

#mainMenu {position:absolute; left:0; width:10em; text-align:right; line-height:1.6em; padding:1.5em 0.5em 0.5em 0.5em; font-size:1.1em;}

#sidebar {position:absolute; right:3px; width:16em; font-size:.9em;}
#sidebarOptions {padding-top:0.3em;}
#sidebarOptions a {margin:0em 0.2em; padding:0.2em 0.3em; display:block;}
#sidebarOptions input {margin:0.4em 0.5em;}
#sidebarOptions .sliderPanel {margin-left:1em; padding:0.5em; font-size:.85em;}
#sidebarOptions .sliderPanel a {font-weight:bold; display:inline; padding:0;}
#sidebarOptions .sliderPanel input {margin:0 0 .3em 0;}
#sidebarTabs .tabContents {width:15em; overflow:hidden;}

.wizard {padding:0.1em 1em 0em 2em;}
.wizard h1 {font-size:2em; font-weight:bold; background:none; padding:0em 0em 0em 0em; margin:0.4em 0em 0.2em 0em;}
.wizard h2 {font-size:1.2em; font-weight:bold; background:none; padding:0em 0em 0em 0em; margin:0.4em 0em 0.2em 0em;}
.wizardStep {padding:1em 1em 1em 1em;}
.wizard .button {margin:0.5em 0em 0em 0em; font-size:1.2em;}
.wizardFooter {padding:0.8em 0.4em 0.8em 0em;}
.wizardFooter .status {padding:0em 0.4em 0em 0.4em; margin-left:1em;}
.wizard .button {padding:0.1em 0.2em 0.1em 0.2em;}

#messageArea {position:fixed; top:2em; right:0em; margin:0.5em; padding:0.5em; z-index:2000; _position:absolute;}
.messageToolbar {display:block; text-align:right; padding:0.2em 0.2em 0.2em 0.2em;}
#messageArea a {text-decoration:underline;}

.tiddlerPopupButton {padding:0.2em 0.2em 0.2em 0.2em;}
.popupTiddler {position: absolute; z-index:300; padding:1em 1em 1em 1em; margin:0;}

.popup {position:absolute; z-index:300; font-size:.9em; padding:0; list-style:none; margin:0;}
.popup .popupMessage {padding:0.4em;}
.popup hr {display:block; height:1px; width:auto; padding:0; margin:0.2em 0em;}
.popup li.disabled {padding:0.4em;}
.popup li a {display:block; padding:0.4em; font-weight:normal; cursor:pointer;}
.listBreak {font-size:1px; line-height:1px;}
.listBreak div {margin:2px 0;}

.tabset {padding:1em 0em 0em 0.5em;}
.tab {margin:0em 0em 0em 0.25em; padding:2px;}
.tabContents {padding:0.5em;}
.tabContents ul, .tabContents ol {margin:0; padding:0;}
.txtMainTab .tabContents li {list-style:none;}
.tabContents li.listLink { margin-left:.75em;}

#contentWrapper {display:block;}
#splashScreen {display:none;}

#displayArea {margin:1em 17em 0em 14em;}

.toolbar {text-align:right; font-size:.9em;}

.tiddler {padding:1em 1em 0em 1em;}

.missing .viewer,.missing .title {font-style:italic;}

.title {font-size:1.6em; font-weight:bold;}

.missing .subtitle {display:none;}
.subtitle {font-size:1.1em;}

.tiddler .button {padding:0.2em 0.4em;}

.tagging {margin:0.5em 0.5em 0.5em 0; float:left; display:none;}
.isTag .tagging {display:block;}
.tagged {margin:0.5em; float:right;}
.tagging, .tagged {font-size:0.9em; padding:0.25em;}
.tagging ul, .tagged ul {list-style:none; margin:0.25em; padding:0;}
.tagClear {clear:both;}

.footer {font-size:.9em;}
.footer li {display:inline;}

.annotation {padding:0.5em; margin:0.5em;}

* html .viewer pre {width:99%; padding:0 0 1em 0;}
.viewer {line-height:1.4em; padding-top:0.5em;}
.viewer .button {margin:0em 0.25em; padding:0em 0.25em;}
.viewer blockquote {line-height:1.5em; padding-left:0.8em;margin-left:2.5em;}
.viewer ul, .viewer ol {margin-left:0.5em; padding-left:1.5em;}

.viewer table, table.twtable {border-collapse:collapse; margin:0.8em 1.0em;}
.viewer th, .viewer td, .viewer tr,.viewer caption,.twtable th, .twtable td, .twtable tr,.twtable caption {padding:3px;}
table.listView {font-size:0.85em; margin:0.8em 1.0em;}
table.listView th, table.listView td, table.listView tr {padding:0px 3px 0px 3px;}

.viewer pre {padding:0.5em; margin-left:0.5em; font-size:1.2em; line-height:1.4em; overflow:auto;}
.viewer code {font-size:1.2em; line-height:1.4em;}

.editor {font-size:1.1em;}
.editor input, .editor textarea {display:block; width:100%; font:inherit;}
.editorFooter {padding:0.25em 0em; font-size:.9em;}
.editorFooter .button {padding-top:0px; padding-bottom:0px;}

.fieldsetFix {border:0; padding:0; margin:1px 0px 1px 0px;}

.sparkline {line-height:1em;}
.sparktick {outline:0;}

.zoomer {font-size:1.1em; position:absolute; overflow:hidden;}
.zoomer div {padding:1em;}

* html #backstage {width:99%;}
* html #backstageArea {width:99%;}
#backstageArea {display:none; position:relative; overflow: hidden; z-index:150; padding:0.3em 0.5em 0.3em 0.5em;}
#backstageToolbar {position:relative;}
#backstageArea a {font-weight:bold; margin-left:0.5em; padding:0.3em 0.5em 0.3em 0.5em;}
#backstageButton {display:none; position:absolute; z-index:175; top:0em; right:0em;}
#backstageButton a {padding:0.1em 0.4em 0.1em 0.4em; margin:0.1em 0.1em 0.1em 0.1em;}
#backstage {position:relative; width:100%; z-index:50;}
#backstagePanel {display:none; z-index:100; position:absolute; width:90%; margin:0em 3em 0em 3em; padding:1em 1em 1em 1em;}
.backstagePanelFooter {padding-top:0.2em; float:right;}
.backstagePanelFooter a {padding:0.2em 0.4em 0.2em 0.4em;}
#backstageCloak {display:none; z-index:20; position:absolute; width:100%; height:100px;}

.whenBackstage {display:none;}
.backstageVisible .whenBackstage {display:block;}
/*}}}*/
/***
!Sections in this Tiddler:
*Generic rules
**Links styles
**Link Exceptions
*Header
*Main menu
*Sidebar
**Sidebar options
**Sidebar tabs
*Message area
*Popup
*Tabs
*Tiddler display
**Viewer
**Editor
*Misc. rules
!Generic Rules /%==============================================%/
***/
/*{{{*/
body {
	font-size: .75em;
	font-family: arial,helvetica;
	position: relative;
	margin: 0;
	padding: 0;
}

h1,h2,h3,h4,h5 {
	font-weight: bold;
	text-decoration: none;
	padding-left: 0.4em;
}

h1 {font-size: 1.5em;}
h2 {font-size: 1.25em;}
h3 {font-size: 1.1em;}
h4 {font-size: 1em;}
h5 {font-size: .9em;}

hr {
	height: 1px;
}

a{
	text-decoration: none;
}

ol { list-style-type: decimal }
ol ol { list-style-type: lower-alpha }
ol ol ol { list-style-type: lower-roman }
ol ol ol ol { list-style-type: decimal }
ol ol ol ol ol { list-style-type: lower-alpha }
ol ol ol ol ol ol { list-style-type: lower-roman }
ol ol ol ol ol ol ol { list-style-type: decimal }
/*}}}*/
/***
''General Link Styles'' /%-----------------------------------------------------------------------------%/
***/
/*{{{*/
.externalLink {
	text-decoration: underline;
}

/* the 'a' is required for IE, otherwise it renders the whole tiddler a bold */
a.tiddlyLinkNonExisting.shadow {
	font-weight: bold;
}
/*}}}*/
/***
''Exceptions to common link styles'' /%------------------------------------------------------------------%/
***/
/*{{{*/

#mainMenu .tiddlyLinkExisting,
#mainMenu .tiddlyLinkNonExisting,
#sidebarTabs .tiddlyLinkExisting,
#sidebarTabs .tiddlyLinkNonExisting{
 font-weight: normal;
 font-style: normal;
}

/*}}}*/
/***
!Header /%==================================================%/
***/
/*{{{*/

.header {
		position: relative;
}

.header a:hover {
	background: transparent;
}

.headerShadow {
	position: relative;
	padding: 4.5em 0em 1em 1em;
	left: -1px;
	top: -1px;
}

.headerForeground {
	position: absolute;
	padding: 4.5em 0em 1em 1em;
	left: 0px;
	top: 0px;
}

.siteTitle {
	font-size: 3em;
}

.siteSubtitle {
	font-size: 1.2em;
	padding: 0em 0em 0em 2em;
}

/*}}}*/
/***
!Main menu /%==================================================%/
***/
/*{{{*/
#mainMenu {
	position: absolute;
	left: 0;
	width: 10em;
	text-align: right;
	line-height: 1.6em;
	padding: 1.5em 0.5em 0.5em 0.5em;
	font-size: 1.1em;
}

/*}}}*/
/***
!Sidebar rules /%==================================================%/
***/
/*{{{*/
#sidebar {
	position: absolute;
	right: 3px;
	width: 16em;
	font-size: .9em;
}
/*}}}*/
/***
''Sidebar options'' /%----------------------------------------------------------------------------------%/
***/
/*{{{*/
#sidebarOptions {
	padding-top: 0.3em;
}

#sidebarOptions a {
	margin: 0em 0.2em;
	padding: 0.2em 0.3em;
	display: block;
}

#sidebarOptions input {
	margin: 0.4em 0.5em;
}

#sidebarOptions .sliderPanel {
	margin-left: 1em;
	padding: 0.5em;
	font-size: .85em;
}

#sidebarOptions .sliderPanel a {
	font-weight: bold;
	display: inline;
	padding: 0;
}

#sidebarOptions .sliderPanel input {
	margin: 0 0 .3em 0;
}
/*}}}*/
/***
''Sidebar tabs'' /%-------------------------------------------------------------------------------------%/
***/
/*{{{*/

#sidebarTabs .tabContents {
	width: 15em;
	overflow: hidden;
}

/*}}}*/
/***
!Message area /%==================================================%/
***/
/*{{{*/
#messageArea {
position:absolute; top:0; right:0; margin: 0.5em; padding: 0.5em;
}

*[id='messageArea'] {
position:fixed !important; z-index:99;}

.messageToolbar {
display: block;
text-align: right;
}

#messageArea a{
	text-decoration: underline;
}
/*}}}*/
/***
!Popup /%==================================================%/
***/
/*{{{*/
.popup {
	font-size: .9em;
	padding: 0.2em;
	list-style: none;
	margin: 0;
}

.popup hr {
	display: block;
	height: 1px;
	width: auto;
	padding: 0;
	margin: 0.2em 0em;
}

.popup li.disabled {
	padding: 0.2em;
}

.popup li a{
	display: block;
	padding: 0.2em;
}
/*}}}*/
/***
!Tabs /%==================================================%/
***/
/*{{{*/
.tabset {
	padding: 1em 0em 0em 0.5em;
}

.tab {
	margin: 0em 0em 0em 0.25em;
	padding: 2px;
}

.tabContents {
	padding: 0.5em;
}

.tabContents ul, .tabContents ol {
	margin: 0;
	padding: 0;
}

.txtMainTab .tabContents li {
	list-style: none;
}

.tabContents li.listLink {
	 margin-left: .75em;
}
/*}}}*/
/***
!Tiddler display rules /%==================================================%/
***/
/*{{{*/
#displayArea {
	margin: 1em 17em 0em 14em;
}


.toolbar {
	text-align: right;
	font-size: .9em;
	visibility: hidden;
}

.selected .toolbar {
	visibility: visible;
}

.tiddler {
	padding: 1em 1em 0em 1em;
}

.missing .viewer,.missing .title {
	font-style: italic;
}

.title {
	font-size: 1.6em;
	font-weight: bold;
}

.missing .subtitle {
 display: none;
}

.subtitle {
	font-size: 0.8em;
}

/* I'm not a fan of how button looks in tiddlers... */
.tiddler .button {
	padding: 0.2em 0.4em;
}

.tagging {
margin: 0.5em 0.5em 0.5em 0;
float: left;
display: none;
}

.isTag .tagging {
display: block;
}

.tagged {
margin: 0.5em;
float: right;
}

.tagging, .tagged {
font-size: 0.9em;
padding: 0.25em;
}

.tagging ul, .tagged ul {
list-style: none;margin: 0.25em;
padding: 0;
}

.tagClear {
clear: both;
}

.footer {
	font-size: .9em;
}

.footer li {
display: inline;
}
/***
''The viewer is where the tiddler content is displayed'' /%------------------------------------------------%/
***/
/*{{{*/
* html .viewer pre {
	width: 99%;
	padding: 0 0 1em 0;
}

.viewer {
	line-height: 1.4em;
	padding-top: 0.5em;
}

.viewer .button {
	margin: 0em 0.25em;
	padding: 0em 0.25em;
}

.viewer blockquote {
	line-height: 1.5em;
	padding-left: 0.8em;
	margin-left: 2.5em;
}

.viewer ul, .viewer ol{
	margin-left: 0.5em;
	padding-left: 1.5em;
}

.viewer table {
	border-collapse: collapse;
	margin: 0.8em 1.0em;
}

.viewer th, .viewer td, .viewer tr,.viewer caption{
	padding: 3px;
}

.viewer pre {
	padding: 0.5em;
	margin-left: 0.5em;
	font-size: 1.2em;
	line-height: 1.4em;
	overflow: auto;
}

.viewer code {
	font-size: 1.2em;
	line-height: 1.4em;
}
/*}}}*/
/***
''The editor replaces the viewer in the tiddler'' /%------------------------------------------------%/
***/
/*{{{*/
.editor {
font-size: 1.1em;
}

.editor input, .editor textarea {
	display: block;
	width: 100%;
	font: inherit;
}

.editorFooter {
	padding: 0.25em 0em;
	font-size: .9em;
}

.editorFooter .button {
padding-top: 0px; padding-bottom: 0px;}

.fieldsetFix {border: 0;
padding: 0;
margin: 1px 0px 1px 0px;
}
/*}}}*/
/***
!Misc rules /%==================================================%/
***/
/*{{{*/
.sparkline {
	line-height: 1em;
}

.sparktick {
	outline: 0;
}

.zoomer {
	font-size: 1.1em;
	position: absolute;
	padding: 1em;
}

.cascade {
	font-size: 1.1em;
	position: absolute;
	overflow: hidden;
}
/*}}}*/
/*{{{*/
@media print {
#mainMenu, #sidebar, #messageArea, .toolbar, #backstageButton, #backstageArea, #toolbar, #topMenu, #rightMenu {display: none !important;}
#header, #headerShadow {display: none !important;}
.siteSubtitle {display: none !important;}

.siteTitle { font-size: 1.5em; }


#displayArea {margin: 1em 1em 0em;}
noscript {display:none;} /* Fixes a feature in Firefox 1.5.0.2 where print preview displays the noscript content */
}
/*}}}*/
Type the text for 'Styles'
<<tabs txtMainTab Timeline Timeline TabTimeline All 'All tiddlers' TabAll Tags 'All tags' TabTags More 'More lists' TabMore>>
The following command table should prove useful for this course.  This is not an extensive list of commands you will need to know / become familiar with.

| !&nbsp;Cover&nbsp; | !&nbsp;Command&nbsp; | !Description |
|>|>|bgcolor(#a0ffa0): ''Basic Commands'' |
| x | echo |output command arguments to the terminal|
| x | cd |change directories|
| x | pwd |display current working directory|
| x | ls |list files|
| x | cp |copy files|
| x | rm |remove files|
| x | mv |move files|
| x | mkdir |create directory|
| x | rmdir |remove directory|
| x | touch |create an empty file with default permissions|
| x | ln |create link|
| x | man |view man pages|
| x | chmod |set permissions for a file|
|  | chgrp |set group for a file|
|>|>|bgcolor(#a0ffa0): ''Display Text / Editors'' |
| x | less |display text output one page at a time|
|  | pico |easy to use text editor|
|  | nano |GNU clone of pico|
| x | vi |advanced unix text editor|
|  | ex |line oriented version of vi|
|  | vim |vi improved|
| x | vimtutor |learn how to use the vim editor|
|>|>|bgcolor(#a0ffa0): ''Filters'' |
| x | cat |concatenate and print files|
| x | grep |pattern matching filter|
| x | egrep |extended regular expression pattern matching filter|
| x | head |display first lines of a file|
| x | tail |display the last part of a file|
| x | cut |cut out selected portions of each line of a file|
| x | fold |fold long lines for finite width output device|
| x | sort |sort lines of text files|
| x | uniq |report or filter out repeated lines in a file|
| x | wc |word, line, character, and byte count|
| x | tr |translate characters|
|  | paste |merge lines of input|
|  | nl |line numbering filter|
| x | sed |stream editor|
| x | awk |pattern-directed scanning and processing language|
| x | tee |duplicate standard input to a file|
|  | strings |print the strings of printable characters in (binary) files|
|  | cmp |compare two files|
|  | diff |compare files line by line|
|  | comm |select or reject lines common to two files|
|>|>|bgcolor(#a0ffa0): ''System Commands'' |
| x | script |save copy of terminal session|
|  | source |read and execute commands from a file in the current shell|
| x | rehash |recompute hash table of where commands are located|
| x | which |scan path for a program and return its location (or definition of an alias)|
| x | df |display free disk space|
| x | du |disk usage (-s display a summary total for each argument, -k 1K blocks, -h human readable)|
| x | find |walk a file hierarchy in search of files|
|  | locate |find filenames quickly based on pre-generated file database|
|  | hostname |print name of current host system|
| x | uptime |show how long system has been running|
| x | uname |display information about the system|
|  | xargs |construct argument list(s) and execute utility|
|  | quota |display disk usage and limits|
|  | crontab |schedule commands for automated execution on regular intervals|
|  | at |schedule a job for later execution|
|>|>|bgcolor(#a0ffa0): ''Process Management / Job Control'' |
| x | ps |process status|
| x | top |display and update information about the top cpu processes|
| x | kill |terminate or signal a process|
| x | jobs |display all jobs|
| x | fg |continue background jobs in the foreground|
| x | bg |continue suspended job in the background|
| x | stop |suspend job running in the background|
|  | suspend |suspend the current running shell|
|>|>|bgcolor(#a0ffa0): ''User Information'' |
| x | w |display who is logged in and what they are doing|
| x | id |return user identity|
| x | groups |show group memberships|
|  | users |list usernames of current logged in users|
|  | who |display who is on the system|
|  | whoami |display effective user id|
| x | finger |user information lookup program|
| x | last |indicate last logins of users and ttys|
|>|>|bgcolor(#a0ffa0): ''Misc commands useful for shell scripting'' |
| x | clear |clear the screen|
| x | read //var// |prompt the user to enter information, saving to //var//|
| x | date |display the current date and time with optional formatting.  see strftime manpage|
| x | test |condition evaluation utility, also invoked as {{Monospaced{[}}}.  See the test manpage.|
| x | expr |evaluate an expression|
| x | jot |print sequential or random numbers|
|  | sleep //n// |pause execution for //n// seconds|
|  | stat |display extended file status/information|
|  | stty |set the options for a terminal device interface|
|  | basename |return the file name portion of a path|
|  | dirname |return the directory name portion of a path|
|  | fstat |List open files or determine whether specified file is open|
| x | exit [//n//] |log out or quit a script with the option exit status of //n//|
|>|>|bgcolor(#a0ffa0): ''Networking / Communication'' |
| x | ssh |~OpenSSH SSH client|
| x | scp |secure copy (remote file copy program)|
|  | rsync |a fast, versatile, remote (and local) file-copying tool|
|  | telnet |user interface to the TELNET protocol.  also useful for testing connectivity to arbitrary ports|
|  | talk / ytalk |talk to another user|
|  | write |send a message to another user|
|  | mesg |display (do not display) messages from other users|
|  | host |DNS lookup utility|
|  | nslookup |query Internet name servers interactively|
|  | traceroute |print the route packets take to network host|
|  | ping |send ICMP ~ECHO_REQUEST packets to network hosts|
|  | lynx / links |character mode WWW browser|
|>|>|bgcolor(#a0ffa0): ''Text Formatting & Printing'' |
| x | lpr |command line print utility|
| x | lpq |print spool queue examination program|
| x | lprm |remove jobs from the line printer spooling queue|
| x | pdf2ps |Ghostscript PDF to ~PostScript translator|
| x | a2ps |format files for printing on a ~PostScript printer|
|>|>|bgcolor(#a0ffa0): ''Working with files'' |
| x | file |display file type|
| x | tar |manipulate file archive files|
| x | gzip |compression tool using ~Lempel-Ziv coding|
| x | gunzip |decompression tool using ~Lempel-Ziv coding|
| x | bzip2 |a block-sorting file compressor|
| x | bunzip2 |a block-sorting file decompressor|
|  | split |split a file into pieces|
| x | md5 / md5sum |calculate a message-digest fingerprint (checksum) for a file (freebsd / linux)|
|  | srm |securely remove files or directories|
|  | rsync |a fast, versatile, remote (and local) file-copying tool|
/***
|Name|TagglyListPlugin|
|Created by|SimonBaird|
|Location|http://simonbaird.com/mptw/#TagglyListPlugin|
|Version|1.1.2 25-Apr-06|
|Requires|See TagglyTagging|

!History
* 1.1.2 (25-Apr-2006) embedded TagglyTaggingStyles. No longer need separated tiddler for styles.
* 1.1.1 (6-Mar-2006) fixed bug with refreshAllVisible closing tiddlers being edited. Thanks Luke Blanshard.

***/

/***
!Setup and config
***/
//{{{

version.extensions.TagglyListPlugin = {
	major: 1, minor: 1, revision: 2,
	date: new Date(2006,4,25),
	source: "http://simonbaird.com/mptw/#TagglyListPlugin"
};

config.macros.tagglyList = {};
config.macros.tagglyListByTag = {};
config.macros.tagglyListControl = {};
config.macros.tagglyListWithSort = {};
config.macros.hideSomeTags = {};

// change this to your preference
config.macros.tagglyListWithSort.maxCols = 6;

config.macros.tagglyList.label = "Tagged as %0:";

// the default sort options. set these to your preference
config.macros.tagglyListWithSort.defaults = {
 sortBy:"title", // title|created|modified
 sortOrder: "asc", // asc|desc
 hideState: "show", // show|hide
 groupState: "nogroup", // nogroup|group
 numCols: 1
};

// these tags will be ignored by the grouped view
config.macros.tagglyListByTag.excludeTheseTags = [
 "systemConfig",
 "TiddlerTemplates"
];

config.macros.tagglyListControl.tags = {
 title:"sortByTitle",
 modified: "sortByModified",
 created: "sortByCreated",
 asc:"sortAsc",
 desc:"sortDesc",
 hide:"hideTagged",
 show:"showTagged",
 nogroup:"noGroupByTag",
 group:"groupByTag",
 cols1:"list1Cols",
 cols2:"list2Cols",
 cols3:"list3Cols",
 cols4:"list4Cols",
 cols5:"list5Cols",
 cols6:"list6Cols",
 cols7:"list7Cols",
 cols8:"list8Cols",
 cols9:"list9Cols"
}

// note: should match config.macros.tagglyListControl.tags
config.macros.hideSomeTags.tagsToHide = [
 "sortByTitle",
 "sortByCreated",
 "sortByModified",
 "sortDesc",
 "sortAsc",
 "hideTagged",
 "showTagged",
 "noGroupByTag",
 "groupByTag",
 "list1Cols",
 "list2Cols",
 "list3Cols",
 "list4Cols",
 "list5Cols",
 "list6Cols",
 "list7Cols",
 "list8Cols",
 "list9Cols"
];


//}}}
/***

!Utils
***/
//{{{
// from Eric
function isTagged(title,tag) {
 var t=store.getTiddler(title); if (!t) return false;
 return (t.tags.find(tag)!=null);
}

// from Eric
function toggleTag(title,tag) {
 var t=store.getTiddler(title); if (!t || !t.tags) return;
 if (t.tags.find(tag)==null) t.tags.push(tag);
 else t.tags.splice(t.tags.find(tag),1);
}

function addTag(title,tag) {
 var t=store.getTiddler(title); if (!t || !t.tags) return;
 t.tags.push(tag);
}

function removeTag(title,tag) {
 var t=store.getTiddler(title); if (!t || !t.tags) return;
 if (t.tags.find(tag)!=null) t.tags.splice(t.tags.find(tag),1);
}

// from Udo
Array.prototype.indexOf = function(item) {
 for (var i = 0; i < this.length; i++) {
 if (this[i] == item) {
 return i;
 }
 }
 return -1;
};
Array.prototype.contains = function(item) {
 return (this.indexOf(item) >= 0);
}
//}}}
/***

!tagglyList
displays a list of tagged tiddlers.
parameters are sortField and sortOrder
***/
//{{{

// not used at the moment...
function sortedListOfOtherTags(tiddler,thisTag) {
 var list = tiddler.tags.concat(); // so we are working on a clone..
 for (var i=0;i<config.macros.hideSomeTags.tagsToHide.length;i++) {
 if (list.find(config.macros.hideSomeTags.tagsToHide[i]) != null)
 list.splice(list.find(config.macros.hideSomeTags.tagsToHide[i]),1); // remove hidden ones
 }
 for (var i=0;i<config.macros.tagglyListByTag.excludeTheseTags.length;i++) {
 if (list.find(config.macros.tagglyListByTag.excludeTheseTags[i]) != null)
 list.splice(list.find(config.macros.tagglyListByTag.excludeTheseTags[i]),1); // remove excluded ones
 }
 list.splice(list.find(thisTag),1); // remove thisTag
 return '[[' + list.sort().join("]] [[") + ']]';
}

function sortHelper(a,b) {
 if (a == b) return 0;
 else if (a < b) return -1;
 else return +1;
}

config.macros.tagglyListByTag.handler = function (place,macroName,params,wikifier,paramString,tiddler) {

 var sortBy = params[0] ? params[0] : "title";
 var sortOrder = params[1] ? params[1] : "asc";

 var result = store.getTaggedTiddlers(tiddler.title,sortBy);

 if (sortOrder == "desc")
 result = result.reverse();

 var leftOvers = []
 for (var i=0;i<result.length;i++) {
 leftOvers.push(result[i].title);
 }

 var allTagsHolder = {};
 for (var i=0;i<result.length;i++) {
 for (var j=0;j<result[i].tags.length;j++) {

 if (
 result[i].tags[j] != tiddler.title // not this tiddler
 && config.macros.hideSomeTags.tagsToHide.find(result[i].tags[j]) == null // not a hidden one
 && config.macros.tagglyListByTag.excludeTheseTags.find(result[i].tags[j]) == null // not excluded
 ) {
 if (!allTagsHolder[result[i].tags[j]])
 allTagsHolder[result[i].tags[j]] = "";
 allTagsHolder[result[i].tags[j]] += "**[["+result[i].title+"]]\n";

 if (leftOvers.find(result[i].title) != null)
 leftOvers.splice(leftOvers.find(result[i].title),1); // remove from leftovers. at the end it will contain the leftovers...
 }
 }
 }


 var allTags = [];
 for (var t in allTagsHolder)
 allTags.push(t);

 allTags.sort(function(a,b) {
 var tidA = store.getTiddler(a);
 var tidB = store.getTiddler(b);
 if (sortBy == "title") return sortHelper(a,b);
 else if (!tidA && !tidB) return 0;
 else if (!tidA) return -1;
 else if (!tidB) return +1;
 else return sortHelper(tidA[sortBy],tidB[sortBy]);
 });

 var markup = "";

 if (sortOrder == "desc") {
 allTags.reverse();
 }
 else {
 // leftovers first...
 for (var i=0;i<leftOvers.length;i++)
 markup += "*[["+leftOvers[i]+"]]\n";
 }

 for (var i=0;i<allTags.length;i++)
 markup += "*[["+allTags[i]+"]]\n" + allTagsHolder[allTags[i]];

 if (sortOrder == "desc") {
 // leftovers last...
 for (var i=0;i<leftOvers.length;i++)
 markup += "*[["+leftOvers[i]+"]]\n";
 }

 wikify(markup,place);
}

config.macros.tagglyList.handler = function (place,macroName,params,wikifier,paramString,tiddler) {
 var sortBy = params[0] ? params[0] : "title";
 var sortOrder = params[1] ? params[1] : "asc";
 var numCols = params[2] ? params[2] : 1;

 var result = store.getTaggedTiddlers(tiddler.title,sortBy);
 if (sortOrder == "desc")
 result = result.reverse();

 var listSize = result.length;
 var colSize = listSize/numCols;
 var remainder = listSize % numCols;

 var upperColsize;
 var lowerColsize;
 if (colSize != Math.floor(colSize)) {
 // it's not an exact fit so..
 lowerColsize = Math.floor(colSize);
 upperColsize = Math.floor(colSize) + 1;
 }
 else {
 lowerColsize = colSize;
 upperColsize = colSize;
 }

 var markup = "";
 var c=0;

 var newTaggedTable = createTiddlyElement(place,"table");
 var newTaggedBody = createTiddlyElement(newTaggedTable,"tbody");
 var newTaggedTr = createTiddlyElement(newTaggedBody,"tr");

 for (var j=0;j<numCols;j++) {
 var foo = "";
 var thisSize;

 if (j<remainder)
 thisSize = upperColsize;
 else
 thisSize = lowerColsize;

 for (var i=0;i<thisSize;i++)
 foo += ( "*[[" + result[c++].title + "]]\n"); // was using splitList.shift() but didn't work in IE;

 var newTd = createTiddlyElement(newTaggedTr,"td",null,"tagglyTagging");
 wikify(foo,newTd);

 }

};

/* snip for later.....
 //var groupBy = params[3] ? params[3] : "t.title.substr(0,1)";
 //var groupBy = params[3] ? params[3] : "sortedListOfOtherTags(t,tiddler.title)";
 //var groupBy = params[3] ? params[3] : "t.modified";
 var groupBy = null; // for now. groupBy here is working but disabled for now.

 var prevGroup = "";
 var thisGroup = "";

 if (groupBy) {
 result.sort(function(a,b) {
 var t = a; var aSortVal = eval(groupBy); var aSortVal2 = eval("t".sortBy);
 var t = b; var bSortVal = eval(groupBy); var bSortVal2 = eval("t".sortBy);
 var t = b; var bSortVal2 = eval(groupBy);
 return (aSortVal == bSortVal ?
 (aSortVal2 == bSortVal2 ? 0 : (aSortVal2 < bSortVal2 ? -1 : +1)) // yuck
 : (aSortVal < bSortVal ? -1 : +1));
 });
 }

 if (groupBy) {
 thisGroup = eval(groupBy);
 if (thisGroup != prevGroup)
 markup += "*[["+thisGroup+']]\n';
 markup += "**[["+t.title+']]\n';
 prevGroup = thisGroup;
 }



*/


//}}}

/***

!tagglyListControl
Use to make the sort control buttons
***/
//{{{

function getSortBy(title) {
 var tiddler = store.getTiddler(title);
 var defaultVal = config.macros.tagglyListWithSort.defaults.sortBy;
 if (!tiddler) return defaultVal;
 var usetags = config.macros.tagglyListControl.tags;
 if (tiddler.tags.contains(usetags["title"])) return "title";
 else if (tiddler.tags.contains(usetags["modified"])) return "modified";
 else if (tiddler.tags.contains(usetags["created"])) return "created";
 else return defaultVal;
}

function getSortOrder(title) {
 var tiddler = store.getTiddler(title);
 var defaultVal = config.macros.tagglyListWithSort.defaults.sortOrder;
 if (!tiddler) return defaultVal;
 var usetags = config.macros.tagglyListControl.tags;
 if (tiddler.tags.contains(usetags["asc"])) return "asc";
 else if (tiddler.tags.contains(usetags["desc"])) return "desc";
 else return defaultVal;
}

function getHideState(title) {
 var tiddler = store.getTiddler(title);
 var defaultVal = config.macros.tagglyListWithSort.defaults.hideState;
 if (!tiddler) return defaultVal;
 var usetags = config.macros.tagglyListControl.tags;
 if (tiddler.tags.contains(usetags["hide"])) return "hide";
 else if (tiddler.tags.contains(usetags["show"])) return "show";
 else return defaultVal;
}

function getGroupState(title) {
 var tiddler = store.getTiddler(title);
 var defaultVal = config.macros.tagglyListWithSort.defaults.groupState;
 if (!tiddler) return defaultVal;
 var usetags = config.macros.tagglyListControl.tags;
 if (tiddler.tags.contains(usetags["group"])) return "group";
 else if (tiddler.tags.contains(usetags["nogroup"])) return "nogroup";
 else return defaultVal;
}

function getNumCols(title) {
 var tiddler = store.getTiddler(title);
 var defaultVal = config.macros.tagglyListWithSort.defaults.numCols; // an int
 if (!tiddler) return defaultVal;
 var usetags = config.macros.tagglyListControl.tags;
 for (var i=1;i<=config.macros.tagglyListWithSort.maxCols;i++)
 if (tiddler.tags.contains(usetags["cols"+i])) return i;
 return defaultVal;
}


function getSortLabel(title,which) {
 // TODO. the strings here should be definable in config
 var by = getSortBy(title);
 var order = getSortOrder(title);
 var hide = getHideState(title);
 var group = getGroupState(title);
 if (which == "hide") return (hide == "show" ? "−" : "+"); // 0x25b8;
 else if (which == "group") return (group == "group" ? "normal" : "grouped");
 else if (which == "cols") return "cols±"; // &plusmn;
 else if (by == which) return which + (order == "asc" ? "↓" : "↑"); // &uarr; &darr;
 else return which;
}

function handleSortClick(title,which) {
 var currentSortBy = getSortBy(title);
 var currentSortOrder = getSortOrder(title);
 var currentHideState = getHideState(title);
 var currentGroupState = getGroupState(title);
 var currentNumCols = getNumCols(title);

 var tags = config.macros.tagglyListControl.tags;

 // if it doesn't exist, lets create it..
 if (!store.getTiddler(title))
 store.saveTiddler(title,title,"",config.options.txtUserName,new Date(),null);

 if (which == "hide") {
 // toggle hide state
 var newHideState = (currentHideState == "hide" ? "show" : "hide");
 removeTag(title,tags[currentHideState]);
 if (newHideState != config.macros.tagglyListWithSort.defaults.hideState)
 toggleTag(title,tags[newHideState]);
 }
 else if (which == "group") {
 // toggle hide state
 var newGroupState = (currentGroupState == "group" ? "nogroup" : "group");
 removeTag(title,tags[currentGroupState]);
 if (newGroupState != config.macros.tagglyListWithSort.defaults.groupState)
 toggleTag(title,tags[newGroupState]);
 }
 else if (which == "cols") {
 // toggle num cols
 var newNumCols = currentNumCols + 1; // confusing. currentNumCols is an int
 if (newNumCols > config.macros.tagglyListWithSort.maxCols || newNumCols > store.getTaggedTiddlers(title).length)
 newNumCols = 1;
 removeTag(title,tags["cols"+currentNumCols]);
 if (("cols"+newNumCols) != config.macros.tagglyListWithSort.defaults.groupState)
 toggleTag(title,tags["cols"+newNumCols]);
 }
 else if (currentSortBy == which) {
 // toggle sort order
 var newSortOrder = (currentSortOrder == "asc" ? "desc" : "asc");
 removeTag(title,tags[currentSortOrder]);
 if (newSortOrder != config.macros.tagglyListWithSort.defaults.sortOrder)
 toggleTag(title,tags[newSortOrder]);
 }
 else {
 // change sortBy only
 removeTag(title,tags["title"]);
 removeTag(title,tags["created"]);
 removeTag(title,tags["modified"]);

 if (which != config.macros.tagglyListWithSort.defaults.sortBy)
 toggleTag(title,tags[which]);
 }

 store.setDirty(true); // save is required now.
 story.refreshTiddler(title,false,true); // force=true
}

config.macros.tagglyListControl.handler = function (place,macroName,params,wikifier,paramString,tiddler) {
 var onclick = function(e) {
 if (!e) var e = window.event;
 handleSortClick(tiddler.title,params[0]);
 e.cancelBubble = true;
 if (e.stopPropagation) e.stopPropagation();
 return false;
 };
 createTiddlyButton(place,getSortLabel(tiddler.title,params[0]),"Click to change sort options",onclick,params[0]=="hide"?"hidebutton":"button");
}
//}}}
/***

!tagglyListWithSort
put it all together..
***/
//{{{
config.macros.tagglyListWithSort.handler = function (place,macroName,params,wikifier,paramString,tiddler) {
 if (tiddler && store.getTaggedTiddlers(tiddler.title).length > 0)
  // todo make this readable
 wikify(
 "<<tagglyListControl hide>>"+
 (getHideState(tiddler.title) != "hide" ?
 '<html><span class="tagglyLabel">'+config.macros.tagglyList.label.format([tiddler.title])+' </span></html>'+
 "<<tagglyListControl title>><<tagglyListControl modified>><<tagglyListControl created>><<tagglyListControl group>>"+(getGroupState(tiddler.title)=="group"?"":"<<tagglyListControl cols>>")+"\n" +
 "<<tagglyList" + (getGroupState(tiddler.title)=="group"?"ByTag ":" ") + getSortBy(tiddler.title)+" "+getSortOrder(tiddler.title)+" "+getNumCols(tiddler.title)+">>" // hacky
 // + \n----\n" +
 //"<<tagglyList "+getSortBy(tiddler.title)+" "+getSortOrder(tiddler.title)+">>"
 : ""),
 place,null,tiddler);
}

config.macros.tagglyTagging = { handler: config.macros.tagglyListWithSort.handler };


//}}}
/***

!hideSomeTags
So we don't see the sort tags.
(note: they are still there when you edit. Will that be too annoying?)
***/
//{{{

// based on tags.handler
config.macros.hideSomeTags.handler = function(place,macroName,params,wikifier,paramString,tiddler) {
 var theList = createTiddlyElement(place,"ul");
 if(params[0] && store.tiddlerExists(params[0]))
 tiddler = store.getTiddler(params[0]);
 var lingo = config.views.wikified.tag;
 var prompt = tiddler.tags.length == 0 ? lingo.labelNoTags : lingo.labelTags;
 createTiddlyElement(theList,"li",null,"listTitle",prompt.format([tiddler.title]));
 for(var t=0; t<tiddler.tags.length; t++)
 if (!this.tagsToHide.contains(tiddler.tags[t])) // this is the only difference from tags.handler...
 createTagButton(createTiddlyElement(theList,"li"),tiddler.tags[t],tiddler.title);

}

//}}}
/***

!Refresh everything when we save a tiddler. So the tagged lists never get stale. Is this too slow???
***/
//{{{

function refreshAllVisible() {
 story.forEachTiddler(function(title,element) {
   if (element.getAttribute("dirty") != "true")
     story.refreshTiddler(title,false,true);
 });
}

story.saveTiddler_orig_mptw = story.saveTiddler;
story.saveTiddler = function(title,minorUpdate) {
 var result = this.saveTiddler_orig_mptw(title,minorUpdate);
// refreshAllVisible();
 return result;
}

store.removeTiddler_orig_mptw = store.removeTiddler;
store.removeTiddler = function(title) {
 this.removeTiddler_orig_mptw(title);
// refreshAllVisible();
}

config.shadowTiddlers.TagglyTaggingStyles = "/***\nTo use, add {{{[[TagglyTaggingStyles]]}}} to your StyleSheet tiddler, or you can just paste the CSS in directly. See also ViewTemplate, EditTemplate and TagglyTagging.\n***/\n/*{{{*/\n.tagglyTagged li.listTitle { display:none;}\n.tagglyTagged li { display: inline; font-size:90%; }\n.tagglyTagged ul { margin:0px; padding:0px; }\n.tagglyTagging { padding-top:0.5em; }\n.tagglyTagging li.listTitle { display:none;}\n.tagglyTagging ul { margin-top:0px; padding-top:0.5em; padding-left:2em; margin-bottom:0px; padding-bottom:0px; }\n\n/* .tagglyTagging .tghide { display:inline; } */\n\n.tagglyTagging { vertical-align: top; margin:0px; padding:0px; }\n.tagglyTagging table { margin:0px; padding:0px; }\n\n\n.tagglyTagging .button { display:none; margin-left:3px; margin-right:3px; }\n.tagglyTagging .button, .tagglyTagging .hidebutton { color:#aaa; font-size:90%; border:0px; padding-left:0.3em;padding-right:0.3em;}\n.tagglyTagging .button:hover, .hidebutton:hover { background:#eee; color:#888; }\n.selected .tagglyTagging .button { display:inline; }\n\n.tagglyTagging .hidebutton { color:white; } /* has to be there so it takes up space. tweak if you're not using a white tiddler bg */\n.selected .tagglyTagging .hidebutton { color:#aaa }\n\n.tagglyLabel { color:#aaa; font-size:90%; }\n\n.tagglyTagging ul {padding-top:0px; padding-bottom:0.5em; margin-left:1em; }\n.tagglyTagging ul ul {list-style-type:disc; margin-left:-1em;}\n.tagglyTagging ul ul li {margin-left:0.5em; }\n\n.editLabel { font-size:90%; padding-top:0.5em; }\n/*}}}*/\n";

refreshStyles("TagglyTaggingStyles");


//}}}

// // <html>&#x25b8;&#x25be;&minus;&plusmn;</html>
Type the text for 'TagglyTagging'
/***
|Name:|TagglyTaggingPlugin|
|Description:|tagglyTagging macro is a replacement for the builtin tagging macro in your ViewTemplate|
|Version:|3.3.1 ($Rev: 6100 $)|
|Date:|$Date: 2008-07-27 01:42:07 +1000 (Sun, 27 Jul 2008) $|
|Source:|http://mptw.tiddlyspot.com/#TagglyTaggingPlugin|
|Author:|Simon Baird <simon.baird@gmail.com>|
|License:|http://mptw.tiddlyspot.com/#TheBSDLicense|
!Notes
See http://mptw.tiddlyspot.com/#TagglyTagging
***/
//{{{

merge(String.prototype,{

	parseTagExpr: function(debug) {

		if (this.trim() == "")
			return "(true)";

		var anyLogicOp = /(!|&&|\|\||\(|\))/g;
		var singleLogicOp = /^(!|&&|\|\||\(|\))$/;

		var spaced = this.
			// because square brackets in templates are no good
			// this means you can use [(With Spaces)] instead of [[With Spaces]]
			replace(/\[\(/g," [[").
			replace(/\)\]/g,"]] ").
			// space things out so we can use readBracketedList. tricky eh?
			replace(anyLogicOp," $1 ");

		var expr = "";

		var tokens = spaced.readBracketedList(false); // false means don't uniq the list. nice one JR!

		for (var i=0;i<tokens.length;i++)
			if (tokens[i].match(singleLogicOp))
				expr += tokens[i];
			else
				expr += "tiddler.tags.contains('%0')".format([tokens[i].replace(/'/,"\\'")]); // fix single quote bug. still have round bracket bug i think

		if (debug)
			alert(expr);

		return '('+expr+')';
	}

});

merge(TiddlyWiki.prototype,{
	getTiddlersByTagExpr: function(tagExpr,sortField) {

		var result = [];

		var expr = tagExpr.parseTagExpr();

		store.forEachTiddler(function(title,tiddler) {
			if (eval(expr))
				result.push(tiddler);
		});

		if(!sortField)
			sortField = "title";

		result.sort(function(a,b) {return a[sortField] < b[sortField] ? -1 : (a[sortField] == b[sortField] ? 0 : +1);});

		return result;
	}
});

config.taggly = {

	// for translations
	lingo: {
		labels: {
			asc:        "\u2191", // up arrow
			desc:       "\u2193", // down arrow
			title:      "title",
			modified:   "modified",
			created:    "created",
			show:       "+",
			hide:       "-",
			normal:     "normal",
			group:      "group",
			commas:     "commas",
			sitemap:    "sitemap",
			numCols:    "cols\u00b1", // plus minus sign
			label:      "Tagged as '%0':",
			exprLabel:  "Matching tag expression '%0':",
			excerpts:   "excerpts",
			descr:      "descr",
			slices:     "slices",
			contents:   "contents",
			sliders:    "sliders",
			noexcerpts: "title only",
			noneFound:  "(none)"
		},

		tooltips: {
			title:      "Click to sort by title",
			modified:   "Click to sort by modified date",
			created:    "Click to sort by created date",
			show:       "Click to show tagging list",
			hide:       "Click to hide tagging list",
			normal:     "Click to show a normal ungrouped list",
			group:      "Click to show list grouped by tag",
			sitemap:    "Click to show a sitemap style list",
			commas:     "Click to show a comma separated list",
			numCols:    "Click to change number of columns",
			excerpts:   "Click to show excerpts",
			descr:      "Click to show the description slice",
			slices:     "Click to show all slices",
			contents:   "Click to show entire tiddler contents",
			sliders:    "Click to show tiddler contents in sliders",
			noexcerpts: "Click to show entire title only"
		},

		tooDeepMessage: "* //sitemap too deep...//"
	},

	config: {
		showTaggingCounts: true,
		listOpts: {
			// the first one will be the default
			sortBy:     ["title","modified","created"],
			sortOrder:  ["asc","desc"],
			hideState:  ["show","hide"],
			listMode:   ["normal","group","sitemap","commas"],
			numCols:    ["1","2","3","4","5","6"],
			excerpts:   ["noexcerpts","excerpts","descr","slices","contents","sliders"]
		},
		valuePrefix: "taggly.",
		excludeTags: ["excludeLists","excludeTagging"],
		excerptSize: 50,
		excerptMarker: "/%"+"%/",
		siteMapDepthLimit: 25
	},

	getTagglyOpt: function(title,opt) {
		var val = store.getValue(title,this.config.valuePrefix+opt);
		return val ? val : this.config.listOpts[opt][0];
	},

	setTagglyOpt: function(title,opt,value) {
		if (!store.tiddlerExists(title))
			// create it silently
			store.saveTiddler(title,title,config.views.editor.defaultText.format([title]),config.options.txtUserName,new Date(),"");
		// if value is default then remove it to save space
		return store.setValue(title,
			this.config.valuePrefix+opt,
			value == this.config.listOpts[opt][0] ? null : value);
	},

	getNextValue: function(title,opt) {
		var current = this.getTagglyOpt(title,opt);
		var pos = this.config.listOpts[opt].indexOf(current);
		// a little usability enhancement. actually it doesn't work right for grouped or sitemap
		var limit = (opt == "numCols" ? store.getTiddlersByTagExpr(title).length : this.config.listOpts[opt].length);
		var newPos = (pos + 1) % limit;
		return this.config.listOpts[opt][newPos];
	},

	toggleTagglyOpt: function(title,opt) {
		var newVal = this.getNextValue(title,opt);
		this.setTagglyOpt(title,opt,newVal);
	},

	createListControl: function(place,title,type) {
		var lingo = config.taggly.lingo;
		var label;
		var tooltip;
		var onclick;

		if ((type == "title" || type == "modified" || type == "created")) {
			// "special" controls. a little tricky. derived from sortOrder and sortBy
			label = lingo.labels[type];
			tooltip = lingo.tooltips[type];

			if (this.getTagglyOpt(title,"sortBy") == type) {
				label += lingo.labels[this.getTagglyOpt(title,"sortOrder")];
				onclick = function() {
					config.taggly.toggleTagglyOpt(title,"sortOrder");
					return false;
				}
			}
			else {
				onclick = function() {
					config.taggly.setTagglyOpt(title,"sortBy",type);
					config.taggly.setTagglyOpt(title,"sortOrder",config.taggly.config.listOpts.sortOrder[0]);
					return false;
				}
			}
		}
		else {
			// "regular" controls, nice and simple
			label = lingo.labels[type == "numCols" ? type : this.getNextValue(title,type)];
			tooltip = lingo.tooltips[type == "numCols" ? type : this.getNextValue(title,type)];
			onclick = function() {
				config.taggly.toggleTagglyOpt(title,type);
				return false;
			}
		}

		// hide button because commas don't have columns
		if (!(this.getTagglyOpt(title,"listMode") == "commas" && type == "numCols"))
			createTiddlyButton(place,label,tooltip,onclick,type == "hideState" ? "hidebutton" : "button");
	},

	makeColumns: function(orig,numCols) {
		var listSize = orig.length;
		var colSize = listSize/numCols;
		var remainder = listSize % numCols;

		var upperColsize = colSize;
		var lowerColsize = colSize;

		if (colSize != Math.floor(colSize)) {
			// it's not an exact fit so..
			upperColsize = Math.floor(colSize) + 1;
			lowerColsize = Math.floor(colSize);
		}

		var output = [];
		var c = 0;
		for (var j=0;j<numCols;j++) {
			var singleCol = [];
			var thisSize = j < remainder ? upperColsize : lowerColsize;
			for (var i=0;i<thisSize;i++)
				singleCol.push(orig[c++]);
			output.push(singleCol);
		}

		return output;
	},

	drawTable: function(place,columns,theClass) {
		var newTable = createTiddlyElement(place,"table",null,theClass);
		var newTbody = createTiddlyElement(newTable,"tbody");
		var newTr = createTiddlyElement(newTbody,"tr");
		for (var j=0;j<columns.length;j++) {
			var colOutput = "";
			for (var i=0;i<columns[j].length;i++)
				colOutput += columns[j][i];
			var newTd = createTiddlyElement(newTr,"td",null,"tagglyTagging"); // todo should not need this class
			wikify(colOutput,newTd);
		}
		return newTable;
	},

	createTagglyList: function(place,title,isTagExpr) {
		switch(this.getTagglyOpt(title,"listMode")) {
			case "group":  return this.createTagglyListGrouped(place,title,isTagExpr); break;
			case "normal": return this.createTagglyListNormal(place,title,false,isTagExpr); break;
			case "commas": return this.createTagglyListNormal(place,title,true,isTagExpr); break;
			case "sitemap":return this.createTagglyListSiteMap(place,title,isTagExpr); break;
		}
	},

	getTaggingCount: function(title,isTagExpr) {
		// thanks to Doug Edmunds
		if (this.config.showTaggingCounts) {
			var tagCount = config.taggly.getTiddlers(title,'title',isTagExpr).length;
			if (tagCount > 0)
				return " ("+tagCount+")";
		}
		return "";
	},

	getTiddlers: function(titleOrExpr,sortBy,isTagExpr) {
		return isTagExpr ? store.getTiddlersByTagExpr(titleOrExpr,sortBy) : store.getTaggedTiddlers(titleOrExpr,sortBy);
	},

	getExcerpt: function(inTiddlerTitle,title,indent) {
		if (!indent)
			indent = 1;

		var displayMode = this.getTagglyOpt(inTiddlerTitle,"excerpts");
		var t = store.getTiddler(title);

		if (t && displayMode == "excerpts") {
			var text = t.text.replace(/\n/," ");
			var marker = text.indexOf(this.config.excerptMarker);
			if (marker != -1) {
				return " {{excerpt{<nowiki>" + text.substr(0,marker) + "</nowiki>}}}";
			}
			else if (text.length < this.config.excerptSize) {
				return " {{excerpt{<nowiki>" + t.text + "</nowiki>}}}";
			}
			else {
				return " {{excerpt{<nowiki>" + t.text.substr(0,this.config.excerptSize) + "..." + "</nowiki>}}}";
			}
		}
		else if (t && displayMode == "contents") {
			return "\n{{contents indent"+indent+"{\n" + t.text + "\n}}}";
		}
		else if (t && displayMode == "sliders") {
			return "<slider slide>\n{{contents{\n" + t.text + "\n}}}\n</slider>";
		}
		else if (t && displayMode == "descr") {
			var descr = store.getTiddlerSlice(title,'Description');
			return descr ? " {{excerpt{" + descr  + "}}}" : "";
		}
		else if (t && displayMode == "slices") {
			var result = "";
			var slices = store.calcAllSlices(title);
			for (var s in slices)
				result += "|%0|<nowiki>%1</nowiki>|\n".format([s,slices[s]]);
			return result ? "\n{{excerpt excerptIndent{\n" + result  + "}}}" : "";
		}
		return "";
	},

	notHidden: function(t,inTiddler) {
		if (typeof t == "string")
			t = store.getTiddler(t);
		return (!t || !t.tags.containsAny(this.config.excludeTags) ||
				(inTiddler && this.config.excludeTags.contains(inTiddler)));
	},

	// this is for normal and commas mode
	createTagglyListNormal: function(place,title,useCommas,isTagExpr) {

		var list = config.taggly.getTiddlers(title,this.getTagglyOpt(title,"sortBy"),isTagExpr);

		if (this.getTagglyOpt(title,"sortOrder") == "desc")
			list = list.reverse();

		var output = [];
		var first = true;
		for (var i=0;i<list.length;i++) {
			if (this.notHidden(list[i],title)) {
				var countString = this.getTaggingCount(list[i].title);
				var excerpt = this.getExcerpt(title,list[i].title);
				if (useCommas)
					output.push((first ? "" : ", ") + "[[" + list[i].title + "]]" + countString + excerpt);
				else
					output.push("*[[" + list[i].title + "]]" + countString + excerpt + "\n");

				first = false;
			}
		}

		return this.drawTable(place,
			this.makeColumns(output,useCommas ? 1 : parseInt(this.getTagglyOpt(title,"numCols"))),
			useCommas ? "commas" : "normal");
	},

	// this is for the "grouped" mode
	createTagglyListGrouped: function(place,title,isTagExpr) {
		var sortBy = this.getTagglyOpt(title,"sortBy");
		var sortOrder = this.getTagglyOpt(title,"sortOrder");

		var list = config.taggly.getTiddlers(title,sortBy,isTagExpr);

		if (sortOrder == "desc")
			list = list.reverse();

		var leftOvers = []
		for (var i=0;i<list.length;i++)
			leftOvers.push(list[i].title);

		var allTagsHolder = {};
		for (var i=0;i<list.length;i++) {
			for (var j=0;j<list[i].tags.length;j++) {

				if (list[i].tags[j] != title) { // not this tiddler

					if (this.notHidden(list[i].tags[j],title)) {

						if (!allTagsHolder[list[i].tags[j]])
							allTagsHolder[list[i].tags[j]] = "";

						if (this.notHidden(list[i],title)) {
							allTagsHolder[list[i].tags[j]] += "**[["+list[i].title+"]]"
										+ this.getTaggingCount(list[i].title) + this.getExcerpt(title,list[i].title) + "\n";

							leftOvers.setItem(list[i].title,-1); // remove from leftovers. at the end it will contain the leftovers

						}
					}
				}
			}
		}

		var allTags = [];
		for (var t in allTagsHolder)
			allTags.push(t);

		var sortHelper = function(a,b) {
			if (a == b) return 0;
			if (a < b) return -1;
			return 1;
		};

		allTags.sort(function(a,b) {
			var tidA = store.getTiddler(a);
			var tidB = store.getTiddler(b);
			if (sortBy == "title") return sortHelper(a,b);
			else if (!tidA && !tidB) return 0;
			else if (!tidA) return -1;
			else if (!tidB) return +1;
			else return sortHelper(tidA[sortBy],tidB[sortBy]);
		});

		var leftOverOutput = "";
		for (var i=0;i<leftOvers.length;i++)
			if (this.notHidden(leftOvers[i],title))
				leftOverOutput += "*[["+leftOvers[i]+"]]" + this.getTaggingCount(leftOvers[i]) + this.getExcerpt(title,leftOvers[i]) + "\n";

		var output = [];

		if (sortOrder == "desc")
			allTags.reverse();
		else if (leftOverOutput != "")
			// leftovers first...
			output.push(leftOverOutput);

		for (var i=0;i<allTags.length;i++)
			if (allTagsHolder[allTags[i]] != "")
				output.push("*[["+allTags[i]+"]]" + this.getTaggingCount(allTags[i]) + this.getExcerpt(title,allTags[i]) + "\n" + allTagsHolder[allTags[i]]);

		if (sortOrder == "desc" && leftOverOutput != "")
			// leftovers last...
			output.push(leftOverOutput);

		return this.drawTable(place,
				this.makeColumns(output,parseInt(this.getTagglyOpt(title,"numCols"))),
				"grouped");

	},

	// used to build site map
	treeTraverse: function(title,depth,sortBy,sortOrder,isTagExpr) {

		var list = config.taggly.getTiddlers(title,sortBy,isTagExpr);

		if (sortOrder == "desc")
			list.reverse();

		var indent = "";
		for (var j=0;j<depth;j++)
			indent += "*"

		var childOutput = "";

		if (depth > this.config.siteMapDepthLimit)
			childOutput += indent + this.lingo.tooDeepMessage;
		else
			for (var i=0;i<list.length;i++)
				if (list[i].title != title)
					if (this.notHidden(list[i].title,this.config.inTiddler))
						childOutput += this.treeTraverse(list[i].title,depth+1,sortBy,sortOrder,false);

		if (depth == 0)
			return childOutput;
		else
			return indent + "[["+title+"]]" + this.getTaggingCount(title) + this.getExcerpt(this.config.inTiddler,title,depth) + "\n" + childOutput;
	},

	// this if for the site map mode
	createTagglyListSiteMap: function(place,title,isTagExpr) {
		this.config.inTiddler = title; // nasty. should pass it in to traverse probably
		var output = this.treeTraverse(title,0,this.getTagglyOpt(title,"sortBy"),this.getTagglyOpt(title,"sortOrder"),isTagExpr);
		return this.drawTable(place,
				this.makeColumns(output.split(/(?=^\*\[)/m),parseInt(this.getTagglyOpt(title,"numCols"))), // regexp magic
				"sitemap"
				);
	},

	macros: {
		tagglyTagging: {
			handler: function (place,macroName,params,wikifier,paramString,tiddler) {
				var parsedParams = paramString.parseParams("tag",null,true);
				var refreshContainer = createTiddlyElement(place,"div");

				// do some refresh magic to make it keep the list fresh - thanks Saq
				refreshContainer.setAttribute("refresh","macro");
				refreshContainer.setAttribute("macroName",macroName);

				var tag = getParam(parsedParams,"tag");
				var expr = getParam(parsedParams,"expr");

				if (expr) {
					refreshContainer.setAttribute("isTagExpr","true");
					refreshContainer.setAttribute("title",expr);
					refreshContainer.setAttribute("showEmpty","true");
				}
				else {
					refreshContainer.setAttribute("isTagExpr","false");
					if (tag) {
        				refreshContainer.setAttribute("title",tag);
						refreshContainer.setAttribute("showEmpty","true");
					}
					else {
        				refreshContainer.setAttribute("title",tiddler.title);
						refreshContainer.setAttribute("showEmpty","false");
					}
				}
				this.refresh(refreshContainer);
			},

			refresh: function(place) {
				var title = place.getAttribute("title");
				var isTagExpr = place.getAttribute("isTagExpr") == "true";
				var showEmpty = place.getAttribute("showEmpty") == "true";
				removeChildren(place);
				addClass(place,"tagglyTagging");
				var countFound = config.taggly.getTiddlers(title,'title',isTagExpr).length
				if (countFound > 0 || showEmpty) {
					var lingo = config.taggly.lingo;
					config.taggly.createListControl(place,title,"hideState");
					if (config.taggly.getTagglyOpt(title,"hideState") == "show") {
						createTiddlyElement(place,"span",null,"tagglyLabel",
								isTagExpr ? lingo.labels.exprLabel.format([title]) : lingo.labels.label.format([title]));
						config.taggly.createListControl(place,title,"title");
						config.taggly.createListControl(place,title,"modified");
						config.taggly.createListControl(place,title,"created");
						config.taggly.createListControl(place,title,"listMode");
						config.taggly.createListControl(place,title,"excerpts");
						config.taggly.createListControl(place,title,"numCols");
						config.taggly.createTagglyList(place,title,isTagExpr);
						if (countFound == 0 && showEmpty)
							createTiddlyElement(place,"div",null,"tagglyNoneFound",lingo.labels.noneFound);
					}
				}
			}
		}
	},

	// todo fix these up a bit
	styles: [
"/*{{{*/",
"/* created by TagglyTaggingPlugin */",
".tagglyTagging { padding-top:0.5em; }",
".tagglyTagging li.listTitle { display:none; }",
".tagglyTagging ul {",
"	margin-top:0px; padding-top:0.5em; padding-left:2em;",
"	margin-bottom:0px; padding-bottom:0px;",
"}",
".tagglyTagging { vertical-align: top; margin:0px; padding:0px; }",
".tagglyTagging table { margin:0px; padding:0px; }",
".tagglyTagging .button { visibility:hidden; margin-left:3px; margin-right:3px; }",
".tagglyTagging .button, .tagglyTagging .hidebutton {",
"	color:[[ColorPalette::TertiaryLight]]; font-size:90%;",
"	border:0px; padding-left:0.3em;padding-right:0.3em;",
"}",
".tagglyTagging .button:hover, .hidebutton:hover, ",
".tagglyTagging .button:active, .hidebutton:active  {",
"	border:0px; background:[[ColorPalette::TertiaryPale]]; color:[[ColorPalette::TertiaryDark]];",
"}",
".selected .tagglyTagging .button { visibility:visible; }",
".tagglyTagging .hidebutton { color:[[ColorPalette::Background]]; }",
".selected .tagglyTagging .hidebutton { color:[[ColorPalette::TertiaryLight]] }",
".tagglyLabel { color:[[ColorPalette::TertiaryMid]]; font-size:90%; }",
".tagglyTagging ul {padding-top:0px; padding-bottom:0.5em; margin-left:1em; }",
".tagglyTagging ul ul {list-style-type:disc; margin-left:-1em;}",
".tagglyTagging ul ul li {margin-left:0.5em; }",
".editLabel { font-size:90%; padding-top:0.5em; }",
".tagglyTagging .commas { padding-left:1.8em; }",
"/* not technically tagglytagging but will put them here anyway */",
".tagglyTagged li.listTitle { display:none; }",
".tagglyTagged li { display: inline; font-size:90%; }",
".tagglyTagged ul { margin:0px; padding:0px; }",
".excerpt { color:[[ColorPalette::TertiaryDark]]; }",
".excerptIndent { margin-left:4em; }",
"div.tagglyTagging table,",
"div.tagglyTagging table tr,",
"td.tagglyTagging",
" {border-style:none!important; }",
".tagglyTagging .contents { border-bottom:2px solid [[ColorPalette::TertiaryPale]]; padding:0 1em 1em 0.5em;",
"  margin-bottom:0.5em; }",
".tagglyTagging .indent1  { margin-left:3em;  }",
".tagglyTagging .indent2  { margin-left:4em;  }",
".tagglyTagging .indent3  { margin-left:5em;  }",
".tagglyTagging .indent4  { margin-left:6em;  }",
".tagglyTagging .indent5  { margin-left:7em;  }",
".tagglyTagging .indent6  { margin-left:8em;  }",
".tagglyTagging .indent7  { margin-left:9em;  }",
".tagglyTagging .indent8  { margin-left:10em; }",
".tagglyTagging .indent9  { margin-left:11em; }",
".tagglyTagging .indent10 { margin-left:12em; }",
".tagglyNoneFound { margin-left:2em; color:[[ColorPalette::TertiaryMid]]; font-size:90%; font-style:italic; }",
"/*}}}*/",
		""].join("\n"),

	init: function() {
		merge(config.macros,this.macros);
		config.shadowTiddlers["TagglyTaggingStyles"] = this.styles;
		store.addNotification("TagglyTaggingStyles",refreshStyles);
	}
};

config.taggly.init();

//}}}

/***
InlineSlidersPlugin
By Saq Imtiaz
http://tw.lewcid.org/sandbox/#InlineSlidersPlugin

// syntax adjusted to not clash with NestedSlidersPlugin
// added + syntax to start open instead of closed

***/
//{{{
config.formatters.unshift( {
	name: "inlinesliders",
	// match: "\\+\\+\\+\\+|\\<slider",
	match: "\\<slider",
	// lookaheadRegExp: /(?:\+\+\+\+|<slider) (.*?)(?:>?)\n((?:.|\n)*?)\n(?:====|<\/slider>)/mg,
	lookaheadRegExp: /(?:<slider)(\+?) (.*?)(?:>)\n((?:.|\n)*?)\n(?:<\/slider>)/mg,
	handler: function(w) {
		this.lookaheadRegExp.lastIndex = w.matchStart;
		var lookaheadMatch = this.lookaheadRegExp.exec(w.source)
		if(lookaheadMatch && lookaheadMatch.index == w.matchStart ) {
			var btn = createTiddlyButton(w.output,lookaheadMatch[2] + " "+"\u00BB",lookaheadMatch[2],this.onClickSlider,"button sliderButton");
			var panel = createTiddlyElement(w.output,"div",null,"sliderPanel");
			panel.style.display = (lookaheadMatch[1] == '+' ? "block" : "none");
			wikify(lookaheadMatch[3],panel);
			w.nextMatch = lookaheadMatch.index + lookaheadMatch[0].length;
		}
   },
   onClickSlider : function(e) {
		if(!e) var e = window.event;
		var n = this.nextSibling;
		n.style.display = (n.style.display=="none") ? "block" : "none";
		return false;
	}
});

//}}}

/*{{{*/
/* created by TagglyTaggingPlugin */
.tagglyTagging { padding-top:0.5em; }
.tagglyTagging li.listTitle { display:none; }
.tagglyTagging ul {
	margin-top:0px; padding-top:0.5em; padding-left:2em;
	margin-bottom:0px; padding-bottom:0px;
}
.tagglyTagging { vertical-align: top; margin:0px; padding:0px; }
.tagglyTagging table { margin:0px; padding:0px; }
.tagglyTagging .button { visibility:hidden; margin-left:3px; margin-right:3px; }
.tagglyTagging .button, .tagglyTagging .hidebutton {
	color:[[ColorPalette::TertiaryLight]]; font-size:90%;
	border:0px; padding-left:0.3em;padding-right:0.3em;
}
.tagglyTagging .button:hover, .hidebutton:hover,
.tagglyTagging .button:active, .hidebutton:active  {
	border:0px; background:[[ColorPalette::TertiaryPale]]; color:[[ColorPalette::TertiaryDark]];
}
.selected .tagglyTagging .button { visibility:visible; }
.tagglyTagging .hidebutton { color:[[ColorPalette::Background]]; }
.selected .tagglyTagging .hidebutton { color:[[ColorPalette::TertiaryLight]] }
.tagglyLabel { color:[[ColorPalette::TertiaryMid]]; font-size:90%; }
.tagglyTagging ul {padding-top:0px; padding-bottom:0.5em; margin-left:1em; }
.tagglyTagging ul ul {list-style-type:disc; margin-left:-1em;}
.tagglyTagging ul ul li {margin-left:0.5em; }
.editLabel { font-size:90%; padding-top:0.5em; }
.tagglyTagging .commas { padding-left:1.8em; }
/* not technically tagglytagging but will put them here anyway */
.tagglyTagged li.listTitle { display:none; }
.tagglyTagged li { display: inline; font-size:90%; }
.tagglyTagged ul { margin:0px; padding:0px; }
.excerpt { color:[[ColorPalette::TertiaryDark]]; }
.excerptIndent { margin-left:4em; }
div.tagglyTagging table,
div.tagglyTagging table tr,
td.tagglyTagging
 {border-style:none!important; }
.tagglyTagging .contents { border-bottom:2px solid [[ColorPalette::TertiaryPale]]; padding:0 1em 1em 0.5em;
  margin-bottom:0.5em; }
.tagglyTagging .indent1  { margin-left:3em;  }
.tagglyTagging .indent2  { margin-left:4em;  }
.tagglyTagging .indent3  { margin-left:5em;  }
.tagglyTagging .indent4  { margin-left:6em;  }
.tagglyTagging .indent5  { margin-left:7em;  }
.tagglyTagging .indent6  { margin-left:8em;  }
.tagglyTagging .indent7  { margin-left:9em;  }
.tagglyTagging .indent8  { margin-left:10em; }
.tagglyTagging .indent9  { margin-left:11em; }
.tagglyTagging .indent10 { margin-left:12em; }
.tagglyNoneFound { margin-left:2em; color:[[ColorPalette::TertiaryMid]]; font-size:90%; font-style:italic; }
/*}}}*/
! The vi editor

!! vi Intro

!!! Background: 
* ex, vi, vim
** {{Command{ex}}} = line oriented text editor (for printed output / slow displays / modems)
*** demonstrate the ''c'' (change) and ''i'' (insert) commands.  Go to a line number, run the command, then end input with a line containing only ''.'' to return to the prompt.
** {{Command{vi}}} = screen oriented instead of line oriented
*** Different modes - either entering text or executing commands
*** Commands are either {{Command{vi}}} commands or {{Command{ex}}} commands.
** {{Command{ex}}} & {{Command{vi}}} are different interfaces to the same program
** {{Command{ex}}} & {{Command{vi}}} began with original unix versions, over 30 years ago
** {{Command{vi}}} is now the standard unix text editor
** {{Command{vim}}} = vi Improved - extra commands and functionality

!!! Using vi:
* Opening a document for editing loads it into a buffer, which is the in-memory text of a file.  
** Any changes are made to the buffer and not saved to the file until the //write// command is provided.
* There are two Modes:
** Command mode - where you provide commands to the editor
*** These may be either {{Command{vi}}} or {{Command{ex}}} commands
** Input mode - where you can interact with the content of the file
*** You'll typically see the string ''-- INSERT --'' in the bottom-left corner when you're in Input Mode
*** Leave input mode by pressing ESC
* vi commands (command mode) contain an operator (what to do) and scope (what to do it on)
** Examples:
*** {{Monospaced{''d$''}}} - delete (d) all text from the cursor to the end of the line ($ typically means end of line)
*** {{Monospaced{''dw''}}} - delete (d) the current word
*** {{Monospaced{''d5w''}}} - delete (d) the current and next 4 (5) words (w)
*** {{Monospaced{''d2d''}}} - delete (d) two (2) lines: the current line and the next one
*** {{Monospaced{''cw''}}} - change (c) the next word (w), placing you in input mode
*** {{Monospaced{''ct:''}}} - change (c) all characters until (t) the next colon (:)
* Searching with ''/'' and ''?''
** Search down with the ''/'' key
** Search up with the ''?'' key
*** After you type either ''/'' or ''?'', your cursor will move to the bottom-left corner and you will be prompted to enter a search string.  Press Enter to begin the search.
** Repeat your last search with ''n'' (see the short example session below)
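
Here is a minimal example session combining a few of these commands.  The filename notes.txt and the search string are placeholders for illustration:

{{{
vi notes.txt       (open the file; you begin in command mode)
/error             (search forward for the string "error")
cwERROR            (change the word under the cursor, typing the new text)
<ESC>              (press ESC to leave input mode)
dd                 (delete the current line)
:wq                (ex command: write the buffer to the file and quit)
}}}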


!! Using ex commands in vi

The {{Command{vi}}} editor is the ''vi''sual, screen-oriented front-end to the {{Command{ex}}} line-oriented text editor.  {{Command{ex}}} was one of the original Unix text editors from the days when text files could only be displayed and edited one line at a time; it wasn't yet possible to display a full screen of text.  The ''vi''sual interface was added once terminals evolved to support full-screen editing.  {{Command{vi}}} still supports the original {{Command{ex}}} commands for manipulating a document.  These commands bring a great deal of power to the editor and make complex tasks rather simple to solve.

* Press the : (colon) key to enter {{Command{ex}}} command mode when you are not in Input mode.  Your cursor will move to the bottom-left corner.
* {{Command{ex}}} commands will be displayed on the bottom status line.  Press ~CTRL-C to cancel the command and return to vi mode.
* Syntax: {{Monospaced{'' :[address]command ''}}}
** {{Monospaced{'' :[address] ''}}} is an optional component which allows you to specify which lines to act upon.  

!!! Valid address formats
* Addresses may be addressed singly:
** {{Monospaced{''.''}}} - represents current line (default if no address is specified)
** {{Monospaced{''//n//''}}} - a specific line number
** {{Monospaced{''$''}}} - last line in the file
* or as a range:
**{{Monospaced{''%''}}} - Whole file
** {{Monospaced{''address1,address2''}}} - from address1 to address2.
** Also includes +//n// and -//n// to include the next or previous //n// lines
* Examples:
** {{Monospaced{'':12,20d''}}} - delete lines 12 to 20
** {{Monospaced{'':.,+5''}}}  - current and next five lines
** {{Monospaced{'':10,$''}}} - lines 10 through the end of the file
** {{Monospaced{'':$-2,$''}}} - last three lines (last line and two previous)

!!! Most useful ex commands

* ''d'' - delete lines
** {{Monospaced{'':10d''}}} - delete line 10
** {{Monospaced{'' :1,10d ''}}} - delete lines 1 to 10
* ''e'' - edit
** {{Monospaced{'':e! ''}}} - reopen current file, discarding changes
* ''s'' - substitute
**{{Monospaced{'' :s/one/two/ ''}}} - change the first instance of one to two on the current line
**{{Monospaced{'' :%s/one/two/ ''}}} - change the first instance of one to two on every line in the document
**{{Monospaced{'' :%s/one/two/g ''}}} - change all instances of one to two on all lines in the document
**{{Monospaced{'' :.,+5s/one/two/g ''}}} - change all instances of one to two on current and next 5 lines.
* ''g'' - globally execute specified commands on lines containing a particular pattern
** {{Monospaced{'' :g/stuff/d ''}}} - delete all lines containing the string stuff
** {{Monospaced{'' :g/lpd-errs/s/^/#/ ''}}}  - add a comment character to the beginning of every line containing the string lpd-errs
** {{Monospaced{'' :10,20g/stuff/d ''}}} - delete lines between lines 10 and 20 that contain the string stuff
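
Putting addresses and commands together, here is a short worked session.  The filename hosts.txt and the patterns are placeholders for illustration:

{{{
vi hosts.txt           (open the file)
:1,5s/ci233/ci234/g    (replace every instance of ci233 with ci234 on lines 1 through 5)
:g/^#/d                (delete every line in the file that begins with #)
:wq                    (write the changes and quit)
}}}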

----


!! More info
*vi handouts: [[vi Diagram|handouts/viDiagram.pdf]] & [[Old Handout|handouts/viHandout.pdf]]
*{{Command{vimtutor}}} command
*http://www.gentoo.org/doc/en/vi-guide.xml
*[[UNIX Command summary|handouts/UnixCommandSummary.pdf]] back page

http://docstore.mik.ua/orelly/unix/unixnut/ch09_01.htm


! Using the compilers

Also a simple exercise to get more practice editing text files with vi

{{Command{gcc}}} & {{Command{g++}}}
Use {{Command{gcc}}} for compiling C code and {{Command{g++}}} for compiling C++ code.  Source code file extensions must be either .c or .cpp.

{{Command{gcc -o //name_of_executable// source.c}}}
{{Command{g++ -o //name_of_executable// source.cpp}}}

//name_of_executable// = executable file to create after compiling your source code, instead of using the default a.out

{{{
#include <stdio.h>

int main(void)
{
    printf("Hello World in C\n\n");
    return 0;
}
}}}

{{{
#include <iostream>
using namespace std;
int main()
{ 
  cout << "Hello World!" << endl;
  return 0;
}
}}}
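
A minimal compile-and-run sequence, assuming the two examples above were saved as hello.c and hello.cpp (the filenames are only examples):

{{{
gcc -o hello_c hello.c        # compile the C version into an executable named hello_c
./hello_c                     # run it
g++ -o hello_cpp hello.cpp    # compile the C++ version
./hello_cpp
}}}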


! Assignments

!! Read :
 - Chapter 12 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]

!! Complete: 
 - [[Lab 23|labs/lab23.pdf]] & [[Lab 24|labs/lab24.pdf]] 
 - These labs are optional for additional vi practice and will be accepted for extra credit.  There is no firm due date, but please try to submit before the end of October.


/***

|Name|ToggleSideBarMacro|
|Created by|SaqImtiaz|
|Location|http://lewcid.googlepages.com/lewcid.html#ToggleSideBarMacro|
|Version|1.0|
|Requires|~TW2.x|
!Description:
Provides a button for toggling visibility of the SideBar. You can choose whether the SideBar should initially be hidden or displayed.

!Demo
<<toggleSideBar "Toggle Sidebar">>

!Usage:
{{{<<toggleSideBar>>}}} <<toggleSideBar>>
additional options:
{{{<<toggleSideBar label tooltip show/hide>>}}} where:
label = custom label for the button,
tooltip = custom tooltip for the button,
show/hide = use one or the other, determines whether the sidebar is shown at first or not.
(default is to show the sidebar)

You can add it to your tiddler toolbar, your MainMenu, or where you like really.
If you are using a horizontal MainMenu and want the button to be right aligned, put the following in your StyleSheet:
{{{ .HideSideBarButton {float:right;} }}}

!History
*23-07-06: version 1.0: completely rewritten, now works with custom stylesheets too, and easier to customize start behaviour.
*20-07-06: version 0.11
*27-04-06: version 0.1: working.

!Code
***/
//{{{
config.macros.toggleSideBar={};

config.macros.toggleSideBar.settings={
         styleHide :  "#sidebar { display: none;}\n"+"#contentWrapper #displayArea { margin-right: 1em;}\n"+"",
         styleShow : " ",
         arrow1: "«",
         arrow2: "»"
};

config.macros.toggleSideBar.handler=function (place,macroName,params,wikifier,paramString,tiddler)
{
          var tooltip= params[1]||'toggle sidebar';
          var mode = (params[2] && params[2]=="hide")? "hide":"show";
          var arrow = (mode == "hide")? this.settings.arrow1:this.settings.arrow2;
          var label= (params[0]&&params[0]!='.')?params[0]+" "+arrow:arrow;
          var theBtn = createTiddlyButton(place,label,tooltip,this.onToggleSideBar,"button HideSideBarButton");
          if (mode == "hide")
             {
             (document.getElementById("sidebar")).setAttribute("toggle","hide");
              setStylesheet(this.settings.styleHide,"ToggleSideBarStyles");
             }
};

config.macros.toggleSideBar.onToggleSideBar = function(){
          var sidebar = document.getElementById("sidebar");
          var settings = config.macros.toggleSideBar.settings;
          if (sidebar.getAttribute("toggle")=='hide')
             {
              setStylesheet(settings.styleShow,"ToggleSideBarStyles");
              sidebar.setAttribute("toggle","show");
              this.firstChild.data= (this.firstChild.data).replace(settings.arrow1,settings.arrow2);
              }
          else
              {
               setStylesheet(settings.styleHide,"ToggleSideBarStyles");
               sidebar.setAttribute("toggle","hide");
               this.firstChild.data= (this.firstChild.data).replace(settings.arrow2,settings.arrow1);
              }

     return false;
}

setStylesheet(".HideSideBarButton .button {font-weight:bold; padding: 0 5px;}\n","ToggleSideBarButtonStyles");

//}}}
|~ViewToolbar|closeTiddler closeOthers editTiddler > fields syncing permalink references jump|
|~EditToolbar|+saveTiddler -cancelTiddler deleteTiddler|
{{Note{This video is a nice demo and overview on how this all works.  It may be helpful to review it before proceeding.  https://www.youtube.com/watch?v=XFJ6_BYno08}}}

!! Defeating firewalls with SSH to access protected resources

Knowing how to make full use of SSH and its tunneling and proxying capabilities to defeat firewalls is an excellent skill for a security practitioner to have!  There are two methods we can use with SSH to defeat firewalls:

A.  Dynamic application-level port forwarding (SOCKS proxy)
<<<
Specifies local "dynamic" application-level port forwarding.  This works by allocating a socket to listen to a port on the local side, optionally bound to the specified bind_address.  Whenever a connection is made to this port, the connection is forwarded over the secure channel, and the application protocol is then used to determine where to connect to from the remote machine.
<<<

B.  Port forwarding
<<<
Specifies that connections to the given TCP port or Unix socket on the local (client) host are to be forwarded to the given host and port, or Unix socket, on the remote side.  This works by allocating a socket to listen to either a TCP port on the local side, optionally bound to the specified bind_address, or to a Unix socket.  Whenever a connection is made to the local port or socket, the connection is forwarded over the secure channel, and a connection is made to either host port hostport, or the Unix socket remote_socket, from the remote machine.
<<<

Method ''A'' functions as a traditional application-level proxy:  you configure your application (eg: a web browser) to send all of its connections through the tunnel.  Method ''B'' creates a 1:1 connection:  a TCP port on your local PC is tunneled through the SSH connection to a specific IP address and TCP port on the other side.  Method B is best when there is no option to configure a proxy in your application (a brief sketch of it appears below).
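
As a sketch only (we will not use it in this lab), a method ''B'' port forward to a hypothetical internal web server could look like the following.  The host name web.internal and local port 8080 are made-up values for illustration; the SSH port and server match the proxy command used later on this page:

{{{
ssh -L 8080:web.internal:80 -p 2233 lab.ci233.net
}}}

While that SSH session stays open, browsing to http://localhost:8080 on your PC reaches port 80 on web.internal through the encrypted tunnel.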

We're going to use method ''A'' for accessing internal web resources behind our class router.  This grants us the most flexibility since our browser allows us to configure an application-level proxy.


Before you begin, open your web browser and load the page http://ifconfig.me.  Take note of the IP address displayed.  We will compare this to the IP address you receive after everything is set up. 


!!! A. Establishing a SOCKS proxy with SSH

A proxy is a middle man, passing on network requests to their destination on your behalf.

A SOCKS proxy (socket secure) is a protocol to route packets between a client and a server through an intermediate proxy.  This is used (typically for web traffic) when the client is not able to communicate with the server directly, but the client can communicate with the proxy system and the chosen proxy can communicate with the server.  Some sites set up a proxy for web traffic as a means to enforce policy, monitor traffic, and block direct connections to web sites.

Here, your home PC cannot access your web server VM or the Naemon monitoring server but the class shell server can.  We'll use the class shell server to proxy your browser's web connections and be the middleman for your web requests.  This diagram illustrates the overall goal.  We see your proxy connection traveling through the encrypted SSH tunnel to the class shell server.  Web requests are then made from the perspective of the class shell server.

[img[img/proxy.png]]

SSH can be used to establish a SOCKS proxy.  This functionality is available from Putty or from command-line ~OpenSSH.

''1.'' To set up the Proxy on your home PC, complete either ''a)'' or ''b)'', depending on your OS:

''a)'' If your home OS is Mac or Unix:  This command will create an encrypted proxy tunnel between your PC and the specified host, in this case our class shell server.  Traffic connecting to your PC on port 2233 will then pass through this proxy.  Execute a similar command on your home computer.  You may also need to update the username.
<<<
Set up SOCKS proxy:  {{Command{ssh -D 2233 -p 2233 lab.ci233.net}}}
<<<

''b)'' Follow these steps when connecting with Putty from your home Windows PC:
<<<
* Expand the Connection / SSH menu
* Select Tunnels
* Enter ''2233'' in the Source port box
* Select ''Dynamic''
* Click Add
* Connect to a remote host (the class shell server) as normal
<<<
/% * [[This video|Putty Proxy]] demonstrates configuring Putty to add the dynamic tunnel. %/
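
Whichever method you used, you can confirm the dynamic forward is listening on your PC before continuing.  This is just a quick sanity check; the port number assumes the setup above, and on Windows you would use {{Command{findstr}}} in place of {{Command{grep}}}:

{{{
netstat -an | grep 2233    # look for a local listener on port 2233, eg: 127.0.0.1:2233 in a LISTEN state
}}}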


''2.'' Your browser must be configured to pass traffic through the encrypted proxy.

I use the ~FoxyProxy extension to easily toggle between proxy settings in my browser.  It can also be configured to automatically send only selected sites through the proxy.  
* [[Firefox Extension|https://addons.mozilla.org/en-US/firefox/addon/foxyproxy-standard/]]
** ~CI233 settings file for use in Firefox: [[FoxyProxy-ci233.json|https://www.ci233.net/media/FoxyProxy-ci233.json]]
* [[Chrome Extension|https://chrome.google.com/webstore/detail/foxyproxy-standard/gcknhkkoolaabfmlnjonogaaifnjlfnp]]


Install the browser extension, import the settings file, and enable the proxy.

/% * [[This video|Firefox Proxy]] demonstrates using Firefox with the proxy to access an internal website %/


!!! B. Verification

Verification should be built into everything you configure.  Now that your proxy is established, let's verify it is functioning correctly and that web connections from Firefox are flowing through the class infrastructure.  Load the page http://ifconfig.me/ again in your browser and observe the IP address.  It should have changed from the original value you observed and should now match the public IP address of the class shell server, as shown in the screenshot below.  With the class server acting as a middle man, you can now load internal resources in this web browser which would have otherwise been blocked from the outside world.

[img[img/proxyIP.png]]
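If you'd like a quick command-line check as well, {{Command{curl}}} can send a request through the same SOCKS tunnel (a sketch, assuming the tunnel is listening on local port 2233 as configured above):
{{{
curl --socks5-hostname localhost:2233 http://ifconfig.me
}}}
The IP address returned should match the class shell server's public address, just like the browser test.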


!!! C. Naemon infrastructure monitoring

[[Naemon|https://www.naemon.org/]] is a tool which continuously monitors resources to provide a high level view of the health of an environment.  I'm running a Naemon server to monitor your ~VMs and use it to assist with grading your labs.  You can also use it to monitor the state of your systems and correct any issues it discovers.

Naemon is running on the internal class network and is not directly accessible from the outside world.  You will need to bypass the router and use the class shell server as a proxy in order to reach it.  

Once the proxy is configured in your browser, navigate to the URL &nbsp; '' http://head.ci233.net/ ''.  Log in with the username {{Monospaced{''ci233''}}} and the password {{Monospaced{''naemon''}}}.

This video contains a brief [[Naemon Introduction]].


{{Note{Naemon status checks run every two hours.  If you fix a problem, you will either need to wait up to two hours for the recheck or force Naemon to recheck.}}}
Everyone should be using the Blackboard discussion boards during the course of the semester and will likely need to paste in output from the command line at some point.  If you paste text copied from the terminal, Blackboard will turn it into a mangled mess:

@@display:block;text-align:center;[img[img/blackboard0.png]]@@

Aesthetics and readability should be considered in everything you produce.  We can avoid Blackboard's text mangling and make our post easier to read with a couple of additional steps.

''1.'' Insert a few blank lines where you want to put the pasted text.  These blank lines will make it easier to add additional text after inserting your pasted text from the terminal.

''2.'' Choose the HTML editor from the Toolbar

@@display:block;text-align:center;[img[img/blackboard1.png]]@@

''3.'' Paste your copied text from the terminal where you would like it to appear.  

''4.'' Add a {{Command{&lt;pre>}}} HTML tag before your pasted text and a {{Command{&lt;/pre>}}} tag after it.  This will prevent the mangled formatting and preserve all spacing, just as you see it in the terminal.

@@display:block;text-align:center;[img[img/blackboard2.png]]@@

''5.'' Click update.  You should now see your copied text nicely formatted in Blackboard.

''6.'' Select the text you pasted in and choose the font ''Courier New''.  All commands and text copied from the terminal should be written in a monospaced font like Courier New to keep spacing uniform and to make it clear the text is a command or its output.

''7.''  Highlight the command you executed to get the output and change it to bold.  This makes it easier to distinguish the command that was run from the output it returned.

''8.'' If appropriate, use the highlighter to draw attention to any parts you're talking about.  Be sure to first change the color to a brighter one.

@@display:block;text-align:center;[img[img/blackboard4.png]]@@


You'll finally be left with something that is much easier to read than the Blackboard mangled mess.  You're more likely to get a response to your forum post if it is easier to read.  Pasting text like this is preferable to just posting a screenshot.  If you paste in the text, someone can quote it in a reply and easily highlight relevant parts.

@@display:block;text-align:center;[img[img/blackboard3.png]]@@
!! Using Discord
We'll need to keep Discord organized in order to keep it useful.  Get in the habit of this now, because you'll have these same issues later in the workplace.  The concepts are very similar to what we need to do on Slack in the corporate world.

!!! There are four types of channels:
# //administrative// - Administrative questions about the class like grading, due dates, and technical support issues.  Not for course content.
# //misc-chatter// - Conversation not related to this class
# //notes-resource// - Posts about general course notes and resources that might be helpful for others
# //week#// - The weekly course content discussions.  Post to this channel regarding material that was ''//assigned//'' during that week.
** For example, if you have a question about a week 1 lab, post it to the week 1 channel even if we're now in week 2.

!! Asking for help
* Use threads for your questions to help keep things organized.
** See below for an example on using threads
** Title your threads appropriately.  Use something descriptive in the name and not just the lab and question number.  A subject like @@Lab 17, #2 - Incorrectly discarding data@@ is far more helpful than something generic like ''Lab 17''.
** Organization and usability are important in everything you do.  Full credit for asking a question and providing help will only be given for conversations which are within threads.
* When asking for help, be sure to include relevant supporting information.  You'll receive faster responses if you provide everything someone needs to help you.
** If you're asking about a lab question, including that question in your post is helpful so everyone doesn't need to first look at the lab to know what you're talking about.
** Send us what you're seeing; don't just describe it.  A picture is worth a thousand words.
*** Did you receive an error from a command?  Be sure to include the error and the command you ran.  
*** The shell prompt will also include helpful information, such as:
**** The host you're running the command on
**** The user you're running the command as
**** A portion of the current working directory.  Including the full output of the {{Command{pwd}}} command might be helpful too
**** The exact command string you're running.
** Don't forget to include any relevant log information, configuration lines, and troubleshooting steps you've already taken.  You're more likely to get help if you start the process and can describe what you've already done to troubleshoot.
* Be sure to review everything for typos first.  Too many posts asking for help turn out to be for problems caused by typos.  Save some time and check your typing first.
* If you solve your problem while you're waiting for help, be sure to post an update.  Don't let someone else waste their time helping you when you no longer need it.

!! Using code blocks

* Be sure all code, commands, and output are enclosed within a code block.  This will make it easier to identify commands and prevent Discord from interpreting special characters.
* Single commands can be put inside of a code block by enclosing your command in backticks.
* A series of lines can be put inside of a code block by putting three backticks at the start of the first line and three backticks at the end of the last line.
* When possible, sending text in code blocks is better than just sending a screenshot.  Text sent in a screenshot cannot be copied and pasted for testing.
* A full list of Markdown formatting options is available in the [[Discord help docs|https://support.discord.com/hc/en-us/articles/210298617-Markdown-Text-101-Chat-Formatting-Bold-Italic-Underline-]]

Example of using single line code block:
[img[img/discord-code3.png]]

Example of using multi-line code block:
[img[img/discord-code1.png]]

Results of using code blocks:
[img[img/discord-code2.png]]


!! Using threads

Threads in Discord will help keep the weekly channels and conversations organized.  Create a new thread for each question you're asking.

----
[img[img/discord1.png]]
# Click on the week number for the material you would like to discuss
# Click on the threads icon up top
----

[img[img/discord2.png]]
# Enter your thread name
# Enter your question in the Starter Message followed by any supporting information in additional posts.
# Click on //Create Thread//
----

[img[img/discord3.png]]
# To join a thread, click on the //# Message// link.  Your thread will open to the right
# Post any additional messages within the thread to the right. 
----

[img[img/discord4.png]]
The available threads will appear under the weekly channel.  You can click on the thread title to easily join the conversation.  

Also notice the excellent use of replies here.  Sending a message as a reply will notify your recipient they have a new message.
----

[img[img/discord5.png]]
If you would like to follow an interesting thread, right click on the thread message area and choose //Join Thread//.
----

[img[img/discord6.png]]
After joining a thread, it will appear on the left side of your screen under the channel for the week number.  This will make it easier to find later.


<!--{{{-->
<div class='toolbar' macro='toolbar [[ToolbarCommands::ViewToolbar]]'></div>
<div class='title' macro='view title'><span class="miniTag" macro="miniTag"></span></div>
<div class='subtitle'>Updated <span macro='view modified date [[MMM DD, YYYY]]'></span> (<span macro='message views.wikified.createdPrompt'></span> <span macro='view created date [[MMM DD, YYYY]]'></span>)<BR><BR></div>
<div class='viewer' macro='view text wikified'></div>
<div class="tagglyTagging" macro="tagglyTagging"><BR><BR></div>
<div class="tagglyTagged" macro="hideSomeTags"></div>
<div class='tagClear'></div>
<!--}}}-->
/%@@ This will be used in the second half of the semester @@ %/
Proxmox hypervisor:  https://lab.ci233.net/

Subnet: 192.168.12.0/23
Gateway: 192.168.12.1
DNS: 192.168.12.10

!! IP Addresses:
| !Last octet | !Host Name | !Description |
| n | test |Testing|
| n+1 | www |Web Server|
| n+2 | auth |Authentication & DNS|
| n+3 | files |Storage|
| n+4 |>| Unused |
| n+5 |>|~|
| n+6 |>|~|
| n+7 | final |Final Exam|

* The fully-qualified hostname for your VM is //host//.//username//.ci233.net where //host// is in the second column in the table above.
* Your VM IP addresses should be in the form 192.168.13.//n// where //n// is the starting value (Start IP) you have been assigned in the table below.  Increment the value of //n// as necessary for additional ~VMs.  
** Do not deviate from the provided IP addresses.  These IP addresses will be checked for grading.  If you use other ~IPs you will not receive credit for the labs.

| !Start IP | !Username |
| 24 | nmerante |
| 32 | bvasquez14 |
| 40 | cbaylor09 |
| 48 | drice24 |
| 56 | nramic21 |
| 64 | rabreu27 |
| 72 | smacinsky21 |
| 80 | sboykin11 |
| 88 | ttwiss24 |
| 96 | wallen24 |

! Lab network topology
[img[img/topo.png]]

/% awk -v ip=32 '{print "| " ip " | " $1 " |"; ip+=8}' /root/user-233.txt %/
! Material

!!! Expectations:

Mostly outlined in the [[syllabus|syllabus/CI233Syllabus2409.pdf]], but to recap:

* Honesty & Integrity - Cheating generally results in a failing ''course'' grade.
** This course is in a security program.  If you cannot be trusted, you do not belong here.
* Motivation & practice - You must be motivated to practice the work in order to pick up the material.
** An article discussing [[productive struggle|http://maateachingtidbits.blogspot.com/2017/11/the-role-of-failure-and-struggle-in.html]] that roughly outlines how I'm teaching this course.
* Graded Homework - Almost everything will be graded.
* Don't fall behind - Else the workload will bury you.
** Let me know early if you're starting to run into trouble.

This class will also use Linux as a vehicle for reinforcing good soft skills.  You will be expected to:
* Provide clear and thorough explanations
* Ask questions when help is needed and be an active participant in your learning

!!! Class Resources
* Required Textbooks:  
** First week review material - [[The Linux Command Line|http://linuxcommand.org/tlcl.php]]
** Remainder of the semester - [[Linux Bible, ISBN: 978-1119578888|https://www.amazon.com/Linux-Bible-Christopher-Negus/dp/1119578884]]
* Class website:  https://www.ci233.net/
** The class website will be our primary resource for course content
** Each content page is generally divided into three sections:  
### the content assignment (what to read or watch),
### my notes about the content
### the deliverables for that content
* Brightspace will be used only for announcements and tracking grades.
* Discord will be used for class discussions
** Participation here will be [[evaluated as well|Class Participation]].

!!! Class Cadence
* A week's worth of new material will be posted to the class website Sunday evening in two parts.  
** Unless stated otherwise, part 1 assignments will be due by end of day Wednesday
** Part 2 assignments will be due by end of day Saturday.
** An [[assignment calendar|Calendar]] can be found on our class website in the menu bar above.
* Carnegie credit hour
** A Carnegie credit hour is defined as 50 minutes of lecture and 2 hours of prep/homework for each traditional course credit hour 
** This requirement is defined in [[SUNY Policy|https://www.suny.edu/sunypp/documents.cfm?doc_id=168]]
** Translated to our online class, this means we are expected to perform approximately 12 hours of instructional activity per week
** This is hard to gauge in an online class.  Please let me know if you feel we are regularly exceeding that.

!!! Extra Help
* Chat sessions with {{Command{talk}}} (see below)
* Ad-hoc online meetings via Zoom.  Let me know if you'd like to schedule one.
* Weekly Zoom meetings if there's interest

{{Warning{
This class will test your skills as a student; ''being a good student will be important in order to successfully complete this course''.  This will not be one where you can do the bare minimum and skate by with a good grade.  Good ''time management'' and ''study skills'' will be critical.  ''If you neglect the material you will likely not successfully complete the course.''

Everything we do this semester will look back on previous work. If you're rushing through and not retaining it, you will surely pay for it later.  Having a keen eye for detail, paying attention to the directions, and taking the time to practice and retain the material will make for a much smoother semester.
}}}


!! Accessing the class shell server

The class shell server is an always-on system we will connect to in order to practice the class assignments and submit homework.  There are two ways we will access the system - from the command line for entering commands or through a file transfer utility for uploading files.

!!! Connection Tools
* Access the shell (command line) with either:
** [[PuTTY for Windows|http://www.chiark.greenend.org.uk/~sgtatham/putty/download.html]] (Download the latest version of the 64-bit MSI installer)
** [[PuTTY for Mac|https://www.ssh.com/ssh/putty/mac/]]
* Transfer files between the server and your local system:
** Windows: [[WinSCP|https://winscp.net/eng/download.php]]
** Mac: scp/sftp on the command line or any SFTP client like [[FileZilla|https://filezilla-project.org/]]
* Portable versions exist for these applications.  This is convenient if you are using campus ~PCs that do not have the tools installed.  You may download and run them from a flash drive or your home directory in the lab.
** [[PuTTY|http://www.chiark.greenend.org.uk/~sgtatham/putty/download.html]] - Download and run putty.exe
** [[WinSCP|https://winscp.net/eng/download.php]]: Download the portable package

!!! Logging in
* Use one of the tools above to log in to lab.ci233.net via SSH on port 2233.
* Log in with your campus username
* Your initial password will be posted to the Week 1 forums in the Brightspace Discussion Board.
* Change your password after logging in.
** Run the {{Command{passwd}}} command to change your password
** Any accounts still using the default password will be locked on Friday, August 28.

This short video will walk you through downloading ~PuTTY, a unix remote access client, and connecting to the system for command line access.  Your initial password for the server can be found in the Brightspace discussion board.

Video:  [[Accessing the shell server]]
/% Download: ~PuTTY - [[installer|https://the.earth.li/~sgtatham/putty/latest/w64/putty-64bit-0.70-installer.msi]] or [[exe|http://the.earth.li/~sgtatham/putty/latest/win64/putty.exe]] %/

!! Working on the command line

Console
* The console is the interface to a system as though you are physically sitting at its monitor and keyboard.  This lets us interact with the system before the operating system loads.
* A virtual console is available for ~VMs or through a lights-out management utility such as a Dell iDRAC.  
Remote access
* Remote access to a Linux system such as our class shell server can also be obtained through a remote access service like SSH.  
* SSH is the standard command-line remote access interface for Unix/Linux systems.  It allows us to interact via an SSH client, much like how your web browser interacts with a web server.
* Our shell server is a traditional timeshare server.  It's always available; we don't power it off.
Shells
* The shell is our interface with the command line.  It's a program that takes input from the user and passes it on to the system to process.


!!! Navigating our lab server's filesystem:
* Directory paths
** Directory paths enable us to have a hierarchy of directories and keep our files organized
** Similar to the command line on Windows
** The path separator is a forward slash on Unix/Linux systems - /
** Change directories with the {{Command{cd}}} command
*** eg:  {{Command{cd /opt/pub/ci233/submit}}}
** List the contents of the directory with the {{Command{ls}}} command
** List the contents of the directory in long format with the {{Command{ls -l}}} command
*** Displaying the contents of a directory in long format is always preferred so you can easily see all information about the files
* Some directories of interest:
** {{File{/home/}}} - User home directories typically reside within this directory
** {{File{/opt/pub/ci233/submit/}}} - Lab/Homework assignments are uploaded to this directory
** {{File{/opt/pub/ci233/returned/}}} - Graded homework assignments are stored in this directory for you to download
** {{File{/opt/pub/ci233/data/}}} - Data files for labs are stored here
** {{File{/tmp/}}} - Temporary scratch space

!!! Executing commands
* Structure of a command string:
** ''command'' [options] [arguments]
** options and arguments may be optional or required depending on the command
** In Unix command documentation, an item within the square brackets is an optional component

* Viewing files
** Display a file: {{Command{cat //filename//}}}
** Display a file one page at a time:  {{Command{less //filename//}}}
** Edit a text file:  {{Command{nano //filename//}}}  ''-or-''  {{Command{vi //filename//}}}


!!! Other useful commands
* The UNIX manual - {{Command{man}}}
** If you want to learn more about a command, check out its manpage.
** eg:  {{Command{man ls}}}

* Chat on the shell server with {{Command{talk}}}
** Log in and run {{Command{talk //username//}}} to start a chat session with that user
** Use the {{Command{w}}} command to see currently logged in users and check their idle times

!!! Using Blackboard
* The discussion boards make up 10% of your total course grade this semester.  
* Blackboard is a great example of a monopolistic business producing a sub-par product due to a lack of market competition
* We're going to need to work around some of their deficiencies
* Please see [[Using Blackboard]] for more details

!!! Working efficiently
* View your previously executed commands with the {{Command{history}}} command
* Tab completion - Press the tab key to autocomplete commands or file paths
* Up / Down arrows - search up and down through your command history
* Page Up / Page Down - Use these keys to search through your command history for the last commands which begin with a given string


!! Submitting homework assignments

See the [[Lab Assignments]] page for details 
! Material

Information in these pages may be helpful for these labs:
* [[Quoting]]
* [[Links and File Globbing]]


! Assignment

Complete the following review assignments:
* Due Wednesday: [[Lab 0|labs/lab0.pdf]] & [[Lab 1|labs/lab1.pdf]]
* Due Saturday: [[Lab 2|labs/lab2.pdf]] & [[Lab 3|labs/lab3.pdf]] 
! Material

!! Lab & VM notebook:
* Start keeping good notes of what you are doing with your ~VMs.
** The software installed today should be included.
** These notes will come in handy later when you need to repeat these steps on future ~VMs

!! Read
* Linux Bible Chapter 10 - Getting and Managing Software
* Our systems are running Alma Linux and will be using the {{Command{rpm}}} & {{Command{yum}}} package management commands.
* It's good to be familiar with the {{Command{rpm}}} command, but we'll mostly be using {{Command{yum}}}.


! Notes

!! Expanding our systems

!!! The yum package manager

Package management is one of the customized components of a Linux distribution and differs between Unix operating systems and Linux distributions.

The core components of a Linux distribution are:
* Linux kernel
* Base utilities (typically GNU tools)
** Many g* utilities are from the GNU project (eg: gcc)
** Stallman's GNU (GNU's not Unix) project, early 80's.  Wanted to create a totally free OS. Started with the utilities.
** Came from the Free Software Foundation and [[a philosophy of freedom|http://audio-video.gnu.org/video/TEDxGE2014_Stallman05_LQ.webm]] (freedom (speech), not price (beer) ).
** Software should be free to run, change, copy, and modify so users are the ones in control, free from corporate control, allowing better software to develop - this philosophy is embodied in the GNU license
** The GNU tools differ somewhat from the ~FreeBSD tools (sed is a good example)
* Package manager.

Extra (optional) components:
* Specialized utilities (Like the tools that come with a distro like Kali)
* X server / Window manager

Each distribution combines these components in different ways depending on their focus and goals.

Red Hat-based systems (including ~CentOS) use the RPM package format and rpm package system with the yum package management utility.

Other package management systems exist for other distros
 - apt  (Debian & Ubuntu)
 - portage  (Gentoo)
 - ports  (~FreeBSD)
 - ~DistroWatch [[Package management cheat sheet|http://distrowatch.com/dwres.php?resource=package-management]]

{{Command{rpm}}} - very basic utility
* It will mainly just install, update, or remove packages (see the examples below)
* You will need to acquire the .rpm package file yourself or have a direct URL for it
** A .rpm file is a collection of pre-compiled binaries, configuration files, and support files for an application compiled for the target architecture.
* Conflicts and dependencies will need to be sorted out manually
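A few generic {{Command{rpm}}} invocations for reference (a sketch only - //foo// is a placeholder package name):
{{{
rpm -ivh foo-1.0-1.x86_64.rpm    # install a local .rpm file (verbose, with a progress bar)
rpm -Uvh foo-1.1-1.x86_64.rpm    # upgrade the package from a newer .rpm file
rpm -qa                          # list every package installed on the system
rpm -e foo                       # erase (remove) the package
}}}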

{{Command{yum}}} - high level utility for package management
* will interact with repositories (collections of .rpm files) to obtain packages
* takes care of any conflicts
* will install necessary dependencies
* records what is installed and any changes made to the system to facilitate updates, package removal, or audit.

Different package repositories (repos)
* Repository configuration files are stored in {{File{/etc/yum.repos.d/}}}
* A repository is the central distribution point for our linux packages
* Typically, each distro has its own repository on the internet for base packages
* The repository creator determines which applications it contains
* Repos are mirrored for accessibility and speed.
* Other repositories offer additional packages

EPEL (Extra Packages for Enterprise Linux)
Distributed by the Fedora Project to offer a repository of add-on packages
See: https://fedoraproject.org/wiki/EPEL

Don't run this until we need the EPEL repo 
{{Command{yum install epel-release}}}

Other specialized repositories may exist:
 - HP repo for their utilities (raid utils)


!!! Yum package manager commands:

{{Command{yum}}}
<<<
The primary command for the yum package manager.  Run this by itself to see all sub-commands
<<<

{{Command{yum repolist}}}
<<<
Display the configured repositories
<<<

{{Command{yum check-update}}}
<<<
Check the repositories for any available updates and display the results, without applying any updates.
<<<

{{Command{yum update}}}
<<<
Check the repositories for any available updates.  After reviewing the results, the user will be prompted to apply them.  
''A {{Command{reboot}}} will be required if a kernel update is included in the list.  Otherwise, only updated services may need to be restarted for the updates to take effect.''
<<<

!!!! yum cleanup:
{{Command{yum clean packages}}}
<<<
Remove cached packages after install is completed.
<<<

{{Command{yum clean metadata}}}
<<<
Remove the XML metadata cache
<<<

{{Command{yum clean dbcache}}}
<<<
Clean the yum ~SQLite database
<<<

{{Command{yum clean all}}}
<<<
Remove all cached yum content
<<<

{{Command{yum makecache}}}
<<<
Download and rebuild the repo metadata cache
<<<


{{Command{yum update //package_name//}}}
<<<
Update a single package
<<<

{{Command{yum provides "*/ssh"}}}
<<<
See which package provides the file named ssh. 
<<<

{{Command{yum info //package_name//}}}
<<<
Display information on the specified package
<<<

{{Command{yum install //package_name//}}}
<<<
Install a package from yum
<<<

{{Command{yum search //string//}}}
<<<
Search the repository for packages matching //string//
<<<

{{Command{yum deplist //package_name//}}}
<<<
Display the dependencies for the specified package
<<<

{{Command{yum list installed}}}
<<<
List all packages currently installed on the system
<<<

{{Command{yum remove //package_name//}}}
<<<
Remove the specified package from the system
<<<


!!!! Fixing damaged configuration files
Configuration files are often accidentally damaged while completing these labs, and a service will then fail to start as a result.  This sequence of commands demonstrates how to compare the configuration file on the system to the default which was installed as part of the package.  This comparison should help identify such configuration errors.

* Show the package which installed a particular file:  {{Command{rpm -qf /etc/named.conf}}}
* Display changes made since the original file was installed:  {{Command{rpm -V bind}}}

{{{
[root@core ~]# rpm -qf /etc/named.conf
bind-9.11.4-26.P2.el7_9.4.x86_64

[root@core ~]# rpm -V bind-9.11.4-26.P2.el7_9.4.x86_64
S.5....T.  c /etc/named.conf
}}}

The following table explains the letters in the above output:
| !Code | !Description |
| S |file Size differs|
| M |Mode differs (includes permissions and file type)|
| 5 |~MD5 sum differs|
| D |Device major/minor number mismatch|
| L |readLink(2) path mismatch|
| U |User ownership differs|
| G |Group ownership differs|
| T |mTime differs|
| P |caPabilities differ|

* Rename the original configuration file:  {{Command{mv /etc/named.conf /tmp}}}
* Reinstall the package:  {{Command{yum reinstall bind}}}
** A configuration file will only be reinstalled from the package if it is missing from the expected location.
* Compare the default configuration file to the renamed copy:  {{Command{diff /etc/named.conf /tmp/named.conf}}}
** Lines beginning with &lt; are the version in the file listed as argument one
** Lines beginning with &gt; are the version in the file listed as argument two

{{{
[root@core ~]# diff /etc/named.conf /tmp/named.conf
13c13
<       listen-on port 53 { 127.0.0.1; };
---
>       listen-on port 53 { any; };
21c21,24
<       allow-query     { localhost; };
---
>       allow-query     { any; };
}}}

There's no error here; these changes are expected.  This only demonstrates the process.  But it should be helpful for identifying a damaged or missing line.

Once that damaged line is identified, either merge it into your backup in /tmp/ or repeat your modifications to the new clean copy.


!!!! Additional yum commands:

* Yum package groups:
** {{Command{yum grouplist}}}
** {{Command{yum groupinfo //group_name//}}}
** {{Command{yum groupinstall //group_name//}}}


* Yum plugins
** Extend the functionality of the yum package manager
** See available plugins with {{Command{yum search yum-plugin}}}

yum-plugin-security - Check currently installed software for security updates.  Requires a subscription.

{{Command{yum &#045;-security check-update}}}
{{Command{yum &#045;-security update}}}

{{Command{yum updateinfo list available}}}
{{Command{yum updateinfo list security all}}}

https://access.redhat.com/solutions/10021

yum-utils - Extra utilities for working with yum


We can install most required software using packages with yum:
* Keep a record of what is installed as we go.
* Get started with: 
** Install on all systems: man wget nc telnet bind-utils openssh-clients rsync bzip2


We can also install software directly from source archives (a classic build flow is sketched after this list)
* Source archives are typically distributed as compressed tarballs
* Latest versions of software are not always available via package
* Building from source allows for additional customizations and a higher level of control
* Multiple versions of a program can easily be maintained on the same system by installing to different locations
** But you must keep them up to date (patched) and sort out any dependencies manually.
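For reference, the classic build-from-source flow looks roughly like this (a sketch only - //program-1.0// is a placeholder and the exact steps vary by project; check its README or INSTALL file):
{{{
tar -xzf program-1.0.tar.gz             # extract the source tarball
cd program-1.0
./configure --prefix=/opt/program-1.0   # configure the build, installing to its own directory
make                                    # compile
make install                            # install to the --prefix location
}}}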

For this class, we'll only be installing software from packages via yum.

!! Install web server software

The following tasks must now be completed to bring your web server online.  Refer to the notes above and in last week's pages to identify the proper commands to achieve these goals.

Complete these tasks on your web server VM:
# Install the following packages:  httpd httpd-tools php telnet
# Set the ''httpd'' service to start on system startup
# Start the ''httpd'' service now

!! Verify the service with the {{Command{telnet}}} & {{Command{curl}}} commands

The {{Command{telnet}}}, {{Command{curl}}}, and {{Command{nc}}} commands are excellent tools for verifying that you're able to communicate with a host or a service.  These are great for troubleshooting and everyone should know how to use all three.

Here I'm using telnet to connect to my web server on localhost.  Run the telnet command to make a TCP connection and then begin speaking HTTP to the server.  The HTTP command {{Command{GET /}}} will return the website.  A lot of HTML will be returned, so only the first couple of lines are included in the sample output below.

{{{
[root@www ~]# telnet localhost 80
Trying ::1...
telnet: connect to address ::1: Connection refused
Trying 127.0.0.1...
Connected to localhost.
Escape character is '^]'.
GET /
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"><html><head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
                <title>Apache HTTP Server Test Page powered by CentOS</title>
                <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
(truncated)
}}}


The {{Command{curl}}} command is another great tool for verifying TCP services and is generally available on every unix system.  We'll add the {{Monospaced{-v}}} flag here for additional verbosity that's helpful for troubleshooting.  Again, I'm truncating the output.

{{{
[root@www ~]# curl -v http://localhost/
* About to connect() to localhost port 80 (#0)
*   Trying ::1...
* Connected to localhost (::1) port 80 (#0)
> GET / HTTP/1.1
> User-Agent: curl/7.29.0
> Host: localhost
> Accept: */*
>
< HTTP/1.1 200 OK
< Date: Tue, 30 Mar 2021 19:44:15 GMT
< Server: Apache/2.4.6 (CentOS) PHP/7.3.27
< Last-Modified: Tue, 30 Mar 2021 10:17:46 GMT
< ETag: "56-5bebe4f44343d"
< Accept-Ranges: bytes
< Content-Length: 86
< Content-Type: text/html; charset=UTF-8
<
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"><html><head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
                <title>Apache HTTP Server Test Page powered by CentOS</title>
                <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
(truncated)

* Connection #0 to host localhost left intact
}}}

Once a successful connection has been made, view the apache log files to verify the connection.
 - Apache logs are located in {{File{/var/log/httpd/}}}
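One way to watch requests arrive in real time (assuming the default Apache log file names on our distribution):
{{{
tail -f /var/log/httpd/access_log    # watch new requests as they arrive (Ctrl-C to stop)
tail /var/log/httpd/error_log        # review the last few error entries
}}}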

A log entry for a successful connection will resemble the following.  Note the ''200'' HTTP status code:
{{{
192.168.12.10 - - [30/Mar/2022:18:45:42 -0400] "GET / HTTP/1.1" 200 86 "-" "curl/7.29.0"
}}}


Now try to connect to your web server from your test VM using {{Command{telnet}}} or {{Command{curl}}}.  If you use telnet, don't forget to send the {{Command{GET /}}} command.

{{{
[root@test ~]# telnet 192.168.13.25 80
Trying 192.168.13.25...
Connected to 192.168.13.25.
Escape character is '^]'.
GET /
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"><html><head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
                <title>Apache HTTP Server Test Page powered by CentOS</title>
                <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
( truncated )
}}}


Your web server is now online.  We'll work with it further in the 2nd half of this week's material.


! Additional Material

[[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] - Chapter 14 (Package Management)


!! Useful commands:
* {{Command{wget}}} - great tool for downloading files from a web or FTP server
* {{Command{tar}}} - Standard linux archive tool.  Files are usually distributed or stored as tarballs (the Linux equivalent of Zip).  This tool will create or extract them
* {{Command{telnet}}} - Useful tool for testing TCP ports
* {{Command{curl}}} - Useful tool for testing TCP ports or downloading content from the web
* {{Command{apachectl}}} - Tool to manage an Apache server.  Good to know it exists, but we likely won't be using it.


!! References

Yum quick reference:  http://yum.baseurl.org/wiki/YumCommands.html


! Assignment

<<tiddler [[Lab 42 - VM updates & software installation]]>>
<<tiddler [[Lab 43 - Web Server]]>>
! Material

!! Reading

Linux Bible Chapter 17 - Configuring a Web Server

* Keep in mind our ~DocumentRoot directory should now be {{File{/opt/work/htdocs}}}


! Notes

At this point the web servers should be online and serving a basic web site from our new ~DocumentRoot directory.  We'll now use that directory to set up a more sophisticated website.

!! Web Services 

There are many different web server options - 
* Apache - One of the most common web server software packages
** LAMP stack (Linux, Apache, mySQL, PHP)
* nginx - Lighter weight for higher performance, speed, and reduced memory footprint.  Another very popular option.
* python - {{Command{python -m ~SimpleHTTPServer [port]}}} (Python 2) or {{Command{python3 -m http.server [port]}}} (Python 3)
**  This is a very useful way to stand up fast and simple web servers anywhere.  It's handy for quick data exfiltration. 
* IIS - Microsoft's web server package for Windows

[[Netcraft Web Server Survey|https://www.netcraft.com/blog/september-2024-web-server-survey/]]
* Web server market share and stats over the last 10 years
* Apache used to be the most popular but has been steadily losing steam over the last few years with nginx gaining ground.


Default web site files:
* Apache {{File{htdocs}}} directory - ''h''yper''t''ext ''docs'' - The root of our web site.  These are the files our web server will provide
* {{File{index.html}}}, {{File{index.php}}} or whatever we define via the ~DirectoryIndex configuration option in httpd.conf
** This is the default page to provide if only a directory is given (eg, http://www.ci233.net/)
* Or display a directory listing if no index file exists and directory indexing ({{Monospaced{Options Indexes}}}) is enabled

Process ownership
* {{Command{ps aux | grep httpd}}} - Apache runs as an unprivileged user
* Any scripts executed will run as this user
* This protects the system from malicious or vulnerable scripts
** If a script is compromised, the attacker will only be able to access what that unprivileged user can access
** This kind of privilege separation and isolation are important security concepts to follow

Headers
* Extra information sent by the web server describing the connection and server data
* Header information provides useful troubleshooting and security metadata
* They're often hidden by your web browser, but you can see them in the developer tools or the command line with {{Command{curl}}}
** Use the {{Monospaced{ ''-v'' }}} curl option to see the headers ({{Monospaced{ ''-v'' }}} usually means verbose output for most commands)
{{{
[root@www ~]# curl -v -s http://localhost
* About to connect() to localhost port 80 (#0)
*   Trying ::1...
* Connected to localhost (::1) port 80 (#0)
> GET / HTTP/1.1
> User-Agent: curl/7.29.0
> Host: localhost
> Accept: */*
>
< HTTP/1.1 200 OK
< Date: Mon, 28 Mar 2022 02:57:50 GMT
< Server: Apache/2.4.6 (CentOS) PHP/5.4.16
< Last-Modified: Thu, 24 Mar 2022 03:46:42 GMT
< ETag: "56-5daeeb1c16b20"
< Accept-Ranges: bytes
< Content-Length: 86
< Content-Type: text/html; charset=UTF-8
<
<HTML>
<BODY>
<BR><BR><BR>
<center><B>Welcome to CI233!</B></center>
</BODY>
</HTML>
* Connection #0 to host localhost left intact
}}}

~VirtualHosts
* A header value can be set containing the host name used to access the web server
* The server software will examine this header value to determine which site to display
* This allows for multiple web sites per server, depending on the host name used to access the server
** IP based virtual hosts - A single server and apache instance will be accessible by multiple IP addresses with each IP address linked to a different web site
** Name based virtual hosts - Multiple host names resolve to the same IP address.  Examine the hostname in the HTTP headers to determine which site to serve.
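You can see name-based virtual hosting from the command line by overriding the Host header with {{Command{curl}}}.  A sketch, using made-up host names and the example web server IP from earlier - the server decides which site to return based solely on that header value:
{{{
# Same IP address, two different Host headers -> potentially two different sites
curl -H "Host: site-one.example.com" http://192.168.13.25/
curl -H "Host: site-two.example.com" http://192.168.13.25/
}}}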


!!! Apache Modules
* Addons to Apache - Modules provide an extensible framework for additional functionality
** Static - Compiled in to Apache.  Apache must be recompiled to add support for new modules or to update them
** DSO - Dynamic Shared Objects - Compiled separately and loaded when apache starts
** apxs (Apache Extension tool) - A perl script to assist with building and installing apache DSO modules
** PHP was added as an Apache DSO module
* Check available modules
** Those compiled in: {{Command{apachectl -l}}}
** Available as DSO: {{Command{ls -l /etc/httpd/modules/}}}
* Add new modules with apxs (The manual way of doing things.  We'll use the automated packages for our labs)
** {{Command{apxs -c mod_foo.c}}}
** {{Command{apxs -ian foo mod_foo.la}}}
** Must then add a ~LoadModule directive to the apache config file
* Module examples:
** php - A robust web scripting language
** mod_rewrite - Provides a rule-based rewriting engine to rewrite requested URLs on the fly
** mod_security - Provides intrusion detection and prevention for web applications
** mod_limits - Limit the number of connections per host
** mod_headers - Customization of HTTP request and response headers
** Authentication modules - Different method for authenticating web users
** https://httpd.apache.org/docs/current/mod/ - More available modules


!!! Apache configuration

!!!! Server level
* Main configuration file {{File{conf/httpd.conf}}}
** Configuration extras - {{File{conf.d/*.conf}}} & {{File{conf.modules.d/*.conf}}}
** A quick way to make common additional functionality available
** ie: SSL support, virtual hosts, user web sites

!!!! User level
{{File{.htaccess}}} files - Modify permitted configuration values per web directory


!!! Extras

Basic HTTP is stateless: 
* Client makes a request, server responds, connection closed.
* cookies and session files can be used to maintain state between connections
** Cookies are files stored on your system to retain session information
** Authentication information may be stored in these cookies
** Leaking cookies is just as bad as leaking credentials

Content Distribution Networks (CDN)
* Globally or nationally distribute static content close to the end user
** Static content is cached to reduce load on the primary web server
** Serving content from datacenters closer to the user improves speed
* Examples:
** Cloudflare (Free/low cost tiers for experimenting and getting familiar with these concepts)
** Akamai


!!! Disable ~SELinux

~SELinux is a set of ''s''ecurity ''e''xtensions designed to protect Linux systems. ~SELinux will prevent the ~LocalSettings.php file from being read. The website will say it cannot read the file even though the file permissions are correct. ~SELinux must be disabled if it is not already.
- Otherwise we need to properly configure it, which is out of scope for this class.

To disable ~SELinux (it may already be done):
* Disable ~SELinux now:  {{Command{setenforce 0}}}
* To disable ~SELinux on boot, edit the file {{File{/etc/selinux/config}}} and change ''enforcing'' to ''disabled'' on the line that is not commented out.
* Check the status of ~SELinux with the command:  {{Command{getenforce}}}.  It should return ''Permissive'' immediately after running {{Command{setenforce 0}}}, or ''Disabled'' after a reboot once the config file change is in place.
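If you'd rather not edit the file by hand, a one-line edit does the same thing (a sketch, assuming the stock {{File{/etc/selinux/config}}} layout where the active line reads {{Monospaced{SELINUX=enforcing}}}):
{{{
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
grep '^SELINUX=' /etc/selinux/config    # confirm the change
}}}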


!!! Install PHP

The default version of PHP is quite old.  Run these commands to install a newer version:

* Install the yum-utils package: {{Command{yum install yum-utils}}}
* Install the EPEL repository:  {{Command{yum install epel-release}}}
* Enable the Alma ~PowerTools repository:  {{Command{yum-config-manager &#45;-enable powertools}}}
* Install the [[Remi Release|https://rpms.remirepo.net/]] repository for PHP: {{Command{yum install http://rpms.remirepo.net/enterprise/remi-release-8.rpm}}}
* Reset the php module version: {{Command{dnf module reset php}}}
* Enable & install the php 8.2 packages from Remi: {{Command{yum module install php:remi-8.2}}}
* Verify the version of php:  {{Command{php -v}}}
** It should now be (roughly) version 8.2.25
* Also install the following packages.  These will be required by ~MediaWiki:  {{Monospaced{''php-mbstring php-xml php82-php-gd php-intl''}}}
** A lot of dependencies will be required.  Install them too.
* Restart apache to activate the new version of php and its new extensions:  {{Command{systemctl restart httpd}}}


!!!! Scripting
* A means to develop applications to generate dynamic web content 
* php - A standard server side scripting language for web development
** Change your {{File{ index.html }}} (located in {{File{ /opt/work/htdocs/}}}) to {{File{ index.php }}} and add the {{Monospaced{phpinfo();}}} php function to it
{{{
# cat /opt/work/htdocs/index.php
<HTML>
<BODY>
<CENTER>Welcome to CI233!</CENTER>
<?php
// The line above instructs the php module to start processing php scripting
phpinfo();		// This function will display information and configuration for our php installation
// The line below instructs the php module to stop processing php scripting.
?>
</BODY>
</HTML>
}}}

Now {{Command{curl http://localhost/index.php}}} will execute the {{Monospaced{phpinfo()}}} function and return a dump of the server configuration.


* scripts typically run as the apache process (for modules) 

It's important to keep web applications up to date!
* Security vulnerabilities are constantly discovered in web applications
* These vulnerabilities become attack vectors against the hosting server


!!!! PHP module configuration
* A separate configuration file to tune php.
* By default {{File{ /etc/php.ini }}}

Set the following values in your php.ini file:
{{{
session.save_path = /tmp/php
log_errors = on
error_log = /var/log/httpd/php.log

date.timezone = "America/New_York"
disable_functions = system, exec, shell_exec
}}}

Be sure to create the directory {{File{/tmp/php}}} and make it owned by the apache user:
* {{Command{mkdir /tmp/php}}}
* {{Command{chown apache /tmp/php}}}


!!! Install ~MediaWiki

Download the package from the [[MediaWiki web site|http://www.mediawiki.org/wiki/MediaWiki]] with {{Command{wget}}}
* Look for the downloads section and find the latest version in .tar.gz format.  
* Current file name is {{File{mediawiki-1.42.3.tar.gz}}} as of writing this lab.  The latest version and file name may have incremented since then.
Save it to the directory {{File{/opt/work/htdocs/}}}
Extract the tarball with a command resembling:  {{Command{tar -xf mediawiki-1.42.3.tar.gz}}}
Rename the base directory from ''mediawiki-//version//'' to ''wiki''
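Put together, the sequence looks roughly like this (a sketch - paste in the actual .tar.gz link from the ~MediaWiki downloads page and adjust the version number to match what you downloaded):
{{{
cd /opt/work/htdocs/
wget <paste the .tar.gz download link here>
tar -xf mediawiki-1.42.3.tar.gz
mv mediawiki-1.42.3 wiki
}}}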

!!! Installing the ~MariaDB database package

~MediaWiki will need a database to store its dynamic content.  We'll now need to install the ~MariaDB database package and php extension for connecting to the database.

!!!! Set up the ~MariaDB package repo
The version of ~MariaDB that comes in the normal Alma repository is too old for ~MediaWiki.  We'll instead add a custom package repository to our system and install a newer version.
# Change to {{File{/tmp/}}} and use it for downloading this temporary file: {{Command{cd /tmp}}}
# Download the setup script for the ~MariaDB package repository:  {{Command{wget https://downloads.mariadb.com/MariaDB/mariadb_repo_setup}}}
# Run the script:  {{Command{bash /tmp/mariadb_repo_setup}}}
!!!! Install and start the ~MariaDB database packages
# Install these three packages on your web server:  {{Monospaced{''~MariaDB-server ~MariaDB-client php-mysql''}}}
# Set the {{Monospaced{mariadb}}} service to start on boot
# Start the {{Monospaced{mariadb}}} service now
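One way to handle steps 2 and 3 (a minimal sketch, assuming systemd, which our Alma ~VMs use):
{{{
systemctl enable mariadb     # start the service on boot
systemctl start mariadb      # start the service now
systemctl status mariadb     # confirm it is enabled and active
}}}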

!!!! Verify the services are running
Check the status of the {{Monospaced{mariadb}}} service with the command {{Command{systemctl status mariadb}}}.  The service ''must'' be enabled and active before proceeding.

[img[img/db-status.png]]


!!!! Create wiki database user

Now that the database service is installed, we need to create a user and database for the wiki.  

The mysql command can be used to connect to the database service as root, the database superuser.  There is currently no root password set.  Just press enter if prompted for one.  Once you are connected to the database, enter the remaining commands at the database prompt.  You may supply whatever password you'd like for //wiki_pass//, just be sure to remember what you use.

Execute the following commands:
{{Commands{
[root@www ~]# ''mysql -u root''

Welcome to the ~MariaDB monitor.  Commands end with ; or \g.
Your ~MariaDB connection id is 2
Server version: 10.3.39-~MariaDB ~MariaDB Server

Copyright (c) 2000, 2018, Oracle, ~MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

~MariaDB [(none)]> ''CREATE USER 'wiki'@'localhost' IDENTIFIED BY 'wiki_pass';''
Query OK, 0 rows affected (0.00 sec)

~MariaDB [(none)]> ''CREATE database wiki;''
Query OK, 1 row affected (0.00 sec)

~MariaDB [(none)]> ''GRANT select, insert, update, delete, create, alter, index ON wiki.* TO 'wiki'@'localhost';''
Query OK, 0 rows affected (0.00 sec)

~MariaDB [(none)]> ''flush privileges;''
Query OK, 0 rows affected (0.00 sec)

~MariaDB [(none)]> ''exit''
Bye
}}}

{{Note{''Note:'' If you goof the wiki user's password, it can be reset by logging into the database as the root user and running:  {{Monospaced{''SET PASSWORD FOR 'wiki'@'localhost' = PASSWORD('new_password');''}}}.  Replace //new_password// with whatever you want the password to be.  Then run the {{Monospaced{''flush privileges''}}} database command.}}}


Now test your connection to the database by logging into it with the new wiki user:

{{Commands{
[root@www ~]# ''mysql -u wiki -p wiki''
Enter password:

Welcome to the ~MariaDB monitor.  Commands end with ; or \g.
Your ~MariaDB connection id is 3
Server version: 10.3.39-~MariaDB ~MariaDB Server

Copyright (c) 2000, 2018, Oracle, ~MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

~MariaDB [wiki]> ''exit''
Bye
[root@www ~]#
}}}

!!! Quick Verification: 

Our server software is installed; let's use curl to verify everything is working so far:

{{{
[root@www ]# curl http://localhost/wiki/
<!DOCTYPE html>
<html lang="en" dir="ltr">
        <head>
                <meta charset="UTF-8" />
                <title>MediaWiki 1.42.3</title>
                <style media="screen">
                        body {
                                color: #000;
                                background-color: #fff;
                                font-family: sans-serif;
                                text-align: center;
                        }

                        h1 {
                                font-size: 150%;
                        }
                </style>
        </head>
        <body>
                <img src="/wiki/resources/assets/mediawiki.png" alt="The MediaWiki logo" />

                <h1>MediaWiki 1.42.3</h1>
                <div class="errorbox">
                        <p>LocalSettings.php not found.</p>
                                <p>Please <a href="/wiki/mw-config/index.php">set up the wiki</a> first.</p>
                </div>
        </body>
</html>
}}}

We should be ready to configure the wiki software.



!! Bypassing network restrictions with proxies

A proxy is a middle man, passing on network requests to their destination on your behalf.  Our web server ~VMs are behind the lab infrastructure's router and cannot be accessed outside of that LAN.  We'll need to use a proxy in order to view the wiki sites in our browsers at home.

See the [[Tunnels & Proxies with SSH]] page for more information on how to set up a SOCKS proxy with SSH to access protected resources.


!!! Configure your ~MediaWiki

With a SSH proxy in place, you should be able to complete the configuration of your wiki.  After establishing the tunnel, browse to http://your_www_ip_address/wiki/ to reach the configuration page.  It will look something like this.

[img[img/MediaWiki.png]]


As you are stepping through the configuration page, be sure to use these values:
* database host:  {{Monospaced{'' localhost ''}}}
* database name: {{Monospaced{'' wiki ''}}}
* database username: {{Monospaced{'' wiki ''}}}
* database password: ''// whatever password you used above //''

Once the Wiki setup is complete, you will be prompted to download the {{File{~LocalSettings.php}}} file to your home computer.  This file must then be uploaded to the {{File{/opt/work/htdocs/wiki/}}} directory on your web server.  You will be able to fully access your wiki after this file is uploaded.  
 - Our class ~VMs are on [[RFC 1918|https://datatracker.ietf.org/doc/html/rfc1918]] IP addresses. You cannot connect directly to your web server VM from home to upload the {{File{~LocalSettings.php}}} file.  It must be first uploaded to the class shell server.
 - The [[Virtual Machines]] page (linked on the top menu bar) has a diagram of our lab infrastructure which may be helpful.

The {{Command{scp}}} or {{Command{sftp}}} tools may be helpful for transferring files on the command line between ~VMs.
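A sketch of the two-hop copy with {{Command{scp}}}.  The placeholders //yourusername// and //www_ip// are your campus username and your web server VM's IP address; adjust the user, port, and paths to however you normally connect to your VM:
{{{
# Hop 1 - from your home PC, upload LocalSettings.php to the class shell server:
scp -P 2233 LocalSettings.php yourusername@lab.ci233.net:

# Hop 2 - from the class shell server, copy it on to your web server VM:
scp LocalSettings.php root@www_ip:/opt/work/htdocs/wiki/
}}}
Note that {{Command{scp}}} uses a capital {{Monospaced{-P}}} for the port, unlike {{Command{ssh}}}.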


! Assignment

!! Web Server:
<<tiddler [[Lab 44 - Set up MediaWiki]]>>
! Material

!! Read:
* Linux Bible Chapter 11 - Managing User Accounts
** This chapter starts with an instruction to install the {{Monospaced{cockpit}}} package.  Do not follow this instruction; it's a dangerous package to install.
** Use the command-line tools instead
* Linux Bible, pages 175-178 
** //Becoming root from the shell (su command)// and //Gaining administrative access with sudo// sections
* Also review:
** [[sudo tutorial|https://phoenixnap.com/kb/linux-sudo-command]]
** [[sudoedit tutorial|https://www.howtoforge.com/tutorial/how-to-let-users-securely-edit-files-using-sudoedit/]]

!! Watch:
* {{Command{sudo}}} use and configuration:  https://www.youtube.com/watch?v=YSSIm0g00m4
** Note: In the video, the sudoers file is edited by executing {{Command{sudo visudo}}}.  This may not work on our ~VMs.  Instead use {{Command{su}}} to become root and then run {{Command{visudo}}} to edit the sudo configuration.


! Notes - Access control & user management

''Authentication'' - Who you are.  The process of ascertaining that someone is actually who they claim to be
''Authorization'' - What you are allowed to do.  Rules to determine who is allowed to perform certain tasks

!! Access control

!!! From the beginning, Unix has been a multi-user system
* All objects (files & processes) have owners
* A user owns new objects they create 
* The administrative user (root) can act as the owner of any object
* Only root can perform most administrative tasks

!!! Groups

A mechanism to grant permissions to groups of users, such as all students in a particular class.

* The filesystem has a more sophisticated access control system
* Each file has a user owner and a group owner
* Permissions can be set so group members may have their own set of access controls (rwx)
* Groups can be harnessed to control access to the system

The directory {{File{/opt/pub/ci233/}}} is set so only those in this class can access its files.
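As a hypothetical illustration of how that kind of restriction is typically built (the group name here is made up for the example, not necessarily what is used on the server):
{{{
chgrp ci233 /opt/pub/ci233      # give the directory a group owner
chmod 750 /opt/pub/ci233        # owner: rwx, group: r-x, everyone else: no access
}}}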


!!! root (uid 0)
* The root user is the standard unix superuser account
* There's nothing special about the user name - it's all in the user ID (UID) number
** Unix systems track everything by number:  process ~IDs, device ~IDs, IP addresses, uid, and gid
** We prefer names over numbers
* Check out the {{File{/etc/passwd}}} file on your ~VMs.  There's a second uid 0 user account named {{Monospaced{toor}}}
** An unknown uid 0 backdoor account would normally be a huge red flag.  But this account exists so I can get into your systems to help if something breaks.
** It has the same privileges as your {{Monospaced{root}}} account, but uses a password that I have.
** As far as the system is concerned, {{Monospaced{root}}} & {{Monospaced{toor}}} are the same person because they have the same user uid number.

!!! Privilege separation 
* superuser (uid 0) - The superuser - ideally only use the system with superuser privileges when necessary.
* normal users - the regular users on the system.  How we all access and use the class shell server.
* service accounts - These are the accounts our services run as, such as the {{Monospaced{apache}}} and {{Monospaced{mysql}}} users on your web server or {{Monospaced{named}}} user on your core VM.
** nobody or daemon accounts
*** Generic unprivileged accounts which run services as unprivileged users in case the services are broken into.  This way they'll have very limited access to the rest of the system
*** Services ran as root in the old days.  If a service was exploited and an attacker was able to access files or run commands, they would then have access to the entire system.
** principle of least privilege - Only grant users the access they need.  If an account or service is broken into, the damage will be limited.
*** This is why we don't all access the shell server as {{Monospaced{root}}}.  I use an unprivileged user also and only elevate to {{Monospaced{root}}} when necessary.

!!! Privilege escalation
* Limit direct access to the {{Monospaced{root}}} account.  
* Privilege separation - Only obtain superuser privileges when you need them
** Don't always operate as the {{Monospaced{root}}} user
* Instead log in as a regular user and escalate when needed
** This is also good for accountability if many users have the root password.
** {{Command{su}}} command - Substitute user
*** Change the effective userid to another system user
*** Real id is the userid you log in as, the user id associated with the process that created the current process
*** Effective id is one the system uses to determine whether you have access to a resource
*** http://bioinfo2.ugr.es/OReillyReferenceLibrary/networking/puis/ch04_03.htm
*** {{Command{su [username]}}}  - Change to another user, inheriting the current shell environment
*** {{Command{su - [username]}}}  - Change to another user, simulating a full login.  The current shell environment will not be inherited.
** {{Command{sudo}}} - Allow elevated privileges on a limited scale (per command).  
*** {{Command{sudo}}} allows an administrator to grant root privileges to users without divulging the root password.
*** Or allow a user to just run a few commands as the superuser.
*** Display what you are allowed to access via sudo: {{Command{sudo -l}}}
*** {{Command{sudo //command//}}} - Run a command as another user (defaults to the root user)
*** {{Command{sudoedit //file//}}} - Edit a file as another user (defaults to the root user).  Running {{Command{sudoedit //filename//}}} is the same as running {{Command{sudo -e //filename//}}}.
*** {{Command{sudo -l}}} - Display which commands are available to the current user via sudo
*** sudoers file: {{File{/etc/sudoers}}} - This is where the sudo configuration is saved.  Don't edit this file directly.  Use {{Command{visudo}}} to edit it.
**** Separate sudo configuration files can also be saved within the directory {{File{/etc/sudoers.d/}}} to keep things better organized.
**** {{Command{visudo}}} will lock the file and perform syntax checks after saving it.
** You can control who can access particular resources with user or group permissions
* Both {{Command{su}}} and {{Command{sudo}}} will log escalation events
** su will log when an unprivileged user switches to another user
** sudo/sudoedit will log each command executed or file modified
* setuid bit 
** set ID upon execute
** An extra permission bit that can be set with chmod 
** The program will run as the user who owns the file.
** Examples:  passwd and crontab commands
** The passwd command needs extra privileges in order to change a user's password, so extra system privileges are granted just to that command.
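
To make the {{Command{sudo}}} pieces above concrete, here is a minimal sketch of what a sudoers entry and its use might look like.  The ''webmaster'' account and the commands granted to it are hypothetical examples; always edit the real file with {{Command{visudo}}}.
{{{
# Example lines for /etc/sudoers or a file under /etc/sudoers.d/ (edit with visudo):
%wheel     ALL=(ALL)    ALL                    # wheel members may run any command as any user
webmaster  ALL=(root)   /usr/bin/systemctl restart httpd, /usr/bin/systemctl status httpd

# Then, as the webmaster user:
#   sudo -l                              # list what sudo will allow
#   sudo systemctl restart httpd         # allowed - runs as root, and the command is logged
#   sudo systemctl restart sshd          # denied - not listed in the sudoers entry
}}}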

!!! Finer grained access controls
* ~SELinux and mandatory access controls (MAC)
** Enabled by default in ~CentOS
** ~SELinux will cause us problems if we don't either configure or disable it
** Controlled by the {{Command{setenforce}}} command for current boot
** and by the /etc/selinux/config file on boot
** It's presently disabled on all of our class ~VMs
* Filesystem access control lists (~ACLs)
** Finer grained per user access to files
** Controlled by {{Command{setfacl}}} and displayed by {{Command{getfacl}}} (see the short example after this list)
** Active ~ACLs noted with a + at the end of the file permissions list
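
A minimal ACL sketch - the file name and the user ''alice'' are just placeholders:
{{{
[root@www ~]# setfacl -m u:alice:rw report.txt      # grant alice read/write beyond the normal owner/group/other bits
[root@www ~]# getfacl report.txt                    # display the ACL entries on the file
[root@www ~]# ls -l report.txt                      # the permission string now ends with a + (e.g. -rw-rw-r--+)
[root@www ~]# setfacl -x u:alice report.txt         # remove alice's entry again
}}}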

!!! Verifying users with PAM
* Pluggable Authentication Modules (Chapter 6, Page 125)
* Configuration resides in /etc/pam.d/
* Originally access was determined by just checking passwords against the password files
* Modules are used for user validation and verification
** Can determine who you are
** And if you have permission to access the resource
** Can also enable additional types of authentication, such as two-factor with hardware or soft tokens.
* Examples: 
** {{File{/etc/pam.d/su}}} - limit who can use the {{Command{su}}} command
*** uid 0 users can always run the {{Command{su}}} command
*** Change to require wheel group membership (see the sample {{File{/etc/pam.d/su}}} lines after this list)
*** Can set to implicitly trust members of the wheel group (dangerous!)
* Other pam functions: 
** Pam can also create home directories on first login with pam_mkhomedir
** Check password complexity with pam_cracklib or pam_pwquality
** Lock accounts on too many failed attempts with pam_tally or pam_faillock
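
For example, a typical ~CentOS {{File{/etc/pam.d/su}}} contains lines similar to these (exact contents vary by release):
{{{
auth            sufficient      pam_rootok.so                   # uid 0 can always su without a password
#auth           sufficient      pam_wheel.so trust use_uid      # uncomment to let wheel members su with no password (dangerous!)
#auth           required        pam_wheel.so use_uid            # uncomment to restrict su to members of the wheel group
auth            include         system-auth
account         include         system-auth
}}}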


!! Users and Groups

!!! Password files
* {{File{/etc/passwd}}} - Everyone can read this file
** Contains fields identifying the user
** It used to also contain the hashed password but this was moved elsewhere to hide it from normal users
** Don't leave the old password field (position 2) blank!  If blank, no password is required for login.  Use the placeholder character {{Monospaced{''x''}}}.
*** An {{Monospaced{''x''}}} in {{File{/etc/passwd}}} column 2 means see {{File{/etc/shadow}}} for the password hash
* {{File{/etc/shadow}}} (Linux) or {{File{/etc/master.passwd}}} (~FreeBSD) - Only root can read this file
** A secure file which contains the password hashes so normal users cannot read them for brute force cracking
** Also contains password and account expiration attributes
* Use {{Command{vipw}}} to edit these files so you have file locking and format verification
** This verification prevents errors from breaking access to the system
** {{Command{vipw}}} will edit the password file
** {{Command{vipw -s}}} will edit the shadow file
* password hashing:
** Different hashing algorithms and their tags from old (weak) to new (strong):  DES, ~MD5 ({{Monospaced{$1$}}}), Blowfish ({{Monospaced{$2a$}}}), ~SHA256 ({{Monospaced{$5$}}}), ~SHA512 ({{Monospaced{$6$}}})
*** The tag at the beginning of the password hash identifies the algorithm used (see the shadow entry sketch after this list).
** {{Command{authconfig &#045;-test | grep hash}}} - See what hashing algorithm is used
** {{Command{authconfig &#045;-passalgo=md5 &#045;-update}}} - Change the default hash type (don't actually run this)
** {{File{/etc/sysconfig/authconfig}}} - Authentication configuration settings
** {{File{/etc/libuser.conf}}}
** salting
*** Randomize hashes by adding a salt to the password before hashing
*** Prevents two passwords from having the same hash
*** Increases difficulty for brute force attacks or hash lookup tables (rainbow tables)
** Password cracking:
*** John the Ripper
*** hashcat
*** GPU processing makes this all much faster now
*** Protect your hashes!
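
A sketch of how a hashed password appears in {{File{/etc/shadow}}} - the account name and hash below are made up for illustration:
{{{
# field layout:  name : $id$salt$hash : lastchange : min : max : warn : inactive : expire :
jdoe:$6$Qf8rT2Lw$....remaining hash characters....:19400:0:99999:7:::
#     ^^ $6$ = SHA-512, "Qf8rT2Lw" is the random salt, the rest is the salted hash
# View a real entry (root only):
#   grep '^username:' /etc/shadow
}}}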

Time it takes to brute force these password types:
[img[img/2023_Password_Table.jpg]]


* uid numbers 
** multiple users with same UID number - The system only cares about the number.  If multiple users have the same UID number, then they are effectively the same user and can access each other's files
** System accounts (UID < 10)
** Service accounts (~UIDs between 10 and 500)
** Users UID > 500 (Linux) - Regular users.

!!! Group file
* {{File{/etc/group}}} - Where groups and group memberships are defined.
** wheel group - special administrator group.  Usually allows extra system access

!!! Shell
* default shell : {{Command{/bin/bash}}} (Linux) or {{Command{/bin/tcsh}}} (BSD)
* lockout shell : {{Command{/sbin/nologin}}}
** Users with this shell are not allowed to log into the system.  Service accounts or banned users will be set to this shell.
* Available shells defined in {{File{/etc/shells}}}

!!! Locking accounts
* Replace the hash with a {{Monospaced{*}}} or {{Monospaced{!!}}} to lock the account.
** This is not enough on //some// systems.  Users may still be able to log in with SSH keys instead of passwords.
* Also change the shell to {{Command{/sbin/nologin}}} (see the example after this list)
** This is a standard lockout shell.  A user must have a valid login shell in order to connect to a system
** The command {{Command{/sbin/nologin}}} just echoes //This account is currently not available.// and terminates, thus disconnecting the user from the system.
* {{File{/var/run/nologin}}} or {{File{/etc/nologin}}}
** If this file exists, only root will be allowed to log into the system.   The contents of the file will be displayed to the user before they are disconnected.
*** This is helpful if a system needs to be closed for temporary maintenance.
* Check out service accounts in the password file - they should not have passwords or valid shells
** A service account with a password or valid shell could indicate it is being abused by an attacker.
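
A minimal locking sketch, assuming a user named ''bob'' exists:
{{{
[root@www ~]# usermod -L bob                          # prepend ! to bob's hash in /etc/shadow
[root@www ~]# usermod -s /sbin/nologin bob            # also give him the lockout shell
[root@www ~]# grep '^bob:' /etc/shadow /etc/passwd    # verify both changes
}}}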

!!! New user:
* Use the utilities ({{Command{useradd}}}, {{Command{userdel}}}, {{Command{usermod}}}) or edit the password files directly (see the sketch after this list)
* Create a home directory for the user
** Set home dir ownership and permissions so the new user can access it
* Set up environment (dot files)
** Copy the environment configuration files within {{File{/etc/skel/}}} (Linux) or {{File{/usr/share/skel/}}} (~FreeBSD) to the new user's home directory
*** Note:  All environment configuration file names begin with a dot.
*** Don't forget to change ownership on the environment files in the user's home directory too
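
A quick sketch of the utility route - the username is just an example:
{{{
[root@www ~]# useradd -m -s /bin/bash newuser    # -m creates the home directory and copies the /etc/skel dot files
[root@www ~]# passwd newuser                     # set an initial password
[root@www ~]# ls -la /home/newuser               # dot files are present and owned by newuser
}}}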

!!! Remove or lock user
* Delete or comment out the user's lines in the password files
** The user will no longer be known to the system, but the change is non-destructive and can be reversed
* Or lock the account instead: change the password hash and change the shell


!!! Authentication factors:

Multi-factor authentication (MFA):
* Passwords are not good enough anymore; they are easily stolen.
* Increase security by combining multiple authentication factors.
* More sites and organizations are now requiring MFA
** [[Linux Kernel Git Repositories Add 2-Factor Authentication|http://www.linux.com/news/featured-blogs/203-konstantin-ryabitsev/784544-linux-kernel-git-repositories-add-2-factor-authentication]]
** SUNY Poly recently switched from GMail to MS Outlook and added MFA for email account login

!!!! Methods of authentication:
* ''Something you know'':  passwords
** Should be of sufficient length and complexity to be hard to crack
** Minimum of 10-12 characters
** correct horse battery staple: http://xkcd.com/936/
** Should be unique across systems
*** [[Russian Hackers Amass Over a Billion Internet Passwords|http://www.nytimes.com/2014/08/06/technology/russian-gang-said-to-amass-more-than-a-billion-stolen-internet-credentials.html?_r=0]]
*** [[Stolen user data used to access account|http://community.namecheap.com/blog/2014/09/01/urgent-security-warning-may-affect-internet-users/]]
*** [[ebay|http://money.cnn.com/2014/05/21/technology/security/ebay-passwords/]] 
** Password Cards: http://www.passwordcard.org/en, http://www.evenprime.at/2012/04/password-security-with-password-cards/, etc
** Password vaults
*** [[Password Safe|https://www.pwsafe.org]]
*** [[KeePass|https://keepass.info/]]
** One-time passwords (OTPW)

* ''Something you have''
** [[yubikey|http://www.yubico.com/]]
** [[Google Titan Key|https://cloud.google.com/titan-security-key/]]
*** https://www.cnet.com/news/google-made-the-titan-key-to-toughen-up-your-online-security/
** [[DoD CAC card|http://www.cac.mil/common-access-card/]]
** [[Google 2 factor|https://www.google.com/landing/2step/]]
** [[RSA SecurID|http://www.emc.com/security/rsa-securid/rsa-securid-hardware-authenticators.htm]]

* ''Something you are''
** biometrics:  fingerprint, retina, voice print, facial, vein patterns

* ''Somewhere you are''
** Geofencing - Tie authentication to a particular location
*** Someone may only log in or may not log in from a specific geographic location
** ~GeoIP libraries
** pam_geoip


!!! SSH authentication & increasing security

!!!! ssh keys
* Access systems with keys instead of just passwords for added security
* 1.5 factor authentication:  Slightly better than just passwords
* Create keypairs with {{Command{ssh-keygen}}} (see the example after this list)
** Asymmetric keypairs are used for authentication.  You keep the private key secure and locked with a passphrase.  The public key is distributed to systems you have permission to access.
* Public keys are stored in ~/.ssh/authorized_keys
* Host public keys are stored in ~/.ssh/known_hosts
* ssh-agent & ssh-add : add your ssh keys to the agent to be used for connecting to multiple systems
* pssh - parallel ssh for connecting to multiple systems
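
A typical key setup might look like this (hostnames are placeholders):
{{{
$ ssh-keygen -t ed25519                    # generate a keypair; protect the private key with a passphrase
$ ssh-copy-id user@vm.example.com          # append the public key to ~/.ssh/authorized_keys on the remote host
$ eval "$(ssh-agent)" ; ssh-add            # cache the decrypted private key for this session
$ ssh user@vm.example.com                  # log in with the key instead of a password
}}}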

!!!! sshd configuration
* Host keys
** Host key warning - A warning appears when connecting to a new system so you can verify the host key and ensure you're not the victim of a man-in-the-middle attack
* Require SSH keys to access the system (disable password authentication) - see the {{File{sshd_config}}} sketch after this list
** A little more secure than just passwords.  An attacker cannot simply capture a password; they must also obtain the SSH private key
* Deny root login - Don't allow users to log in directly as root.  Must log in first as a regular, unprivileged user and then escalate to root with either {{Command{su}}} or {{Command{sudo}}}
** No system should allow direct root login.  Disabling it is an excellent security first-step
** Our shell server sees about 50 attempts per day to log in as root.  Countermeasures identify and block these attackers.
** {{Command{grep 'sshd-root.*Found' /var/log/fail2ban.log | wc -l}}}
* Require group membership - Must be in a particular group to log in to the system via ssh
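
The corresponding {{File{/etc/ssh/sshd_config}}} directives might look like this (the group names are examples; test with {{Command{sshd -t}}} and reload the service after editing):
{{{
PermitRootLogin no             # deny direct root logins
PasswordAuthentication no      # require key-based authentication
AllowGroups wheel sshusers     # only members of these groups may log in over ssh
}}}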


! Assignment

<<tiddler [[Lab 48 - VM Lockdown - Secure your VMs]]>>

----

<<tiddler [[Lab 49 - sudo]]>>

----

<<tiddler [[Lab 50 - Enable Two-Factor Authentication]]>>
----

! Material
!! Watch
NTP Tutorial: https://www.youtube.com/watch?v=eh5ZL_fNi0g

!! Read
* Linux Bible - Configuring System Logging
** Chapter 13, pages 326-332


! Time & Logging

Time and logging go together.  It's incredibly useful to know //when// something happened if you need to investigate a problem or security incident, especially if you need to correlate events among systems to build an accurate timeline.  If time is not properly synchronized among your systems, it's difficult to properly understand sequences of events.  You may also run into functional issues if time is wildly incorrect.  For example, SSL certificates may be considered invalid if time is wrong on a system.

!! Time

What's the [[difference between accuracy and precision|https://www.thoughtco.com/difference-between-accuracy-and-precision-609328]]?

Importance of accurate time:
* file timestamps - when something was modified
* tracing events - Knowing when a breach occurred, when a change was made, or when someone logged in to a system
* security
** certificate validity - Certificates are only valid for a certain time range.  If a system's time is off, it may negatively impact secure communication

Importance of precise time:
* correlating activities between systems

Ideally we'll have both - accuracy and precision.  We want the correct time on all systems.  But the closer time is among systems in a network, the easier it will be to correlate events between them.


!!! Setting the system date
* {{Command{tzselect}}} - Select the time zone.  This is typically done for you on most modern installs
* The symbolic link {{File{/etc/localtime}}} will point to the timezone file to use
** Timezone definition files are typically stored within {{File{/usr/share/zoneinfo/}}}
* {{Command{date ~MMDDhhmm&#91;[CC]YY]}}} - set the system date and time manually
** It's usually not necessary to set the date/time and timezone in a VM.  ~VMs should obtain their time from the host.

!!! Network Time Protocol (NTP)
* NTP provides an automated way to keep time in sync and counter clock drift.
* A local server is configured to query a pool of many time servers and the best candidates will be used to keep the clock in sync
* They can maintain time to the millisecond 
* Clock strata - Distance from the reference clock
** Stratum 0 - The reference clock.  High precision, high accuracy clocks, such as atomic, GPS, or radio.
** Stratum 1 - Primary time servers.  Systems directly attached to and synchronized with stratum 0 clocks
** Stratum 2 - Secondary time servers.  Systems synchronized to stratum 1 time servers over the network.
** Stratum n+1 up to 15 - Time servers synchronized to a lower stratum clock
** Stratum 16 - An unsynchronized clock.

!!!! NTP Commands:
* {{Command{ntpdate}}} - Client utility.  A one-time immediate clock update.  Requires a time server to use as an argument.
** eg: {{Command{ntpdate 0.pool.ntp.org}}}
** Can be enabled to run on boot to force a time update on system startup.
* {{Command{chronyd}}} - Background service to maintain time synchronization
** Sets and maintains system time in sync with a central point
** Regularly polls one or more time servers for updates
** The chronyd service updates time slowly in small steps
** May use an internet-based source time server or a local one.
*** Generally, a large site will maintain an ntp server locally that other systems on the local network will synchronize against
*** Using a local service increases security and reduces strain on the public NTP servers
** An NTP service may be configured to provide time synchronization to client systems
** {{Command{ntpstat}}} - show the status of the current ntp service
** {{Command{chronyc}}} - query the Chrony NTP service (see the status-check commands after this list)
*** chronyc sub commands:
**** {{Monospaced{sources}}} - show our time sources
**** {{Monospaced{clients}}} - list connected clients
*** Tally codes for peers:
**** blank - Unreachable and discarded
**** - - Considered an outlier and discarded
**** + - Providing data and a candidate for use
**** * - The system peer and providing data
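
A few commands for checking synchronization status (output will of course vary per system):
{{{
[root@core ~]# systemctl status chronyd     # confirm the daemon is running
[root@core ~]# chronyc sources -v           # list time sources; -v prints a legend for the columns
[root@core ~]# chronyc tracking             # show stratum, offset, and drift of the local clock
[root@core ~]# timedatectl                  # quick summary, including whether NTP is synchronized
}}}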

!!!!! {{Command{ chronyc sources }}} command example for a fully synchronized NTP client:
[img[img/ntpq-peers.jpg]]
Left to right:
* Red - Tally code.  Here it is indicating an accepted NTP peer which is providing data
* Green - Remote NTP server.  Who we are obtaining our time from.
* Yellow - the time reference our source is using
* Blue - Stratum level of our time source 
* Orange - Connection type.  U means unicast
* Purple - Connection statistics 


{{Warning{''Warning'':  NTP is a very basic protocol that uses UDP port 123 for its communication.  NTP services will bind to that port and client tools will try to communicate over that port.  If the ntpd service is running and bound to the port to listen for connections, the port is then not available for the {{Command{ntpdate}}} client tool to use.  If you must run {{Command{ntpdate}}}, stop the ntpd service to free up the socket and then start it back up again after running {{Command{ntpdate}}}.}}}


UDP Reflection attacks against NTP
* Reflection attacks are a big problem.  DNS and NTP were popular targets
** A tiny network request by an attacker can be "reflected" to its intended target.  
** Generally the amount of network traffic generated by the reflection toward the target is significantly larger than the request
** A ~Denial-of-Service attack is launched requiring only minimal resources of the attacker
* http://blog.cloudflare.com/technical-details-behind-a-400gbps-ntp-amplification-ddos-attack/
* https://ics-cert.us-cert.gov/advisories/ICSA-14-051-04
* monlist command
** This is what was abused in the last NTP reflection attack.  A small NTP request would return a very large response.  
** The request IP address would be spoofed so the response is sent to the victim


!! Logging

Unix logging is an excellent resource and can quickly solve a lot of problems for you.

Here's a great example.  I have a typo in my Apache configuration file and the service will not restart.  The log entry details exactly what the problem is and where the problem originates:

{{{
[root@www conf]# systemctl restart httpd
Job for httpd.service failed because the control process exited with error code. See "systemctl status httpd.service" and "journalctl -xe" for details.

[root@www conf]# systemctl status httpd
 httpd.service - The Apache HTTP Server
   Loaded: loaded (/usr/lib/systemd/system/httpd.service; enabled; vendor preset: disabled)
   Active: failed (Result: exit-code) since Wed 2020-04-08 23:50:48 EDT; 4s ago
     Docs: man:httpd(8)
           man:apachectl(8)
  Process: 2170 ExecStop=/bin/kill -WINCH ${MAINPID} (code=exited, status=1/FAILURE)
  Process: 2168 ExecStart=/usr/sbin/httpd $OPTIONS -DFOREGROUND (code=exited, status=1/FAILURE)
 Main PID: 2168 (code=exited, status=1/FAILURE)

Apr 08 23:50:48 www systemd[1]: Starting The Apache HTTP Server...
Apr 08 23:50:48 www httpd[2168]: AH00526: Syntax error on line 1 of /etc/httpd/conf/httpd.conf:
Apr 08 23:50:48 www httpd[2168]: Invalid command 'my', perhaps misspelled or defined by a module not included in the server configuration
Apr 08 23:50:48 www systemd[1]: httpd.service: main process exited, code=exited, status=1/FAILURE
Apr 08 23:50:48 www kill[2170]: kill: cannot find process ""
Apr 08 23:50:48 www systemd[1]: httpd.service: control process exited, code=exited status=1
Apr 08 23:50:48 www systemd[1]: Failed to start The Apache HTTP Server.
Apr 08 23:50:48 www systemd[1]: Unit httpd.service entered failed state.
Apr 08 23:50:48 www systemd[1]: httpd.service failed.
}}}

Notice the lines above:
''Apr 08 23:50:48 www httpd[2168]: ~AH00526: Syntax error on line 1 of /etc/httpd/conf/httpd.conf:''
''Apr 08 23:50:48 www httpd[2168]: Invalid command 'my', perhaps misspelled or defined by a module not included in the server configuration''

If I examine line 1 of my configuration file as the log suggests, I'll spot my problem - text that doesn't conform to the required syntax of the file.

{{{
[root@www conf]# head -5 httpd.conf
my typo
# This is the main Apache HTTP server configuration file.  It contains the
# configuration directives that give the server its instructions.
# See <URL:http://httpd.apache.org/docs/2.4/> for detailed information.
# In particular, see
}}}

Syslog:
* The syslog service is the primary recipient of system-level event log information
** syslog then determines what should be done with that log data based on configuration
*** save it locally, send it to another system for log aggregation, or discard it
** Allows for centralized log collection and management
* Some utilities/services log directly to their own files and some use syslog
** Apache is an example of a service that saves log data to its own files
* syslog events are written to the domain socket /dev/log 
** sockets provide inter-process communication via the filesystem
** Processes either communicate via open network ports or these socket files
* log events contain the timestamp, type, severity, and details
* Most log files are plain text, allowing review or parsing with standard unix CLI tools, such as the filters we've been working with

* syslog events consist of pre-defined facility and severity levels
** facility is generally the service that generated the message (auth, cron, ftp, mail) and based on standardized names
*** local0-7 facilities are for customized destinations
*** or the keyword none to disable a particular facility or severity
** severity ranges from emergency to debug
*** When specified, that severity level and greater will be processed
** See the /var/log/messages example in /etc/rsyslog.conf (a few more selector lines are sketched below)
** Here's a list:

[img[img/syslogFacility.jpg]]
[img[img/syslogSeverity.jpg]]
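
For reference, rsyslog selectors combine a facility and a severity with an action.  The lines below are an illustrative sketch in the style of the stock {{File{/etc/rsyslog.conf}}}; the forwarding host is a placeholder:
{{{
# facility.severity          action
authpriv.*                   /var/log/secure            # all authentication messages
mail.info                    -/var/log/maillog          # mail events of severity info and above (- = async writes)
*.emerg                      :omusrmsg:*                # emergencies are sent to every logged-in user
local5.warning               @loghost.example.com       # forward to a central log host over UDP (@@ would use TCP)
}}}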


Most services can elevate verbosity for debugging, recording additional information to assist with troubleshooting.
* This should only be enabled for a short time.  The extra log entries may consume a lot of space over a long period of time.

!!! syslog components:
* syslogd - the logging service which receives and processes the log information
** {{File{/etc/rsyslog.conf}}} - The main configuration file
** {{File{/etc/rsyslog.d/}}} - The secondary configuration files
* library routines to submit log messages to syslogd
* {{Command{logger}}} - userland utility for recording log events from the shell.  Handy for scripting.
** Monitor or debug your automated scripts
** Backups and account processing are good examples
** {{Command{logger -t $0 -p local5.warning "test message"}}} - Send a test message to syslog from within a shell script with the local5 facility and warning severity
* logrotate / newsyslog - rotate logs at a configured time or file size
** It's important to rotate logs instead of letting them accumulate indefinitely.  Eventually they will consume the filesystem and will likely cause system failure.
** It's wise to account for this when designing a system and put logs on a separate filesystem.
** Retention issues - How long do we keep logs for?
** compress or delete old logs according to an archival schedule
** Logrotate - a tool which periodically runs to rotate log files (an illustrative configuration entry follows this list)
*** {{File{/etc/logrotate.conf}}} - Main configuration file
*** {{File{/etc/logrotate.d/}}} - Secondary configuration files
*** Periodically executed by cron to process the log files
**** Take a look at the file {{File{/etc/cron.daily/logrotate}}}
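
An illustrative {{File{/etc/logrotate.d/}}} entry - the application and its log path are hypothetical:
{{{
/var/log/myapp/*.log {
    weekly                  # rotate once per week
    rotate 8                # keep eight rotated copies
    compress                # gzip the rotated logs
    missingok
    notifempty
    postrotate
        /bin/systemctl reload myapp.service > /dev/null 2>&1 || true
    endscript
}
}}}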

Standard exemptions to syslog (commands to view these are sketched after the list):
* wtmp - binary file, view with last command
* lastlog - view with lastlog command
* psacct - process accounting service.  View with lastcomm command
** Not built-in.  Will need to install psacct package and enable psacct service
* Some services do not send to syslog and instead manage log files themselves:
** Apache
** BIND DNS server
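
Commands for viewing these non-syslog logs (psacct must be installed and enabled first):
{{{
[root@www ~]# last | head                 # recent logins, read from the binary wtmp file
[root@www ~]# lastlog                     # most recent login for every account
[root@www ~]# yum install psacct          # then: systemctl enable psacct ; systemctl start psacct
[root@www ~]# lastcomm | head             # recently executed commands, once psacct is collecting
}}}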

syslog as a network service
* syslog is by default a local service and not bound to a network port
* But can be configured to collect log events from multiple hosts
* Many benefits to central logging:
** Aggregate logs in one place for central review and retention
** If an attacker breaks into a system, they cannot easily remove the logs to cover their tracks if the logs are also stored on another server

!!! Kernel boot logging and message buffer
* We need a way to record the kernel events prior to init, before syslog starts and the filesystems are mounted
* The kernel stores this boot information in an internal buffer
** Also contains the system hardware detected on boot and any subsequent hardware changes.
* Captured by the system and recorded once it fully loads
* Viewable with the {{Command{dmesg}}} command
* Also saved to {{File{/var/log/dmesg}}}


!!! Systemd Additions:

!!!! Manage time:
* {{Command{ timedatectl list-timezones }}}
* {{Command{ timedatectl set-timezone //zone// }}}
* {{Command{ timedatectl status }}}

!!!! Display Logs:

{{Command{ journald }}} - New logging daemon with systemd
* Can replace or augment syslog

{{Command{ journalctl }}} - Front end for displaying logs
* Logs since last reboot: {{Command{ journalctl -b }}}
* Show the last lines (like {{Command{tail}}}): {{Command{ journalctl -n }}} for the last 10 (default) or {{Command{ journalctl -n 20 }}} for the last 20
* Display new log entries as they arrive (like {{Command{tail -f}}}):  {{Command{ journalctl -f }}}
* Display kernel messages:  {{Command{ journalctl -k}}}
* Display log entries for a particular unit:  {{Command{ journalctl -u httpd.service }}}
** For a particular time range:  {{Command{ journalctl -u httpd.service &dash;&dash;since yesterday }}}
** {{Command{ journalctl -u httpd.service &dash;&dash;since "2015-10-01" &dash;&dash;until "2015-10-31 03:00" }}}


!!! We have all this log data, now what?

* Logcheck - A utility to mine the log data and send reports
* fail2ban - Scan log files and ban malicious ~IPs
** Perform a regular expression match and add offending ~IPs to a firewall
** Important way to combat all of the automated scanning on the internet.  Our class shell server is under constant attack and countermeasures like this should be deployed.

The class shell server is currently blocking 293 IP addresses which attacked us sometime in the last 9 hours:

{{{
[root@shell ~]# fail2ban-client status sshd-root
Status for the jail: sshd-root
|- Filter
|  |- Currently failed: 0
|  |- Total failed:     1514
|  `- Journal matches:  _SYSTEMD_UNIT=sshd.service + _COMM=sshd
`- Actions
   |- Currently banned: 293
   |- Total banned:     1441
}}}


* Web log data
** http://en.wikipedia.org/wiki/List_of_web_analytics_software
** [[GoAccess|http://goaccess.io/]]
** http://www.awstats.org/
** [[Logstalgia|https://code.google.com/p/logstalgia/]] 

Big data analytics: 
* ELK stack - Free & open source (FOSS)
** Elasticsearch - log searching and data analytics
** Logstash - centralized logging and parsing
** Kibana - data visualization
* Enterprise SIEM (Security information and event management) tools
** Splunk - Big data analytics with a nice web front-end
*** //Splunk captures, indexes and correlates real-time data in a searchable repository from which it can generate graphs, reports, alerts, dashboards and visualizations//


! Assignment

<<tiddler [[Lab 45 - Bring core VM online]]>>
<<tiddler [[Lab 46 - Time]]>>
<<tiddler [[Lab 47 - Logging]]>>
! Material

Part 2 continues our storage work with a deeper dive into the Linux Logical Volume Manager (LVM).  This is one of the main Linux storage management systems and most deployments use it to some degree.

!! Read:
* Reflect on the reading from Part 1 and tasks from Lab 43.  Linux LVM is specifically mentioned in Chapter 12 on pages 285-290.

! Notes:

Linux LVM
* The primary way storage is handled in Linux
* Allows us to allocate space on demand and resize filesystems
* Supports snapshots
* Does not support raid 5 or 6.  Use linux software raid for redundancy then LVM to assign slices
* Access with the pv*, vg*, and lv* commands, e.g. {{Command{lvdisplay}}} (a short workflow sketch follows this list)
** The pv* commands work with physical volumes
** The vg* commands work with volume groups
** The lv* commands work with logical volumes
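
A minimal LVM workflow sketch, assuming a spare disk at {{File{/dev/sdb}}} (device, group, and volume names are examples):
{{{
[root@core ~]# pvcreate /dev/sdb                         # initialize the disk as a physical volume
[root@core ~]# vgcreate datavg /dev/sdb                  # create a volume group from it
[root@core ~]# lvcreate -n datalv -L 5G datavg           # carve out a 5 GB logical volume
[root@core ~]# mkfs.xfs /dev/datavg/datalv               # build a filesystem on the LV
[root@core ~]# mount /dev/datavg/datalv /mnt
[root@core ~]# lvextend -r -L +2G /dev/datavg/datalv     # grow it later; -r also grows the filesystem
}}}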

! Assignment
<<tiddler [[Lab 54 - Logical Volume Manager]]>>
! Material

!! Read:
* Linux Bible Chapter 11 - Managing User Accounts
** This chapter starts with an instruction to install the {{Monospaced{cockpit}}} package.  Do not follow this instruction; it's a dangerous package to install.
** Use the command-line tools instead
* Linux Bible, pages 175-178 
** //Becoming root from the shell (su command)// and //Gaining administrative access with sudo// sections
* Also review:
** [[sudo tutorial|https://phoenixnap.com/kb/linux-sudo-command]]
** [[sudoedit tutorial|https://www.howtoforge.com/tutorial/how-to-let-users-securely-edit-files-using-sudoedit/]]

!! Watch:
* {{Command{sudo}}} use and configuration:  https://www.youtube.com/watch?v=YSSIm0g00m4
** Note: In the video, the sudoers file is edited by executing {{Command{sudo visudo}}}.  This may not work on our ~VMs.  Instead use {{Command{su}}} to become root and then run {{Command{visudo}}} to edit the sudo configuration.



! Material

!! Reading:
* Linux Bible, Chapter 25 - Securing Linux on a Network

There's a lot of good networking information in this chapter:
* The ~NetFilter background information throughout the chapter is good to know, but the {{Command{ iptables }}} command for managing the firewall has largely been replaced by the {{Monospaced{firewalld}}} tool, {{Command{ firewall-cmd }}}
* Pay particular attention to the firewalld information

! Notes

Effective security requires a [[multi-layered approach|https://www.techrepublic.com/blog/it-security/understanding-layered-security-and-defense-in-depth/]], [[defense in depth|https://www.us-cert.gov/bsi/articles/knowledge/principles/defense-in-depth]], and adherence to the [[principle of least privilege|https://www.us-cert.gov/bsi/articles/knowledge/principles/least-privilege]].  Ideally, a weakness or vulnerability uncovered in one layer will be mitigated by another security layer.  

Five good examples of this we have deployed:
* ntpd and named on your core VM have ~ACLs in place to limit who can communicate with those services
* Direct login with the root account is now blocked.  Only authorized user accounts can elevate privileges via {{Command{su}}} or {{Command{sudo}}} through membership to the wheel group.  Just having the root password isn't enough.
* Rather than give a webmaster full root access, we configured {{Command{sudo}}} to provide elevated privileges just to the functions he may need, adhering to the principle of least privilege
* By implementing two-factor authentication, we have a second layer of security to protect us in case credentials are stolen.  
* SSH access to our class shell server cannot be blocked by a firewall.  We all need to be able to connect from anywhere.  The server is under constant brute-force login attack from all over the internet.  The [[fail2ban|https://www.fail2ban.org/]] tool was deployed on the class shell server to automatically detect and block them.

Host-based firewalls are another important layer in the security of a system.  Perimeter firewalls are important, but what happens if an attacker is already within your walls?  Restricting access to open ports on your systems to only those needing to communicate with them is a good way to further limit your exposure to attack.

Systems must always be built with the assumption that outer layers have been breached.  For example, 
* Assume your password will be stolen at some point.  Use two-factor authentication everywhere you can.
* Assume the perimeter firewall will be breached at some point.  Deploy a host-based firewall on your ~VMs
** Assume the host-based firewall may be accidentally disabled or bypassed.  Configure your ntp and DNS services to use ~ACLs.
* Assume a user will have a weak password that could be brute-forced.  Deploy a system to detect and block brute-force login attempts.


!! ~SaltStack Example

[[SaltStack|https://www.saltstack.com]] is an open-source platform for server automation and remote task execution.  It's very powerful, easy to deploy, and easy to use.  We're using it in our virtual lab to facilitate the management and monitoring of the class ~VMs.  

~SaltStack consists of:
* A master server which serves as the central control hub to issue commands and push configuration changes
* Minions, which are the nodes connecting to and being managed by the master.

Your ~VMs are all Salt minions connecting to a master server I control.  This enables me to quickly and easily push configuration changes and review the state of your ~VMs in bulk.  Instead of having to connect to each system to fix or review something, I can issue a single command which will run on all of them.

Early last year two [[highly critical vulnerabilities|https://labs.f-secure.com/advisories/saltstack-authorization-bypass/]] ([[CVE-2020-11651|https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-11651]] and [[CVE-2020-11652|https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-11652]]) were disclosed impacting the ~SaltStack master server.  These vulnerabilities allow any attacker who can communicate with the ~SaltStack network ports on the master to completely take it over: send control messages, read and write files on the master, and steal its secret keys.  The attacker will thus have complete control of the master server and all minions connecting to it, allowing for a complete compromise of all systems within a ~SaltStack deployment.

The [[Common Vulnerabilities and Exposures (CVE)|https://en.wikipedia.org/wiki/Common_Vulnerabilities_and_Exposures]] database managed by [[Mitre|https://cve.mitre.org/]] contains a list of all publicly disclosed security vulnerabilities. The ~CVEs are assigned a [[CVSS score|https://nvd.nist.gov/vuln-metrics/cvss]] ranging from 0 (benign) to 10 (critical) to rate their severity.  A CVSS score of 10 generally means full system compromise can be remotely accomplished.  These are "drop what you're doing and fix this now" vulnerabilities.  

Both ~SaltStack ~CVEs were assigned a CVSS score of 10.  ~F-Secure, the company which discovered the weaknesses, posted in their blog:  “Patch by Friday or compromised by Monday”.

This is a great example of the need for multiple layers of security.  There will always be time gaps between when vulnerabilities are introduced in software, when they are discovered, when patches are available, and when those patches can be applied.  Those time delays are occasionally significant:
* 2 years for [[CVE-2014-0160|https://www.cvedetails.com/cve-details.php?t=1&cve_id=CVE-2014-0160]], also known as [[HeartBleed|https://heartbleed.com/]], which allowed the compromise of a web server's SSL secret keys and the decryption of ~SSL-encrypted network communication.
* 20 years for [[CVE-2020-0601|https://nvd.nist.gov/vuln/detail/CVE-2020-0601]] with a [[8.1|https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator?name=CVE-2020-0601&vector=AV:N/AC:L/PR:N/UI:R/S:U/C:H/I:H/A:N&version=3.1&source=NIST]] rating ([[details|https://krebsonsecurity.com/2020/01/cryptic-rumblings-ahead-of-first-2020-patch-tuesday/]]) involving a core cryptographic library in Windows.

~F-Secure, in their [[blog post|https://blog.f-secure.com/new-vulnerabilities-make-exposed-salt-hosts-easy-targets/]], identified 6000 vulnerable ~SaltStack master servers through scans of the entire Internet.  Use of a firewall to prevent anyone on the internet from communicating with these servers would have been the first step in protecting them from abuse by the entire world and is especially vital now that vulnerabilities have been discovered.

Luckily, the ~SaltStack master used for our class is protected from the entire Internet by the perimeter firewall.  But what if an attacker is already on our network, either physically or virtually?  What if a misconfiguration of the perimeter firewall allows traffic to our master?  A host-based firewall must be deployed to protect this system by only allowing our class ~VMs to communicate with the Salt master.


!! Packet filter firewalls

Another component of system security which allows us to:
* Filter unwanted network traffic
* Log & monitor network traffic
* Block brute force attacks
* Rate limit to counter minor ~DoS events

* Filter minimally based on source or destination address, ports, or protocol types
* We can either default to deny or default to allow
* Optional logging
** Logging is useful for regular monitoring and debugging

* Ingress or egress filtering
** Control both what flows into and out of the system
** For example, filtering egress from a web server could effectively block reverse shell attacks


!! Developing firewall rules
* We must first understand what communication is expected to take place
** Know the source and destination
*** Is it a new connection or related to an existing connection ?
** Match services to port numbers
*** Consult {{File{/etc/services}}} for a mapping of port numbers to service names.
*** Low ports 0-1023 are well-known ports and privileged.  They may only be bound by a root-controlled process.
*** Ports 1024-49151 are [[registered|https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml]].  Port registration is issued by IANA.
*** Ports 49152-65535 are dynamic ports and generally not listening for services
** Observe what is currently listening for connections:
*** {{Command{ss}}} - Socket Statistics (the new tool). Installed by default
**** {{Command{ss -tunlp}}} - Display which ports are currently listening for connections.  The -n option will display IP addresses and port numbers instead of their names.  
**** {{Command{ss -tulp}}} - Display which ports are currently listening for connections.  Omitting the -n option will display host and service names instead of their numbers. 
*** {{Command{netstat}}} - Print network connections (the old tool).  This command requires the ''net-tools'' package.
**** {{Command{netstat -tunlp}}} - Display which ports are currently listening for connections.  The -n option will display IP addresses and port numbers instead of their names.  
**** {{Command{netstat -tulp}}} - Display which ports are currently listening for connections.  Omitting the -n option will display host and service names instead of their numbers. 
*** {{Command{lsof -i -n -P}}} - ''l''i''s''t ''o''pen ''f''iles.  Displays files and ports in use along with the processes which are utilizing them.  Requires the ''lsof'' package.
** Some services listen on both TCP and UDP.  Be sure to take note of the protocol being used.
* Then create rules to allow desirable traffic
** Determine if any traffic should be blocked
** Decide what to do with the remaining traffic:
*** Allow 
*** Allow but log
*** Deny

!!Stateful inspection - State module

* Stateless - inspect each packet in isolation.  Examine source and destination hosts and ports then decide what to do.
* Stateful - maintain the state of network connections.  These states can be used to determine policy.

Inspect the traffic to allow expected replies
* -m state &nbsp; &#45;-state //state//
* State is a comma delimited list of:
** NEW - the packet has started a new connection
** ESTABLISHED - the packet is associated with a connection which has seen packets in both directions
** RELATED - the packet is starting a new connection, but is associated with an existing connection
** INVALID - the packet  could not be identified for some reason (typically some type of error)


TCP - already stateful:
* 3-way handshake
* Evaluate TCP flags to determine state
[img[img/state-tcp-connection.jpg][http://www.iptables.info/en/connection-state.html]]


UDP - stateless
* no flags to evaluate
* kernel tracks outbound UDP packets.  Responses to outstanding requests are marked
[img[img/state-udp-connection.jpg][http://www.iptables.info/en/connection-state.html]]


!! Tool Overview
* netfilter - What's running under the hood
** This is manipulated with the deprecated {{Command{iptables}}} command.
* firewalld - A new front-end to simplify managing the firewall
** This is manipulated with the new {{Command{firewall-cmd}}} command.  This is the standard tool to use in ~CentOS 7.

The new ~FirewallD and its set of tools make the management of a basic firewall very easy.  You no longer need to know the intricacies of how ~NetFilter works and how its rules are created.  ~FirewallD will take care of that for you.  It's still good info to know and is included here for reference.  You can skip the //Linux ~NetFilter// section if you'd like and continue with the ~FirewallD section down below.  We'll be using {{Command{ firewall-cmd }}} to manage our firewall instead of {{Command{ iptables}}}, the old command.

----

!!Linux [[netfilter|http://www.netfilter.org/]]
* controlled by the {{Command{ iptables }}} command

!!! Table:
* Sets of chains 
* Default table is named filter
* Additional tables:
** NAT table
** Mangle table - for specialized packet alteration (~QoS)
** Raw table - for configuration exemptions

!!! Chains of rules:
* Firewall rules are grouped into chains
* Rules within a chain are interpreted in order, top to bottom
** Until a match is found
** Or the default target is reached (ACCEPT or REJECT)
* Default chains:
** INPUT: traffic addressed to the system
** OUTPUT: traffic leaving the system
** FORWARD: all packets arriving on one network interface and leaving another
* Custom chains can be created for organizing similar rules

!!! Rules:
* Rules contain matching criteria and a target
* The criteria are based on attributes of the packet, such as IP addresses or ports.
* If the criteria match, either perform the specified action or continue rule processing within the target chain
* If the criteria do not match, move on to the next rule.
* Processing terminates with the chain's default target

!!! Targets:
Each rule contains a target clause to determine what to do with matched packets:
* ACCEPT - allow the packet to proceed
* DROP - silently discard the packet (causes TCP retries)
* REJECT - reject the packet with an ICMP error message
* LOG - track the packet as it matches a rule
* REDIRECT - redirect packets towards a proxy
* RETURN - terminate user-defined chains
* QUEUE - transfer packets to local user programs via a kernel module
* A custom chain may be specified as a target.  Rules in that chain will be evaluated.


!!! iptables Commands

{{Command{iptables -h}}}

!!!! Saving your rules
{{Command{iptables-save > /tmp/iptables.rules}}}
{{Command{iptables-restore < /tmp/iptables.rules}}}

{{Command{service iptables save}}}
Rules are stored in {{File{/etc/sysconfig/iptables}}}


!!!! Firewall Operations:
| !Option | !Definition |
|-L [&#45;-line-numbers] [-v] |List all rules|
|-I //chain-name// //position-number//  //rule// |Insert rule into a chain|
|-A //chain-name//  -i //interface//  -j  //target// |Append a rule to the end of the chain|
|-D //chain-name// //position-number// |Delete a rule from a chain|
|-P //chain-name//  //target// |Sets default policy for the chain|
|-F //chain-name// |Flush all rules in a chain|
|-N //chain-name// |Create a new chain|

!!!! Filter criteria command line options:
| !Option | !Definition |
| -p proto |Match by protocol: tcp, udp, or icmp|
| -s source-ip |Match host or network source IP address|
| -d dest-ip |Match host or network destination address|
| &#45;-sport port# |Match by source port|
| &#45;-dport port# |Match by destination port|
| &#45;-icmp-type type |Match by ICMP type code|
| -i int |Match by interface|
| &#33; |Negate a clause|
| -t table |Specify the table to which a command applies (default is filter)|
| -j //target// |Specify target to use|

!!!! Extensions:
| -m state &nbsp; &#45;-state //state// |filter based on specified //state//|
| -m multiport &#45;-dports //port1//,//port2//,...//portN// |filter multiple ports|
| -m owner &#45;-uid-owner //uid// |filter based on user name|


!!! Examples:

{{{

iptables -L --line-numbers -v

# Allow established traffic:
iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT


iptables -A INPUT -s 10.103.35.0/24 -p tcp --dport 4444 -j ACCEPT
iptables -A INPUT -p tcp -m multiport --dports 20,21 -j ACCEPT

iptables -A OUTPUT -p tcp -m multiport --dports 20,21 -j REJECT


# SSH chain:
iptables -N SSH
iptables -A SSH -p tcp --dport 22 -j ACCEPT -s 10.103.36.24/29
iptables -A SSH -p tcp --dport 22 -j ACCEPT -s 10.103.36.10
iptables -A SSH -p tcp --dport 22 -j ACCEPT -s 150.156.192.0/24
iptables -A SSH -p tcp --dport 22 -j ACCEPT -s 10.156.195.0/24
iptables -A SSH -p tcp --dport 22 -j ACCEPT -s 150.156.193.20
iptables -A SSH -p tcp --dport 22 -j LOG -m limit --limit 1/sec --log-prefix "IPTables-SSH: " --log-level 4

iptables -I INPUT 6 -j SSH -m state --state NEW
}}}

----

!! [[FirewallD|https://firewalld.org/]]

~FirewallD is the new way to manage Linux firewalls.  Everything above in the ~NetFilter section is still running under the hood, but ~FirewallD provides a nice front-end to manage things.

Most of the following information is directly from the [[RedHat Linux Firewalls|https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/security_guide/sec-using_firewalls]] documentation.

The Firewall Stack:
[img[img/firewall-stack.png]]


!!! Zones
firewalld can be used to separate networks into different zones according to the level of trust that the user has decided to place on the interfaces and traffic within that network. A connection can only be part of one zone, but a zone can be used for many network connections. 
!!!! Available zones:
* ''block'':  Any incoming network connections are rejected with an icmp-host-prohibited message for ~IPv4 and icmp6-adm-prohibited for ~IPv6. Only network connections initiated from within the system are possible. 
* ''dmz'': For computers in your demilitarized zone that are publicly-accessible with limited access to your internal network. Only selected incoming connections are accepted. 
* ''drop'': Any incoming network packets are dropped without any notification. Only outgoing network connections are possible. 
* ''external'': For use on external networks with masquerading enabled, especially for routers. You do not trust the other computers on the network to not harm your computer. Only selected incoming connections are accepted. 
* ''home'': For use at home when you mostly trust the other computers on the network. Only selected incoming connections are accepted. 
* ''internal'': For use on internal networks when you mostly trust the other computers on the network. Only selected incoming connections are accepted. 
* ''public'' (default): For use in public areas where you do not trust other computers on the network. Only selected incoming connections are accepted. 
* ''trusted'': All network connections are accepted. 
* ''work'': For use at work where you mostly trust the other computers on the network. Only selected incoming connections are accepted. 


!!! Runtime vs. Permanent Settings

There are two firewall configurations:  
* The runtime settings define the firewall rules currently in effect 
* The permanent settings reflect the stored configuration that will be reloaded if the firewalld service restarts.

Any changes applied to the running firewall only apply while firewalld is running. When firewalld is restarted or the system reboots, the settings revert to their permanent values.

To make firewall changes persistent across reboots, rules need to be saved in both locations.  This can be accomplished in two different ways:

Modify the runtime configuration first:
* Add a new rule to the runtime configuration:  {{Command{ firewall-cmd &#45;-add-service=ssh }}}
* Make your changes permanent if everything works:  {{Command{ firewall-cmd &#45;-runtime-to-permanent }}}
* Or, discard your runtime changes and reload the permanent configuration if there is a problem {{Command{firewall-cmd &#45;-reload }}}

Modify the permanent configuration first:
* Add a new rule to the permanent configuration:  {{Command{firewall-cmd &#45;-permanent &#45;-add-service=ssh}}}
* Reload the permanent configuration {{Command{ firewall-cmd &#45;-reload }}}


!!! Predefined Services

A service can be a list of local ports, protocols, source ports, and destinations, as well as a list of firewall helper modules automatically loaded if a service is enabled. Using services saves users time because they can achieve several tasks, such as opening ports, defining protocols, enabling packet forwarding and more, in a single step, rather than setting up everything one after another. 

Service configuration options and generic file information are described in the firewalld.service(5) man page. The services are specified by means of individual XML configuration files located in {{File{/usr/lib/firewalld/services/}}} which are named in the following format: //service-name//.xml. Protocol names are preferred over service or application names in firewalld. 
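As a concrete illustration, the predefined ssh service file looks roughly like this (the description text is abbreviated here):

{{{
[root@server ~]# cat /usr/lib/firewalld/services/ssh.xml
<?xml version="1.0" encoding="utf-8"?>
<service>
  <short>SSH</short>
  <description>Secure Shell (SSH) is a protocol for logging into and executing commands on remote machines ...</description>
  <port protocol="tcp" port="22"/>
</service>
}}}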

!!! Basic commands

Here are a few basic commands involved in managing a firewall using the ~FirewallD management command, {{Command{firewall-cmd}}}

* Display the current state of the firewall: {{Command{ firewall-cmd &#45;-state }}}
* Display all options available: {{Command{ firewall-cmd -h }}}
* Display the active zones: {{Command{ firewall-cmd &#45;-get-active-zones }}}
** The default zone is //public//.  We'll stick with the default for our ~VMs
* Add a port to the permanent zone: {{Command{ firewall-cmd &#45;-add-port=5667/tcp &#45;-permanent }}}
* Remove a port from the permanent zone: {{Command{ firewall-cmd &#45;-remove-port=5667/tcp &#45;-permanent }}}
* Reload the firewall configuration and activate any new rules added to the permanent zone: {{Command{ firewall-cmd &#45;-reload }}}
* Add a new service to the runtime zone: {{Command{ firewall-cmd &#45;-add-service=ssh  }}}
** When possible, try to add services by name instead of port numbers.
* Remove a service from the runtime zone: {{Command{ firewall-cmd &#45;-remove-service=ssh  }}}
* Copy the runtime configuration to permanent: {{Command{ firewall-cmd &#45;-runtime-to-permanent }}}
* Get all services known to firewalld:  {{Command{ firewall-cmd &#45;-get-services }}}
* List the current runtime firewall configuration: {{Command{ firewall-cmd &#45;-list-all }}}
* List the current permanent firewall configuration: {{Command{ firewall-cmd &#45;-permanent &#45;-list-all }}}
* Forward port 80 from the external interface to port 80 on 10.0.0.10 through the internal interface: {{Command{ firewall-cmd &#45;-zone=external &#45;-add-forward-port=port=80:proto=tcp:toaddr=10.0.0.10:toport=80 }}}
** Only here for reference.  We're not doing any port forwarding in this class.  This command is handy if you're working with a Linux-based router.

{{Note{''Note:'' When possible, allow traffic through the firewall with the {{Monospaced{-&#45;add-service}}} option instead of {{Monospaced{-&#45;add-port}}}.  It will result in a cleaner configuration for services which utilize multiple ports, such as DNS.  For example, if you only run {{Monospaced{-&#45;add-port=53/udp}}} and neglect the TCP protocol, you'll allow DNS queries to your server but will block zone transfers to the slave, which utilize TCP. }}}
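Putting a few of these together, here is a typical sequence for allowing a new service, using the predefined http service as a hypothetical example:

{{{
firewall-cmd --state                     # confirm firewalld is running
firewall-cmd --list-all                  # review the current runtime configuration
firewall-cmd --add-service=http          # allow http in the runtime configuration
# ... test that the service is now reachable ...
firewall-cmd --runtime-to-permanent      # keep the change across reloads and reboots
firewall-cmd --permanent --list-all      # verify the permanent configuration
}}}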

!Sources

http://bodhizazen.net/Tutorials/iptables
http://www.thegeekstuff.com/2011/06/iptables-rules-examples/
http://www.liniac.upenn.edu/sysadmin/security/iptables.html
http://www.borgcube.com/blogs/2014/05/securing-ntp-and-rate-limiting-using-iptables/
http://fideloper.com/iptables-tutorial	
https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/security_guide/sec-using_firewalls

! Assignment

<<tiddler [[Lab 51 - Host-based Firewalls]]>>
! Material
!! Read:
* Linux Bible Chapter 12 - Managing Disks and Filesystems
* Linux Bible pages 334 & 335 - {{Command{du}}} & {{Command{df}}} commands
** These commands are great to know and very useful for troubleshooting


! Notes

File storage is a core function of any unix system
Something must store the operating system
Along with any system or user data

Core storage concepts
* Various storage layers
* What we store files on and how we're currently making it available to the system
* Network file systems
* swap
/%
Additional storage discussion in the next section
* Additional filesystem types
* Other ways to store data and make files available to the system
%/

!! Storage layers:

[img[img/storage-layers.jpg]]

Hardware storage devices
 - Storage medium (hard drive (spindle), flash / solid state (SSD, ~NVMe), optical, tape)
 - How the medium physically attaches to the system (SATA, SAS, USB, SCSI, etc)
Redundancy (optional): RAID arrays or Volumes
Partitions
Filesystems


!! Storage Hardware Devices

!!! Storage medium:
* Traditional disks: 
** Platter rotation speed in RPM:  5400, 7200, 10k, 15k
*** Rotation speed affects the transfer rate of the volume
** File fragmentation
*** Performance suffers as blocks associated with a file become scattered around the disk
*** Defragmentation arranges the disk so file blocks are all together.  Windows has defrag processes but there is no such functionality in Linux filesystems.
* SSD/~NVMe - Solid State Disk
** Significantly Faster
** Limited number of writes
** Available in several speed and resiliency options.
*** [[SSD Endurance Experiment|http://techreport.com/review/27909/the-ssd-endurance-experiment-theyre-all-dead]]
** Less prone to failure since there are no moving parts
*** No more defragging
** More expensive per GB
** Data is written out in pages.  A memory page must be erased before data is written.  Erase is slower than write.
*** TRIM command - erase pages before they're needed to increase performance
** Be sure to align filesystem clusters to SSD pages to avoid extra wear
*** Align partitions to page boundaries 
* Hybrid drives
** Part SSD / part magnetic platter
** These seem to have fallen out of fashion now that SSD prices have come down so far.  I haven't seen them in a few years.
* Flash drives / cards
* Optical - DVD / CD.  Now mostly a relic of the past and replaced by USB
* Tape - Good for archival but mostly replaced by disk-to-disk and cloud backup solutions now that storage is so cheap.

/%
!!! Interfaces

Desktop:
* ATA - Advanced Technology Attachment
** PATA - Parallel ATA  (IDE)
*** ATA/133 - 133 MB/s
*** 40 pin, 80 conductor ribbon cable
*** Fully antiquated now and replaced by SATA
** SATA - Serial ATA
*** Replaced PATA
*** much higher transfer rates
*** many performance enhancements
*** hot swap
*** far better cabling
* [[M.2 Socket|https://en.wikipedia.org/wiki/M.2]] for ~NVMe drives
** Replacing SATA connections
** Uses PCI Express bus
** Significantly faster data transfer rates over SATA/SSD.

Server:
[img[img/scsi.jpg]]
*SCSI - Small Computer System Interface
** Parallel SCSI
*** Old ribbon cables
*** for daisy chaining of devices
*** Used for connecting disks and early peripherals (tape drives, CD writers, scanners, printers, etc)
*** Bus ends must be terminated to prevent noise by absorbing signals which reach the end of the bus 
*** All devices have a unique target number to identify them on the bus (0 - 15), set by dip switches or jumpers
*** Largely antiquated now and replaced by SAS
** SAS - Serial Attached SCSI
*** New standard for enterprise drive connection
*** Point to point connections instead of chained
*** No longer limited to 16 devices on the bus
*** Connectors compatible with SATA
*** Much higher transfer rates
*** SATA price and performance make for good enterprise alternatives
*** Similar connectors and support for SATA drives
* [[M.2 Socket|https://en.wikipedia.org/wiki/M.2]] for ~NVMe drives
** These are starting to show up in servers with enterprise-grade drives to store the operating system

* Fibre Channel
** enterprise
** high bandwidth
** speed
** can connect many devices at once

Both:
* USB
** slow compared to the alternatives, especially < USB 3.
** v1: 1.5 Mbit/s or 12 Mbit/s (~Full-Bandwidth)
** v2: ~Hi-Speed - 480Mb/s
** v3:  ~SuperSpeed - 4Gb/s
** v3.1: ~SuperSpeed+ - 10Gb/s
** USB converters for SATA - Connect a traditional hard drive to USB.  Great for the workbench
* Network access - Storage area Network (SAN)
** iSCSI (SCSI over IP)
** Network file system (NFS)
%/

!!! From the operating system's perspective:

Device files are all in {{File{/dev/}}}
Identify the devices:
 - Check the kernel boot logs.  Devices are detected on boot and mentioned in the log.  {{Command{ dmesg }}}
 - Scan the syslog logs: {{File{ /var/log/messages }}}
 - Physical drives: {{Command{ ls /dev/sd* }}}
 - Virtual drives {{Command{ ls /dev/vd* }}}
 - On Linux, the {{Command{lsblk}}} command will display all storage devices and where they are mounted
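For example, on a hypothetical VM with a single 20 GB virtual disk, the output might look like this (yours will differ):

{{{
[user@shell ~]$ lsblk
NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
vda    253:0    0   20G  0 disk
├─vda1 253:1    0    1G  0 part /boot
└─vda2 253:2    0   19G  0 part /
}}}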


!!! Disk failure 

Disk failure is increasingly common
* [[Google disk failure research|http://static.googleusercontent.com/media/research.google.com/en/us/archive/disk_failures.pdf]] - Old now, but the problem is likely worse now.
* Backblaze hard drive reports:  https://www.backblaze.com/b2/hard-drive-test-data.html
** I use their data to inform my hard drive purchases.  Consumer disks seem to decrease in quality every year.
** When I first started tracking their data 10 years ago:
*** Drives tended to fail either very early (first few months) or after about 3 years
*** Generally there was only a 75% 5 year survival rate
*** It's important to have redundancy for anything that matters
* Traditional hard drives have moving parts
** Drive burn-in - early disk activity to catch failures before actual use
* Solid state
** Limited number of write cycles
** Wear leveling - distribute wear across the disk
*** Relocation of static data to higher-wear areas of the disk
** Firmware will keep track of disk usage to determine where to write the data
* {{Command{badblocks}}} command  
* SMART Monitoring
** https://en.wikipedia.org/wiki/S.M.A.R.T.
* A good backup and disk redundancy strategy will mitigate the effects of disk failure.  


!! RAID

Combine multiple physical storage devices into a single virtual device.

RAID Levels:
Linear (JBOD): Concatenate all disks into one large logical volume
Level 0, 1, 5, 6, 10
* 0: Striping - more for speed than resiliency.  Parts of a file are spread across multiple disks.
** A single disk failure means all data is lost
* 1: Mirroring - duplicate data across multiple disks
** A single disk failure does not result in data loss since the data also exists on another drive
** Increased performance on read but decreased on write
** Half of your drive capacity is lost to redundancy
* 5: Striping + Parity.  Files are written across multiple disks along with parity information on one drive
** If one disk fails it can be rebuilt from the parity data
** All data is lost if more than one disk fails.  Replace a failed drive quickly before another is lost.
** Performance penalty but maximizes the amount of available disk space since only one drive is dedicated to redundancy
* 6: Striping + Two Parity.  Files are written across multiple disks along with parity information on two drives
** Two drives of parity instead of one so the array can suffer 2 drive failures without data loss.
** Two drives worth of capacity are thus lost to support the redundancy.  This is a safer option for larger disks which may take a long time to rebuild or larger arrays with a lot of disks
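For a quick capacity comparison, consider a hypothetical array built from four 4 TB disks:
* RAID 0: 4 x 4 TB = 16 TB usable, but no disk may fail
* RAID 1 (two mirrored pairs): 8 TB usable
* RAID 5: (4 - 1) x 4 TB = 12 TB usable, survives one disk failure
* RAID 6: (4 - 2) x 4 TB = 8 TB usable, survives two disk failures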

RAID can be used to protect against hardware failure, not data corruption or accidental data loss!  It is not a substitute for file backups.

!!! Volume groups

Aggregate physical devices to form pools of storage (volume groups)
This pool can be divided into logical volumes for use

Linux LVM
* The primary way storage is handled in Linux
* Allocate space on demand and resize filesystems
* Supports snapshots
* Does not support raid 5 or 6.  Use linux software raid for redundancy then LVM to assign slices
* Access with {{Monospaced{pv}}}, {{Monospaced{vg}}}, {{Monospaced{lv}}} commands.  ie: {{Command{lvdisplay}}}
** The pv* commands display or manage physical volumes
** The vg* commands display or manage volume groups
** The lv* commands display or manage logical volumes

* Thin provisioning:
** Allocate more storage than you physically have available.
** Commonly used for virtual machines when you expect not all ~VMs will use the full amount of storage they have been allocated.  Here we see examples of the virtual storage allocation greatly exceeding the volume size.

[img[img/thinProvision.png]]
/%
Sun zfs
 - A mix of RAID and LVM

!!! Software RAID:
{{{
mdadm
Can monitor and send emails if there are any problems.

Use cfdisk to create partitions then mirror them
mdadm --create /dev/md0 --level=mirror --raid-devices=2 /dev/vdb1 /dev/vdc1

cat /proc/mdstat

mdadm --stop /dev/md0
mdadm --assemble --scan 

mdadm --detail --scan >> /etc/mdadm.conf
mdadm -As /dev/md0

mdadm /dev/md0 -f /dev/vdc1
cat /proc/mdstat
tail /var/log/messages

mdadm /dev/md0 -r /dev/vdc1
mdadm /dev/md0 -a /dev/vdc1

mdadm --stop /dev/md0
}}}

Use mdadm to create large raid5 or 6 arrays then LVM to allocate space into partitions
%/


!! Partitions

A fixed-sized division of the storage device

A way to organize files by type or access level.

Traditional partitioning, Master Boot Record (MBR) style
The first sector (512 bytes) of the disk contains the MBR: the first 440 bytes hold boot code and the next 64 bytes hold the partition table.
There is a 4 partition limit due to the 64-byte size constraint

The new way: GUID partition tables (GPT)
* Allows us to break the 2 TB disk barrier.  Most spindle drives are larger than this now.
* Allows for more than 4 partitions per disk
* Partition info is stored at beginning and end of the disk (more resilient) 
* protective MBR
** Prevents non-GPT utilities from overwriting GPT partition info
** Allows non-GPT systems to boot GPT disks

Some tools for working with partitions:
* {{Command{fdisk -l}}}
* {{Command{cfdisk}}} for {{File{/dev/vdb}}}
* {{Command{parted}}} / {{Command{gparted}}} (kali usb boot)
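For example, to label a spare disk with GPT and create a single partition spanning it (a sketch; {{File{/dev/vdb}}} is assumed to be an empty disk, so double-check the device name on your system first):

{{{
parted -s /dev/vdb mklabel gpt                 # write a new GPT label (destroys existing data!)
parted -s /dev/vdb mkpart primary 1MiB 100%    # one partition covering the whole disk
parted /dev/vdb print                          # verify the result
}}}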


!! Filesystems

The interface between the data on the disks and the operating system.  

Linux Filesystem types:
* ext - extended filesystem
* ext2 - original and longtime standard
** extended further by new versions: ext3 and ext4
* xfs - 64bit with journaling - parallel I/O for high performance

{{Command{mkfs.*}}} - commands to create filesystems, eg: {{Command{mkfs.xfs}}}

Filesystem mounting
* mount points - Where on the filesystem tree a storage volume is attached
* {{File{/etc/fstab}}} file to configure standard filesystems and mount on boot
* Standard {{Command{mount}}} and {{Command{umount}}} commands
** {{Command{mount}}} - mount a filesystem to a mount point
** {{Command{mount [-t type] [-o option[,option]...] device dir}}}
** {{Command{mount -a }}}
** Mount options
*** rw / ro
*** noexec  (Maybe for /tmp/?)
*** nosuid   (Maybe for /home/?)
*** noauto - Used in fstab to not mount the filesystem on boot
*** remount - remount a filesystem, changing its mount options.  Useful for remounting a ro filesystem as rw
** {{Command{umount}}}  - unmount a filesystem.  A filesystem that is in use cannot be unmounted
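Putting those pieces together, a minimal sketch, assuming a new partition at {{File{/dev/vdb1}}} and a mount point of {{File{/data}}} (both hypothetical):

{{{
mkfs.xfs /dev/vdb1               # create an xfs filesystem on the new partition
mkdir /data                      # create the mount point
mount -t xfs /dev/vdb1 /data     # attach it to the filesystem tree
df -h /data                      # confirm it is mounted and check its size

# /etc/fstab entry so it is mounted automatically on boot:
# /dev/vdb1    /data    xfs    defaults    0 0
}}}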


{{Command{fsck}}} - filesystem check - find and fix any filesystem errors
 * journaling speeds things up
/%
Root reserved space
* 8% of the volume is reserved for root (by default)
* Users cannot utilize this last 8%
* A safety valve to prevent users from choking out and crashing a system

Usage quotas:
* A way to limit disk utilization per user or per group
%/


!! Other filesystem types:

!!! ~RAM-backed filesystem

ramfs - RAM will be used to store files until it is exhausted.  No limit to the size of the filesystem.

tmpfs - Also RAM backed, but a max size can be specified.  Will use swap if physical memory is exhausted.  This filesystem type largely replaces the older ramfs.

{{Command{mount -t //TYPE// -o size=//SIZE// //DEVICE// //MOUNTPOINT//}}}
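For example, a hypothetical 256 MB scratch area (adjust the size and mount point to suit your system):

{{{
mkdir -p /mnt/ramdisk
mount -t tmpfs -o size=256m tmpfs /mnt/ramdisk
df -h /mnt/ramdisk
}}}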

Note memory usage before creating RAM backed filesystems.  Don't starve your system for resources.
Check total and available memory with the {{Command{free}}} command

These types of filesystems are especially useful for logs on systems booted from flash drives (eg: Raspberry Pi) so you don't add extra wear to the drive


!! Additional filesystem commands

* {{Command{du}}} - Disk usage - Show how much space is being consumed by files or directories
** Useful for tracking down high disk usage
** Examples:
*** Display disk usage of all directories under {{File{/}}}: {{Command{du -sh /*}}}
*** Top usage directories sorted by size:  {{Command{du -sk * | sort -nr | head}}}
* {{Command{df}}} - Disk Free - Show the current utilization of all mounted filesystems


! Assignment

<<tiddler [[Lab 52 - Bring Files VM online]]>>
----
<<tiddler [[Lab 53 - Storage Expansion]]>>
/%
----
<<tiddler [[Lab 44 - Monitoring disk usage with Nagios]]>>
%/
! Material

Part 2 continues our storage work with a deeper dive into the Linux Logical Volume Manager (LVM).  This is one of the main Linux storage management systems and most deployments use it to some degree.

!! Read:
* Reflect on the reading from Part 1 and tasks from Lab 53.  Linux LVM is specifically mentioned in Chapter 12 on pages 285-290.

! Notes:

Linux LVM
* The primary way storage is handled in Linux
* Allows us to allocate space on demand and resize filesystems
* Supports snapshots
* Does not support raid 5 or 6.  Use linux software raid for redundancy then LVM to assign slices
* Access with pv, vg, lv commands.  ie: {{Command{lvdisplay}}}
** The pv* commands work with physical volumes
** The vg* commands work with volume groups
** The lv* commands work with logical volumes
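A minimal end-to-end sketch of the workflow; the device, volume group, and logical volume names below are hypothetical, so adapt them to your lab systems:

{{{
pvcreate /dev/vdb1                   # initialize the partition as a physical volume
vgcreate datavg /dev/vdb1            # create a volume group from it
lvcreate -n datalv -L 5G datavg      # carve out a 5 GB logical volume
mkfs.xfs /dev/datavg/datalv          # put a filesystem on the logical volume
mkdir /data
mount /dev/datavg/datalv /data

# Later, grow the volume and then the xfs filesystem on it:
lvextend -L +2G /dev/datavg/datalv
xfs_growfs /data
}}}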

! Assignment
<<tiddler [[Lab 54 - Logical Volume Manager]]>>
! Material

The bottom of page 70 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] has some information about working with home directories and the shortcut metacharacter involved with them
[[Streams & Redirection, Introduction to filters]]
[[Filters Continued (awk, sed, & tr)]]
[[Working with grep]]


! Assignment

Complete:
* Due Wednesday: [[Lab 4|labs/lab4.pdf]], [[Lab 5|labs/lab5.pdf]], and [[Lab 6|labs/lab6.pdf]]
* Due Saturday: [[Lab 7|labs/lab7.pdf]] & [[Lab 8|labs/lab8.pdf]]
! Review 
!! Review lab
* Complete:  [[Lab 10|labs/lab10.pdf]]
* ''Note:'' We're skipping over Lab 9

! New Material

* File Permissions:
** Read [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] Chapter 9, pp 90 - 100 (Stop at //Some Special Permissions//)
** Watch: 
*** File Permissions: https://www.youtube.com/watch?v=8SkN7UofOww
*** Umask: 
**** Basic: https://www.youtube.com/watch?v=bZZHKy8UN_s
**** Detailed: 


! Notes

!! File Permissions

The Unix operating system has multiple levels of securing access to resources. We can restrict who can access the system through userids and login credentials, we can limit who can become the superuser and act as the administrator of the system, we can control who can access certain directories on the system, and we can control access to files. The first two are items for an administrator to configure, but the latter two regular users can control for files that they own. Being able to restrict access to certain files is a critical function of a multi-user system. For example, we restrict access to the lab assignments everyone is uploading so no one else peeks at your work. Certain sensitive system files are restricted to keep the system more secure.

Hopefully by now we're comfortable navigating the filesystem and identifying files by name, both individually and in groups. Next I'd like to examine how we can manipulate the file's permissions.

Permissions can be set based on three different tiers:

* User - the owner of the file
* Group - a group that has access to the file
* Others - everyone else on the system

And three different permissions can be set on each file

* Read - The ability to read a file or list the contents of a directory
* Write - The ability to modify content of a file or create files in a directory
* Execute - The ability to run a program or access a directory
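All three tiers and all three permissions are visible in the output of {{Command{ls -l}}}.  A hypothetical example:

{{{
[user@shell ~]$ ls -l notes.txt
-rw-r--r-- 1 user staff 1024 Oct  1 12:00 notes.txt
}}}

The first character is the file type, followed by three permission triplets: rw- for the user (owner), r-- for the group, and r-- for others.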

Chapter 9 in //The Linux Command Line// discusses permissions in detail.

This youtube video is a good permission overview:  [[File Permissions|https://www.youtube.com/watch?v=8SkN7UofOww]]


!!! File & Directory Permissions

The following tables and graphics can serve as a quick reference:

|!Type|!File|!Directory|
| read (4) | read contents | List directory |
| write (2) | change / delete file | Add files |
| execute (1) | run executable | cd into |

!!!! chmod

The {{Command{chmod}}} command can be used to change permissions for existing files.
* using octal codes
** Read (4), Write (2), and Execute (1)
** Three positions:  user, group, and others
* using symbolic codes
** who:
*** u - user
*** g - group
*** o - others
*** a - all positions
** operator:
*** = explicitly set
*** + add permission
*** - remove permission
** permission:
*** r - read
*** w - write
*** x - execute

{{Note{''Note:'' Use symbolic abbreviations when making changes to permissions without consideration to what is already set, eg: when adding or removing permissions. The use of octal codes requires all permissions be completely reset - a user cannot set, add, or remove individual permission settings.

For example, suppose I only want to __add__ write permissions for the group. Without knowing what the permissions currently are, I have to use symbolic notation to modify the permissions on the file. In this case with {{Command{chmod g+w //file//}}}

If the lab question asks you to ''set'' permissions, use __octal codes__. If it asks you to ''add or remove'', use __symbolic__ abbreviations.
}}}
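A few quick examples, assuming a hypothetical file named {{File{notes.txt}}}:

{{{
chmod 644 notes.txt    # octal: explicitly set rw-r--r--
chmod g+w notes.txt    # symbolic: add write for the group, leave everything else alone
chmod o-r notes.txt    # symbolic: remove read from others
}}}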

<html><center><img src="img/chmod1.png" alt=""><BR><BR><HR width="75%"><img src="img/chmod2.png" alt=""></center></html>


!!! umask

The {{Command{umask}}} command can be used to establish default permissions for all newly created files.

* umask - user mask - which permissions to restrict. (mask = remove)
* start with full permissions 777
* The umask value is which bits to remove.
* The execute bit (1) will automatically be subtracted from all positions for regular files
* Making a new regular text file executable must be a manual task

A mask refers to bits to be removed. If we do not want newly created files to have write permissions for the group or others, we need to mask 2 from the group and others positions, resulting in a umask of 22.

Examples:

A umask value of 22 will set default permission for new files to 644 (777 - 22 - 111) and directories to 755 (777 - 22)
A umask value of 77 will set default permission for new files to 600 (777 - 77 - 111) and directories to 700 (777 - 77)
''Note:'' Newly created files are not granted execute automatically regardless of the umask value.
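To see this in action, run something like the following in a scratch directory; the umask change only affects the current shell session:

{{{
umask                    # display the current mask
umask 077                # remove all permissions for group and others on new items
touch newfile            # new file defaults to 600 (rw-------)
mkdir newdir             # new directory defaults to 700 (rwx------)
ls -ld newfile newdir    # verify
}}}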


!! Misc:

The book creates empty files for its examples with {{Command{> foo.txt}}}.  For a file which does not already exist, this has the same effect as executing {{Command{touch foo.txt}}}.


! Assignment

* Review lab
** Complete:  [[Lab 10|labs/lab10.pdf]]
** ''Note:'' We're skipping over Lab 9

* File Permissions:
** Read [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] Chapter 9, pp 90 - 100 (Stop at //Some Special Permissions//)
** Watch: File Permissions: https://www.youtube.com/watch?v=8SkN7UofOww
** Complete:  [[Lab 11|labs/lab11.pdf]] & [[Lab 12|labs/lab12.pdf]]
! New Material

* Read Chapter 6 in //Linux Bible//
** You can stop at //Setting processor priority with nice and renice// on page 142.
* Watch:
** [[Linux Terminal 101: Controlling Processes|https://www.youtube.com/watch?v=XUhGdORXL54]]
** [[Linux Terminal 101: How to View Processes|https://www.youtube.com/watch?v=Udr-qE0NEO0]]


! Notes

Our next material contains information on process management and job control.  This will become especially useful once we start shell scripting and managing systems.  This material will assist you with running multiple simultaneous tasks on the system and with monitoring system resources to ensure your scripts are not impacting performance.

!!! Note:
* Don't get too bogged down on the Signals section, pages 140 & 141.  It's important to know, but we'll work more with it later.

Watch: 
* [[Linux Terminal 101: Controlling Processes|https://www.youtube.com/watch?v=XUhGdORXL54]]
** Use a command like {{Command{less}}} or {{Command{tail -f}}} to test her examples.  We do not have {{Command{gedit}}} available.
* [[Linux Terminal 101: How to View Processes|https://www.youtube.com/watch?v=Udr-qE0NEO0]]


''Processes'' and ''Jobs'' have simple, fundamental differences:
 - Processes refer to all tasks currently running on the Linux system.  Every running task is considered a process and has a unique process ID.
 - Jobs are relative to your current login session.  They refer to current tasks you are running in that shell instance that may be running in the background, running in the foreground, or paused.  This presents an easy way to run multiple tasks at the same time from the same shell session or easily move between two tasks (eg: a text editor and a man page)


These are my notes from the last time I taught this class as a lecture.  Be sure to also read the chapter.

!! Processes
Everything on the system is represented by either a file or a process - something that physically exists or something that's running.
A process is a program that is executing
Files provide us with data, processes make things happen
The kernel manages processes - all processes are assigned a Process ID number (PID)
The kernel also maintains a process table to keep track of everything, indexed by that PID.

Processes are a mix of system and user processes.  In the process list, kernel processes are listed within [ ]
The kernel contains a scheduler to manage the workload and share system resources
The scheduler chooses a waiting process and grants it a short slice of time to run along with any needed resources (processor, memory, I/O, other devices, etc)
If the process is not completed within its time slice, it goes back into the scheduling list and waits for additional processing time
time slice = short interval a process is allowed to run for.  

Every process is created (forked) from another process
Killing the parent process will (in most cases) kill the child processes
When a process dies or completes, its resources are reallocated
init/systemd process, PID 1 - parent to all processes on the system
created by the kernel early in the boot procedure to complete booting functions and start system services

!! ps command
*ps - process status
*will show useful process information
*BSD options versus UNIX options
* These are some BSD options (also available in Linux)
**{{Command{ps}}} - Show processes for your current login session
**{{Command{ps -a}}} - Show all user processes
**{{Command{ps -u}}} - Display additional process information
***%CPU - percentage CPU usage
***%MEM - percentage memory usage
***VSZ - virtual size in Kbytes
***RSS - resident set size
***TT - control terminal name (tty//x////x//)
***STAT - symbolic process state
***TIME - accumulated CPU time, user + system
**{{Command{ps -x}}} - Display system processes
**{{Command{ps -aux}}} - Show extended information on all processes  - This is often the most useful way to use the command.
**{{Command{ps -U //username//}}}  - Display processes associated with //username//
**{{Command{ps -p //PID//}}}  - Display the process with process ID //PID//

*top - display and update information about the top cpu processes
**THR - number of threads in the process
**PRI - current priority of the process
**NICE - nice amount (in the range -20 to 20)
**SIZE - total size of the process (text, data, and stack) (in K)
**RES - current amount of resident memory (in K)
**STATE - current process state (START, RUN, SLEEP, STOP, ZOMB, WAIT, LOCK)
**C - processor number on which the process is executing
**TIME - number of system and user cpu seconds that the process has used
**WCPU - weighted cpu percentage

!! Killing Processes
*kill [-signal] //pid//
* Common Signals:
| !Signal Number | !Signal Abbreviation | !Description |
| 1 | HUP |Hangup (restart process, reload config)|
| 2 | INT |Interrupt (~CTRL-C)|
| 3 | QUIT |Quit|
| 9 | KILL |Immediate kill.  Not catchable or ignorable.|
| 15 | TERM |Request to gracefully terminate (default)|
SIGINFO = ~CTRL-T  (~FreeBSD Only)

*Killing large groups of processes
**{{Command{pkill}}} command
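A few examples; 4321 is a hypothetical PID, so find real ones with {{Command{ps}}} first:

{{{
kill 4321            # politely ask the process to terminate (signal 15, TERM)
kill -HUP 4321       # ask it to restart / reload its configuration
kill -9 4321         # kill it immediately (KILL cannot be caught or ignored)
pkill -u bob sleep   # send TERM to every process named sleep owned by user bob
}}}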

[>img[img/jobs.png]]
!! Jobs & job control

*Jobs - a command or set of commands entered in one command line.
*jobs are related to the user's session and are not global.
*STDIN is locked while a job is running in the foreground - it is only available to the current job until it completes.
*running background jobs allow the user to access these resources and have control of the shell.
*background jobs will still send their output to the screen and must be brought back to the foreground if they request input from the user
*a job may have one of three states - foreground, background, and stopped.

* append ''&'' to the command string to run it directly to the background
*~CTRL-Z - suspend a running foreground process
*Related commands:
** {{Command{jobs}}}
** {{Command{fg}}}
** {{Command{bg}}}
** {{Command{kill -STOP %//id//}}}
* Manipulate jobs with ''%'' and the job number
** Examples:  {{Command{fg %1}}} - resume the first background job in the foreground
** {{Command{bg %2}}} - resume the second job in the background
*currency flags: ''+'' and ''-''
** ''+'' most recently accessed job, default job if no arguments are specified.
** ''-'' second most recently accessed job, default job when ''+'' flagged job completes.
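A short hypothetical session tying these together:

{{{
sleep 600 &      # start a long-running command directly in the background (job 1)
vi notes.txt     # start an editor in the foreground (job 2)...
                 # ...press CTRL-Z to suspend it and return to the shell
jobs             # list both jobs and their states
fg %2            # return to the suspended editor; CTRL-Z again to step back out
kill %1          # terminate the background sleep when finished
}}}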

!! Two additional recommended videos:

* Kill command video: https://www.youtube.com/watch?v=fT-h45L9RAY
* Difference between processes and jobs: https://www.youtube.com/watch?v=eqtiw8S8GZw


! Assignment

!! Complete lab [[Lab 13|labs/lab13.pdf]]
! Review your past work

All outstanding labs have been returned.  Please review them and be sure you are clear on everything we've covered.

If you have any questions, or got any lab items wrong and do not know why, please bring it up in Discord.


! Improving soft skills

Recent labs brought us to four soft skills which will be important for this course and for your careers:

# Read the directions thoroughly
# Be thorough in your writing.
# Use proper terms
# Test your theories

!! 1. Read the directions thoroughly

Lab 10, #9 asked for both an explanation of the action performed by the last command and the output it returns.  Many responses omit one or the other.  Throughout the semester many of the lab questions will similarly ask for multiple things.  Be sure to provide everything requested.


!! 2. Be thorough in your writing

We're not in the classroom where I can easily ask you to clarify your responses.  In writing, you must be thorough so the reader understands your message.  This is important now for grading to convey that you fully understand what's going on and will be important later when it comes time for you to create documentation or explain things to colleagues.  I encounter far too much poor "professional" documentation which is either vague or omits critical details.  Lab 10, questions 8 and 9 highlighted this.  Too many points were lost unnecessarily due to incomplete explanations. 

I provided an example for the first command in question 8:
<<<
8.  //Summarize the actions performed by the following commands (don’t just copy the output)://
<<<
|ls&nbsp;&nbsp;&nbsp;|&nbsp;&nbsp;List the contents of the current directory&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;|

This explanation concisely lists the action which is taken (list the contents) and the object which is acted upon (the current directory).  

For the second command, {{Command{ls -a}}}, a common response I received was //list hidden files//.  This response omits two critical details:  Are //only// hidden files to be listed?  Which hidden files are we listing?  A thorough response would be: //list all files, including hidden files, in the current working directory//.  Here we're concisely explaining all of the components of this command:  What the command does (list all files), the option provided (including hidden files), and the target (in the current working directory).

Even worse, for the command {{Command{ls /}}}, I often receive several responses which were simply //root directory//.  What about it?  What's the action that's being taken?  Here, the action taken upon the ''@@root of the filesystem@@'' is omitted completely.  A response for this command should be something like //list the contents of the ''@@root of the filesystem@@''//.  We're explaining both the command and the target.

Notice the highlighted text above and the output of the command {{Command{ls /}}}:
{{{
[nmerante@csshell ~]$ ls /
bin   dev      etc   lib    media  opt   root  sbin  sys  usr
boot  entropy  home  lib64  mnt    proc  run   srv   tmp  var
}}}
There is an entry in the output named {{File{//root//}}}.  Using //root directory// in the response to that question is ambiguous.  Does root directory mean {{File{/}}} or {{File{/root/}}}?  Referring to {{File{/}}} as //the root of the filesystem// helps eliminate that problem.


Another example:  For the command {{Command{ls .}}}, I received the response //lists the current directory//.  The command {{Command{pwd}}} will display the current directory.  The {{Command{ls}}} command lists the //contents of// its target.  There's a big difference between the two.

Yet another example:  For #9, I occasionally receive the terse response //Changes to tmp directory// as a response to the commands {{Command{cd /tmp/}}} and {{Command{cd /var/tmp/}}}.  Those are two different paths.  How can the answer be the same for both of them?  Be specific - you're provided with an absolute path in the question, so it might be a good idea to use the same absolute path in the responses.


We'll be in this situation throughout the semester.  Be sure your responses are thorough and do not omit the critical details.  Even if you never touch the Linux command line again, improving your writing will be a universal skill that will serve you well later.


!! 3. Use proper terms

Lab 10 questions 8 and 9 asked you to explain what the commands {{Command{ls ..}}} and {{Command{cd ..}}} will do.  I received a lot of responses that contained the phrase "//previous directory//" to refer to the {{File{..}}} portion of that command string.  //Previous directory// is ambiguous.  To me, that refers to the last directory you were in.  The directory {{File{..}}} is a special directory that refers to the ''//parent//'' of a directory, so the command {{Command{ls ..}}} will //display the contents of the parent of the current working directory//. 


!! 4. Test your theories

I often receive a lot of solutions that clearly will not produce the desired results.

We have a lab environment available to practice the material and test your solutions. Submitting untested solutions will especially be a problem later in the semester when we get to more complicated material.  Don't be lazy and just guess.  I tend to grade far more harshly when I encounter such obviously incorrect responses.
! Review:

!! I/O Practice

Lab 17 is a practice lab for I/O and moving output from one command to another.  It will leverage material from the last two weeks and some ~CI132 material to solve a real-world problem and is a good example of using these tools and concepts.  This lab will introduce the openssl command with a couple examples and then ask you to use it to return useful data.

It's a bit complicated though, so don't be afraid to start up a conversation in Discord if you get stuck.

!! Complete:
* Lab [[Lab 17|labs/lab17.pdf]]


! Material:

!! Quoting:
* Read Chapter 7, pp 75-79 (Quoting) in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]].
* Watch:  Linux shell quotes: https://www.youtube.com/watch?v=1Ukw0IjGKsI

This half of the week is lighter than the second half.  Getting a head start on [[Week 5, Part 1]] would be wise.


! Notes:

!! Escaping & Quoting:

!!! Quoting - ' " \

Some special characters, such as the space and most symbols, have a special meaning to the shell. Occasionally we need to use those special characters literally without allowing the shell to interpret their special meanings.

Quoting allows us to protect these special characters from the shell.  It is necessary in order to use a metacharacter literally, disabling its special shell meaning.

For example, consider the scenario where you need to display the contents of a file which contains a space in the name.  The space has a special meaning to the shell; it is our argument separator.

If my file is named {{File{my notes.txt}}}, and I try to execute the command {{Command{cat my notes.txt}}} to display it, the space in the file name will cause cat to try to display the file {{File{my}}} and the file {{File{notes.txt}}}, neither of which actually exist.

I need to protect that special symbol, the space, from the shell to ensure the cat command gets it.  There are three ways I can do so:

* {{Command{cat "my notes.txt"}}}
* {{Command{cat 'my notes.txt'}}}
* {{Command{cat my\ notes.txt}}}

Each of the options works a little differently.  Knowing these differences allows you to choose the best method for the task.


Three ways to quote:

* Backslash (\) - Changes the interpretation of the character that follows
** \ is the escape character, disable special meaning of a shell special character.
** Converts special characters into literal characters and literal characters into special characters
** n vs \n
** printf "Home is %s\n" $HOME
** \ followed by return - suppress the special meaning of the return key
* Double Quote - remove special meaning of most metacharacters
** " quoting will evaluate variable, command, and history substitution.
* Single Quote
** ' is stronger than "
** ' quoting disables all substitutions - everything between single quotes is treated literally
* You can alternate quotes to include the other type: echo "Today's date is `date`"
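A quick demonstration of the difference; the home directory shown is hypothetical:

{{{
[user@shell ~]$ echo "My home is $HOME"
My home is /home/user
[user@shell ~]$ echo 'My home is $HOME'
My home is $HOME
[user@shell ~]$ echo My\ home\ is\ $HOME
My home is /home/user
}}}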

You can read about them in Chapter 7, pp 75-79 (Quoting) in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] or watch this video:  [[Linux shell quotes|https://www.youtube.com/watch?v=1Ukw0IjGKsI]].


! Assignment:

!! Complete:
* Labs:  [[Lab 17|labs/lab17.pdf]] & [[Lab 18|labs/lab18.pdf]]
* ''Note:'' We're skipping labs 14-16
! Material


Chapter 3 in the //Linux Bible// also presents a lot of this material and has some tips for working more efficiently on the command line.

!! History Substitution:
* Read: [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] Chapter 8 - The entire chapter has good info, but pay particular attention to pages 85 (bottom), 86 & 88
* Watch: Linux History Explained: https://www.youtube.com/watch?v=MbXofShhMv8

!! Variable Substitution:
* Read: [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] Chapter 7, pp 74-75 and Chapter 25, pp 377-378
*Watch:  Linux Shell Variables: https://www.youtube.com/watch?v=3BZzFRPYU_I

!! Command Substitution:
* Read: [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] Chapter 7, pp 74-75
* Watch: Command substitution using backticks: https://www.youtube.com/watch?v=VOOeXV4HYSA

This is prereq material for shell scripting, which is coming next.  Make good use of Discord if you're having trouble with any of these concepts.  We can also spin up a Zoom meeting and discuss more in person if that'll help.  Just let me know if you'd like to do that.



! Notes

!! Shell Substitutions

Substitutions are transformations the shell performs on input before a command string is fully executed.  When the Unix shell encounters a substitution metacharacter, it will evaluate it to perform any substitutions before executing the full command string. These substitutions allow us to expand filenames, evaluate variables, recall previous commands, or use the result of one command as an argument to another. We already discussed filename substitution (file globbing). History substitution is very useful for recalling previous commands without having to retype it. Variable and command substitution are used extensively in shell scripting and have a useful place on the command line.

As you work with these substitutions, keep in mind the echo command can be used to preview the command string the shell will be executing after all substitutions are performed.  Simply start your command string with {{Command{echo}}} to test it.  We did this in Lab 23, #4 with the cat dog rabbit wombat question.


!!! History substitution

History substitution allows us to quickly recall previously executed commands. Previous commands are saved in a buffer which is written to the file ~/.bash_history upon logout. This allows history to be preserved across sessions and is useful for an administrator who needs to inspect activity of users on the system.
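A few of the most common forms (the command number 42 is arbitrary):

{{{
history     # display the numbered history list
!!          # re-run the previous command
!42         # re-run command number 42 from the history list
!grep       # re-run the most recent command that began with "grep"
echo !$     # reuse the last argument of the previous command
}}}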

* Read:
** [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] Chapter 8 - The entire chapter has good info, but pay particular attention to pages 85 (bottom), 86 & 88
*Watch:
**Linux History Explained:  https://www.youtube.com/watch?v=MbXofShhMv8


!!! Variable substitution

Variable substitution allows data to be stored for later use, much like any other programming language. The main application here is for shell configuration settings and for use in shell scripting. Variable substitution is not used as much as the other substitution forms when working directly on the command line.
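A small example, using a hypothetical variable named color:

{{{
[user@shell ~]$ color=blue
[user@shell ~]$ echo "My favorite color is $color"
My favorite color is blue
[user@shell ~]$ echo "My favorite color is ${color}ish"
My favorite color is blueish
}}}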

* Read:
** [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] Chapter 7, pp 74-75 and Chapter 25, pp 377-378
* Watch:
** Linux Shell Variables: https://www.youtube.com/watch?v=3BZzFRPYU_I


!!! Command substitution

Command substitution allows us to use the result of one command as an argument to another. Backticks or {{Command{$( )}}} are used to execute an inner command first. That inner command (including the backticks) is replaced by its output. The full command string is then executed.

''Important note:''  The backtick ''`'' and the single quote ''''' look rather similar.  Be sure to approach this section with an eye for detail so you don't confuse the two.

Consider this example. I often work remotely and need to remotely power on my home Windows PC to retrieve some files or continue working with them. The wake-on-LAN function built into many motherboards allows for remote wake-up by broadcasting a specially crafted packet containing the system's MAC address to the broadcast address of the local subnet. Unix utilities exist to facilitate this. Their syntax is usually {{Command{//command// //~MAC-address//}}}.

I log into my home unix fileserver from a remote location via SSH. I have my PC's MAC address saved in a text file within {{File{/tmp/}}}:

{{{
# I can see that my PC's MAC address is saved in the text file named win7
root@trillian:/tmp # cat win7
c7:62:00:a2:25:55

# Rather than copy and paste, command substitution is a faster way to get that MAC address added to the command line as an argument to the wake command.  
# The shell will first perform the substitution, replacing `cat win7` with the output of the cat command.  Next, the full command string will be executed.
root@trillian:/tmp # wake `cat win7`

# I can preview the full result of my substitution by prefixing the command string with echo to see what will really be executed by the shell
root@trillian:/tmp # echo wake `cat win7`
wake c7:62:00:a2:25:55
}}}

An even better way involves combining history and command substitution:

{{{
# Preview my file, make sure the MAC address looks good
root@trillian:/tmp # cat win7
c7:62:00:a2:25:55

# History substitution will be used first to replace !! with the last executed command (cat win7).  
# Next, command substitution will replace the backticks with the result of executing the enclosed command (the MAC address)
# Finally, the full wake command string with the MAC address added as an argument will wake up my Windows PC.
root@trillian:/tmp # wake `!!`
}}}

* Read:
** [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] Chapter 7, pp 74-75
* Watch:
** Command substitution using backticks: 
*** https://www.youtube.com/watch?v=VOOeXV4HYSA


! Assignment

!! Complete:
*  Complete [[Lab 19|labs/lab19.pdf]] & [[Lab 20|labs/lab20.pdf]]
! Material

!! The vi Editor

!!! Read :
* Chapter 5, pages 113-120 in the //Linux Bible//
** Optional: Chapter 12 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] also has some good info
* Review notes on [[The vi Editor]] page

!! Shell scripting

!!! Read:
* Chapter 7 in the //Linux Bible//
** Optional: Chapter 24 and 27 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] also has some good info.

!!! Watch:
 - [[Bash scripting basics|https://www.youtube.com/watch?v=NSu4IWlOU7k]]
 - [[Exit Status|https://www.youtube.com/watch?v=Fghnnrbag-w]]
 - [[If statements 1|https://www.youtube.com/watch?v=elrbjYdL-8c]] & [[If statements 2|https://www.youtube.com/watch?v=XDi9W0V-ibA]]


! Notes

!! Unix text Editors

The two main Unix command-line text editors are {{Command{vi}}} and {{Command{nano}}}.  The {{Command{vi}}} editor is the standard and is universally available on every Unix system.  It's extremely powerful, but has a bit of a learning curve and takes some time to get used to.  The {{Command{nano}}} editor isn't as universally available, but can be installed on most Unix systems; it's easier to learn but not nearly as powerful.

If you will be working with the Unix command line in the future, especially professionally, becoming familiar with {{Command{vi}}} will be worthwhile.  Otherwise {{Command{nano}}} will be sufficient for this course.  

I have material available on the {{Command{vi}}} editor on [[this page|The vi Editor]] if you would like to review it.  There is an extra credit lab available there as well.

Using one of the text editors will be necessary to complete the next material:  shell scripting.  Pick whichever one you'd like.


!! Basic Script Building

We don't have enough time to go deep into shell scripting, but it's an important topic to at least mention.  We'll spend this week on the highlights.  All of the chapters in Part 4 are worthwhile if you have the time and interest in further developing your shell scripting skills.  The [[Linux Command Line and Shell Scripting Bible|https://www.amazon.com/Linux-Command-Shell-Scripting-Bible/dp/111898384X]] is another good resource I've used for other classes.


Watch: [[Bash scripting basics|https://www.youtube.com/watch?v=NSu4IWlOU7k]]


!!! Outline
A shell script is an executable file that contains a list of shell commands
* It's an automated way to run a series of commands.
* Need to run a bunch of commands often?  Script it.
* Need to run a few complicated commands only occasionally?  Script it.

The file is interpreted by the specified shell; it is not compiled

There are [[two main shell families|img/shells.jpg]] - bourne and ~C-Shell
* We will be writing our scripts for the bourne/bash shell.
* The first line of the script contains the shebang, {{Command{#!}}}, specifying the path for the interpreter to use.  

A script could be as simple as a single command or a series of commands
eg: Instead of having to type out the find command to scan our home directory for old, large files, we can put it in a script.

{{{
#!/bin/sh
# ./oldcrud.sh

find ~ -atime +52w -size +50M -ls
}}}

- or -

{{{
#!/bin/sh
# ./oldcrud.sh
# This version accepts a command line argument to delete the files it finds.

if [ "$1" = "-d" ]
then
	find ~ -atime +52w -size +50M -delete
else
	find ~ -atime +52w -size +50M -ls
fi
}}}

!!! Executing a shell script:
* Scripts must at least be readable in order to run.  
* A program not in your path must be executed by prefixing the filename with path information, eg:  {{Command{./script.sh}}}
** Execute permission is required when executing scripts in this fashion.
* Scripts can also be executed as arguments to an interpreter, eg:  {{Command{sh script.sh}}}
** If the file is prefixed with the name of the interpreter, only read permission is required.

Three options to run a script:
# Place in $PATH (needs read & execute)
# {{Command{./scriptname.sh}}} (needs read & execute)
# {{Command{sh scriptname.sh}}} -or- {{Command{bash scriptname.sh}}} (needs only read)


!!! Variables
User variables can be set with {{Monospaced{''=''}}}
{{Monospaced{variable=value}}}  (''No spacing!''  Adding a space will result in a syntax error)
and referenced with {{Monospaced{$variable}}} or {{Monospaced{${variable} }}}.  Useful for supplying text immediately after the variable expansion:  {{Monospaced{echo ${size}mb }}}
{{Monospaced{variable=$(command)}}} or {{Monospaced{variable=`command`}}}  can save output from a command to the variable, eg: {{Monospaced{files=$(ls | wc -l)}}}
 -- We will be using a lot of command substitution.
{{Command{unset variable_name}}} - remove a variable
The {{Command{read}}} command will accept input from the keyboard:  {{Command{read //variablename//}}}
 -- Use {{Command{read}}} with the ''-p'' option to supply a prompt.
{{Command{export //variable_name//}}}  - export a variable to the shell for future use
{{Command{readonly //variable_name//}}} - prevent variable from being changed

Example script.  Prompt for user input and then compute a value.
{{{
#!/bin/sh
# ./nums.sh

read -p "Enter a number: " number1
echo you entered $number1

echo -n "Enter a number: " 
read number2
echo you entered $number2

echo
echo -n "Sum is: "
expr $number1 + $number2
}}}


!!! Exit Status

Next we're going to begin to introduce three inter-related concepts: command exit status, the test command, and if statements in that order. We're going to use them in practice in the reverse order.

When constructing an if statement, most often we'll be using the test command to evaluate the conditions (eg, whether a file exists, or whether one number is greater than another). The test command will return an exit status to indicate whether that evaluation was true or false. The if command will then use that exit status to determine what to do.

Every command we execute on the system returns an exit status to indicate whether it ran successfully or not. The exit status is stored in the special shell variable {{Monospaced{''$?''}}}.

Exit status values fall within the range 0 - 255. An exit status of 0 always indicates success. An exit status greater than 0 indicates some form of failure. Having many possible values to indicate failure (any positive integer) allows the program to indicate either the type of failure or where the failure occurred.

Notice the difference between the two executions of the {{Command{id}}} command:

{{{
[user@shell ~]$ id root
uid=0(root) gid=0(root) groups=0(root)
[user@shell ~]$ echo $?
0

[user@shell ~]$ id root2
id: root2: no such user
[user@shell ~]$ echo $?
1
}}}

The first instance completed successfully, so we received an exit status of 0. The second instance returned an unknown user error and an exit status of 1.
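
{{Command{grep}}} behaves the same way; with the ''-q'' option it prints nothing and reports its result only through the exit status (this example assumes no account named //nosuchuser// exists):
{{{
[user@shell ~]$ grep -q '^root:' /etc/passwd
[user@shell ~]$ echo $?
0

[user@shell ~]$ grep -q '^nosuchuser:' /etc/passwd
[user@shell ~]$ echo $?
1
}}}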

Watch:  https://www.youtube.com/watch?v=Fghnnrbag-w


!!! Useful commands for shell scripting

!!!! {{Command{test}}} command:

The {{Command{test}}} command (also known as {{Command{[}}}) allows us to perform comparisons, check strings, or evaluate files on the system. It works by returning an exit status that an if statement can check: true (successful, exit status of 0) or false (not successful, exit status > 0).
''Note:'' The test command has two names: {{Command{test}}} and {{Command{[}}} (square bracket). Both files exist on a unix system and you may see scripts written using either. {{Command{[}}} is the more common way to represent the command. When {{Command{[}}} is used, a closing ] must be placed at the end of the test expression. Remember: The {{Command{[}}} is an actual command! And like all commands, spaces must separate the command from its arguments.

With this example, we're first checking to see whether an item is a regular file. It fails (returns an exit status of 1) because it is not a regular file. Next we check to see whether the item is a directory. An exit status of 0 indicates success, confirming the item is a directory.

{{{
[user@shell ~]$ [ -f /tmp/lab23 ]
[user@shell ~]$ echo $?
1

[user@shell ~]$ [ -d /tmp/lab23 ]
[user@shell ~]$ echo $?
0
}}}

The {{Command{test}}} manpage will be a great resource for learning about the different comparisons or tests available.

{{Warning{''Warning:''  Always quote variables when evaluating strings.  An unquoted empty or multi-word value will break the test. }}}
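
For example, with a hypothetical variable {{Monospaced{$name}}} that happens to be empty:
{{{
name=""
[ "$name" = "bob" ]      # safe: the test simply evaluates to false
[ $name = "bob" ]        # error: this expands to [ = "bob" ] and test complains
}}}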


!!!! The {{Command{expr}}} command can evaluate an expression
* perform integer math & comparisons, eg: {{Command{expr 5 + 5}}}
* verify string input against a regular expression
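
A few quick examples.  For comparisons, {{Command{expr}}} prints 1 when the expression is true and 0 when it is false (the {{Monospaced{*}}} and {{Monospaced{>}}} must be escaped so the shell doesn't interpret them):
{{{
[user@shell ~]$ expr 5 + 5
10
[user@shell ~]$ expr 5 \* 3
15
[user@shell ~]$ expr 7 \> 3
1
}}}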


!!! Control structures:

Shell scripts can utilize control structures common to all programming languages. This allows us to construct more complex scripts which can evaluate conditions or iterate over lists. The most basic of our control structures is the if statement. An if statement has three parts:
* the initial if test
* followed by one or more optional elif statements
* and ending with an optional else condition.

If statement synopsis:
{{{
if condition
then
   commands
elif condition
then
   commands
else
   commands
fi
}}}

If statement example:
{{{
#!/bin/sh
# ./exists.sh

if [ $# -ne 1 ]
then
	echo give me a file
	exit 1
fi

if [ -f $1 ] 
then
	echo "it's a regular file"
elif [ -d $1 ]
then
	echo "it's a directory"
elif [ -e $1 ]
then
	echo "don't know what this is"
else 
	echo "it doesn't even exist"
fi
}}}



!!! Script Writing

When writing your scripts, the following header ''must'' be placed at the top of the file, immediately after the shebang:
{{{
#!/bin/sh
# File name:
# Author:
# Date Written:
# Assignment:
# Purpose:
# Description:
#
#
#
}}}

Tips and good habits to start developing now:  
* Comment your script with {{Monospaced{#}}}.  Comments throughout your script make it easier for others to understand what's going on.
* Long lines should be wrapped at about column 60; this makes scripts easier to read and print.
* Using meaningful variable names makes it easier to understand their purpose.  Use of generic variable names (eg: var1) is bad form and will result in lost points.


! Assignment:

!! The vi Editor:
 - Complete [[Lab A1|labs/labA1.pdf]]
 - This lab is optional for additional vi practice and will be accepted for extra credit.  There is no firm due date, but please try to submit before the end of October.

!! Shell scripting:
- Complete [[Lab 25|labs/lab25.pdf]] & [[Lab 26|labs/lab26.pdf]]
''Note:'' Labs 21-24 were skipped
! Material

!! Shell scripting (continued)

!!! Read:
* Review Chapter 7 in the //Linux Bible//
** This material will also draw from that chapter
* Optional, if you'd like another perspective in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]:
** Chapter 28 - Reading Keyboard Input
** Chapter 32 - Positional Parameters


! Notes

This portion will cover four main concepts:

 - Obtaining input from the user - Chapter 7, page 150
 - Positional Parameters (obtaining input from command-line arguments) - Chapter 7, page 150
 - for loops - Chapter 7, page 150
 - Input Validation (optional)

!! Obtaining input from the user:

Chapter 28 covers the {{Command{read}}} command, along with a lot of advanced detail that is beyond the scope of this class.  

The {{Command{read}}} command will accept input from the keyboard and save it to the specified variable:  {{Command{read //variablename//}}}
 - Use {{Command{read}}} with the ''-p'' option to supply a prompt.

Examples: 

{{{
#!/bin/sh
# ./nums.sh

read -p "Enter a number: " number1
echo you entered $number1

echo -n "Enter a number: " 
read number2
echo you entered $number2

echo
echo -n "Sum is: "
expr $number1 + $number2

read -p "Enter two numbers: " number1 number2

echo
echo -n "Sum is: "
expr $number1 + $number2
}}}


!! Positional Parameters

The read command will allow us to prompt for input.  Positional parameters (variables) will allow our scripts to obtain input from command-line arguments.  Chapter 32 will discuss them in more detail.


Special shell variables:
| !Variable | !Description |
| $0 |Name of the script|
| $1 - $9 |Values of command line arguments 1 - 9|
| $# |Total number of command line arguments|
| $* |Value of all command line arguments|
| $@ |Value of all command line arguments; each quoted if specified as "$@"|
| $? |Exit status of most recent command|
| $$ |Process ID of current process|
| $! |PID of most recent background process|


Command line arguments can be passed to a shell script and stored in $1 through $9
{{{
#!/bin/sh
# ./vars.sh
echo $1 - $2 - $3
echo All command line arguments: $*
echo Number of command line arguments: $#
echo Name of the current command: $0
echo Exit status of the previous command: $?

[root@shell ~]$ sh vars.sh first orange third wolf
first - orange - third
All command line arguments: first orange third wolf
Number of command line arguments: 4
Name of the current command: vars.sh
Exit status of the previous command: 0
}}}


! Assignment

!! Complete
* Complete [[Lab 27|labs/lab27.pdf]], [[Lab 28|labs/lab28.pdf]] & [[Lab 29|labs/lab29.pdf]]

''Be sure to follow the [[Shell script submission requirements]] to submit your shell script labs''

{{Note{''Note:'' Lab 28 pro tip - avoid typing out long commands or scripts.  Typing stuff like that out manually is a great way to introduce typos and break things.  Use copy/paste instead.  If you haven't noticed yet, the [[Linux Shortcuts]] page covers how to copy/paste in putty/Linux.}}}
! Version Control with git

!! Online videos

At least watch these two:
* Fast Intro: https://www.youtube.com/watch?v=hwP7WQkmECE
* A little more detailed intro: https://www.youtube.com/watch?v=USjZcfj8yxE

''Note:''  We have git already installed on the class shell server.  There is no need to install it on your system.

Feel free to create a scratch space in your home directory if you would like to work along with the video.


----
Optional, if you'd like even more detail
* Part 1: https://www.youtube.com/watch?v=hrTQipWp6co
* Part 2: https://www.youtube.com/watch?v=1ibmWyt8hfw
** When working with our ~GitHub clone, don't mess with the usernames, ~URLs, tokens, or SSH keys like the video is suggesting.  The documented steps below will work for you.
* Part 3: https://www.youtube.com/watch?v=Q1kHG842HoI

''Two additional notes about these three videos'':
# You don't have to create a ~GitHub account.  We will be working with our own ~GitHub clone that will mirror a lot of its functionality.
##  If you'd like to create an online repository and follow along with the video, scroll down to the //Setting up your Git repository// section below for how to access your account on our clone.  Just use a different name for your demo repo.
# He uses a GUI IDE tool for code development where we use the command line.  These videos are more to explain the concepts.  The directions below will explain the methods for how to apply these concepts to the command line.
----


!! Introduction to Version Control

Version control is a system that allows you to manage changes to files and code over time. It's an essential tool for anyone who works with code or digital content, as it provides a way to track changes, collaborate with others, and restore previous versions of your work. With version control, you can work on a project with others without worrying about conflicting changes, easily revert to a previous version of your work, and keep track of who made changes and when. 

While its primary use may be for programming projects, it's also useful for any application that involves tracking the evolution of changes to plain text files, such as:

* My resume and lab manuals for other classes are written in ~LaTeX, a plain text markup language, which can then be tracked in a version control repo
* I use the Markdown language for some technical documentation.  This gives basic formatting while keeping the documents plain text
* Our class website is a single HTML file that is checked in to a repo as changes are made.  The comment posted to the repo is also sent as a notification to Discord.
* Many of my server configuration files are tracked in repos
* Infrastructure as Code - text files can be used as the basis for creating and configuring server and network infrastructure
* Diagram as code - python can be used for generating diagrams: https://diagrams.mingrammer.com/


!! Introduction to Git

Git is a free and open-source version control system that is widely used in software development. It was created by Linus Torvalds, the creator of the Linux kernel, and designed to handle projects of all sizes, from small personal projects to large-scale enterprise applications.

Git allows developers to track changes made to code and collaborate on projects with other developers. It does this by creating a repository, which is a directory that contains all files and version history of a project. Developers can make changes to the code, save those changes in a commit, and then push those changes to a remote central repository. Git also allows developers to create branches, which are separate lines of development that can be merged back into the main codebase.

Benefits of a central version control system are:

* ''Collaboration'': Git makes it easy for developers to work collaboratively on a project. Multiple developers can work on the same codebase at the same time, making changes to the same files without interfering with each other's work. Git keeps track of all changes made to the codebase, making it easy to merge changes and resolve conflicts.
* @@''Version Control''@@: Git allows developers to keep track of all changes made to the codebase over time. Each commit in Git represents a snapshot of the code at a particular point in time. This makes it easy to roll back to a previous version of the code if necessary.
* @@''Backup''@@: Git provides a backup system for codebases. All changes made to the codebase are saved in a repository, even if they are later undone. This means that developers can always go back to a previous version of the code if necessary and easily compare different versions of code.
* ''Experimentation'': Git makes it easy to experiment with different ideas without affecting the main codebase. Developers can create branches to work on new features or ideas, and merge them back into the main codebase if they are successful.
* ''Flexibility'': Git can be used for any type of project, regardless of its size or complexity, on any operating system. It is a flexible tool that can be adapted to suit the needs of any project.

We will focus on the concepts above highlighted @@in yellow@@


!!! Basic Git terminology

* @@''Repository''@@ - A repository is a directory where Git stores all files and version history of a project.
* @@''Commit''@@ - A commit is a saved snapshot of the changes made to a file or files in the repository.
* ''Branch'' - A branch is a separate line of development that allows multiple developers to work on the same project without interfering with each other's work.
* ''Merge'' - Merging is the process of combining two branches to create a single branch with the changes from both branches.
* @@''Remote''@@ - A remote is a Git repository that is hosted on a remote server, such as ~GitHub or ~GitLab.
* @@''Clone''@@ - Cloning is the process of copying a repository from a remote server to a local machine.
* @@''Push''@@ - Pushing is the process of uploading changes made to a local repository to a remote repository.
* ''Pull'' - Pulling is the process of downloading changes made to a remote repository to a local repository.
* ''Fork'' - Forking is the process of creating a copy of a repository on a remote server, which can be used for experimentation or collaboration.
* ''Checkout'' - Checking out is the process of switching between different branches or commits in a repository.

We will focus on the terms above highlighted @@in yellow@@


!!! Basic Git Commands
* {{Command{git add //file(s)//}}} - Add a file to a repository
* {{Command{git rm //file(s)//}}} - Remove a file from a repository
* {{Command{git mv //oldfile// //newfile//}}} - Rename a file in a repository
* {{Command{git commit //file(s)//}}} - Record any changes to the repository
* {{Command{git status}}} - Display the status and information about the working tree
* {{Command{git log}}} - Display the commit logs
* {{Command{git remote}}} - Set the location of the remote repository
* {{Command{git push}}} - Send committed changes to the remote repository
* {{Command{git diff}}} - Compare changes between commits as well as between the current working version and committed version of a file.

Linux manpages are available for each of these {{Command{git}}} sub-commands.  Put a dash between the main command {{Command{git}}} and the sub-command you want to learn more about.  For example, {{Command{man git-add}}}


!!! ~GitHub

Git and ~GitHub are related but different tools.  

''Git'' is the command-line tool that allows developers to create a repository, make changes to code, and save those changes in commits. Git is designed to work locally on a developer's machine but it can also be used to collaborate with other developers using remote repositories.

''~GitHub'' is a web-based platform that provides central hosting for Git repositories. It allows developers to create and host remote repositories, collaborate with other developers, and manage projects. ~GitHub provides a user-friendly interface that makes it easy to manage Git repositories.

~GitHub provides developers with additional features, including:
* Cloud storage for your repository
* Web-based interface for managing repositories and projects
* Collaboration features, including pull requests, code review, and issue tracking
* Documentation features, such as a project wiki
* Integration with other tools, including CI/CD pipelines, text editors, and project management tools
* Social features, including profiles, followers, and open-source contributions

We will work with a local, self-hosted clone of ~GitHub called //Gogs//.  Everyone will have an account on our local clone where you can create your own repositories.


!! Setting up your Git repository

* Log into our Gogs instance at https://git.ci233.net:3000/user/login
** Use your campus username
** Your initial password can be found in the file {{File{~///username//.git.txt}}}
*** //username// is your campus username
** You may change your password after logging in.  Be sure to record it for future use.
* Create a new repository from within Gogs.  Click the ''+'' within the orange box
[img[img/gogs01.png]]

* Call your new repository //''scripts''//, configured as follows.  Add a description if you'd like.
[img[img/gogs02.png]]

* You should now have an empty repository in our online Git server.  We must now clone it on our class shell server and add our script files.

{{Warning{''Note:'' If this was a real project, we would most likely clone with SSH and utilize an SSH keypair for authentication.  This is a bit more work to set up, but is more secure and easier to use going forward.  We will instead choose the HTTPS method now and sacrifice some security for ease of use while we're learning new git concepts.  Just be aware some of the authentication choices we are making now are only to allow us to focus more on git and not get distracted with more advanced SSH concepts.  Our authentication choices here are not an endorsement of these methods for future projects.  Use SSH keypairs instead for those.}}}

> Change to your {{File{bin}}} directory: {{Command{ cd ~/bin/ }}}
> A README file is a place to document your repository.  Content is recorded in [[Markdown|https://www.markdownguide.org/basic-syntax/]].  Create an empty file: {{Command{ touch README.md }}}
> Initialize the new repository: {{Command{ git init }}}
> Add the README to the repo: {{Command{ git add README.md }}}
> Add your current Lab 29 scripts to the repo: {{Command{ git add ci233-lab29-q0?.sh }}}
> Display the status of changes awaiting commit.  You should see the README.md and your three script files listed as //new file//: {{Command{ git status }}}
> Commit your changes with the message //first commit//: {{Command{ git commit -m "first commit" }}}
> Set the remote location to push your changes to.  ''Be sure to change my username to yours.'' {{Command{ git remote add origin https://git.ci233.net:3000/merantn/scripts.git }}}  
> Configure git to cache your credentials in memory: 
> &nbsp;&nbsp;&nbsp; {{Command{ git config &#45;-global credential.helper cache }}}
> &nbsp;&nbsp;&nbsp; {{Command{ git config &#45;-global credential.helper 'cache &#45;-timeout=3600' }}}
> ''Optional'' - If you prefer {{Command{nano}}} instead of {{Command{vi}}}, set git to use nano as the default editor: {{Command{ git config &#45;-global core.editor nano }}}
> For simplicity while we're learning, we'll do all of our work out of the master branch: {{Command{ git config &#45;-global push.default current }}}
> Send your changes to the remote repo:  {{Command{ git push }}}

Here is my run through these steps:

{{Commands{
[merantn@lab ~/bin]$ ''git init''
Initialized empty Git repository in /home/merantn/bin/.git/

[merantn@lab ~/bin]$ ''git add README.md''

[merantn@lab ~/bin]$ ''git add ci233-lab29-q0?.sh''

[merantn@lab ~/bin]$ ''git status''
 # On branch master
 #
 # Initial commit
 #
 # Changes to be committed:
 #   (use "git rm &#45;-cached <file>..." to unstage)
 #
 #       new file:   README.md
 #       new file:   ci233-lab29-q01.sh
 #       new file:   ci233-lab29-q02.sh
 #       new file:   ci233-lab29-q03.sh
 #

[merantn@lab ~/bin]$ ''git commit -m "first commit"''
[master (root-commit) 021c95d] first commit
 Committer: Nick Merante <redacted>
Your name and email address were configured automatically based
on your username and hostname. Please check that they are accurate.
You can suppress this message by setting them explicitly:

    git config &#45;-global user.name "Your Name"
    git config &#45;-global user.email you@example.com

After doing this, you may fix the identity used for this commit with:

    git commit &#45;-amend &#45;-reset-author

 4 files changed, 3 insertions(+)
 create mode 100644 README.md
 create mode 100755 ci233-lab29-q01.sh
 create mode 100755 ci233-lab29-q02.sh
 create mode 100755 ci233-lab29-q03.sh

[merantn@lab ~/bin]$ ''git remote add origin https://git.ci233.net:3000/merantn/scripts.git''

[merantn@lab ~/bin]$ ''git config &#45;-global credential.helper cache''

[merantn@lab ~/bin]$ ''git config &#45;-global credential.helper 'cache &#45;-timeout=3600'''

[merantn@lab ~/bin]$ ''git config &#45;-global push.default current''

[merantn@lab ~/bin]$ ''git push''
Username for 'https://git.ci233.net:3000': merantn  
Password for 'https://merantn@git.ci233.net:3000': 
Counting objects: 6, done.
Compressing objects: 100% (2/2), done.
Writing objects: 100% (6/6), 416 bytes | 0 bytes/s, done.
Total 6 (delta 0), reused 0 (delta 0)
To https://git.ci233.net:3000/merantn/scripts.git
 * [new branch]      HEAD -> master

}}}


You can now view your repository online and see the files you've just checked in:

[img[img/gogs03.png]]


!!! Committing changes to the repository

New scripts can be added to the repository with the {{Command{git add}}} command above, and changes to existing files can be committed with {{Command{git commit}}}.  These commits serve as checkpoints and backups.  It's wise to check in changes at important milestones or before making large adjustments so you have a known-good point to roll back to if things go awry.  

Here is an example of a change being made to an existing file.  I forgot to add the shebang to the top of one of my scripts.  I will first make that change, and then check the new file into the repository:

1. Update the script.  It now contains the missing shebang:
{{Commands{
[merantn@lab ~/bin]$ ''head ci233-lab29-q03.sh''
 #!/bin/sh
 # File name: ci233-lab29-q03.sh
 # Author: Nick Merante
 # Date Written: Oct 4, 2024
 # Assignment: Lab 29
 # Purpose: Calculate product of two integers
 # Description:
 #   - Prompt user for input of two integers
 #   - Calculate and display product
 #
}}}

2. We can compare my new version with the version currently checked into the repo:
* Lines beginning with a {{Monospaced{''+''}}} represent additions
* Lines beginning with a {{Monospaced{''-''}}} represent deletions.
Notice the {{Monospaced{''+''}}} before the new shebang I just added:

{{{
[merantn@lab ~/bin]$ git diff ci233-lab29-q03.sh
diff --git a/ci233-lab29-q03.sh b/ci233-lab29-q03.sh
index 806d2a4..bc66030 100755
--- a/ci233-lab29-q03.sh
+++ b/ci233-lab29-q03.sh
@@ -1,3 +1,4 @@
+#!/bin/sh
 # File name: ci233-lab29-q03.sh
 # Author: Nick Merante
 # Date Written: Oct 4, 2024
}}}

3. Commit your change by executing one of these methods:
* Committing just the changed files you specify: {{Command{ git commit ci233-lab29-q03.sh }}}
* Committing all changes: {{Command{ git commit -a }}}

4. A text editor will open for you to add a comment to your commit.  This is a place to describe the changes you're making.  Comments help track what changes were made and why they were made.  
* Add your comment
{{{
Adding missing shebang
# Please enter the commit message for your changes. Lines starting
# with '#' will be ignored, and an empty message aborts the commit.
#
# Committer: Nick Merante <merantn@lab.ci233.net>
#
# On branch master
# Changes to be committed:
#   (use "git reset HEAD <file>..." to unstage)
#
#       modified:   ci233-lab29-q03.sh
#
}}}

* Save and quit the editor to finalize your commit
{{{
[master 7690513] Adding missing shebang
 Committer: Nick Merante <merantn@lab.ci233.net>
Your name and email address were configured automatically based
on your username and hostname. Please check that they are accurate.
You can suppress this message by setting them explicitly:

    git config --global user.name "Your Name"
    git config --global user.email you@example.com

After doing this, you may fix the identity used for this commit with:

    git commit --amend --reset-author

 1 file changed, 1 insertion(+)
}}}

5. Push your changes to the online repository with {{Command{ git push }}}

{{Commands{
[merantn@lab ~/bin]$ ''git push''
Counting objects: 5, done.
Compressing objects: 100% (3/3), done.
Writing objects: 100% (3/3), 301 bytes | 0 bytes/s, done.
Total 3 (delta 2), reused 0 (delta 0)
To https://git.ci233.net:3000/merantn/scripts.git
   14f729c..7690513  HEAD -> master
}}}


!!! Reverting a failed change to a file in the working directory

Let's suppose I make a change to one of my files that doesn't work out.  I haven't committed that change yet, and I want to revert my working copy to the last checked-in version.

1. Make your change

For simple demonstration purposes, I added a new line to the description (highlighted yellow below).  A more common use of this would be to undo several failed changes throughout a piece of code:

{{Commands{
[merantn@lab ~/bin]$ ''head ci233-lab29-q03.sh''
 #!/bin/sh
 # File name: ci233-lab29-q03.sh
 # Author: Nick Merante
 # Date Written: Oct 4, 2024
 # Assignment: Lab 29
 # Purpose: Calculate product of two integers
 # Description:
 #   - Prompt user for input of two integers
 #   - Calculate and display product
 #   @@- some major goof I want to undo@@
}}}
{{{
[merantn@lab ~/bin]$ git diff ci233-lab29-q03.sh
diff --git a/ci233-lab29-q03.sh b/ci233-lab29-q03.sh
index bc66030..3ffd4a5 100755
--- a/ci233-lab29-q03.sh
+++ b/ci233-lab29-q03.sh
@@ -7,4 +7,4 @@
 # Description:
 #   - Prompt user for input of two integers
 #   - Calculate and display product
-#
+#   - some major goof I want to undo
}}}

2. Undo that change, reverting to the last checked-in version.  Notice my addition to the Description is now missing and there are no differences between the local and checked-in copies:
{{Commands{
[merantn@lab ~/bin]$ ''git checkout &#45;- ci233-lab29-q03.sh''

[merantn@lab ~/bin]$ ''head ci233-lab29-q03.sh''
 #!/bin/sh
 # File name: ci233-lab29-q03.sh
 # Author: Nick Merante
 # Date Written: Oct 4, 2024
 # Assignment: Lab 29
 # Purpose: Calculate product of two integers
 # Description:
 #   - Prompt user for input of two integers
 #   - Calculate and display product
 #

[merantn@lab ~/bin]$ ''git diff ci233-lab29-q03.sh''
[merantn@lab ~/bin]$
}}}


!!! Comparing differences between previous commits:

Differences in a file between two previous commits can be compared.

1. First display the commit log for your file to obtain the commit ID, highlighted in yellow below
{{Commands{
[merantn@lab ~/bin]$ ''git log ci233-lab29-q03.sh''
commit @@7690513d3642de5a83342dd16c85dcf506cdf95e@@
Author: Nick Merante <merantn@lab.ci233.net>
Date:   Sun Sep 5 15:00:23 2024 -0500

    Adding missing shebang

commit @@14f729c85b6ea4156db49ccf69583f7822a951c5@@
Author: Nick Merante <merantn@lab.ci233.net>
Date:   Sun Sep 5 14:44:25 2024 -0500

    add header

commit @@d7105d4ae4f04807b1fa9f30171677a396a26de8@@
Author: Nick Merante <merantn@lab.ci233.net>
Date:   Sun Sep 5 14:35:53 2024 -0500

    first commit
}}}

2. Use the commit messages to identify the points in time you would like to compare, providing the two commit ~IDs and the filename to the next command.  Below, I can see the line that was removed and the lines which were then added.
{{{
[merantn@lab ~/bin]$ git diff d7105d4ae4f04807b1fa9f30171677a396a26de8 14f729c85b6ea4156db49ccf69583f7822a951c5 ci233-lab29-q03.sh
diff --git a/ci233-lab29-q03.sh b/ci233-lab29-q03.sh
index 340c263..806d2a4 100755
--- a/ci233-lab29-q03.sh
+++ b/ci233-lab29-q03.sh
@@ -1 +1,9 @@
-echo fake demo script 3
+# File name: ci233-lab29-q03.sh
+# Author: Nick Merante
+# Date Written: Oct 4, 2024
+# Assignment: Lab 29
+# Purpose: Calculate product of two integers
+# Description:
+#   - Prompt user for input of two integers
+#   - Calculate and display product
+#
}}}


!!! README files

The file {{File{README.md}}} can contain documentation for your project in [[Markdown format|https://www.markdownguide.org/basic-syntax/]].  This content will also be displayed in the git web UI for the files in that directory.

1. Edit the {{File{README.md}}} file and add a synopsis for each of your scripts, similar to my example below.  Notice the use of [[Markdown|https://www.markdownguide.org/basic-syntax/]] in the document.
{{{
[merantn@lab ~/bin]$ cat README.md
# ci233 Scripts

## Lab 29
1. `ci233-lab29-q01.sh` - Turn up to 9 words into uppercase
2. `ci233-lab29-q02.sh` - Accept two integers as arguments and compute their product
3. `ci233-lab29-q03.sh` - Prompt for two integers and compute their product
}}}

2. Commit your new {{File{README.md}}} file

3. Push the changes to the repository

4. Observe the new Readme in the web UI:

[img[img/gogs04.png]]


! Assignment

1. Be sure [[Lab 29|labs/lab29.pdf]] has been completed
2. Get familiar with {{Command{git}}} and our online repository
3. Check in your ''Lab 29'' scripts and a proper {{Monospaced{''README''}}} file following the instructions above
4. Experiment with committing and rolling back changes.  We will use this more with future material

Also Complete [[Lab 30|labs/lab30.pdf]] after working with your repo.

! More shell scripting:

!! Read:
* Review Chapter 7 in the //Linux Bible//
** This material will also draw from that chapter
* Optional, if you'd like another perspective in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]:
** Chapter 33 - For Loops
** Chapter 30 - Troubleshooting
*** This chapter is optional, but may be helpful, especially the first five pages.
!! Watch:
* [[Bash for-loops 1|https://www.youtube.com/watch?v=sIYmF32Ic8s]] & [[Bash for-loops 2|https://www.youtube.com/watch?v=HLFenK13VDY]]


! Notes

!! For loops

The bash for-loop is a control structure which allows us to iterate through items in a list.  The list can be populated statically (from strings you define directly) or as a result of any form of shell substitution (variable, file, or command).  Within the for loop, your commands will then be executed for each item in the list. 

Watch:  [[Bash for-loops 1|https://www.youtube.com/watch?v=sIYmF32Ic8s]] & [[Bash for-loops 2|https://www.youtube.com/watch?v=HLFenK13VDY]]

For Loop synopsis:
 - //list// is a list of items to iterate over
 - //variable// is a variable to store the current list item in as we run through the commands within the for loop.   
{{{
for variable in list
do
   commands
done
}}}
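
The list can also come from command substitution.  A minimal sketch (the command used here is just an example):
{{{
#!/bin/sh
# Hypothetical example: iterate over the output of a command

for user in $(cut -d: -f1 /etc/passwd)
do
        echo "Found account: $user"
done
}}}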

For example, this small script will iterate over a list of PNG files in the directory to change their extension from .png to .jpg
{{{
#!/bin/sh
# ./renpng.sh

# List the PNG files in the directory.  We close STDOUT and STDERR because we're only interested in the exit status.
ls *.png >&- 2>&-

# This is a short-circuited conditional to evaluate the exit status of the ls command.  If there are no PNG files in the current
# directory, the exit status from the ls command will be non-zero.  The && list works like a truth table, executing the commands
# from left to right until one returns a non-zero exit status.  If there are no PNG files (exit status != 0), display an error
# message and exit the script.  This basic test ensures our script exits gracefully if there is nothing to rename.
[ $? -ne 0 ] && echo Error: No PNG files && exit 1

# Iterate over each png file in the directory, saving the file to operate on in the variable $png
for png in *.png
do
	# The basename command removes an extension from a filename.  We're stripping off the png extension so we can add .jpg later
        filename=$(basename $png .png)
        mv $png ${filename}.jpg
        echo renaming $png to ${filename}.jpg
done
}}}


The {{Command{break}}} command executed within a loop will terminate it.   
The {{Command{continue}}} command will start the next iteration of a loop.
These are commonly combined with if-statements to skip an item in the list or end the loop early.

For example, my script to collect your labs contains this at the top of the for-loop.  The {{Command{continue}}} skips any lab I have already collected and moves on to the next file.

{{{
cd /opt/pub/ci233/submit/
for file in ci233-lab*.pdf
do
        hash=$(md5sum "$file" | cut -c 27-32)
        base=$(basename "$file" .pdf)
        [ -f $RSL_DIR/ci233/collected/"$base"-v?-${hash}.pdf ] && continue

	# more processing commands follow
done
}}}
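
{{Command{break}}} works the same way.  A small sketch (hypothetical: stop scanning as soon as the first directory is found):
{{{
#!/bin/sh
# Hypothetical example: end the loop at the first directory encountered

for item in *
do
        if [ -d "$item" ]
        then
                echo "First directory found: $item"
                break
        fi
done
}}}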


!! While loops

Here's a brief synopsis and example of a while loop.  We don't have time to cover them and none of our scripts will require them.  If-statements and for-loops are the two most useful to know.
 

While Loop:
{{{
while  condition
do
   commands
done
}}}

{{{
#!/bin/sh
# Create 100 png files

count=100
while [ $count -gt 0 ]
do
        string=$(printf "%04i" $count)
        dd if=/dev/urandom of=IMG_${string}.png bs=5k count=1 2>&-
        count=$(expr $count - 1)
done
}}}

The {{Command{break}}} command executed within a loop will terminate it.   
The {{Command{continue}}} command will start the next iteration of a loop.


!! Extra Commands:

* The {{Command{md5sum}}} command can be used to calculate file checksums
* The {{Command{stat}}} command can pull extra details about a file and allow for extra formatting options:
* The {{Command{date}}} command can display the current date and time in various formats.  Check its manpage for a list of format codes.

{{{
# Obtain a unix timestamp for the file's modification time:
stat -c '%Y' ci233-lab10-browng.pdf
1478659277
# Convert that modification time to a human-readable format:
date +"%m/%d/%y @ %H:%M" --date=@1478659277
11/05/16 @ 21:44

# Use in a shell script with command substitution:
mtime=$(stat -c '%Y' $file)
mdate=$(date +"%m/%d/%y @ %H:%M" --date=@$mtime)

# Test it:
echo "$mtime / $mdate" 
}}}


!! Input Validation

We've demonstrated two ways to gather script input directly from the user:  as command line arguments and via the read command.

Basic error checking of user input is always preferred.  Never rely upon your users to enter input appropriately.  It is far better to catch any potential issues yourself instead of having the script run into a syntax error.  For example, if your script requires a command line argument but one is not provided, things may break down.  If you ask for a number, what will your program do if a user enters a string of letters instead?  Not properly validating input is also the basis for a lot of attacks, especially against web servers.

A basic test to ensure a required command line argument was provided, exiting the script gracefully with an error message if it wasn't, is a good place to start.

Basic tests should be done any time you are gathering input from a user.  Some examples:

* Does a file actually exist?
* Is a username valid on the system?
* Is an IP address in the proper format?
* Is a number actually only composed of digits?

The test command can evaluate whether a file exists.  The id command can evaluate whether a user is valid on the system.  
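
A minimal sketch combining those two checks (the messages and usage text here are just examples):
{{{
#!/bin/sh
# Hypothetical example: confirm the first argument names a valid user before continuing

[ $# -ne 1 ] && echo "Usage: $0 username" && exit 1

# id prints nothing here because we closed its output; we only care about its exit status
id "$1" >&- 2>&-
[ $? -ne 0 ] && echo "Error: $1 is not a valid user" && exit 1

echo "$1 is a valid user, continuing..."
}}}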

The expr utility can be used to validate input in our scripts based on a regular expression.  File globbing is a way to concisely match a group of files based on wildcards.  Similarly, regular expressions are a concise way to represent strings using wildcards and operators.  Understanding and constructing regular expressions is beyond the scope of this course.  The ideas are similar to file globbing but the implementation is different.  If you're curious, regular expressions are discussed in the textbook in Chapter 19.  

We'll be using the {{Command{expr}}} utility to compare two strings.  Addition with {{Command{expr}}} is composed of two operands (our input) and an operator (the + symbol), eg:  {{Command{expr 3 + 5}}}

The {{Command{expr}}} utility can also be used for string comparisons.  When comparing strings with {{Command{expr}}}, the first operand is the string we are checking.  This can either be a static word or come from a variable.  The {{Command{expr}}} operator for string comparisons is a : (colon).  The second operand is a pattern resembling the string we are expecting.

For example, the following command performs a comparison between the string on the left and the pattern on the right:

{{{
expr 123 : '^[0-9]\{1,\}$'
}}}

The pattern {{Command{^[0-9]\{1,\}$}}} requires a string composed of one or more digits. The anchors ^ and $ specify that no other characters can be present in the string. The command above returns the number of characters matched by the pattern, in this case 3.

{{{
$ expr 123 : '^[0-9]\{1,\}$'
3
}}}


If I add something besides a number to my string, the pattern will not match so the character count will be 0

{{{
$ expr a123 : '^[0-9]\{1,\}$'
0
}}}

We can then evaluate the output of the expr command to determine whether we received the correct type of input. An example of this follows:

{{{
#!/bin/sh
# ./adder.sh

if [ $# -ne 2 ]
then
        echo "Usage: $0 integer1 integer2"
        exit 1
fi

# Test to make sure our arguments only contain digits.  Notice the use of anchors.  
# A positive number will be saved to the variables if there is a match
match1=$( expr $1 : '^[0-9]\{1,\}$' )
match2=$( expr $2 : '^[0-9]\{1,\}$' )

# Make sure both matches contain positive numbers
if [ $match1 -gt 0 -a $match2 -gt 0 ]
then
        sum=$(expr $1 + $2)
        echo
        echo $1 + $2 = $sum
        echo
else
        echo "You did not enter 2 integers"
        echo "Usage: $0 integer1 integer2"
        exit 1
fi
}}}

!! Extra commands

Some extra commands will be helpful for completing these labs:

* {{Command{file}}} - determine file type



! Assignment

* Complete:
** Extra credit [[Lab A3|labs/labA3.pdf]] & [[Lab A4|labs/labA4.pdf]] are available for more practice
** Due Saturday: [[Lab 31|labs/lab31.pdf]]
** Due next Wednesday: [[Lab 32|labs/lab32.pdf]]


Be sure to check your new scripts into the git repository, as was done for Lab 30.
! Material

!! Read:
* Chapter 3 in the Linux Bible
** Pages 81 - 88

!! Watch:
* [[Customizing Your Terminal: .bash_profile and .bashrc files|https://www.youtube.com/watch?v=vDOVEDl2z84]]
* [[Customizing Your Terminal: Adding Color and Information to Your Prompt|https://www.youtube.com/watch?v=LXgXV7YmSiU]]
* [[Creating Aliases for Commands|https://www.youtube.com/watch?v=0liXeoADU6A]]


! Notes

!! Working with the shell

A user automatically executes a shell when they log into a Unix system.  A shell is a special type of program that receives and interprets commands.  This shell is what users interact with on the command line.  You are then logged out when the shell terminates.

The shell you are using is specified as the last column in the {{File{/etc/passwd}}} file.  Bash is the standard default, but many others exist.

The different shells available on the system are usually listed in the file {{File{/etc/shells}}}.
* {{Command{/sbin/nologin}}} : A special shell used for disabled accounts
** You'll see service accounts in {{File{/etc/passwd}}} with this shell.  Users should never be able to log into those service accounts
** A service account with a valid shell is a major red flag


!!! Bash Customization

RTFM!  {{Command{man bash}}}

!!!! Shell initialization and configuration scripts - executed upon login
Your shell environment can be customized by the system administrator and by the user.  The sysadmin may have some site-specific changes to make.  For example, I change the default umask for everyone on our class shell server.  Each user may customize their shell environment either cosmetically, such as by changing the shell prompt, or functionally, such as by changing the PATH or adding command aliases.

The shell environment is customized through a series of script files.  They are executed in the following order so the admin can set defaults that the users can override with their own customizations.  These scripts work like any of the scripts we've been writing for this class.  Any commands entered will be executed when these scripts run.

Interactive login shell execution sequence.  When you first log in to the system, the following are executed (if they exist):
* {{File{/etc/profile}}} contains general system defaults 
* All scripts in the directory {{File{/etc/profile.d/}}}
** Putting individual settings in their own files makes it easier to maintain the changes
** The file {{File{/etc/profile.d/umask.sh}}} sets our default umask
* {{File{~/.bash_profile}}} is controlled by the user for their custom changes
** Put things you want to run during a new login session in this file.  Items in this file will not be executed when a new (non-login) shell instance is started.
* {{File{~/.bashrc}}} is executed only in interactive shells.  This file may contain extra functions and aliases.
** Put settings (like aliases and prompt changes) in this file so they will be activated if a new shell session is run
* {{File{~/.profile}}} may exist instead of {{File{~/.bash_profile}}} on some systems
* //User disconnects//
* {{File{~/.bash_logout}}} will execute when the user logs out.

{{Command{ source //file// }}} (or {{Command{ . //file// }}})
<<<
Read and execute commands from //file// in the current shell environment.  Apply changes within an environment script file to the current login shell.
<<<

!!!! Example:
Suppose each time a user logs in, we want to display their last three logins to the screen.  The following would be added to either the site-wide {{File{/etc/profile.d/}}} directory or appended to their {{File{~/.bash_profile}}}.  We would choose {{File{/etc/profile.d/}}} if we didn't want the users to be able to remove it.  We would choose the user's {{File{~/.bash_profile}}} if we wanted users to be able to override it.  We would not put it in {{File{~/.bashrc}}} because we only want this information displayed when the users log in, not when they just run a new shell.

{{{
last -3 $USER
}}}


!!!! Default user dotfiles
The directory {{File{/etc/skel}}} contains default copies of  {{File{~/.bash_profile}}}, {{File{~/.bashrc}}}, and {{File{~/.bash_logout}}}.  These can be copied to the home directories of new users so they have defaults available for their accounts.


!!!! Other shell configuration files:
Readline library - A library for reading a line of input from the terminal 
* Configured by {{File{/etc/inputrc}}} and {{File{~/.inputrc}}}
* These files mostly control additional key bindings
* I like to enable the ~PageUp and ~PageDown keys on other systems for fast command recall.  They're not enabled by default on Debian.

{{File{/etc/~DIR_COLORS}}}
<<<
Configure directory listing colorization
<<<
Disable ls colorization on the ~VMs.  Sometimes color makes it hard to read the text
* Edit {{File{/etc/~DIR_COLORS}}}
* change {{Monospaced{ ''tty'' }}} to {{Monospaced{ ''none'' }}}

{{File{/etc/motd}}} - A ''m''essage ''o''f ''t''he ''d''ay to display to users after they log in


!!! Aliases

Command aliases provide a way to execute long command strings with fewer keystrokes.  Additional options and arguments can be added to an alias.  For example, running {{Command{ l. -l }}} will display all files which begin with a dot in long-listing format.  The {{Command{ l. }}} alias will be translated to {{Command{ ls -d .* }}} and then the {{Monospaced{ -l }}} option will be added.

Display currently defined aliases:  {{Command{alias}}}

Set an alias:  {{Command{alias name='long_cmd -abcd | next_command -efgh'}}}

Standard aliases
* {{Command{ll}}} - Aliased to {{Command{ ls -l }}} on most systems
* {{Command{l.}}} - Aliased to {{Command{ ls -d .* }}} - Display //only// files which begin with a dot.

Override aliases:
* The {{Command{ rm }}} command is usually aliased to {{Command{ rm -i }}} on most systems so you are prompted before deleting each file.  
* Prefix your command with a \ (backslash) to suppress this alias expansion and execute {{Command{ rm }}} normally:  {{Command{ \rm foo }}}

Remove an alias for the current login session:  {{Command{unalias //alias//}}}

{{Command{which}}} and {{Command{type}}}
* These commands will display how each argument would be interpreted if executed as a command
* Aliases will be translated to their actual commands so you know what is really being executed
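
A quick demonstration of defining an alias and inspecting it with {{Command{type}}} (assuming bash's usual output format):
{{{
[user@shell ~]$ alias l.='ls -d .*'
[user@shell ~]$ type l.
l. is aliased to `ls -d .*'
}}}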


!!! Core shell options:

Stored in the {{Monospaced{$SHELLOPTS}}} variable
Manipulated with the set command

Enable a shell option:  {{Command{set -o //option//}}}
Disable a shell option:  {{Command{set +o //option//}}}

Examples: 

Toggle command line input method between vi and emacs:
{{Command{set -o vi}}}
{{Command{set -o emacs}}}

Enable noclobber:
With noclobber enabled, an existing file will not be overwritten by redirecting STDOUT to a file 
{{Command{set -o noclobber}}}
{{Command{set +o noclobber}}}
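
A quick illustration (the exact error text can vary slightly between bash versions; {{Monospaced{>|}}} is the standard way to force an overwrite while noclobber is set):
{{{
[user@shell ~]$ set -o noclobber
[user@shell ~]$ echo one > notes.txt
[user@shell ~]$ echo two > notes.txt
bash: notes.txt: cannot overwrite existing file
[user@shell ~]$ echo two >| notes.txt
}}}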

!!! Extra shell options:

{{Command{shopt}}} - Display a list of available options
 ''-s'' to enable an option
 ''-u'' to disable an option

Examples:
* {{Command{ shopt -s cdspell }}} - minor errors in the spelling of a directory component in a cd command are corrected.
* {{Command{ shopt -s checkjobs }}} - lists the status of any stopped and running jobs before exiting an interactive shell.


!!! Environment & Shell variables

In bash, variables are defined on the command line with this syntax:  {{Command{variable=value}}}
By default all variables are local and will not be inherited by child processes

The {{Command{export}}} command will make a variable global and accessible to any child process
{{Command{export}}} can be used when defining a global variable.  eg:  {{Command{export foo=bar}}}
Or, can be used to elevate a currently defined variable to global.  eg:  {{Command{foo=bar ; export foo}}}
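
A quick way to see the difference is to spawn a child shell and check what it inherits (the variable name here is arbitrary):
{{{
[user@shell ~]$ foo=bar
[user@shell ~]$ bash -c 'echo foo is: $foo'
foo is:
[user@shell ~]$ export foo
[user@shell ~]$ bash -c 'echo foo is: $foo'
foo is: bar
}}}
The single quotes prevent the current shell from expanding {{Monospaced{$foo}}}; the child shell performs the expansion, so we see exactly what it inherited.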

{{Command{set}}} will display all currently set variables

{{Command{unset}}} can be used to unset a variable


The shell environment can be manipulated through variables:

For example, the {{Monospaced{$PATH}}} and the prompt variable, {{Monospaced{$~PS1}}}:

The prompt:

* ~PS1 - Primary prompt string is stored in this variable
* Other secondary PS variables exist.
** See https://ss64.com/bash/syntax-prompt.html for more details.

Display your current prompt string: {{Command{ echo $~PS1 }}}

The last character in your prompt - {{Monospaced{ ''#'' }}} vs {{Monospaced{ ''$'' }}}
* {{Monospaced{ ''$'' }}} at the end of the prompt means the user is a regular, unprivileged user.
* {{Monospaced{ ''#'' }}} at the end of the prompt means the user is a superuser.  
* This tagging makes it easier to see your privilege level.

Customized prompts I like for this class.  This prompt makes it easier to see the full path to the current directory and show long command strings on the projector.  The second version adds color.
{{{
PS1='\n[\u@\h \w] :\n\$ '
PS1='\n[\e[1;31m\u\e[m@\e[1;33m\h\e[m \w] :\n\$ '
}}}
Changing the ~PS1 variable by running one of the above commands applies the change immediately to your login session.  It will be reset when a new shell executes.  Add the change to your {{File{~/.bashrc}}} to make it permanent.


!!! Functions:

Functions can provide a shortcut to more complicated command sequences.  They can be used in shell scripts or directly from the command line.

Append to your {{File{~/.bashrc}}}:
{{{
function bak() {
        # This function creates a backup in the current working directory of any single file passed as an argument.
        # Example: bak test.sh
        cp "$@" "$@".`date +%y%m%d:%H%M`.bak
}
}}}

After adding this function to your {{File{~/.bashrc}}}, activate the new version by running  {{Command{ . ~/.bashrc}}} or reloading the shell.


!!! History substitution:

* Your command history is saved in a buffer for the current login session
* By default, the buffer is appended to {{File{~/.bash_history}}} upon logout
* You can then display the current session's history buffer with the {{Command{history}}} command.

There are history configuration variables to change this behavior:
  - {{Command{set | grep HIST}}}
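
For example, something like this could be appended to {{File{~/.bashrc}}} to keep a larger history (the values are arbitrary; HISTSIZE, HISTFILESIZE, and HISTCONTROL are standard bash variables):
{{{
HISTSIZE=5000              # commands kept in the current session's buffer
HISTFILESIZE=10000         # lines kept in ~/.bash_history
HISTCONTROL=ignoredups     # skip consecutive duplicate commands
}}}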
 
! Assignment

* Complete [[Lab 34|labs/lab34.pdf]]

! Material

!! Read:
* Chapter 3 in the Linux Bible
** Pages 81 - 88

!! Watch:
* [[Customizing Your Terminal: .bash_profile and .bashrc files|https://www.youtube.com/watch?v=vDOVEDl2z84]]
* [[Customizing Your Terminal: Adding Color and Information to Your Prompt|https://www.youtube.com/watch?v=LXgXV7YmSiU]]
* [[Creating Aliases for Commands|https://www.youtube.com/watch?v=0liXeoADU6A]]


! Notes

!! Working with the shell

A user automatically execute a shell when they log into a Unix system.  A shell is a special type of program that receives and interprets commands.  This shell is what users interact with on the command line.  You are then logged out when the shell terminates.

The shell you are using is specified as the last column in the {{File{/etc/passwd}}} file.  Bash is the standard default, but many others exist.

The different shells available on the system are usually listed in the file {{File{/etc/shells}}}.
* {{Command{/sbin/nologin}}} : A special shell used for disabled accounts
** You'll see service accounts in {{File{/etc/passwd}}} with this shell.  Users should never be able to log into those service accounts
** A service account with a valid shell is a major red flag


!!! Bash Customization

RTFM!  {{Command{man bash}}}

!!!! Shell initialization and configuration scripts - executed upon login
Your shell environment can be customized by the system administrator and by the user.  The sysadmin may have some site-specific changes to make.  For example, I change the default umask for everyone on our class shell server.  Each user may customize their shell environment either cosmetically, such as by changing the shell prompt, or functionally, such as by changing the PATH or adding command aliases.

The shell environment is customized through a series of script files.  They are executed in the following order so the admin can set defaults that the users can override with their own customizations.  These scripts work line any of the scripts we've been writing for this class.  Any commands entered will be executed when these scripts run.

Interactive login shell execution sequence.  When you first log in to the system, the following are executed (if they exist):
* {{File{/etc/profile}}} contains general system defaults 
* All scripts in the directory {{File{/etc/profile.d/}}}
** Putting individual settings in their own files makes it easier to maintain the changes
** The file {{File{/etc/profile.d/umask.sh}}} sets our default umask
* {{File{~/.bash_profile}}} is controlled by the user for their custom changes
** Put things you want to run during a new login session in this file.  Items in this file will not be executed if a new shell instance is executed.
* {{File{~/.bashrc}}} is executed only in interactive shells.  This file may contain extra functions and aliases.
** Put settings (like aliases and prompt changes) in this file so they will be activated if a new shell session is run
* {{File{~/.profile}}} may exist instead of {{File{~/.bash_profile}}} on some systems
* //User disconnects//
* {{File{~/.bash_logout}}} will execute when the user logs out.

{{Command{ source //file// }}} (or {{Command{ . //file// }}})
<<<
Read and execute commands from //file// in the current shell environment.  Apply changes within an environment script file to the current login shell.
<<<

!!!! Example:
Suppose each time a user logs in, we want to display their last three logins to the screen.  The following would be added to either the site-wide {{File{/etc/profile.d/}}} directory or appended to their {{File{~/.bash_profile}}}.  We would choose {{File{/etc/profile.d/}}} if we didn't want the users to be able to remove it.  We would choose the user's {{File{~/.bash_profile}}} if we wanted users to be able to override it.  We would not put it in {{File{~/.bashrc}}} because we only want this information displayed when the users log in, not when they just run a new shell.

{{{
last -3 $USER
}}}


!!!! Default user dotfiles
The directory {{File{/etc/skel}}} contains default copies of  {{File{~/.bash_profile}}}, {{File{~/.bashrc}}}, and {{File{~/.bash_logout}}}.  These can be copied to the home directories of new users so they have defaults available for their accounts.


!!!! Other shell configuration files:
Readline library - A library for reading a line of input from the terminal 
* Configured by {{File{/etc/inputrc}}} and {{File{~/.inputrc}}}
* These files mostly control additional key bindings
* I like to enable the ~PageUp and ~PageDown keys on other systems for fast command recall.  They're not enabled by default on Debian.

{{File{/etc/~DIR_COLORS}}}
<<<
Configure directory listing colorization
<<<
Disable ls colorization on the ~VMs.  Sometimes color makes it hard to read the text
* Edit {{File{/etc/~DIR_COLORS}}}
* change {{Monospaced{ ''tty'' }}} to {{Monospaced{ ''none'' }}}

{{File{/etc/motd}}} - A ''m''essage ''o''f ''t''he ''d''ay to display to users after they log in


!!! Aliases

Command aliases provide a way to execute long command strings with fewer keystrokes.  Additional options and arguments can be added to an alias.  For example, running {{Command{ l. -l }}} will display all files which begin with a dot in long-listing format.  The {{Command{ l. }}} alias will be translated to {{Command{ ls -d .* }}} and then the {{Monospaced{ -l }}} option will be added.

Display currently defined aliases:  {{Command{alias}}}

Set an alias:  {{Command{alias name='long_cmd -abcd | next_command -efgh'}}}

Standard aliases
* {{Command{ll}}} - Aliased to {{Command{ ls -l }}} on most systems
* {{Command{l.}}} - Aliased to {{Command{ ls -d .* }}} - Display //only// files which begin with a dot.

Override aliases:
* The {{Command{ rm }}} command is usually aliased to {{Command{ rm -i }}} on most systems so you are prompted before deleting each file.  
* Prefix your command with a \ (backslash) to suppress this alias expansion and execute {{Command{ rm }}} normally:  {{Command{ \rm foo }}}

Remove an alias for the current login session:  {{Command{unalias //alias//}}}

{{Command{which}}} and {{Command{type}}}
* These commands will display how each argument would be interpreted if executed as a command
* Aliases will be translated to their actual commands so you know what is really being executed


!!! Core shell options:

Stored in the {{Monospaced{$SHELLOPTS}}} variable
Manipulated with the set command

Enable a shell option:  {{Command{set -o //option//}}}
Disable a shell option:  {{Command{set +o //option//}}}

Examples: 

Toggle command line input method between vi and emacs:
{{Command{set -o vi}}}
{{Command{set -o emacs}}}

Enable noclobber:
With noclobber enabled, an existing file will not be overwritten by redirecting STDOUT to a file 
{{Command{set -o noclobber}}}
{{Command{set +o noclobber}}}

!!! Extra shell options:

{{Command{shopt}}} - Display a list of available options
 ''-s'' to enable an option
 ''-u'' to disable an option

Examples:
* {{Command{ shopt -s cdspell }}} - minor errors in the spelling of a directory component in a cd command are corrected.
* {{Command{ shopt -s checkjobs }}} - lists the status of any stopped and running jobs before exiting an interactive shell.


!!! Environment & Shell variables

In bash, variables are defined on the command line with this syntax:  {{Command{variable=value}}}
By default all variables are local and will not be inherited by child processes

The {{Command{export}}} command will make a variable global and accessible to any child process
{{Command{export}}} can be used when defining a global variable.  eg:  {{Command{export foo=bar}}}
Or, can be used to elevate a currently defined variable to global.  eg:  {{Command{foo=bar ; export foo}}}

{{Command{set}}} will display all currently set variables

{{Command{unset}}} can be used to unset a variable


The shell environment can be manipulated through variables:

For example, the {{Monospaced{$PATH}}} and the prompt variable, {{Monospaced{$~PS1}}}:

The prompt:

* ~PS1 - Primary prompt string is stored in this variable
* Other secondary PS variables exist.
** See https://ss64.com/bash/syntax-prompt.html for more details.

Display your current prompt string: {{Command{ echo $~PS1 }}}

The last character in your prompt - {{Monospaced{ ''#'' }}} vs {{Monospaced{ ''$'' }}}
* {{Monospaced{ ''$'' }}} at the end of the prompt means the user is a regular, unprivileged user.
* {{Monospaced{ ''#'' }}} at the end of the prompt means the user is a superuser.  
* This tagging makes it easier to see your privilege level.

Customized prompts I like for this class.  This prompt makes it easier to see the full path to the current directory and show long command strings on the projector.  The second version adds color.
{{{
PS1='\n[\u@\h \w] :\n\$ '
PS1='\n[\e[1;31m\u\e[m@\e[1;33m\h\e[m \w] :\n\$ '
}}}
Changing the ~PS1 variable by running one of the above commands applies the change immediately to your login session.  It will be reset when a new shell executes.  Add the change to your {{File{~/.bashrc}}} to make it permanent.


!!! Functions:

Functions can provide a shortcut to more complicated command sequences.  They can be used in shell scripts or directly from the command line.

Append to your {{File{~/.bashrc}}}:
{{{
function bak() {
        # This function creates a backup in the current working directory of any single file passed as an argument.
        # Example: bak test.sh
        cp "$@" "$@".`date +%y%m%d:%H%M`.bak
}
}}}

After adding this function to your {{File{~/.bashrc}}}, activate the new version by running  {{Command{ . ~/.bashrc}}} or reloading the shell.
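
A sample run (the timestamp in the backup file name will of course differ):
{{{
[user@shell ~]$ bak test.sh
[user@shell ~]$ ls test.sh*
test.sh  test.sh.241012:1435.bak
}}}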


!!! History substitution:

* Your command history is saved in a buffer for the current login session
* By default, the buffer is appended to {{File{~/.bash_history}}} upon logout
* You can then display the current session's history buffer with the {{Command{history}}} command.

There are history configuration variables to change this behavior:
  - {{Command{set | grep HIST}}}
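
Common history substitution shortcuts, shown as a rough sketch:
{{{
!!          # re-run the previous command
!42         # re-run command number 42 from the history list
!grep       # re-run the most recent command beginning with 'grep'
!$          # expands to the last argument of the previous command
^old^new    # re-run the previous command, replacing 'old' with 'new'
}}}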
 
! Assignment

* Complete [[Lab 33|labs/lab33.pdf]]
! Material

!! Read Linux Bible:
* Chapter 8 - Learning System Administration
** This has a lot of useful info to help fill in some blanks.
* Chapter 14 - Administering Networking
** Pay particular attention to how to do things from the command line.  We don't have a GUI available.
** ~NetworkManager is installed but must be configured from the command line instead.

!! Watch
* How to configure networking with {{Command{nmtui}}}: https://www.youtube.com/watch?v=ynXPaPv8sYg
* Good networking background info, but not required: https://www.youtube.com/watch?v=fHgk7aDGn_4

! Notes

For the remainder of the semester we will transition from being users of a Unix system to administrators.  A virtual lab environment is available where everyone will be assigned a small collection of Unix servers to configure and manage.  

{{Note{
!!! Getting ahead of future problems:
# ''About 50% of the problems we will encounter will be due to typos''
** Leave little to chance and use copy/paste for long or complex commands or configurations
** Pay attention to what you are typing.  Some characters, like a 1 and an l, look alike.  Be sure you know which you're dealing with.
** Use your VM's console only to configure networking and bring it online.  After that, do everything through SSH
# ''About 25% of the problems later in the semester will be due to rushing through earlier material instead of taking the time to retain it''
** Everything we do this semester will look back on previous work.  If you're rushing through and not retaining it, you will surely pay for it later.
# ''About 20% of the problems will be due to not following the directions''
** Go slow and pay attention!  Each of the steps matter.  If you skip over important steps or ignore errors, don't expect things to work.
# ''< 5% of the problems will be due to genuine system errors''
** Having a keen eye for detail, paying attention to the directions, and taking the time to practice and retain the material will make for a much smoother semester.
}}}

!! Linux Basics

There are many different Linux distributions available.  The distro to choose is a combination of the system's purpose and personal preference.  

Examples of different Linux distributions are:
* Server - ~CentOS, ~ClearOS
* Desktop - Fedora, Mint 
* Dual (both desktop & server editions) - Ubuntu, Debian
* Build from source / minimal - Gentoo
* Special Purpose - Kali, Clonezilla, ~GParted

Obtaining a Linux distro
* Directly from the distro's website
* [[DistroWatch|http://distrowatch.com/]] - A site which tracks all available Linux distributions

Installation options
* Single OS
* Dual boot (eg: Dual boot between Windows and Linux)
* Virtualization (eg: ~VirtualBox)
* Live USB (Kali is a great option for this)


For our class, everyone will be assigned Linux virtual machines.  These class ~VMs have already had the OS installed from a common template.  We'll be using ~CentOS minimal, the same distribution used for our class shell server.  This is a bare-bones installation by default.  All other software will need to be installed.  This allows for a slim and nicely tuned system which only contains the components required for its function.

!! Bringing our class ~VMs Online

1. Establish a SSH tunnel for your web browser using the directions in the [[Tunnels & Proxies with SSH]] page.
2. Direct your web browser to https://lab.ci233.net/ to work with your VM.
* This is a protected resource not accessible to the open internet.  You must tunnel your connection to reach it.
* Log in to the Proxmox web UI with the same credentials you used for the class shell server.
* Select your VM from the list on the left
* Click the Start button in the top right corner if your VM is currently powered down.
** The monitor icon next to your VM name should change from black to white when it is powered on
* Once your VM begins to boot, click the console option to open up the console in a new window.
** Once the console window opens, you may need to press Enter to get to the login prompt.

!!! Set root password
Log in with the username ''root'' and set a password with the {{Command{passwd}}} command.  Do not forget this root password.
 - The user ''root'' is the standard administrative account.  This special user account has full access to manage the system and all files on it.
 - There is currently no root password set.  You should be able to log in without being prompted for one.
 - Without setting a root password you will not be able to log in remotely via SSH.


!! Basic networking

Our virtual lab environment is behind a single IP address.  The internal portion is utilizing a private IP space, the subnet 192.168.12.0/23.  This setup is much like a home network where your home systems all share a single public IP address and are behind a router.  This router protects the internal systems since none of them are directly accessible from the internet.  Since your ~VMs are all behind a router, you cannot SSH into any of them directly.  You'll first need to SSH into the class shell server and from there you can SSH into your VM.


!! Set IP addresses

Everyone has a block of 8 ~IPs to work with.  We have five things to configure to bring them fully online:  IP address, netmask, gateway, DNS, and host name

The table below contains the fourth octet of your ''starting'' IP address. Use this to assign to your first VM.  The first three octets are 192.168.13.

| !Start IP | !Username |
| 24 | nmerante |
| 32 | bvasquez14 |
| 40 | cbaylor09 |
| 48 | drice24 |
| 56 | nramic21 |
| 64 | rabreu27 |
| 72 | smacinsky21 |
| 80 | sboykin11 |
| 88 | ttwiss24 |
| 96 | wallen24 |
/% awk -v ip=32 '{print "| " ip " | " $1 " |"; ip+=8}' user2009.txt %/
* This subnet is in ~Class-C address space with a /23 CIDR mask.  Your netmask will be 255.255.254.0.
* The default gateway for these systems, the next hop towards the internet, is 192.168.12.1
* Our DNS resolver is at 192.168.12.10

!!! Manually apply static IP address immediately:

There are two ways to manually apply an IP address:  the old way with the legacy utilities and the new way most current distributions use.  Our systems must be configured the new way; they do not include the old tools by default.  It is generally easier to bring your systems online manually using the virtual console and then SSH into them to complete the configuration.  Once networking is configured, you can also install nano if you're not yet comfortable with vi.

!!!! The old way:
* These commands now require the ''net-tools'' package on modern Linux distributions
** The {{Command{ ifconfig }}} and {{Command{ route }}} commands are no longer installed by default
* Access a root prompt
* Set the ip address
** {{Command{ifconfig ens18 inet 192.168.13.''x'' netmask 255.255.254.0}}}
** Test it:  {{Command{ping 192.168.12.1}}}
** But we can't yet leave our local network:  {{Command{ping 1.1.1.1}}}
* Set the default route
** {{Command{route add default gw 192.168.12.1}}}
** Test it:  {{Command{ping 1.1.1.1}}}
* Set the system host name:
** {{Command{hostname test.//username//.ci233.net}}}
** Be sure to replace ''//username//'' with your actual username in the above command.  Do the same wherever you see //username// in italics.
* Test by reinvoking the shell:  {{Command{bash}}}


!!!! The new commands:
* Log in and access a root prompt
* Ensure the interface is up:
** {{Command{ ip link set ens18 up }}}
* Set the ip address
** {{Command{ ip addr add 192.168.13.''x''/23 dev ens18 }}}
** Test it:  {{Command{ping 192.168.12.1}}}
** But we can't yet leave our local network:  {{Command{ping 1.1.1.1}}}  (This should fail)
* Set the default route
** {{Command{ ip route add default via 192.168.12.1 }}}
** Test it:  {{Command{ping 1.1.1.1}}}  (This should now work)
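
Putting those steps together, the whole sequence looks roughly like this (substitute your own value for ''x''):
{{{
ip link set ens18 up
ip addr add 192.168.13.x/23 dev ens18
ip route add default via 192.168.12.1
ping 192.168.12.1     # local network - should work immediately
ping 1.1.1.1          # beyond the gateway - should work once the default route is set
}}}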


!!!! Configure DNS & host name

* Configure DNS:
** DNS is not yet configured so DNS resolution cannot yet take place.  Attempts to ping a system by its host name should fail.
{{{
[root@localhost ~]# ping google.com
ping: unknown host google.com
}}}
** Add the following line to {{File{/etc/resolv.conf}}} to specify the DNS server to use for mappings between hostname and IP address.  
*** ''nameserver 192.168.12.10''
** Test it:  {{Command{ ping www.google.com }}} 
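After the edit, {{File{/etc/resolv.conf}}} should resemble:
{{{
[root@localhost ~]# cat /etc/resolv.conf
nameserver 192.168.12.10
}}}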


* Execute this command to set the system host name immediately:
** {{Command{hostname test.//username//.ci233.net}}}
** Don't forget to replace ''//username//'' with your actual username
* Verify with {{Command{ hostnamectl }}}
* Test by reinvoking the shell:  {{Command{ bash }}}
* Edit the file {{File{/etc/hostname}}} so it contains the system hostname.  This file will be used to set the hostname on boot.
{{{
[root@test ~]# cat /etc/hostname
test.nmerante.ci233.net
}}}


!!!! Test connectivity by accessing your VM via the network

* Open putty or your SSH client and connect to our class shell server:  ''lab.ci233.net''
* From the class shell server, connect to your VM via SSH:  {{Command{ssh 192.168.13.''x'' -l root}}}
** Use the root password you just set

{{Warning{''Warning:''  The {{Monospaced{''-l''}}} above in the {{Command{ssh}}} command string is a dash followed by a lowercase letter {{Monospaced{''l''}}}, not the number {{Monospaced{''1''}}}.  Be sure you can spot the difference between these two characters.}}}


!!! Modify networking configuration files:

The {{Command{ ip }}} commands we just used will cause these changes to take effect only for the current boot instance of the system.  These settings will be lost once the system reboots.  We need to edit the appropriate configuration files so these settings will be applied on system startup.

The {{Command{nmtui}}} command will provide a text user interface for applying a persistent networking configuration.  
* Be sure to use the ens18 network interface 
* Don't forget to set the interface to automatically connect
* Use the same IP address listed in the table above
* This video may be helpful:  https://www.youtube.com/watch?v=ynXPaPv8sYg

!!!! Edit the file {{File{ /etc/hostname }}}
# Change the current contents to:  ''test.//username//.ci233.net''
# The name ''test'' is only for this first VM.  A different host name will be used for future ~VMs.
# Don't forget to change ''//username//'' to your actual username


!!!! Add a line to {{File{ /etc/hosts }}} which resembles the following:
{{{
192.168.13.24         test.nmerante.ci233.net test
}}}
Replace the last octet of the above IP address with yours and replace my username with yours.


!!! Switch back to console and test

After testing, you can also reboot your VM to ensure it comes up with a proper networking configuration.


!!!! Check your settings
Verify your configuration with the {{Command{ip addr}}} and {{Command{ip route}}} commands.  

The output of {{Command{ip addr}}} should resemble:
{{{
[root@test ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens18: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether c0:12:02:33:00:00 brd ff:ff:ff:ff:ff:ff
    altname enp0s18
    inet 192.168.13.24/23 brd 192.168.13.255 scope global noprefixroute ens18
       valid_lft forever preferred_lft forever
    inet6 fe80::c212:2ff:fe33:0/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever
}}}
The last octet in the IP address above is unique to each system.  .24 is used here; your value will be different.

The output of {{Command{ip route}}} must contain the following lines.  Additional lines may be present.  
{{{
[root@test ~]# ip route
default via 192.168.12.1 dev ens18 proto static metric 100 
192.168.12.0/23 dev ens18 proto kernel scope link src 192.168.13.24 metric 100 
}}}

Hostname verification should resemble:

{{{
[root@localhost ~]# hostname
test.nmerante.ci233.net
}}}


!!!! Verify network connectivity
You should now be able to ping the default gateway for our test network by its IP address and Google by its hostname.
{{{
[root@test ~]# ping 192.168.12.1
PING 192.168.12.1 (192.168.12.1) 56(84) bytes of data.
64 bytes from 192.168.12.1: icmp_seq=1 ttl=64 time=0.083 ms
64 bytes from 192.168.12.1: icmp_seq=2 ttl=64 time=0.123 ms
64 bytes from 192.168.12.1: icmp_seq=3 ttl=64 time=0.137 ms
^C
--- 192.168.12.1 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2043ms
rtt min/avg/max/mdev = 0.083/0.114/0.137/0.024 ms

[root@test ~]# ping www.google.com
PING www.google.com (142.250.186.132) 56(84) bytes of data.
64 bytes from fra24s07-in-f4.1e100.net (142.250.186.132): icmp_seq=1 ttl=117 time=5.26 ms
64 bytes from fra24s07-in-f4.1e100.net (142.250.186.132): icmp_seq=2 ttl=117 time=5.37 ms
64 bytes from fra24s07-in-f4.1e100.net (142.250.186.132): icmp_seq=3 ttl=117 time=5.26 ms
^C
--- www.google.com ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2003ms
rtt min/avg/max/mdev = 5.260/5.296/5.366/0.049 ms
}}}


!!!! Remote connections with SSH

The configuration of our virtual lab network will not allow direct outside connections to virtual machines.  You must first connect to the lab SSH jumphost.  

In this example I'm connecting to the class shell server from my home Unix system and then my test VM.  You can also use putty or a similar tool to connect to the class shell server.

{{{
nick@trillian:~>ssh lab.ci233.net -l nmerante
Last login: Sat Mar 14 13:38:20 2020 from hoot
[nmerante@shell ~]$


[nmerante@shell ~]$ssh 192.168.13.24 -l root
Warning: Permanently added '192.168.13.24' (RSA) to the list of known hosts.
root@192.168.13.24's password: 
Last login: Sat Oct 12 23:35:38 2024
[root@localhost ~]#
}}}

{{Note{''Note:''   The {{Monospaced{''-l''}}} above in the {{Command{ssh}}} command string is a dash followed by a lowercase letter {{Monospaced{''l''}}}, not the number {{Monospaced{''1''}}}.}}}



! Assignment

''Note:'' Labs 35-40 were skipped

<<tiddler [[Lab 41 - Bring test and www online]]>>
! Material

!! Read:
* Linux Bible Chapter 15 - Starting and Stopping Services
** ''Note:'' Modern Linux systems now use systemd.  The old init daemons and tools are largely deprecated.
/%* Linux Administration Chapter 7 - Booting and Shutting Down
** Note: Booting into single-user mode is good to know but the book's instructions are incomplete. Here's the new way for Redhat systems (which includes ~CentOS) - https://www.tecmint.com/boot-into-single-user-mode-in-centos-7/

* Linux Administration Chapter 9 - Core System Services
** %Skip over //xinetd// and //logging// (pages 181 to 195 and continue with journald on page 195.%/

! Notes

!! General system startup

* The Boot process
** BIOS (Basic Input/Output System)
*** For motherboard and certain devices
** MBR (First block of the disk)
** Boot loader - Chooses the OS/Kernel and bootstraps the operating system
*** Grub - Grand Unified Boot loader - Standard Linux boot loader
*** Check out grub configs in /boot/grub
*** Use it to boot multiple kernels (such as after a kernel update) or multiple ~OSes
** kernel - /boot/vmlinuz* - loaded into memory and begins to execute
*** Press ESC to see boot messages while the system starts
*** device detection: probe system buses, inventory hardware, and load device driver modules
*** create kernel processes (those in brackets)
*** system becomes available for user processes once the kernel is loaded
** Initialization daemon - First user process, parent of all processes running on the system
*** init - old ~SystemV ~OSes
*** systemd - New method
*** executed by the kernel and responsible for starting other processes
** startup scripts - start system services
* Config files in {{File{/etc/}}}
** Most are single files for the service or resource
*** {{File{fstab}}} : tab = table - filesystem table
*** {{File{resolv.conf}}}
*** {{File{sysconfig}}} directory - extra system configuration files
** Some are multiple files
*** cron is a good example
*** {{File{crontab}}} - traditional cron config table
*** {{File{cron.d}}} - directory containing individual config files
*** {{File{ cron.{daily,monthly,weekly} }}}


!! Systemd
* A new standard init system
* Backward compatible with ~SystemV init
* Can start services in parallel, reducing system start times
* Everything is broken down into units.  
** Two primary unit types to be concerned with
*** service units - Manage a single service
*** target units - manage groups of services
*** {{Command{ systemctl list-units | grep service }}}
*** {{Command{ systemctl list-units | grep target }}}
* Service and target configuration files are stored in {{File{ /{etc,lib}/systemd/system }}}
** Use the {{File{/etc/systemd/system}}} path for custom configs or to override existing
** Stock configs are in {{File{ /lib/systemd/system }}}
** View a list with current state: {{Command{ systemctl list-unit-files &#45;-type=service }}}

Everything is managed by symlinks:
* runlevel.? targets are symlinked to their systemd equivalents

<<<
[root@www system]# pwd
/lib/systemd/system
[root@www system]# ll runlevel*
lrwxrwxrwx. 1 root root 15 Oct 21 17:02 runlevel0.target -> poweroff.target
lrwxrwxrwx. 1 root root 13 Oct 21 17:02 runlevel1.target -> rescue.target
lrwxrwxrwx. 1 root root 17 Oct 21 17:02 runlevel2.target -> multi-user.target
lrwxrwxrwx. 1 root root 17 Oct 21 17:02 runlevel3.target -> multi-user.target
lrwxrwxrwx. 1 root root 17 Oct 21 17:02 runlevel4.target -> multi-user.target
lrwxrwxrwx. 1 root root 16 Oct 21 17:02 runlevel5.target -> graphical.target
lrwxrwxrwx. 1 root root 13 Oct 21 17:02 runlevel6.target -> reboot.target
<<<
* default.target symlinked to the desired default runlevel target
<<<
[root@www system]# ll default.target
lrwxrwxrwx. 1 root root 16 Oct 21 17:02 default.target -> graphical.target
<<<

| !~SysVinit Runlevel | !Systemd Target | !Description |
| 0 |runlevel0.target, poweroff.target|Halt the system|
| 1, s |runlevel1.target, rescue.target|Single user mode|
| 2, 4 |runlevel2.target, runlevel4.target, multi-user.target|User-defined/Site-specific runlevels. By default, identical to 3|
| 3 |runlevel3.target, multi-user.target|Multi-user, non-graphical. Users can usually login via multiple consoles or via the network|
| 5 |runlevel5.target, graphical.target|Multi-user, graphical. Usually has all the services of runlevel 3 plus a graphical login|
| 6 |runlevel6.target, reboot.target|Reboot|
| emergency |emergency.target|Emergency shell|


!!! Examining service configuration files

cat /lib/systemd/system/sshd.service
{{{
[Unit]
Description=OpenSSH server daemon
After=network.target sshd-keygen.service
Wants=sshd-keygen.service

[Service]
EnvironmentFile=/etc/sysconfig/sshd
ExecStart=/usr/sbin/sshd -D $OPTIONS
ExecReload=/bin/kill -HUP $MAINPID
KillMode=process
Restart=on-failure
RestartSec=42s

[Install]
WantedBy=multi-user.target
}}}
 - After: Units which must be started before this service (start ordering)
 - Wants:  Additional units to start along with this service
 - ~EnvironmentFile - File holding environment variables or options passed to the startup / shutdown commands
 - ~WantedBy: Runlevel target this service is associated with

Display services wanted by a runlevel target: {{Command{ systemctl show &#45;-property "Wants" multi-user.target }}}
Display services required by a runlevel target: {{Command{ systemctl show &#45;-property "Requires" multi-user.target }}}
Display services that want a particular child service: {{Command{ systemctl show &#45;-property "~WantedBy" sshd-keygen.service }}}

!!! Starting and Stopping

Example commands to start, stop, restart, and check the status of a service immediately:

* Start: {{Command{systemctl start firewalld.service}}}
* Stop: {{Command{systemctl stop firewalld.service}}}
* Check Status: {{Command{systemctl status firewalld.service}}}
* Restart {{Command{systemctl restart firewalld.service}}}
** Can also be used to reload configuration

Conditional restart - only restart if it's already running:  {{Command{ systemctl condrestart firewalld.service }}}

Reload a service to re-read configuration files:  {{Command{ systemctl reload sshd.service }}}

Persistent services - Those to start on system boot:
Newly installed services will not be configured automatically to start on system boot.  You will have to start them manually and set them to start on boot.
- Enable a service to start on boot, eg: {{Command{systemctl enable firewalld.service}}}
- Stop a service from starting on boot, eg: {{Command{systemctl disable firewalld.service}}}
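
To confirm whether a service is currently set to start on boot, {{Command{systemctl is-enabled}}} prints ''enabled'' or ''disabled'':
{{{
[root@test ~]# systemctl is-enabled firewalld.service
enabled
}}}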


!!! Checking status

{{Command{systemctl list-unit-files &#45;-type=service}}}
{{Command{systemctl status firewalld.service}}}


!!! Adding a new service

For example, adding a new service for Apache.  This is only necessary if you installed a service from source code instead of via package management.  If you install software from a package, that package will come with the necessary files for systemd to manage the service.  This is a good reference to see the internals in case something custom needs to be added or modified.

{{File{/etc/systemd/system/httpd.service}}} :
{{{
[Unit]
Description=Apache Webserver
After=network.target

[Service]
Type=forking
EnvironmentFile=/etc/sysconfig/httpd
ExecStart=/opt/work/apache/bin/httpd -k start $OPTIONS
ExecStop=/opt/work/apache/bin/httpd -k graceful-stop $OPTIONS
ExecReload=/opt/work/apache/bin/httpd -k graceful $OPTIONS

Restart=always

[Install]
WantedBy=multi-user.target
}}}
 - {{Command{man systemd.service}}} for more details.

* Create environment file:  {{Command{ touch /etc/sysconfig/httpd }}}
* Refresh service and target configuration files:  {{Command{ systemctl daemon-reload }}}
* Enable startup on boot:  {{Command{systemctl enable httpd.service}}}
** Symlink was created in multi-user.target.wants:  {{Command{ ll /etc/systemd/system/multi-user.target.wants/ }}}
* Start now: {{Command{systemctl start httpd.service}}}
** Review recent logs associated with the service:  {{Command{ journalctl -u httpd.service }}}


!! Single user mode
Single user mode is a method to access systems which cannot fully boot.  The boot process is changed to disable most system startup steps and services so the system can be accessed and recovered from the failure.

* How to access systems if problems occur during boot
** Boot from a live CD or recovery mode
** Single user mode
*** Change the grub timeout to 20 seconds so you'll have more time to catch it.  ~VMs often introduce delays accessing the console, which can make it difficult to catch the grub loader if it has a short timeout.
**** Edit the file {{File{/etc/default/grub}}} and add the line {{Monospaced{''~GRUB_TIMEOUT=20''}}} to the bottom of the file
**** Execute {{Command{grub2-mkconfig -o /boot/grub2/grub.cfg}}} to activate the changes
*** See https://www.tecmint.com/boot-into-single-user-mode-in-centos-7/ for instructions to boot in single user mode


Other useful commands:  
* {{Command{shutdown}}} - shutdown / power off the system with many options for doing so
* {{Command{halt}}} & {{Command{poweroff}}}
* {{Command{reboot}}}
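
A few common {{Command{shutdown}}} invocations as a sketch:
{{{
shutdown -h now       # halt / power off immediately
shutdown -r +5        # reboot in five minutes, warning logged-in users
shutdown -c           # cancel a pending scheduled shutdown
}}}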


! Assignment

Play around with accessing single user mode.  It's a handy thing to know how to do.  The second half of this week is for getting caught up and more familiar with working with your ~VMs.  Use the time to practice the material and investigate this extra content:
 - [[Working more efficiently with GNU screen & SSH keys]] & [[Tunnels & Proxies with SSH]]

Be sure you're comfortable using the {{Command{systemctl}}} command to start, stop, and restart services.
Like most wikis, TiddlyWiki supports a range of simplified character formatting:
| !To get | !Type this |h
| ''Bold'' | {{{''Bold''}}} |
| ==Strikethrough== | {{{==Strikethrough==}}} |
| __Underline__ | {{{__Underline__}}} (that's two underline characters) |
| //Italic// | {{{//Italic//}}} |
| Superscript: 2^^3^^=8 | {{{2^^3^^=8}}} |
| Subscript: a~~ij~~ = -a~~ji~~ | {{{a~~ij~~ = -a~~ji~~}}} |
| @@highlight@@ | {{{@@highlight@@}}} |
| Tiddler Comments | {{{/%}}} text {{{%/}}}. |
| [[Make me a tiddler]] | {{{[[Make me a tiddler]]}}} |
| ~NoTiddler | {{{~NoTiddler}}} |
| {{{This is monotype}}} | {{{{{{This is monotype}}}}}} |

* Sample:
|!th1111111111|!th2222222222|
|>| colspan |
| rowspan |left|
|~| right|
|bgcolor(#a0ffa0):colored| center |
|caption|c
For advanced effects, you can control the CSS style of a table by adding a row like this:
{{{
|cssClass|k
}}}


<<<
The highlight can also accept CSS syntax to directly style the text:
@@color:green;green coloured@@
@@background-color:#ff0000;color:#ffffff;red coloured@@
@@text-shadow:black 3px 3px 8px;font-size:18pt;display:block;margin:1em 1em 1em 1em;border:1px solid black;Access any CSS style@@
<<<
!!@@display:block;text-align:center;centered text@@

//For backwards compatibility, the following highlight syntax is also accepted://
{{{
@@bgcolor(#ff0000):color(#ffffff):red coloured@@
}}}
@@bgcolor(#ff0000):color(#ffffff):red coloured@@

/*{{{*/

@@color(yourcolorhere):colored text@@
@@color(fuchsia):colored text@@
@@bgcolor(yourcolorhere):your text here@@

[img[title|filename]]
[img[filename]]
[img[title|filename][link]]
[img[filename][link]]
[[text|url]]
[[Existing Tiddler Name|UglyTiddlerName]]

<<macro>>
<hr> = ----

*Entry One
**Sub-entry A
***Sub-sub-entry i
***Sub-sub-entry ii
**Sub-entry B
*Entry Two
*Entry Three
Use number signs (#'s) instead of asterisks for <OL type=1>

Tables:
|!Headings: add an exclamation point (!) right after the vertical bar.|!Heading2|!Heading3|
|Row 1, Column 1|Row 1, Column 2|Row 1, Column 3|
|>|>|Have one row span multiple columns by using a >|
|Have one column span multiple rows by using a ~|>| Use a space to right-align text in a cell|
|~|>| Enclose text in a cell with spaces to center it |
|>|>|bgcolor(green):Add color to a cell using bgcolor(yourcolorhere):|
|Add a caption by ending the table with a vertical bar followed by a c|c

!Header 1
!!Header 2
!!!Header 3
!!!!Header 4
!!!!!Header 5


Here's the code for a blockquote:
<<<
Here's the quoted text.
<<<

/*}}}*/

!Links
[[Calendar generator|http://zrenard.com/tiddlywiki/cal.php]]



Entities in HTML documents allow characters to be entered that can't easily be typed on an ordinary keyboard. They take the form of an ampersand (&), an identifying string, and a terminating semi-colon (;). There's a complete reference [[here|http://www.htmlhelp.com/reference/html40/entities/]]; some of the more common and useful ones are shown below. Also see [[Paul's Notepad|http://thepettersons.org/PaulsNotepad.html#GreekHtmlEntities%20HtmlEntitiesList%20LatinHtmlEntities%20MathHtmlEntities]] for a more complete list.

|>|>|>|>|>|>| !HTML Entities |
| &amp;nbsp; | &nbsp; | no-break space | &nbsp;&nbsp; | &amp;apos; | &apos; | single quote, apostrophe |
| &amp;ndash; | &ndash; | en dash |~| &amp;quot; | &quot; | quotation mark |
| &amp;mdash; | &mdash; | em dash |~| &amp;prime; | &prime; | prime; minutes; feet |
| &amp;hellip; | &hellip; |	horizontal ellipsis |~| &amp;Prime; | &Prime; | double prime; seconds; inches |
| &amp;copy; | &copy; | Copyright symbol |~| &amp;lsquo; | &lsquo; | left single quote |
| &amp;reg; | &reg; | Registered symbol |~| &amp;rsquo; | &rsquo; | right  single quote |
| &amp;trade; | &trade; | Trademark symbol |~| &amp;ldquo; | &ldquo; | left double quote |
| &amp;dagger; | &dagger; | dagger |~| &amp;rdquo; | &rdquo; | right double quote |
| &amp;Dagger; | &Dagger; | double dagger |~| &amp;laquo; | &laquo; | left angle quote |
| &amp;para; | &para; | paragraph sign |~| &amp;raquo; | &raquo; | right angle quote |
| &amp;sect; | &sect; | section sign |~| &amp;times; | &times; | multiplication symbol |
| &amp;uarr; | &uarr; | up arrow |~| &amp;darr; | &darr; | down arrow |
| &amp;larr; | &larr; | left arrow |~| &amp;rarr; | &rarr; | right arrow |
| &amp;lArr; | &lArr; | double left arrow |~| &amp;rArr; | &rArr; | double right arrow |
| &amp;harr; | &harr; | left right arrow |~| &amp;hArr; | &hArr; | double left right arrow |

The table below shows how accented characters can be built up by substituting a base character into the various accent entities in place of the underscore ('_'):

|>|>|>|>|>|>|>|>|>|>|>|>|>|>|>|>|>| !Accented Characters |
| grave accent | &amp;_grave; | &Agrave; | &agrave; | &Egrave; | &egrave; | &Igrave; | &igrave; | &Ograve; | &ograve; | &Ugrave; | &ugrave; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; |
| acute accent | &amp;_acute; | &Aacute; | &aacute; | &Eacute; | &eacute; | &Iacute; | &iacute; | &Oacute; | &oacute; | &Uacute; | &uacute; | &nbsp; | &nbsp; | &Yacute; | &yacute; | &nbsp; | &nbsp; |
| circumflex accent | &amp;_circ; | &Acirc; | &acirc; | &Ecirc; | &ecirc; | &Icirc; | &icirc; | &Ocirc; | &ocirc; | &Ucirc; | &ucirc; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; |
| umlaut mark | &amp;_uml; | &Auml; | &auml; |  &Euml; | &euml; | &Iuml; | &iuml; | &Ouml; | &ouml; | &Uuml; | &uuml; | &nbsp; | &nbsp; | &Yuml; | &yuml; | &nbsp; | &nbsp; |
| tilde | &amp;_tilde; | &Atilde; | &atilde; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &Otilde; | &otilde; | &nbsp; | &nbsp; | &Ntilde; | &ntilde; | &nbsp; | &nbsp; | &nbsp; | &nbsp; |
| ring | &amp;_ring; | &Aring; | &aring; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; |
| slash | &amp;_slash; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &Oslash; | &oslash; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; |
| cedilla | &amp;_cedil; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &Ccedil; | &ccedil; |

<HTML><a href="http://checkettsweb.com/#%5B%5BCSS-Colors%20and%20Backgrounds%5D%5D%20%5B%5BCSS-Text%20and%20Fonts%5D%5D%20OldStyleSheet%20Rin%20%5B%5BTiddlyWiki%20Structure%5D%5D">CSS Info</a></html>

<<version>>
[[Plugins]]
[[Styles]]
[[TagglyTagging]]
[[systemConfig]]
[[systemTiddler]]
[[excludeSearch]]
[[excludeLists]]
! Material

This page will discuss three topics:

1. Authenticating to Unix systems with SSH keys
2. Terminal multiplexing with GNU {{Command{screen}}}
3. Defeating firewalls with SSH to access protected resources

The first two are optional but will make working with our lab systems easier.  The third will be required for working with future labs and will help you monitor the state of your ~VMs.


!! 1. Authenticating to Unix systems with SSH keys

Two mechanisms exist for SSH authentication:  
# normal passwords 
# key pairs used in asymmetric encryption
A key pair contains a private key that you keep secure and a public key that you distribute to the systems you wish to connect to.  The private key is used to establish your identity, and the presence of the public key on the remote system establishes your authorization to access it.  Private keys should be secured with a passphrase to ensure they cannot be maliciously used if they are captured by an attacker.  SSH authentication with passphrase-protected key pairs is much safer than passwords, since an attacker must also capture the private key file in order to impersonate you.  For this reason, it is common to at minimum block password authentication for root logins, or ideally to allow only key-based authentication for all users.  More sensitive systems should require key-based authentication as part of general system hardening.  

Forcing key-based authentication gives us multi-factor authentication when the key is properly secured with a passphrase:
# Something you have (the private key)
# Something you know (the key's passphrase)


We begin by creating a keypair on the class shell server.  

{{Command{cd ~/.ssh/}}}
<<<
Change to the ~/.ssh/ directory, the default location for a user's ssh configuration files.
<<<

{{Command{ssh-keygen -t ed25519 -f  ci233 }}}
<<<
Create a SSH key pair using default settings, except for changing the key type to ed25519 and naming the key ci233.  The algorithm and key size can also be adjusted via flags.  The remaining defaults are reasonable.  You will be prompted to set a passphrase.  Choose something secure which you can remember.  This [[xkcd cartoon|https://xkcd.com/936/]] may be helpful.  The more entropy the better.
<<<

{{Command{ssh-copy-id -i ci233 root@10.1.26.''//x//''}}}
<<<
Copy your public key to each of your ~VMs.  It will be saved to the file {{File{~/.ssh/authorized_keys}}} on the remote system (your VM).  The administrator may have to add the key for you on systems you're not able to log into yet.
<<<

{{Command{ssh -l root 10.1.26.''//x//''}}}
<<<
Try to connect to your test VM.  You should be prompted for a password since our private key is not in the default location and was not specified on the command line.
<<<

{{Command{ssh -i ci233 -l root 10.1.26.''//x//''}}}
<<<
You should now be prompted for your SSH passphrase instead of password.  If an available and authorized SSH key is found it will be offered for use instead of your password.  Authentication will fall back to regular password if key-based fails.
<<<

{{Command{exit}}}
<<<
Disconnect from your VM
<<<

Having to specify the username and key file for each login to your ~VMs can be eliminated with an SSH client config which sets a default username and SSH key for class ~VMs.

Edit the file {{File{~/.ssh/config}}} and add the following:
{{{
Host test
	HostName 10.1.26.x
        
Host www
	HostName 10.1.26.x

Host *
	IdentityFile ~/.ssh/ci233
	User root
}}}
Be sure to change the x above to your actual IP address.  This addition will also eliminate the need for specifying full IP addresses for each connection.  You'll be able to then connect with just {{Command{ssh //hostname//}}} and the IP address, user, and key file will be added for you.  Add new ~VMs to the config as they are issued to you.


!!! SSH agent - Unlock your key once for multiple connections

The SSH agent is a keyring which your SSH private keys can be attached to.  Once set up, future connections will look to that key ring when an authentication request is made instead of prompting you for your SSH passphrase each time.  The idea is one authentication event for many remote connections.

{{Command{ssh-agent > ~/.ssh/env}}}
<<<
Create a SSH agent, saving the environment information to the specified file.  This environment must be imported in order to make use of the agent.
<<<

{{Command{eval `cat ~/.ssh/env`}}}
<<<
Import the environment settings into the current shell environment
<<<

{{Command{ssh-add ~/.ssh/ci233}}}
<<<
Add your ci233 private key to your ssh agent keyring.  You should be prompted for its passphrase.
<<<

Once the SSH agent is established you may communicate to your lab systems without being prompted to authenticate each time.  Notice the lack of passphrase prompts:

{{Commands{
[nmerante@shell ~]$ ''ssh test''
Last login: Mon Oct 19 15:18:26 2020 from 10.1.26.10

[root@test ~]# ''exit''
logout
Connection to 10.1.26.24 closed.

[nmerante@shell ~]$ ''ssh www''
Last login: Mon Oct 19 15:19:51 2020 from 10.1.26.10

[root@www ~]# ''exit''
logout
Connection to 10.1.26.25 closed.
}}}


!! 2. Terminal multiplexing with GNU screen

GNU {{Command{screen}}} is a very useful tool for those working with the command line on many systems from different locations on a daily basis.  From within {{Command{screen}}}, connections can be made to many systems.  The user can detach from the screen session, change physical locations, and reconnect to their screen session continuing work where they left off.  GNU {{Command{screen}}} and ssh agents make a great combination for connecting to multiple machines over the course of your work day.

This video might help get you started:  https://www.youtube.com/watch?v=Mw6QvsChxo4

{{Command{cp ~nmerante/.screenrc ~/}}}
<<<
Copy this default screen configuration file to your home directory.  This will establish some baseline settings.
<<<

If you first run the steps in Section 1 to set up ssh-agent and then launch {{Command{screen}}} to start your screen instance, your SSH Agent will be established for all screen windows.  You thus will not need to authenticate to your ~VMs as you move between them.  You will only need to run the {{Command{screen}}} command without any options once.  It will stay active with your tasks running in the background until you either terminate it or the class shell server restarts.

Screen commands:
| !Key Sequence | !Action |
| ~CTRL-a , 0 |Switch to window 0|
| ~CTRL-a , 1 |Switch to window 1|
| ~CTRL-a , 2 |Switch to window 2|
| ~CTRL-a , //n// |Switch to window //n//|
| ~CTRL-a , c |Create a new screen window|
| ~CTRL-a , " |Display available screen windows|
| ~CTRL-a , ' |Switch to a screen window by number|
| ~CTRL-a , A |Title the current screen window|
| ~CTRL-a , ? |Display screen help|

With screen now running, enter these screen commands to get things set up:
* Create a new window:  {{Command{~CTRL-a, ~CTRL-c}}}
* Switch to window 1: {{Command{~CTRL-a, 1}}}
** Connect to your test VM with ssh
* Create a new window:  {{Command{~CTRL-a, ~CTRL-c}}}
* Switch to window 2: {{Command{~CTRL-a, 2}}}
** Connect to your www VM with ssh
* Switch to window 0:  {{Command{~CTRL-a, 0}}}
** Use this window to work on the class shell server
* Detach from screen (as if you're done working for the day):   {{Command{~CTRL-a, d}}}
* Reconnect to your screen session (as though you're coming back later to continue work):  {{Command{screen -dr}}}

Now, when you disconnect from the shell server, all of your tasks will stay running in the background.  Log in again and run   {{Command{screen -dr}}} to continue where you left off.  Create new windows inside of screen as you need them for new ~VMs or to run additional tasks concurrently. 

More screen commands:
| !Key Sequence | !Action |
| ~CTRL-a , &#124; |Split window vertically|
| ~CTRL-a , S |Split window horizontally|
| ~CTRL-a , TAB |Switch between split windows|
| ~CTRL-a , X |Close a split window|
|>|>|
| ~CTRL-a , d |Detach from screen|
| ~CTRL-a , :password |Set a password for your screen session|


{{Note{[[This video|SSH]] may be a helpful demonstration}}}


!! 3. Defeating firewalls with SSH to access protected resources

See the [[Tunnels & Proxies with SSH]] page.


/%

!! 1. Authenticating to Unix systems with SSH keys

Two different sets of keys are used with SSH:  one for securing communication between the client and server and, optionally, a set to authenticate remote users.  

!!! SSH Host keys

* Public key crypto is used for encrypting communication between client and server
* Server keys are stored in the files {{File{/etc/ssh/ssh_host_*}}}
* Fingerprints for new systems are shown and stored in the user's {{File{~/.ssh/known_hosts}}} file.  This keeps a record of trusted systems.
** This file can leak identities of systems you are communicating with
** Hash your current known hosts file if you'd like to mask the systems: {{Command{ ssh-keygen -H }}}
* Fingerprints for known systems are compared on each login to identify MITM attacks
** The user is alerted if a mismatch is found
*** This is the warning you see if you connect to a new system for the first time or there's a server change when connecting to an existing system.
** The user should take steps to verify the host key has legitimately changed.  If this change is due to a MITM attack, the attacker could capture your credentials
** Display the fingerprint of a SSH public key: {{Command{ssh-keygen -lf  //file//.pub}}}

!!!! Demo:

{{Monospaced{
[nmerante@shell ~]$ ''ssh head.ci233.net''
The authenticity of host 'head.ci233.net (10.1.26.15)' can't be established.
ECDSA key fingerprint is ~SHA256:bHKouQIItQNr5r1Im3tI0uk2ArpfYU1Yvop0SQhOLVY.
ECDSA key fingerprint is ~MD5:9f:0d:9c:2d:f6:2c:ef:9e:6a:bb:ab:e5:4b:c5:55:e4.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'head.ci233.net' (ECDSA) to the list of known hosts.

# You can't log into this system, so press ~CTRL-C to abort:
nmerante@head.ci233.net's password:

# Here's the fingerprint of this system:
[nmerante@csshell ~]$ ''grep head ~/.ssh/known_hosts''
head.ci233.net ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBhZIx/NElfvUL0nI/KwOotqk5Fypf01LQpn8YIe7FfXI8xnwEzESmqZTOiC791SrvOaoIxIFu9WW9xO7+BcgSw=

# Hash the hosts in the file:
[nmerante@shell ~]$ ''ssh-keygen -H''
/home/nmerante/.ssh/known_hosts updated.
Original contents retained as /home/nmerante/.ssh/known_hosts.old
WARNING: /home/nmerante/.ssh/known_hosts.old contains unhashed entries
Delete this file to ensure privacy of hostnames

Now a grep returns no results:
[nmerante@shell ~]$ ''grep head ~/.ssh/known_hosts''
}}}

%/

! Additional Details

This video is a deep dive into SSH and has a lot of great info:  https://www.youtube.com/watch?v=fnkG9_jy2qc
! Material

* Read:
** grep:  Chapter 10, pp 63 
** Anchors:  Chapter 19, pp 255


! Notes

!! Pattern Matching with {{Command{grep}}}:

The {{Command{grep}}} filter is one of the most useful.  I use it almost daily.  It's worth spending a little more time working with and ensuring we're all on the same page with our terminology.  ''From here on, I'm going to start being a little more strict with wording, so be thorough with your explanations.''


!!! {{Command{grep}}} - Display all lines from a file or stream containing a specified pattern
* Usage: {{Monospaced{grep //pattern// [file1] [file2] [file3] [file//n//]}}}
* Search for //pattern// in each of the specified files
* Useful options:
** {{Monospaced{''-v''}}} : Invert the match; display lines which ''do not'' contain the specified pattern
** {{Monospaced{''-i''}}} : Case insensitive search
** {{Monospaced{''-l''}}} : list only names of files which contain a match
** {{Monospaced{''-H''}}} : include file name with matched pattern
* Examples:
** {{Command{grep dog data.txt}}} - Display all lines from the file ''data.txt'' containing the string ''dog''
** {{Command{grep ssh /etc/*.conf}}} - Display all lines from files ending with ''.conf'' within the directory ''/etc/'' containing the string ''ssh''
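
For instance, assuming a small ''data.txt'' (file contents here are hypothetical, used only for illustration):
{{{
[user@shell ~]$ cat data.txt
the quick brown fox
my dog ate my homework
cats and dogs
[user@shell ~]$ grep dog data.txt
my dog ate my homework
cats and dogs
}}}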

!!! ''Important Notes:''
!!!! 1)  The following all have distinct meanings.  Be sure to use them properly.
* ''Line'':  The entire line
* ''String'':  A sequence of characters
* ''Word'':  A sequence of characters with whitespace or punctuation on either side or at the beginning or end of a line.
* ''Characters'':  Individual characters, not necessarily together


!!!! 2)  By default, the grep filter will display all lines which match a particular pattern or string.  Be specific when describing its actions in the labs. 
For example, if you are asked to describe what the following command is doing:
<<<
{{Command{grep ab data.txt}}}
<<<
and your response is something vague and generic like "//finds ab in the file//" you will not receive full credit.  Be ''thorough and specific''!  What happens when a match is found?  Which file is being examined?  Where is the output going?  I've allowed vague descriptions in previous labs, but that must end as our familiarity with these tools increases.

A proper response will cover all points:

Display to ''STDOUT'' all ''lines'' containing the string ''ab'' from the file ''data.txt''


!!!! 3)  If multiple commands are chained together, don't just itemize what each command in the pipeline is doing.  Be sure to also describe its final outcome.  We must appreciate the big picture as well.



Chapter 10, pp 63 has more information on the grep command.


!!! Anchors:

When trying to match a pattern in a tool like grep, anchors allow us to specify where on the line a pattern must occur.  This is useful if we're trying to match something which appears at either the beginning or end of a line instead of somewhere in the middle.  

Anchors can be utilized with two anchor metacharacters:

* ^ = begins with
* $ = ends with
* Examples:
** {{Command{grep '^string' data.txt}}} - Display lines from the file ''data.txt''  beginning with ''string''
** {{Command{grep 'string$' data.txt}}} - Display lines from the file ''data.txt''  which end with ''string''
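
A quick illustration against {{File{/etc/passwd}}} (your output will differ slightly by system):
{{{
[user@shell ~]$ grep '^root' /etc/passwd
root:x:0:0:root:/root:/bin/bash
[user@shell ~]$ grep 'nologin$' /etc/passwd | head -3
bin:x:1:1:bin:/bin:/sbin/nologin
daemon:x:2:2:daemon:/sbin:/sbin/nologin
adm:x:3:4:adm:/var/adm:/sbin/nologin
}}}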

Chapter 19, pp 255 contains more information on anchors.

Type the text for 'excludeLists'
Type the text for 'excludeSearch'
config.macros.listTags = { text: "Hello" };
config.macros.listTags.handler = function(place,macroName,params)
{
	var tagged = store.getTaggedTiddlers(params[0],params[1]);
//<< Second parameter is field to sort by (eg, title, modified, modifier or text)
	var ul = createTiddlyElement(place,"ul",null,null,"");
	for(var r=0;r<tagged.length;r++)
	{
		var li = createTiddlyElement(ul,"li",null,null,"");
		createTiddlyLink(li,tagged[r].title,true);
	}
}
/***
|''Name:''|Plugin setDefaults|
|''Version:''|1.0.1 (2006-03-16)|
|''Source:''|http://tiddlywikitips.com/#%5B%5BPlugin%20setDefaults%5D%5D|
|''Author:''|Jim Barr (jim [at] barr [dot] net)|
|''Licence:''|[[BSD open source license]]|
|''TiddlyWiki:''|2.0|
|''Browser:''|Firefox 1.0.4+; Firefox 1.5; InternetExplorer 6.0|
!Description

These settings simply set "default" values for several system features and Plugins.
***/

/***
Standard settings:
***/
//{{{
config.options.chkRegExpSearch         = false;         // default false
config.options.chkCaseSensitiveSearch  = false;         // default false
config.options.chkAnimate              = false;          // default true
//config.options.txtUserName             = "Nick";    // default "YourName"
config.options.chkSaveBackups          = false;          // default true
config.options.chkAutoSave             = false;          // default false
config.options.chkGenerateAnRssFeed    = false;         // default false
config.options.chkSaveEmptyTemplate    = false;         // default false
config.options.chkOpenInNewWindow      = true;          // default true
config.options.chkToggleLinks          = false;         // default false
config.options.chkHttpReadOnly         = true;         // default true
config.options.chkForceMinorUpdate     = false;         // default false
config.options.chkConfirmDelete        = true;          // default true
config.options.txtBackupFolder         = "";            // default ""
config.options.txtMainTab              = "tabTimeline"; // default "tabTimeline"
config.options.txtMoreTab              = "moreTabAll";  // default "moreTabAll"
config.options.txtMaxEditRows          = "30";          // default "30"
config.options.chkInsertTabs = true;    		// tab inserts a tab when editing a tiddler

//}}}

/***
Custom Plugin settings:
***/
//{{{
config.options.chkSinglePageMode       = false;          // default "true"
config.options.chkSearchTitlesFirst       = true;
config.options.chkSearchList           = true;           // default "false"
config.messages.messageClose.text      = "X";           // default "close"
// config.views.wikified.defaultText      = "";            // default "The tiddler '%0' doesn't yet exist. Double-click to create it"
config.options.chkStepWiseNavigationOn = true;           // default "false"
config.options.chkDisableAutoSelect       =true;
config.options.chkTextAreaExtensions    =true;
//}}}
Type the text for 'systemConfig'
Type the text for 'systemTiddler'