\documentclass[10pt,a4paper]{article}

% Packages
\usepackage{fancyhdr} % For header and footer
\usepackage{multicol} % Allows multicols in tables
\usepackage{tabularx} % Intelligent column widths
\usepackage{tabulary} % Used in header and footer
\usepackage{hhline} % Border under tables
\usepackage{graphicx} % For images
\usepackage{xcolor} % For hex colours
%\usepackage[utf8x]{inputenc} % For unicode character support
\usepackage[T1]{fontenc} % Without this we get weird character replacements
\usepackage{colortbl} % For coloured tables
\usepackage{setspace} % For line height
\usepackage{lastpage} % Needed for total page number
\usepackage{seqsplit} % Splits long words.
%\usepackage{opensans} % Can't make this work so far. Shame. Would be lovely.
\usepackage[normalem]{ulem} % For underlining links
% Most of the following are not required for the majority
% of cheat sheets but are needed for some symbol support.
\usepackage{amsmath} % Symbols
\usepackage{MnSymbol} % Symbols
\usepackage{wasysym} % Symbols
%\usepackage[english,german,french,spanish,italian]{babel} % Languages

% Document Info
\author{Abbay Kutte}
\pdfinfo{
  /Title (test.pdf)
  /Creator (Cheatography)
  /Author (Abbay Kutte)
  /Subject (Test Cheat Sheet)
}

% Lengths and widths
\addtolength{\textwidth}{6cm}
\addtolength{\textheight}{-1cm}
\addtolength{\hoffset}{-3cm}
\addtolength{\voffset}{-2cm}
\setlength{\tabcolsep}{0.2cm} % Space between columns
\setlength{\headsep}{-12pt} % Reduce space between header and content
\setlength{\headheight}{85pt} % If less, LaTeX automatically increases it
\renewcommand{\footrulewidth}{0pt} % Remove footer line
\renewcommand{\headrulewidth}{0pt} % Remove header line
\renewcommand{\seqinsert}{\ifmmode\allowbreak\else\-\fi} % Hyphens in seqsplit

% These two commands together give roughly
% the right line height in the tables
\renewcommand{\arraystretch}{1.3}
\onehalfspacing

% Commands
\newcommand{\SetRowColor}[1]{\noalign{\gdef\RowColorName{#1}}\rowcolor{\RowColorName}} % Shortcut for row colour
\newcommand{\mymulticolumn}[3]{\multicolumn{#1}{>{\columncolor{\RowColorName}}#2}{#3}} % For coloured multi-cols
\newcolumntype{x}[1]{>{\raggedright}p{#1}} % New column types for ragged-right paragraph columns
\newcommand{\tn}{\tabularnewline} % Required as custom column type in use

% Font and Colours
\definecolor{HeadBackground}{HTML}{333333}
\definecolor{FootBackground}{HTML}{666666}
\definecolor{TextColor}{HTML}{333333}
\definecolor{DarkBackground}{HTML}{1D41A3}
\definecolor{LightBackground}{HTML}{F0F3F9}
\renewcommand{\familydefault}{\sfdefault}
\color{TextColor}

% Header and Footer
\pagestyle{fancy}
\fancyhead{} % Set header to blank
\fancyfoot{} % Set footer to blank
\fancyhead[L]{
\noindent
\begin{multicols}{3}
\begin{tabulary}{5.8cm}{C}
  \SetRowColor{DarkBackground}
  \vspace{-7pt}
  {\parbox{\dimexpr\textwidth-2\fboxsep\relax}{\noindent
    \hspace*{-6pt}\includegraphics[width=5.8cm]{/web/www.cheatography.com/public/images/cheatography_logo.pdf}}
  }
\end{tabulary}
\columnbreak
\begin{tabulary}{11cm}{L}
  \vspace{-2pt}\large{\bf{\textcolor{DarkBackground}{\textrm{Test Cheat Sheet}}}} \\
  \normalsize{by \textcolor{DarkBackground}{Abbay Kutte} via \textcolor{DarkBackground}{\uline{cheatography.com/101747/cs/21182/}}}
\end{tabulary}
\end{multicols}}

\fancyfoot[L]{
\footnotesize
\noindent
\begin{multicols}{3}
\begin{tabulary}{5.8cm}{LL}
  \SetRowColor{FootBackground}
  \mymulticolumn{2}{p{5.377cm}}{\bf\textcolor{white}{Cheatographer}} \\
  \vspace{-2pt}Abbay Kutte \\
  \uline{cheatography.com/abbay-kutte} \\
\end{tabulary}
\vfill
\columnbreak
\begin{tabulary}{5.8cm}{L}
  \SetRowColor{FootBackground}
  \mymulticolumn{1}{p{5.377cm}}{\bf\textcolor{white}{Cheat Sheet}} \\
  \vspace{-2pt}Not Yet Published.\\
  Updated 20th November, 2019.\\
  Page {\thepage} of \pageref{LastPage}.
\end{tabulary}
\vfill
\columnbreak
\begin{tabulary}{5.8cm}{L}
  \SetRowColor{FootBackground}
  \mymulticolumn{1}{p{5.377cm}}{\bf\textcolor{white}{Sponsor}} \\
  \SetRowColor{white}
  \vspace{-5pt}
  %\includegraphics[width=48px,height=48px]{dave.jpeg}
  Measure your website readability!\\
  www.readability-score.com
\end{tabulary}
\end{multicols}}

\begin{document}
\raggedright
\raggedcolumns

% Set font size to small. Switch to any value
% from this page to resize cheat sheet text:
% www.emerson.emory.edu/services/latex/latex_169.html
\footnotesize % Small font.

\begin{multicols*}{4}

\begin{tabularx}{3.833cm}{X}
\SetRowColor{DarkBackground}
\mymulticolumn{1}{x{3.833cm}}{\bf\textcolor{white}{test1}} \tn
% Row 0
\SetRowColor{LightBackground}
\mymulticolumn{1}{x{3.833cm}}{%
• Stage source data in QVD files and then load from the QVDs; this avoids strain on the source systems (and possibly network bandwidth) and is much quicker, safer and more productive (sketch below). \newline
• Break out each data source's load process into its own script section and use an Exit Script statement, which can easily be moved to test each load process separately. \newline
• If possible, develop with a meaningful subset of the data, using Where and/or Exists clauses in the load to ensure you maintain relevant key matches (sketch below). \newline
• Avoid building overly large applications covering multiple use cases; it is far more efficient to create several smaller applications, each covering a discrete user journey. \newline
• Remove synthetic keys and, where possible, circular references. \newline
• Remove (or, better, comment out) all unused fields from the load. \newline
• Remove or simplify timestamps (for example, you rarely need 1/100th of a second, so you could use the Ceil function to round up to the nearest minute) or highly unique system fields. \newline
• Use Limited Load in debug mode to test the script logic before running a full reload, or use the First load prefix to limit the rows loaded. \newline
• Use AutoNumber to replace text-string-based key fields with more efficient integers (sketch below). \newline
• Remove, join or concatenate unnecessary snowflaked tables. \newline
• Avoid nested If statements; alternatives are mapping tables and Pick(Match()) in the load script, and set analysis with flag fields in the user interface (sketch below). \newline
• Consider incremental loads for large data sets that need regular updates; this reduces the load on the source system and speeds up the overall reload (sketch below).} \tn
% Row 1
\SetRowColor{white}
\mymulticolumn{1}{x{3.833cm}}{\textbackslash{}\textbackslash{}eudvmmsqs501\textbackslash{}dev\textbackslash{}1000.Data\_QVD\_Layer\textbackslash{}1.QVD\textbackslash{}1.Extract\textbackslash{}QV\_QVD\_Before\textbackslash{}} \tn
\hhline{>{\arrayrulecolor{DarkBackground}}-}
\end{tabularx}
\par\addvspace{1.3em}
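The QVD staging point above can be illustrated with a minimal load-script sketch. The file, table and field names (\texttt{orders.csv}, \texttt{Orders.qvd}, \texttt{OrderID} and so on) are assumptions for illustration, not part of the original sheet: the extract step stores the source table into a QVD once, and downstream apps reload from that QVD instead of hitting the source system again.
{\scriptsize
\begin{verbatim}
// Hypothetical names;
// extract once from
// the source system
Orders:
LOAD OrderID,
     CustomerID,
     Amount
FROM [orders.csv]
(txt, utf8,
 embedded labels,
 delimiter is ',');

// Stage as a QVD and
// free the in-memory copy
STORE Orders
  INTO [Orders.qvd] (qvd);
DROP TABLE Orders;

// Downstream apps load
// from the QVD, not
// from the source
Orders:
LOAD *
FROM [Orders.qvd] (qvd);
\end{verbatim}
}
\par\addvspace{1.3em}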
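Developing against a meaningful subset, limiting rows and testing script sections in isolation (the Where/Exists, Limited Load/First and Exit Script points above) might look like the sketch below; the tables, fields and the 2019 filter are hypothetical.
{\scriptsize
\begin{verbatim}
// Hypothetical tables and
// fields for illustration
Customers:
LOAD CustomerID,
     Region
FROM [Customers.qvd] (qvd)
WHERE Year(Created)=2019;

// Cap rows while testing
// and keep only keys that
// already exist above
Sales:
First 1000
LOAD CustomerID,
     OrderID,
     Amount
FROM [Sales.qvd] (qvd)
WHERE Exists(CustomerID);

// Stop here while testing
// the sections above
Exit Script;
\end{verbatim}
}
\par\addvspace{1.3em}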
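The AutoNumber and Pick(Match()) points above can be sketched as follows, again with hypothetical field names: AutoNumber turns a wide composite text key into a compact integer, and Pick(Match()) replaces a chain of nested If statements with a single expression.
{\scriptsize
\begin{verbatim}
// Hypothetical fields;
// one load, no extra
// resident table
Orders:
LOAD
  AutoNumber(CustomerID
    & '|' & OrderDate)
    as %OrderKey,
  Amount,
  Pick(
    Match(Status,
      'O', 'C', 'P'),
    'Open',
    'Closed',
    'Pending')
    as StatusText
FROM [Orders.qvd] (qvd);
\end{verbatim}
}
\par\addvspace{1.3em}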
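Finally, a sketch of the incremental load pattern mentioned above, assuming a \texttt{ModifiedDate} column and a \texttt{vLastReload} variable maintained elsewhere in the script (both hypothetical): only new or changed rows are read from the source, the unchanged history is appended back from the existing QVD, and the combined table is stored out again.
{\scriptsize
\begin{verbatim}
// New or changed rows
// only, based on a
// hypothetical timestamp
Orders:
LOAD OrderID,
     Amount,
     ModifiedDate
FROM [orders.csv]
(txt, utf8,
 embedded labels,
 delimiter is ',')
WHERE ModifiedDate >=
  '$(vLastReload)';

// Append the unchanged
// history from the QVD
Concatenate (Orders)
LOAD OrderID,
     Amount,
     ModifiedDate
FROM [Orders.qvd] (qvd)
WHERE NOT Exists(OrderID);

// Store the refreshed QVD
STORE Orders
  INTO [Orders.qvd] (qvd);
\end{verbatim}
}
\par\addvspace{1.3em}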
% That's all folks
\end{multicols*}
\end{document}