feat: Add site data

Robert Prehn 2023-09-22 12:58:31 +00:00
parent 3fc5e9cde6
commit 005d0f6515
37 changed files with 1384 additions and 12 deletions

41
assets/css/app.css Normal file
View file

@ -0,0 +1,41 @@
@import "tailwindcss/base";
@import "tailwindcss/components";
@import "tailwindcss/utilities";
html {
--base-font-size: 16.666667;
--base-line-height: 1.32;
--font-size: calc(var(--base-font-size) * 1px);
--line-height: calc(var(--base-line-height) * 1rem);
--base-vspace: calc(var(--base-line-height) * var(--base-font-size));
--vspace: calc(var(--base-vspace) * 1px);
font-size: var(--font-size);
line-height: var(--line-height);
}
.container {
max-width: 80ch;
}
.box {
--tw-border-opacity: 1;
border-color: rgb(181 190 255 / var(--tw-border-opacity));
margin: 10.5px 4.5px;
padding: 10.5px 14.5px;
}
p {
margin-bottom: var(--vspace);
}
@font-face {
font-family: "JetBrainsMono";
src: url("/fonts/JetBrainsMono-VariableFont_wght.ttf");
font-style: normal;
}
@font-face {
font-family: "JetBrainsMono";
src: url("/fonts/JetBrainsMono-Italic-VariableFont_wght.ttf");
font-style: italic;
}

0
assets/js/app.js Normal file
View file

View file

@ -4,8 +4,7 @@
module.exports = {
content: [
"./js/**/*.js",
"../lib/*_web.ex",
"../lib/*_web/**/*.*ex"
"../lib/**/*.ex"
],
theme: {
colors: {
@ -18,7 +17,40 @@ module.exports = {
blue: 'rgb(93, 110, 238)',
magenta: 'rgb(211, 48, 233)',
cyan: 'rgb(139, 243, 231)',
light: 'rgb(167, 176, 241)'
light: 'rgb(181, 190, 255)'
},
fontSize: {
// xs: ['0.75rem', { lineHeight: '1rem' }],
// sm: ['0.875rem', { lineHeight: '1.25rem' }],
base: ['1rem', { lineHeight: '1rem' }],
// lg: ['1.125rem', { lineHeight: '1.75rem' }],
// xl: ['1.25rem', { lineHeight: '1.75rem' }],
// '2xl': ['1.5rem', { lineHeight: '2rem' }],
// '3xl': ['1.875rem', { lineHeight: '2.25rem' }],
// '4xl': ['2.25rem', { lineHeight: '2.5rem' }],
// '5xl': ['3rem', { lineHeight: '1' }],
// '6xl': ['3.75rem', { lineHeight: '1' }],
// '7xl': ['4.5rem', { lineHeight: '1' }],
// '8xl': ['6rem', { lineHeight: '1' }],
// '9xl': ['8rem', { lineHeight: '1' }],
},
lineHeight: {
normal: '1'
},
extend: {
fontFamily: {
mono: [
'JetBrainsMono',
'ui-monospace',
'SFMono-Regular',
'Menlo',
'Monaco',
'Consolas',
'"Liberation Mono"',
'"Courier New"',
'monospace',
],
}
}
},
plugins: [

View file

@ -10,3 +10,16 @@ config :tailwind,
),
cd: Path.expand("../assets", __DIR__)
]
config :esbuild,
version: "0.19.3",
default: [
args:
~w(js/app.js --bundle --target=es2016 --outdir=../priv/static/assets/ --servedir=../priv/static/),
cd: Path.expand("../assets", __DIR__),
env: %{"NODE_PATH" => Path.expand("../deps", __DIR__)}
]
config :tree_sitter, :version, "0.20.8"
config :pre_dot_hn, host: "pre.hn"

View file

@ -3,16 +3,104 @@ defmodule PreDotHn do
Documentation for `PreDotHn`.
"""
@doc """
Hello world.
use Phoenix.Component
## Examples
require Logger
iex> PreDotHn.hello()
:world
import Phoenix.LiveViewTest, only: [rendered_to_string: 1]
"""
def hello do
:world
alias PreDotHn.Frontmatter
alias PreDotHn.Markdown
def read(path) do
path
|> File.read!()
|> Frontmatter.front_matter_split()
|> then(fn {frontmatter_text, body_text} ->
{Frontmatter.make_frontmatter(path, frontmatter_text), body_text}
end)
|> then(fn {frontmatter, body_text} ->
body = Markdown.render(body_text)
Map.merge(frontmatter, %{"body" => body, "path" => path})
end)
end
def validate(%{"slug" => nil, "path" => path}) do
{:error, "slug missing from #{path}"}
end
def validate(%{"slug" => "", "path" => path}) do
{:error, "slug missing from #{path}"}
end
def validate(other), do: {:ok, other}
def run() do
"site/**/*.md"
|> Path.wildcard()
|> Enum.map(&read/1)
|> Enum.map(&validate/1)
|> Enum.filter(fn
{:error, error} ->
Logger.warn(error)
false
{:ok, _other} ->
true
end)
|> Enum.map(fn {:ok, page} ->
page =
page |> Enum.map(fn {key, value} -> {String.to_atom(key), value} end) |> Enum.into(%{})
write_page(page)
end)
end
slot(:inner_block, required: true)
def layout(assigns) do
~H"""
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>A Basic HTML5 Template</title>
<meta name="description" content="A simple HTML5 Template for new projects.">
<meta name="author" content="SitePoint">
<link rel="icon" href="/favicon.ico">
<link rel="icon" href="/favicon.svg" type="image/svg+xml">
<link rel="apple-touch-icon" href="/apple-touch-icon.png">
<link rel="stylesheet" href="/assets/app.css?v=1.0">
</head>
<body class="bg-dark font-mono text-light">
<%= render_slot(@inner_block) %>
</body>
</html>
"""
end
def write_page(%{slug: slug} = assigns) do
filename = "#{slug}.html"
path = Path.join(["priv", "static", filename])
~H"""
<.layout>
<main class="container mx-auto">
<section class="box border border-light">
<%= {:safe, @body} %>
</section>
</main>
</.layout>
"""
|> rendered_to_string()
|> then(&File.write!(path, &1))
end
end

View file

@ -0,0 +1,53 @@
defmodule PreDotHn.Frontmatter do
@frontmatter_pattern ~r/---\r?\n/
def make_frontmatter(path, frontmatter_text) do
case YamlElixir.read_from_string(frontmatter_text) do
{:ok, fm} ->
fm
_other ->
%{}
end
|> merge_frontmatter_defaults(path)
end
def merge_frontmatter_defaults(fm, path) do
{date, slug} = path_parts(path)
fm
|> Map.put_new_lazy("slug", fn ->
slug
end)
|> Map.put_new_lazy("date", fn ->
date
end)
|> Map.put_new_lazy("title", fn ->
""
end)
end
@path_parts ~r/([0-9]{4}-[0-9]{2}-[0-9]{2})?-?(.*)?/
def path_parts(path) do
filename = Path.basename(path)
[basename | _] = String.split(filename, ".")
[_, date_part, slug_part] = Regex.run(@path_parts, basename)
date_part = if date_part == "", do: nil, else: date_part
slug_part = if slug_part == "", do: date_part, else: slug_part
{date_part, slug_part}
end
def front_matter_split(body) do
case String.split(body, @frontmatter_pattern, parts: 3) do
["", frontmatter, body] ->
{frontmatter, body}
_other ->
{"", body}
end
end
end

View file

@ -0,0 +1,65 @@
defmodule PreDotHn.Markdown do
@default_opts [pure_links: true, wikilinks: true, inner_html: false]
def render(source, opts \\ []) do
opts = Keyword.merge(@default_opts, opts, fn _key, _default_value, value -> value end)
inner_html = Keyword.get(opts, :inner_html, false)
EarmarkParser.as_ast(source || "", opts)
|> case do
{:ok, ast, _} ->
ast
|> Earmark.Transform.map_ast(&transformer/1)
|> Enum.map(&maybe_remove_para(&1, inner_html))
other ->
other
end
|> Earmark.Transform.transform()
end
defp transformer({"a", attrs, ignored, %{wikilink: true} = meta}) do
attrs =
attrs
|> Enum.map(fn
{"href", href} ->
{"href", "/notes#{href}"}
other ->
other
end)
{"a", attrs, ignored, meta}
end
defp transformer({"a", attrs, ignored, meta}) do
href = Enum.find_value(attrs, "", fn {key, value} -> key == "href" && value end)
href_uri = URI.parse(href)
is_internal = href_uri.host in [host(), nil]
attrs =
if is_internal do
attrs
else
[{"target", "_blank"} | attrs]
end
{"a", attrs, ignored, meta}
end
defp transformer(other), do: other
defp maybe_remove_para(node, false), do: node
defp maybe_remove_para({"p", _attrs, children, _meta}, true),
do: Enum.map(children, &add_trailing_newline/1)
defp maybe_remove_para(other, true), do: other
defp add_trailing_newline(string) when is_binary(string), do: "#{string}\n"
defp add_trailing_newline(other), do: other
defp host(), do: Application.get_env(:pre_dot_hn, :host)
end

14
mix.exs
View file

@ -7,6 +7,7 @@ defmodule PreDotHn.MixProject do
version: "0.1.0",
elixir: "~> 1.14",
start_permanent: Mix.env() == :prod,
aliases: aliases(),
deps: deps()
]
end
@ -21,11 +22,22 @@ defmodule PreDotHn.MixProject do
# Run "mix help deps" to learn about dependencies.
defp deps do
[
{:yaml_elixir, "~> 2.9.0"},
{:esbuild, "~> 0.7.1"},
{:tailwind, "~> 0.2.1"},
{:phoenix_live_view, "~> 0.19.5"}
{:phoenix_live_view, "~> 0.19.5"},
{:rustler, "~> 0.29.1"},
{:tree_sitter, path: "../tree_sitter"},
{:earmark_parser, "~> 1.4"},
{:earmark, "~> 1.4"}
# {:dep_from_hexpm, "~> 0.3.0"},
# {:dep_from_git, git: "https://github.com/elixir-lang/my_dep.git", tag: "0.1.0"}
]
end
def aliases() do
[
serve: ["do esbuild default --serve + tailwind default --watch"]
]
end
end

View file

@ -1,6 +1,9 @@
%{
"castore": {:hex, :castore, "1.0.3", "7130ba6d24c8424014194676d608cb989f62ef8039efd50ff4b3f33286d06db8", [:mix], [], "hexpm", "680ab01ef5d15b161ed6a95449fac5c6b8f60055677a8e79acf01b27baa4390b"},
"earmark": {:hex, :earmark, "1.4.43", "2024a0e9fe9bd5ef78fb9c87517de6c6d7deaf1cffdf6572fac3dd49cb34c433", [:mix], [], "hexpm", "958011ea938bc4018797bda3f8d0c871ab04621785bedc1e7188fb079dea2f5b"},
"earmark_parser": {:hex, :earmark_parser, "1.4.35", "437773ca9384edf69830e26e9e7b2e0d22d2596c4a6b17094a3b29f01ea65bb8", [:mix], [], "hexpm", "8652ba3cb85608d0d7aa2d21b45c6fad4ddc9a1f9a1f1b30ca3a246f0acc33f6"},
"esbuild": {:hex, :esbuild, "0.7.1", "fa0947e8c3c3c2f86c9bf7e791a0a385007ccd42b86885e8e893bdb6631f5169", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}], "hexpm", "66661cdf70b1378ee4dc16573fcee67750b59761b2605a0207c267ab9d19f13c"},
"jason": {:hex, :jason, "1.4.1", "af1504e35f629ddcdd6addb3513c3853991f694921b1b9368b0bd32beb9f1b63", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "fbb01ecdfd565b56261302f7e1fcc27c4fb8f32d56eab74db621fc154604a7a1"},
"mime": {:hex, :mime, "2.0.5", "dc34c8efd439abe6ae0343edbb8556f4d63f178594894720607772a041b04b02", [:mix], [], "hexpm", "da0d64a365c45bc9935cc5c8a7fc5e49a0e0f9932a761c55d6c52b142780a05c"},
"phoenix": {:hex, :phoenix, "1.7.7", "4cc501d4d823015007ba3cdd9c41ecaaf2ffb619d6fb283199fa8ddba89191e0", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix_pubsub, "~> 2.1", [hex: :phoenix_pubsub, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.6", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:plug_crypto, "~> 1.2", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:websock_adapter, "~> 0.5.3", [hex: :websock_adapter, repo: "hexpm", optional: false]}], "hexpm", "8966e15c395e5e37591b6ed0bd2ae7f48e961f0f60ac4c733f9566b519453085"},
"phoenix_html": {:hex, :phoenix_html, "3.3.2", "d6ce982c6d8247d2fc0defe625255c721fb8d5f1942c5ac051f6177bffa5973f", [:mix], [{:plug, "~> 1.5", [hex: :plug, repo: "hexpm", optional: true]}], "hexpm", "44adaf8e667c1c20fb9d284b6b0fa8dc7946ce29e81ce621860aa7e96de9a11d"},
@ -9,8 +12,12 @@
"phoenix_template": {:hex, :phoenix_template, "1.0.3", "32de561eefcefa951aead30a1f94f1b5f0379bc9e340bb5c667f65f1edfa4326", [:mix], [{:phoenix_html, "~> 2.14.2 or ~> 3.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}], "hexpm", "16f4b6588a4152f3cc057b9d0c0ba7e82ee23afa65543da535313ad8d25d8e2c"},
"plug": {:hex, :plug, "1.14.2", "cff7d4ec45b4ae176a227acd94a7ab536d9b37b942c8e8fa6dfc0fff98ff4d80", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "842fc50187e13cf4ac3b253d47d9474ed6c296a8732752835ce4a86acdf68d13"},
"plug_crypto": {:hex, :plug_crypto, "1.2.5", "918772575e48e81e455818229bf719d4ab4181fcbf7f85b68a35620f78d89ced", [:mix], [], "hexpm", "26549a1d6345e2172eb1c233866756ae44a9609bd33ee6f99147ab3fd87fd842"},
"rustler": {:hex, :rustler, "0.29.1", "880f20ae3027bd7945def6cea767f5257bc926f33ff50c0d5d5a5315883c084d", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:toml, "~> 0.6", [hex: :toml, repo: "hexpm", optional: false]}], "hexpm", "109497d701861bfcd26eb8f5801fe327a8eef304f56a5b63ef61151ff44ac9b6"},
"tailwind": {:hex, :tailwind, "0.2.1", "83d8eadbe71a8e8f67861fe7f8d51658ecfb258387123afe4d9dc194eddc36b0", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}], "hexpm", "e8a13f6107c95f73e58ed1b4221744e1eb5a093cd1da244432067e19c8c9a277"},
"telemetry": {:hex, :telemetry, "1.2.1", "68fdfe8d8f05a8428483a97d7aab2f268aaff24b49e0f599faa091f1d4e7f61c", [:rebar3], [], "hexpm", "dad9ce9d8effc621708f99eac538ef1cbe05d6a874dd741de2e689c47feafed5"},
"toml": {:hex, :toml, "0.7.0", "fbcd773caa937d0c7a02c301a1feea25612720ac3fa1ccb8bfd9d30d822911de", [:mix], [], "hexpm", "0690246a2478c1defd100b0c9b89b4ea280a22be9a7b313a8a058a2408a2fa70"},
"websock": {:hex, :websock, "0.5.3", "2f69a6ebe810328555b6fe5c831a851f485e303a7c8ce6c5f675abeb20ebdadc", [:mix], [], "hexpm", "6105453d7fac22c712ad66fab1d45abdf049868f253cf719b625151460b8b453"},
"websock_adapter": {:hex, :websock_adapter, "0.5.4", "7af8408e7ed9d56578539594d1ee7d8461e2dd5c3f57b0f2a5352d610ddde757", [:mix], [{:bandit, ">= 0.6.0", [hex: :bandit, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.6", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "d2c238c79c52cbe223fcdae22ca0bb5007a735b9e933870e241fce66afb4f4ab"},
"yamerl": {:hex, :yamerl, "0.10.0", "4ff81fee2f1f6a46f1700c0d880b24d193ddb74bd14ef42cb0bcf46e81ef2f8e", [:rebar3], [], "hexpm", "346adb2963f1051dc837a2364e4acf6eb7d80097c0f53cbdc3046ec8ec4b4e6e"},
"yaml_elixir": {:hex, :yaml_elixir, "2.9.0", "9a256da867b37b8d2c1ffd5d9de373a4fda77a32a45b452f1708508ba7bbcb53", [:mix], [{:yamerl, "~> 0.10", [hex: :yamerl, repo: "hexpm", optional: false]}], "hexpm", "0cb0e7d4c56f5e99a6253ed1a670ed0e39c13fc45a6da054033928607ac08dfc"},
}

16
site/pages/about.md Normal file
View file

@ -0,0 +1,16 @@
---
date: 2018-12-06
title: About Robert Prehn
slug: index
---
My name is Robert Prehn.
I'm a software engineering leader and independent consultant. You can hire me to help your software team ship products better and faster. I'm also a Worker-Owner at [Mythic Insight](https://www.mythicinsight.com/).
A better future is possible!
<p class="h-card">
<img alt="Photo of Robert Prehn" class="Gravatar u-photo" src="https://www.gravatar.com/avatar/925bde4cfd5d42530618204b73200d52">
<a class="p-name u-url" href="https://pre.hn">Robert Prehn</a> | <a class="u-key" href="https://keybase.io/prehnra/pgp_keys.asc">Public Key</a> | <a href="https://github.com/prehnRA" rel="me">GitHub</a> | <a href="https://keybase.io/prehnra" rel="me">Keybase</a> | <a href="https://mastodon.social/@prehnra" rel="me">Fediverse</a>
</p>

View file

@ -0,0 +1,49 @@
---
title: The PERMA Model
date: 2019-02-20
slug: what-is-the-perma-model
---
The PERMA model is a theoretical model of wellbeing designed by Martin Seligman as part of a branch of psychology known as "positive psychology." I originally ran across it as part of [The Science of Well-Being](https://www.coursera.org/learn/the-science-of-well-being) course that I took last year. Seligman and others designed PERMA as a simple, but scientifically-validated model for understanding well-being and happiness. People use the PERMA model in their day to day lives to think about, and improve, their well-being. It is also used in research on well-being in positive psychology.
PERMA is an acronym and each letter represents one of the components of the model:
- <strong>P</strong>ositive Emotions
- <strong>E</strong>ngagement
- <strong>R</strong>elationships
- <strong>M</strong>eaning
- <strong>A</strong>chievement
**Positive Emotions.** Clearly, positive emotions are strongly connected to happiness. In this case, positive emotions specifically refers to feelings of joy, positivity, and contentment.
**Engagement.** Engagement means how often you become absorbed in what you are doing and lose track of time. Research shows that getting into a "flow" state when working or doing a hobby leads to a feeling of satisfaction.
**Relationships.** Our connections to other people also drive our well-being. Feeling that people love and support us contributes to our sense of well-being.
**Meaning.** We want to feel like the things we are doing matter and that we have a purpose.
**Achievement.** A feeling that we set goals, make progress toward them, and achieve them contributes to our sense of well-being.
Psychologists have experimentally shown that each of these five components materially correlates with well-being and happiness.
The PERMA model is also the basis for the PERMA Profiler, which assesses these factors as a numerical score. This is useful because we can use this score to quantitatively track well-being. We can do experiments to see which other techniques and factors might influence well-being.
## How to calculate the PERMA Profiler score
Julie Butler and Margaret L. Kern developed the PERMA Profiler as a brief way to measure the PERMA factors and other factors in general well-being.
The PERMA Profiler is a 23 question survey. It includes 3 questions for each of the five PERMA categories (15 PERMA questions), plus 3 questions on negative emotion, 3 questions on health, 1 question on loneliness, and 1 question on general sense of well-being. We rate each question on a relevant 0 to 10 scale-- for example "0 Never to 10 Always" or "0 Terrible to 10 Excellent." To calculate the PERMA Profiler overall well-being score, you average the 15 PERMA questions and the general well-being question. The other seven questions disrupt answering tendencies, which makes the answers more accurate. They also record other information relevant for positive psychology researchers.
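To make the averaging concrete, here is a minimal Elixir sketch of that calculation (the module and function names are mine, not anything from the Profiler itself): it takes the 15 PERMA ratings plus the single overall well-being rating, each on the 0 to 10 scale, and returns their mean.
```elixir
defmodule PermaScore do
  # perma_answers: the 15 PERMA item ratings (each 0-10)
  # wellbeing: the single overall well-being rating (0-10)
  def overall(perma_answers, wellbeing) when length(perma_answers) == 15 do
    answers = [wellbeing | perma_answers]
    Enum.sum(answers) / length(answers)
  end
end

# PermaScore.overall(List.duplicate(7, 15), 8) #=> 7.0625
```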
Research psychologists have experimentally validated the PERMA Profiler measure in several different contexts. They have also compared and correlated the PERMA Profiler to a lot of other measures of well-being. If you'd like to read the original paper, [it is freely available here](http://internationaljournalofwellbeing.org/index.php/ijow/article/download/526/579).
## My Experience With PERMA and the PERMA Profiler
I'm a person who has to apply a lot of techniques to regulate my mood and my energy levels. PERMA feels like it correlates well to how I feel on any given day. This makes it useful for self-experimentation, and it has become part of my self-evaluation of my feelings.
As I said, I first learned about PERMA through [The Science of Well-Being](https://www.coursera.org/learn/the-science-of-well-being) course that I took last year. Throughout the course, I took the PERMA Profiler every week and made note of my score each time. I also made notes about my emotional and physical health at the time I took the PERMA survey. In my experience, PERMA does a fairly good job of mirroring my feelings of well-being or lack thereof. On my worst days, I'll score a 4.5 out of 10. My best days are an eight. My most typical score is around a seven these days.
I created the PERMA profiler as a Google Form. Since the summary score is just an average of 16 of the 23 questions, it was easy to compute. This format makes it easy for me to compare the results to other factors, like how many tasks I do in Todoist or whether I checked off my habits in Habitica.
I've found it useful as a data point I can measure about myself and compare with other factors. Does PERMA go up when I exercise? Does it go down when I have a lot of meetings? Or alternately, am I more likely to stick to my positive habits if my PERMA score is high?
I recognize that I've gamified this well-being score. I realize that by knowing about how the score works, I may be influencing my results. I can't say for sure that this attention I'm placing on the number isn't distorting my results. I worry that I'm putting pressure on myself to increase the _score_, rather than well-being. But I _can_ say that I feel better and that's the point.

View file

@ -0,0 +1,64 @@
---
title: Minimal Phoenix and Elixir Dockerfile Example
---
Recently, I was setting up a Dockerfile for a Phoenix web app. There are official **Elixir** Docker images, but finding one specifically set up to handle the Phoenix Framework with asset compilation is messier territory. There's no single set of "official" configurations.
Everybody has an opinion about how you should do this. A lot of these opinions involve adding a lot of bells and whistles. For one, their preferred solution might include Docker Compose. For another, Distillery releases. Since I had not successfully deployed my app with Docker yet at all, I wanted fewer variables to debug.
Here's what I came up with:
```dockerfile
FROM elixir:1.8.0-alpine
#### If needed for native dependencies
RUN apk add --no-cache make gcc libc-dev
ENV CC=gcc
ENV MAKE=cmake
#####
RUN mix local.hex --force \
&& mix local.rebar --force
WORKDIR /root/app
ADD ./ /root/app/
EXPOSE 4000
ENV MIX_ENV=prod
ENV PORT=4000
RUN mix deps.get
RUN mix deps.compile
RUN mix compile
RUN mix phx.digest
CMD ["mix", "phx.server"]
```
This starts with the elixir alpine image. Alpine is a skinny Linux that works well in containers. I've found that it is a suitable base for Elixir apps. In my case, I needed a C toolchain to compile some libraries. You might not need that part. Then it sets up hex and rebar for fetching and building dependencies. Then it adds the application directory. It sets the default port and environment. It fetches the dependencies, compiles them, compiles the app, and digests the assets. Then, it starts the server. That's it.
This approach follows the minimal instructions for a production Phoenix deployment on Docker, with no extras. From there, you can add ~complexity~ more features if you would like.
## Bonus: .dockerignore for Phoenix and Elixir Projects
Here's the .dockerignore file I use:
```
.git
Dockerfile
# Build artifacts
_build
deps
*.ez
# Crash dumps from Erlang VM
erl_crash.dump
# NPM dependencies added by asset pipeline
node_modules
```

View file

@ -0,0 +1,29 @@
---
title: "Elixir Programming Language and the Phoenix Framework: What can you build with them?"
---
You can use the Elixir programming language to build anything that you can build in any other programming language. It has a great framework for web applications called [Phoenix](https://phoenixframework.org/). It can be used in embedded systems— see [Nerves](https://github.com/nerves-project/nerves). It can be used for anything in between.
When people ask [what Elixir can be used for](https://www.quora.com/What-is-Elixir-programming-language-used-for), (or [here](https://www.amberbit.com/blog/2018/5/15/when-to-use-elixir-language/)) the common replies are “chat servers”, “telecomm switches”, or “APIs.” I think the reason these are the common replies is that Elixir really shines in areas where you need to handle very high volumes, have high reliability, and do real-time or near-real-time communication. And Elixir is good for those cases, but it also excels as a language for making systems that don't need to handle huge traffic or real-time communication.
Phoenix, as a framework, can essentially do everything Rails, Django, Laravel, or Spring can do. It has models (ok, schema structs), views, and controllers. In my experience, it has some serious advantages over Rails, Django, Laravel, or Spring. For one, yes, it is faster. Responses come in microseconds, not seconds or milliseconds. To me, that's not the most important thing. What's more important: the architects of Phoenix learned from the missteps of those other frameworks. Phoenix and [Ecto](https://hexdocs.pm/ecto/Ecto.html) (the persistence wrapper) made better choices.
To cherry pick one example: in Ecto, you have to explicitly say which related data you want fetched from the database. In ActiveRecord and Rails, if you miss a join, Rails will just load the related records when you need them. That sounds great until you put it into a loop. Rails will quietly and diligently ping your database with many queries in a row, fetching one extra record at a time. Ecto instead asks you: “Hey, did you want this? Because you didn't ask for it.” It forces you to be clear, and in forcing you to be clear, it can be efficient.
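To illustrate what that looks like in practice, here is a rough sketch (the `Post` schema, the `:comments` association, and the `Repo` module are hypothetical, not from any particular app): in Ecto you ask for related data explicitly, and a forgotten preload fails loudly instead of quietly firing one query per record.
```elixir
import Ecto.Query

# Ask for the association up front, as part of the query...
posts = Repo.all(from p in Post, preload: [:comments])

# ...or preload after the fact, still as one extra query for the whole list:
posts = Post |> Repo.all() |> Repo.preload(:comments)

# If you skip both, post.comments is %Ecto.Association.NotLoaded{} rather
# than a silent N+1 query loop.
```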
To pick another example: in Phoenix, your entire request-to-response circuit is just a series of functions, output from one piped as input to the next. Each receives a connection and returns a connection which may or may not be different. Most of those functions sit directly in your application source code. The ones that don't are clearly invoked from within your source code. Need to add a new junction in the chain? You go into your code and add the junction. This is how Plug works. In Rails, most of the processing of requests and responses is hidden. It lives within the Rails framework code. To modify it, you'd better hope that the designers of Rails left an appropriate config variable or lifecycle callback. Otherwise, you just have to “patch” Rails in memory. And you'd better hope that you patch the right spot and that the patch loads correctly. If the Rails team renames the methods or classes that you patched— your patch falls off and your application breaks.
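As a sketch of that idea (this plug is made up for illustration, not taken from a real project): a module plug is just an `init/1` and a `call/2` that receives a `%Plug.Conn{}` and returns one, and you add it to the chain by listing it wherever the pipeline is defined.
```elixir
defmodule MyApp.Plugs.ExampleHeader do
  @moduledoc "A tiny plug: takes a conn, returns a (possibly modified) conn."
  import Plug.Conn

  def init(opts), do: opts

  def call(conn, _opts) do
    put_resp_header(conn, "x-example", "hello")
  end
end

# One more junction in the chain, e.g. in a Phoenix router pipeline:
#
#     pipeline :browser do
#       plug MyApp.Plugs.ExampleHeader
#     end
```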
So, what can you use Elixir for? What can you use Phoenix for? I personally use it for everything unless I have a good reason not to. It's been a long time since I've had a reason to use something else.
## Bonus Q&A: Ok, but what have you built in Elixir?
My team and I have built, with Elixir:
- An incentive platform for software developers
- Tons of business workflow automation and management software for several industries
- Real estate tools
- Event booking software
- Local business directories
- Content management systems
- Customer relationship management software
- Chat bots

View file

@ -0,0 +1,29 @@
---
title: This Weekend I Read… (2019-03-25)
slug: twir-2019-03-25
---
Here are some interesting (and sometimes scary) things I read this weekend:
## Facebook Content Reviewers Have A Hellish Job
Content reviewers at Facebook are constantly subjected to horrifying videos of violence, racism, and conspiracy theories, and they aren't being given the support they need. Some might wave this off as “the nature of the job,” but I think like anyone in a hazardous job they should be given the right support structure, safety equipment, and hazard pay. Instead, the moderators (who are outside contractors) are paid far less than the average Facebook employee, and are subjected to a high pressure environment where they watch 2,400 traumatizing videos in an 8 hour shift (4 per minute) and managers time their bathroom breaks.
- [Some Facebook content reviewers in India complain of low pay, high pressure](https://www.reuters.com/article/us-facebook-content-india-feature-idUSKCN1QH15I) (Munsif Vengattil, Paresh Dave @ Reuters)
- [THE TRAUMA FLOOR: The secret lives of Facebook moderators in America](https://www.theverge.com/2019/2/25/18229714/cognizant-facebook-content-moderator-interviews-trauma-working-conditions-arizona) (Casey Newton @ The Verge)
## The Real Reason for the 40 Hour Work Week
David Cain, writing at his blog Raptitude (“Getting better at being human”), puts forward an interesting theory about why we still have the 40 hour work week even though [the average office worker is productive only 3 hours a day](https://www.inc.com/melanie-curtin/in-an-8-hour-day-the-average-worker-is-productive-for-this-many-hours.html) and [productivity has been steadily increasing in the years since the 40 hour work week was won](https://en.wikipedia.org/wiki/Real_wages#/media/File:US_productivity_and_real_wages.jpg). His theory: it isn't about labor supply, but instead it is about the demands for goods and services. Tired, time-constrained workers want more creature comforts, and buy more convenience items (fast food). They also prefer hobbies which take less time and energy, but more money (e.g. TV, movies, fast fashion) over hobbies which are cheap or free, but time consuming (e.g. reading, gardening, DIY crafting). His anecdotal observations which are woven throughout— such as developing a habit for expensive takeaway coffee after getting a new high-stress job— jibe with my experience as well.
- [Your Lifestyle Has Already Been Designed](https://www.raptitude.com/2010/07/your-lifestyle-has-already-been-designed/) (David Cain @ Raptitude)
## The Life Changing Magic Manga of Tidying Up
Did you know that there is a [graphic novel](https://smile.amazon.com/dp/0399580530/) version of Marie Kondo's The Life Changing Magic of Tidying Up? It's a fast read (about 180 pages, mostly pictures) and it is overflowing with charm and wholesome energy. I think it could serve either as a good introduction to the KonMari technique or as a quick refresher.
- [The Life Changing Manga of Tidying Up @ Amazon](https://smile.amazon.com/dp/0399580530/)

View file

@ -0,0 +1,298 @@
---
title: Setting up CI/CD for Docker and Kubernetes Using Drone
---
I have been a Travis CI user. However, Travis has gotten less reliable for me lately. On top of that, I have qualms about how the acquisition of Travis by Idera, and the subsequent layoffs, were handled. Travis is also a square peg in the octagonal hole of my Kubernetes environment. It is a hosted, external service. Everything else I use to develop my applications is hosted inside of my cluster. My Docker registry and my gitops operator run in my cluster. My databases are in my cluster. My storage provider and object store run in cluster. My apps run in cluster. Why would I run my CI/CD service outside of the cluster?
I came across Drone in my research about alternatives. Drone is a fully container-native, container-loving CI solution. It's Docker all the way down. Since my application is already "Dockerified", my hosting environment is all Docker all day, and my deployments are already in the form of a Docker push, why not do CI/CD in Docker as well?
Drone is basically a small framework for running CI jobs made of docker containers. You build your pipeline as a series of steps, each of which is a docker base image, some configuration, and your test commands. Drone also has all the standard integrations you would expect for a CI service-- it talks to GitHub, GitLab, Bitbucket, and more.
To use Drone, you'll have to embrace the Docker way. Let go of your test scripts that are building everything from a ubuntu image or a language version manager. Let go of the "special case magic" way DBs and other supporting services are handled in other CI platforms. In exchange, you'll find that Drone will let you use any language, any tools, and any services, so long as they have a Docker image.
## Drone Setup on Kubernetes
I followed [this official (but experimental) guide](https://docs.drone.io/installation/github/kubernetes/) for setting up Drone on Kubernetes.
Here's the configuration I used for Kubernetes:
```yaml
---
apiVersion: v1
kind: Namespace
metadata:
  name: drone
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  labels:
    app: drone
  name: drone
  namespace: drone
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: drone
      name: drone
      namespace: drone
    spec:
      containers:
        - name: drone
          image: drone/drone:1.0.0
          env:
            - name: DRONE_KUBERNETES_ENABLED
              value: "true"
            - name: DRONE_KUBERNETES_NAMESPACE
              value: "drone"
            - name: DRONE_GITHUB_SERVER
              value: "https://github.com"
            - name: DRONE_GITHUB_CLIENT_ID
              value: "REDACTED"
            - name: DRONE_GITHUB_CLIENT_SECRET
              value: "REDACTED"
            - name: DRONE_RPC_SECRET
              value: "REDACTED"
            - name: DRONE_SERVER_HOST
              value: "REDACTED.example.com"
            - name: DRONE_SERVER_PROTO
              value: "https"
            - name: DRONE_USER_FILTER
              value: "prehnRA"
            - name: DRONE_USER_CREATE
              value: username:prehnRA,admin:true
            - name: DRONE_DATABASE_DRIVER
              value: postgres
            - name: DRONE_DATABASE_DATASOURCE
              valueFrom:
                secretKeyRef:
                  name: drone-postgres-url
                  key: url
          ports:
            - containerPort: 80
            - containerPort: 443
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: drone
  name: drone
  namespace: drone
spec:
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: 80
  selector:
    app: drone
  type: LoadBalancer
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    ingress.kubernetes.io/ssl-redirect: "true"
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    kubernetes.io/tls-acme: "true" # enable certificates
    certmanager.k8s.io/cluster-issuer: letsencrypt
    kubernetes.io/ingress.class: "nginx"
  labels:
    app: drone
  name: drone
  namespace: drone
spec:
  rules:
    - host: REDACTED.example.com
      http:
        paths:
          - backend:
              serviceName: drone
              servicePort: 80
            path: /
  tls: # specify domains to fetch certificates for
    - hosts:
        - REDACTED.example.com
      secretName: drone-tls
---
apiVersion: kubedb.com/v1alpha1
kind: Postgres
metadata:
  name: drone-postgres
  namespace: drone
spec:
  version: "10.2-v1"
  storageType: Durable
  storage:
    storageClassName: "rook-block"
    accessModes:
      - ReadWriteOnce
    resources:
      requests:
        storage: 256Mi
  terminationPolicy: DoNotTerminate
```
Note that I use kubedb in my cluster, so I am able to request a new Postgres database through Kubernetes configuration YAML. If you don't use kubedb or similar, you'll have to provide a db to Drone differently. By default, Drone uses a sqlite3 database, but this isn't much good in Kubernetes, because if your Drone pods get restarted, you will lose your configuration and job history.
I give the Postgres url to Drone via a [Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/#creating-your-own-secrets) called `drone-postgres-url` in the `url` key. This secret must be in the drone namespace. Here's how you can do that:
```bash
echo -n 'YOUR_POSTGRES_URL' > ./url
kubectl create secret generic drone-postgres-url -n drone --from-file=./url
```
## Testing and Deploying An App
The application I wanted to test and deploy through Drone is an Elixir app that is released by semantic-release as a Docker image. I've written previously [on the Revelry blog](https://revelry.co/semantic-release/) about how to do that.
You configure Drone using a .drone.yml file. The main YAML object defined in that file is a "pipeline", which defines a series of steps and services which Drone will use to run your tests and deploy your app.
A basic pipeline for my app might look like this:
```
---
kind: pipeline
name: default

steps:
  - name: backend
    image: elixir:1.8.0-alpine
    commands:
      - mix local.hex --force && mix local.rebar --force
      - export MIX_ENV=test
      - mix do deps.get, deps.compile, compile, phx.digest, ecto.create, ecto.migrate, test
```
There's a problem: my tests won't pass without a working database, and we don't have one yet. In Drone, the way to get a supporting container for something like a database or a cache is via a service. Services are also just Docker containers which run with a certain configuration. Drone will run them before your steps. Other parts of your pipeline can communicate with services over a network (services are given a hostname that matches their service name) or via a shared volume.
Here's what the same pipeline looks like with a mariadb service:
```
---
kind: pipeline
name: default

steps:
  - name: backend
    image: elixir:1.8.0-alpine
    commands:
      - mix local.hex --force && mix local.rebar --force
      - export MIX_ENV=test
      - mix do deps.get, deps.compile, compile, phx.digest, ecto.create, ecto.migrate, test

services:
  - name: cms-database
    image: mariadb
    ports:
      - 3306
    environment:
      MYSQL_DATABASE: "cms_test"
      MYSQL_USER: "REDACTED"
      MYSQL_PASSWORD: "REDACTED"
      MYSQL_RANDOM_ROOT_PASSWORD: "yes"
```
I also configured my test suite to use a mariadb database at the host `cms-database`, port 3306, with the given username and password.
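For reference, that test configuration might look roughly like this (a sketch only; the `:cms` app name and `Cms.Repo` module are placeholders, and it assumes the repo uses a MySQL/MariaDB Ecto adapter, with the hostname, port, database, and credentials taken from the service definition above):
```elixir
# config/test.exs
import Config

config :cms, Cms.Repo,
  username: "REDACTED",
  password: "REDACTED",
  database: "cms_test",
  hostname: "cms-database", # the Drone service name doubles as the hostname
  port: 3306,
  pool: Ecto.Adapters.SQL.Sandbox
```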
My tests pass!
Next, I need to add my deployment step. As I mentioned before, this app deploys as a docker image, via semantic-release and semantic-release-docker. In order to do that, I need to use a "docker in docker" image-- which is just what it says, a Docker image containing the Docker daemon and Docker CLI.
It's actually best to use two of these. Drone (and Docker) prefer if any long running services, such as the Docker daemon, run in their own containers. In my experience, trying to run the Docker daemon in the background of a container that is also doing other commands overcomplicates things. Running the Docker daemon in an isolated container works better, because Docker has provided out of the box initialization scripts for that scenario.
Here's the pipeline with both "dind" parts added:
```
---
kind: pipeline
name: default

steps:
  - name: backend
    image: elixir:1.8.0-alpine
    commands:
      - mix local.hex --force && mix local.rebar --force
      - export MIX_ENV=test
      - mix do deps.get, deps.compile, compile, phx.digest, ecto.create, ecto.migrate, test
  - name: deployment
    image: docker:dind
    volumes:
      - name: dockersock
        path: /var/run
    environment:
      GH_TOKEN:
        from_secret: gh-token
      DOCKER_USERNAME: ci
      DOCKER_PASSWORD:
        from_secret: docker-password
    commands:
      - apk add nodejs nodejs-npm openssl git
      - npm install -g npm
      - (cd assets; npm install; npm run deploy)
      - npm install
      - docker build --network=host . -t REDACTED.example.com/my_repo/my_app
      - npx semantic-release

services:
  - name: cms-database
    image: mariadb
    ports:
      - 3306
    environment:
      MYSQL_DATABASE: "cms_test"
      MYSQL_USER: "REDACTED"
      MYSQL_PASSWORD: "REDACTED"
      MYSQL_RANDOM_ROOT_PASSWORD: "yes"
  - name: docker
    image: docker:dind
    privileged: true
    network: host
    hostNetwork: true
    mtu: 1200
    volumes:
      - name: dockersock
        path: /var/run

volumes:
  - name: dockersock
    temp: {}
```
That's a lot of new pieces. What's going on? Well, we've added a `docker:dind` step to the pipeline. This will run our Docker commands, for building and pushing the Docker image. We also need npm and NodeJS, because semantic-release is a Node package. We install npm and update it to the latest version for good measure. Then `(cd assets; npm install; npm run deploy)` builds my assets for production. `npm install` installs semantic-release and the various plugins I use (see my article from the Revelry blog). I build the Docker image. Then, I run semantic-release. Since I'm using semantic-release-docker, it will login to my Docker repo and push the image.
In order to actually build the image, we need a running Docker daemon. That's where the second `dind` comes in. This one is a service. It runs in privileged mode (required for the dind daemon), which means my Drone project must be flagged as "trusted." We use a shared volume to allow the Docker CLI running in my deployment step to communicate with the Docker daemon running as a service.
I need to provide some credentials for GitHub and for my Docker registry. I do that by exposing them as environment variables. Since I don't want to check in a sensitive credential to git, I use Drone secrets for passwords and tokens. You can set the value of the secret in the project settings UI.
When you put it all together, Drone will do the following sequence:
- Start a mariadb database and expose it on a network
- Start a Docker daemon service and expose its socket on a shared volume
- Prepare and run my test suite in an Elixir docker image
- If my tests pass, move on to a deployment phase:
- Build my assets
- Build a Docker image containing my latest code
- Tag the image based on the proper semantic version
- Push that tagged version and `latest` to my Docker repo
- Also, tag that release in GitHub
## Other Drone Features
I had a lightbulb moment while I was working with Drone. I had been wondering things like "I wonder which DBs Drone supports" or "I wonder which languages Drone supports." Then I realized: Drone supports everything that has a Docker image. To me, this makes it a more flexible and powerful tool than a CI architected like Travis or Codeship. In those CIs, you either have to wait for official support for your language (or service), or you have to hack together a working test script from a base intended for a different language or DB. The way Drone does this means that Drone immediately has a "feature list" longer than I can write here.
Beyond this, there are other Drone features worth mentioning:
- Drone supports Agents, which allow you to scale up your CI environment to handle more simultaneous builds. They are small daemons which you deploy to as many servers as you want. Each agent receives orders from the central Drone server which dispatches builds.
- Drone supports multiple secrets plugins. In my example, I used secrets stored in Drone's backing DB, but Drone also supports Vault and Kubernetes secrets, among others.
- In addition to GitHub, Drone supports GitLab, Gitea, gogs, and Bitbucket.
- `drone exec` is a cool feature on the Drone CLI which lets you run a build locally on your laptop, using the same configuration.
The only real issue that I hit along the way is that I could not get Drone to accept an encrypted secret from the `.drone.yml` file. Supposedly, Drone supports encrypting secrets via the CLI and including the encrypted version in your `.drone.yml`. I could not get that to work. I had to use the database secrets method instead.
## The Future of Drone
After experimenting with Drone, I think we're going to see it get a lot of traction. It's such a powerful tool, and it so well leverages the Docker ecosystem, that I can't help but think that it is the future of CI.

View file

@ -0,0 +1,47 @@
---
title: This Weekend I Read… (2019-04-14)
slug: this-weekend-i-read-2019-04-14
---
# Amazon Workers Have a Hellish Job
> With her job at Amazon, she hoped she could work and pursue an education at the same time. For years, the 27-year-old English major had taken other short-term warehouse jobs—mostly for retail companies, including the shoe store Zumiez. 
> More than two years later, injuries to her shoulder, neck, and wrist sustained during her time at Amazon—lifting up to 100 items an hour, moving them to conveyor belts, and then hauling them into trailers—have made it nearly impossible for her to type without the aid of voice dictation software.
> Between 2015 and 2018, OSHA reported 41 “severe” injuries resulting in hospitalization, including six amputations and 15 fractures, associated with Amazon delivery or fulfillment jobs. 
Amazon workers are receiving severe, life-changing injuries on the job, and Amazon is covering it up using a system of in-house "clinics", complicit company-mandated doctors, and missing OSHA filings.
Who doesn't think Amazon workers suffering through this should have a union? Well, Amazon for one. The company is using old-school union-busting tactics to single out and remove pro-union workers.
- [She Injured Herself Working at Amazon. Then The Real Nightmare Began.](https://www.motherjones.com/politics/2019/03/amazon-workers-compensation-amcare-clinic-warehouse/) by Tonya Riley at Mother Jones
- [Amazon and Union at Odds Over Firing of Staten Island Warehouse Worker](https://www.nytimes.com/2019/03/20/business/economy/amazon-warehouse-labor.html) by Noam Scheiber at NYT
- [Amazon lobbies to exempt employees from labor protections](https://apnews.com/5c01ffdd9fbb48639fc43bc376f501e4) by Tom Janes at AP
- [The Relentless Misery of Working Inside an Amazon Warehouse](https://onezero.medium.com/relentless-com-life-as-a-cog-in-amazons-e-tail-machine-d46b3ef05eb8) by Cameron Brady-Turner at Medium
- [Colony of Hell: 911 Calls From Inside Amazon Warehouses](https://www.thedailybeast.com/amazon-the-shocking-911-calls-from-inside-its-warehouses?ref=home) by Max Zahn, Sharif Paget at The Daily Beast ("Warning: This story addresses suicidal threats by Amazon employees.")
# Meritocracy is Still Fake
Belief in meritocracy is a core part of our modern ideology, particularly in the tech industry. Unfortunately, meritocracy is a false idea that does not exist. Worse, research shows that believing in meritocracy makes you more selfish, less self-critical, and more prone to acting in discriminatory ways.
> Although widely held, the belief that merit rather than luck determines success or failure in the world is demonstrably false. This is not least because merit itself is, in large part, the result of luck. Talent and the capacity for determined effort, sometimes called “grit,” depend a great deal on one's genetic endowments and upbringing.
> This is to say nothing of the fortuitous circumstances that figure into every success story. In his book Success and Luck, the U.S. economist Robert Frank recounts the long-shots and coincidences that led to Bill Gates's stellar rise as Microsoft's founder, as well as to Frank's own success as an academic. Luck intervenes by granting people merit, and again by furnishing circumstances in which merit can translate into success. This is not to deny the industry and talent of successful people. However, it does demonstrate that the link between merit and outcome is tenuous and indirect at best.
> According to Frank, this is especially true where the success in question is great, and where the context in which it is achieved is competitive. There are certainly programmers nearly as skilful as Gates who nonetheless failed to become the richest person on Earth. In competitive contexts, many have merit, but few succeed. What separates the two is luck.
- [Meritocracy doesn't exist, and believing it does is bad for you](https://www.fastcompany.com/40510522/meritocracy-doesnt-exist-and-believing-it-does-is-bad-for-you)
# We Get to Decide What Comes Next
We're living in interesting times. I think there's pressure building up on one of those socio-political-historical fault lines. We might live to see humanity evolve into its next political and economic model. This can be daunting, but it should also be exciting. After all, if we play our cards right, we can determine what this new model will be.
While I don't agree with the authors entirely, [this article](https://www.fastcompany.com/40454254/dont-be-scared-about-the-end-of-capitalism-be-excited-to-build-what-comes-next) gives much food for thought on this idea. It is likely that what comes next won't be any of the old models, and won't be accurately predicted by any futurist.
Where their argument falters is that they seem to assume that which of these models will emerge is a function of which one best addresses the challenges of the modern era, climate change, and the pressures of automation. That's not how economies change. Economies change as a function of who holds economic power, those people's interests, and how people generally relate to economic activity. Without radical economic democracy which places power in many hands, whatever comes next will only serve the few who currently hold power.
Though I do have to give a special shout-out for introducing me to the term "doughnut economics."
- [Don't Be Scared About the End of Capitalism—Be Excited to Build What Comes Next](https://www.fastcompany.com/40454254/dont-be-scared-about-the-end-of-capitalism-be-excited-to-build-what-comes-next) by Jason Hickel and Martin Kirk at Fast Company

View file

@ -0,0 +1,17 @@
---
title: Cybertruck Is the World's First Ecofascist Vehicle
---
Elon Musk and Tesla recently unveiled Cybertruck. I'm sure you know this. It's been everywhere. It is the ride that launched a million memes.
I think Cybertruck held the internet's attention so intensely because no one knows what to think about the damn thing. What are we to make of a truck that has an all-electric powertrain while also having ballistic glass windows? What are we to make of a vehicle with cold-rolled steel armor plates and a pop-up tent for camping?
Who is this for? It defies the traditional American left-right analysis. Is it for liberal hippies because of the lithium battery banks? Is it for the red-state, rolling-coal, lifted F250 crowd because of the armor and tank-like styling? This is a small mundane demonstration that politics have never neatly fallen on a one-dimensional spectrum.
I propose that Cybertruck defies the usual auto-industry psychographics (and mainstream-media political analysis), because Cybertruck is the first ecofascist vehicle. It's a car that acknowledges that climate change is real, and that petroleum is not a limitless resource. But rather than ask “How might we avoid the looming climate disaster?”, Cybertruck's designers wondered “How might the wealthy avoid the consequences?” Their answer was “With (nominally) bullet-proof glass and armored door panels.”
Cybertruck nods to climate change while dismissing any collective solutions to the problem. Shouldn't we support public transit? Green public infrastructure programs? Unpaving and sprawl reduction? Reduced consumption? Accountability for the corporations who are ruining the Earth in the first place? “No,” says the Cybertruck. You merely need to purchase a warm armored blanket of individualistic protection. The unworthy (i.e. those who can't afford Cybertruck) will merely perish.
Perhaps Cybertruck draws inspiration from the armored Hilux custom trucks that oligarchs around the world— like Elon's apartheid-era Afrikaner-mine-owner family— favor for zipping from one fortified holding to another. What are you to do when you are low-key a member of the “law & order” crowd, but you can't be caught pulling up to the gala in anything as gauche as an armored personnel carrier? What are you to do when the gasoline is all gone and your TAG-armored Lexus pulls to an unplanned stop among the hoi polloi?
You can already buy untouchability from almost everything through wealth, but how does one purchase invulnerability from the global apocalypse that you almost definitely had a material role in instigating? Don't worry, Cybertruck will whisk you safely from your winter bunker near Mt. Ruapehu to your summer bunker by Tolaga Bay. You may see the unwashed climate refugees along the highway, but the consequences will never reach you through the ballistic glass.

View file

@ -0,0 +1,49 @@
---
title: Elixir on Heroku with Docker Containers
---
It is possible to deploy your Elixir application on Heroku using the [Elixir Buildpack](https://github.com/HashNuke/heroku-buildpack-elixir). However, you may want to deploy using Docker on Heroku instead. Perhaps your application has complex setup or configuration needs. Or perhaps you already have a Docker-ified application and you don't want to have to do extra work to convert it to buildpacks and Procfiles.
Heroku supports Docker via the [Container Registry and Runtime](https://devcenter.heroku.com/articles/container-registry-and-runtime). Here's the process of getting it set up:
1. Create a Heroku application with `heroku create <app_name>`.
2. Set the Heroku stack to “container” with `heroku stack:set container`. This enables the container functionality and ensures that Heroku doesn't become confused if your project also contains other manifests (such as package.json or Gemfile).
3. Set up your Docker environment to talk to Heroku's Docker registry instead of the default Docker registry: `heroku container:login`.
4. Build the image and push it to the Heroku registry: `heroku container:push web`.
5. Deploy it to your dynos: `heroku container:release web`.
6. Check your site and make sure everything works.
7. If your application does not boot and logs the error message `Shall I install Hex? (if running non-interactively, use "mix local.hex --force") [Yn] ** (Mix) Could not find an SCM for dependency :phoenix from <app_name>.Mixfile`, you may have to make a small change to your Dockerfile.
- Add `ENV MIX_HOME=/opt/mix` before you install hex & rebar in your Dockerfile. From what I can gather, this is needed because Heroku's Docker execution environment is different from your local development environment.
For reference, here's my full Dockerfile for my Phoenix application:
```Dockerfile
FROM elixir:1.8.0-alpine
RUN apk add make gcc libc-dev
ENV CC=gcc
ENV MIX_HOME=/opt/mix
RUN mix local.hex --force \
&& mix local.rebar --force
WORKDIR /root/app
ADD ./ /root/app/
EXPOSE 4000
ARG MIX_ENV=prod
RUN echo ${MIX_ENV}
ENV MIX_ENV=$MIX_ENV
ENV PORT=4000
RUN mix deps.get
RUN mix deps.compile
RUN MAKE=cmake mix compile
RUN mix phx.digest
CMD ["mix", "phx.server"]
```

View file

@ -0,0 +1,14 @@
---
title: "Magic the Gathering: Arena is probably my game of the year"
---
I've been playing Magic the Gathering off and on since 1995. I started out playing with paper cards (it was the only option!), and then briefly played Magic the Gathering Online around 2003. Since then, life changed and I didn't really have time to play. I became a lapsed player.
Last year, I got back into the game in a major way. MTG Arena, the new digital version of Magic, is perfect for me. It's extremely quick and easy to jump in and play a game. And the economy is extremely generous-- if you play consistently, you can get plenty of cards with little-to-no cash investment. And the Magic expansions coming out now are the most interesting and fun the game has been in years.
The community around the game has grown in a great way too. The streamers range from casual entertainers playing off-the-wall decks to the best players in the world streaming their practice sessions. I've even started following along with Magic e-sports and tournament play. In my house, we have favorite pro players!
All in all, it's been great to (re)discover a new old hobby.
<aside><p><i>I'm prehnRA#33926 on MTGA if you want to add me to your friends list.</i></p>
</aside>

View file

@ -0,0 +1,25 @@
---
title: On Capitalist Bread Lines
---
In America, any time someone proposes even modest social democratic reforms, [some jackass takes to cable TV](https://twitter.com/proustmalone/status/1228049815013089280) to warn about the looming danger of socialist bread lines.
So let's talk about bread lines and why they happen.
Visible bread lines happen when a society tries to feed everyone, but can't. People get hungry, but they know that if they go to the grocery and stand in line, they have a chance of getting some bread. No matter who they are or how little money they have, they can stand in the line, and society will at least try to take care of them.
Capitalism has bread lines. They are invisible and omnipresent. You don't see capitalist bread lines because everyone knows that society will make no effort to feed you unless you have money to pay. Why don't people line up for bread at American groceries? Not because they aren't hungry, but because they know it is useless. If you are going to be hungry either way, you might as well save the gas money.
Occasionally, bread lines are allowed to poke through into visibility, so long as they maintain a suitable aesthetic. What are food banks and homeless shelters other than _our bread lines_? Both are evidence of needs unmet. We just consider it acceptable under the guise of "charity," because charity convinces us that the foundation isn't rotten. "Some people just fall through the cracks, but it mostly works," we reassure ourselves.
[37 million Americans don't reliably get enough to eat, including 11 million children.](https://hungerandhealth.feedingamerica.org/understand-food-insecurity/) That's over ten percent of us.
There is another difference between our bread lines and those of the old USSR. Hunger can come from two places: production or distribution.
Bread lines in the USSR were the result of bad harvests. Bad harvests had been a problem in Russia [going all the way back to the tsar](https://en.wikipedia.org/wiki/Russian_famine_of_1891%E2%80%9392). The communists took over a poor feudalist subsistence economy and tried to modernize to stop the cycle of famine. It didn't always work. But they tried to distribute what bread they had to anyone who needed it.
_Our_ bread lines are the result of our distribution. [We make far more food than we need](https://www.pri.org/stories/2013-07-22/millions-hungry-despite-world-food-surplus). We just won't give it to anyone who doesn't have enough money.
Ask yourself: which of these is the greater failure? Is it the country that tries to feed everyone and sometimes fails? Or the country which [throws away its surplus](https://foodforward.org/2017/09/how-much-food-is-wasted-in-america/) rather than feeding those who can't afford to pay?

View file

@ -0,0 +1,17 @@
---
title: One Piece of Work-From-Home Advice That No One Will Give You
---
Since COVID-19 is a serious concern, a lot of companies are asking their employees to work remotely right now. Many of these people have never worked remotely before. Let me tell you, it is different. I've been working from home for about ten years now so let me give you some advice. This time around, I'll skip over the parts about having a desk set aside for your work, making sure to set time boundaries on your work, &c, &c because that's been pretty thoroughly covered elsewhere. I'm going to tell you the part that I haven't seen anyone else say aloud.
You aren't going to feel productive. And it isn't because you are distracted. In fact, it might be because you have fewer distractions!
Allow me to explain. If you are used to working in an office, you are probably accustomed to office chit-chat and doing little laps around the office all day. You talk to the person at the front desk, then to your friend two cubicles down. You get some coffee before you "dive in." Your manager stops by to see how softball went. Someone stops by to say that it is Sylvia's birthday and there is cake in the break room. The guy from facilities needs to stand on your desk to fix that air vent. While he's at it you can't work anyway, so you chat about the Saints. And then you go get that cake, and a coffee refill. You sit down to work, but you stand up again one minute later after you print out that report. You walk to the printer, but your manager catches you on the way and wants to know what you think about the new format for the weekly business unit review. It's getting close to lunch time. &c, &c.
When you work from home your day is different. You are probably trying extra hard to be diligent. You make coffee before your designated start time, because you don't want to be away from your computer if your manager "pings" you. There is no one to chat with on your commute down the hall to your post in the spare bedroom. You might not even have a printer, but if you do, you can reach it from your chair. Your manager doesn't swing by your desk or catch you in the corridor. If they need to talk, they'll schedule a meeting. You eat lunch on a schedule and keep snacks on your desk.
The result of this is a lot less "fluff" in your day. Because you are moving less and talking less, you feel like you are less productive. The truth is, you've probably been getting all your work done in [the three productive hours you actually have every day](https://www.inc.com/melanie-curtin/in-an-8-hour-day-the-average-worker-is-productive-for-this-many-hours.html). I'm not saying this is a bad thing! A lot of people have this revelation after some time working from home and they feel intense guilt about it. I'm telling you it is natural! [Medieval peasants worked about half as much as we do now!](https://groups.csail.mit.edu/mac/users/rauch/worktime/hours_workweek.html) [Hunter-gatherers work about half as much as typical office workers!](http://rewild.com/in-depth/leisure.html) I think we're honestly not built for eight hours of constantly coding or fiddling with reports in a day. We've been fooling ourselves.
I think we all need negative space in our days. We need time to rest and think. It's gauche to admit that in our puritan work culture. We need to be constantly in motion and be seen being constantly in motion. So the average office is a dryer full of ping-pong balls all bouncing off of each other, going round and round, but not making much forward progress. In fact, we're all probably melting a little.
When it is time to work, work hard. Then, embrace the negative space. Rest and think and plan and read. And find time for chit-chat and cake and coffee. Trust yourself and find your rhythm.

View file

@ -0,0 +1,17 @@
---
title: The Corona Recession is Different
---
The Great Recession began as a plague of capital. The Collateralized Debt Obligations and Mortgage Backed Securities got sick first. The real economy got sick later once people couldn't afford to stay in their homes but couldn't afford to sell them either.
Throughout the Great Recession, the working class in general, and service workers in particular, kept everything running&mdash; as they always do. Economists call the Service sector "acyclic," which means it is comparatively resistant to the boom and bust cycles of capitalism. People didn't stop buying haircuts or eating at restaurants during the Great Recession. You still need haircuts, food, and medical care, even if there is havoc in the fictive economy of the stock markets. The Service sector is our life preserver.
The corona recession is a consequence of an actual material plague. People are getting sick first, then the economy is getting sick because people can't show up for work. The Service sector kept us afloat last time, but we need to shut it down for material epidemiological reasons. We are about to be adrift without our life preserver.
Working people make everything: meals, haircuts, medicine and medical care, culture, housing&mdash; everything. For all the emphasis we put on the ups and downs of financial assets, stocks alone have never made anything. We say that capital investment makes the economy move, but it only does so because it commands labor. Without labor, stocks and bonds are inert. A stock has never made you a meal or cared for you when you were sick, and neither has a CEO (putting aside publicity appearances here or there).
This makes the corona recession materially different from the last one. If we don't protect working people through this crisis, we will incur a tremendous human cost. And if the economy is your concern, know that it will _never_ recover unless the working class comes out of this crisis in a strong position. If working people get sick and starve, there will be less food and less medical care as a result. Then _more_ people will get sick and starve. An economic contagion to match the viral contagion.
So what should we do? The tactics we used last time won't work this time. It's debatable whether they worked last time. The government used targeted stimulus to keep the financial, automotive, and real estate industries operating. We _can't_ keep the services sector open. So we need to apply our stimulus directly to working people.
In the short term, we need to get direct cash payments into the hands of every working person in the country. We need that money to stay fed and healthy, which society needs in order to recover when the crisis passes. And long term, we need to make sure working class people share in the prosperity that we create. The risk of this crisis would be far lessened if the working class wasn't in such poor shape to begin with. The working class needs an ownership stake in the economy, savings, and a robust safety net to make it through times like these.

View file

@ -0,0 +1,65 @@
---
title: Will corona recession really kill more people than corona virus? Not unless we're stupid and cruel
---
The question: will corona recession kill more people than corona virus? The answer: not if we take some easy preventative economic medicine.
First, let's do away with the abstractions of economics for a second. People don't die when their bank balance hits zero. It's not some sort of video game life counter. It's not an economic downturn or unemployment _per se_ that kills people. It's starvation, exposure, sickness, and suicide that _actually_ kill people. These "four horsemen" just happen to _correlate_ with economic downturns.
The good news: all of these are thoroughly preventable causes of death, even in an economic downturn. Here's the recommended course of treatment.
## The Plan
### Avoid Starvation.
To prevent starvation, you need to make sure of two things: that we make enough food, and that we distribute it to the people who need it. Even in the corona recession, this is not particularly difficult.
- Food production. Firstly, we've been running food surpluses for decades now. In fact, so much so that the government has been holding up food prices by buying up massive "strategic reserves" of food. Reserved for what? Reserved for a situation like this. So we probably don't even need to grow any more corn or wheat this year to avoid starvation. But we don't need to stop food production either! Only about 7.3 million of the country's 331 million people are farmers, hunters, fisherpeople, food processors, or grocery workers. We can avoid any hitch in the food supply by having about 2% of workers keep working, and it won't make a dent in social distancing.
- Food distribution. This one is easy. Cut everyone in the country a check for $2,000 a month. People can use that to buy food and pay rent. How do I know we can do that? The cost for this universal basic income (UBI) program would be about $660 billion a month, and we are gearing up to spend $30 trillion _this month_ on Wall Street bailouts.
### Avoid exposure.
We can prevent people from being houseless during this crisis. There are six empty homes for each houseless person. We can prevent a spike in new houselessness by freezing mortgage and rent payments.
We should pass a law that says this: for the next year, mortgage balances are frozen&mdash; no payments are due, no interest will accrue, and no principal will be paid down. Then, in a year, we all pick up where we left off. The only cost of this is the interest that the banks would have been earning otherwise. We could pay off all mortgage interest for about $155 billion a month (way cheaper than those Wall Street bailouts!) or just say "you are welcome for saving you from a foreclosure crisis!" And since landlords won't be paying mortgages, they don't need to collect rent, so we freeze that too.
Or again, we could just cut everyone a monthly check to cover their payments.
### Avoid sickness.
The most important thing we can do to reduce sickness is keep workers at home if their work is not needed to keep us alive through the crisis! If social distancing is lifted, models predict that 2 million people will die. The options are simple: (1) keep people at home, and there are 90% fewer deaths OR (2) send everyone to work, and two million die.
While we're all here: Medicare for All is cheaper than private insurance. If we're looking for savings, we could save $200 billion per month while making sure that everyone has care! And that's $200 billion based on a normal month, not a corona month (the savings from M4A increase the more care is demanded).
### Avoid suicide.
Why do you think suicide rates increase in economic downturns? It's because people are hungry, sick, homeless and/or anxious about becoming hungry, sick, or homeless. Solve the others and solve the anxiety about them and you solve the spike in suicide rates. Throw in Medicare for All with access to mental healthcare and lower the rate even further.
## Feasibility
But can we afford it? The UBI would cost $660 billion a month. A mortgage interest freeze costs $155 billion a month, assuming we don't just make banks carry it. Medicare for All _saves_ at least $200 billion a month, probably more in corona times. So our corona recession preventative treatment costs about $600 billion a month. For comparison, the Federal Reserve is about to embark on $30 **trillion** in bank bailouts _in one month_.
On the other side, there's the cost of _not_ preventing the wildfire spread of corona virus. 2.2 million dead according to the latest models. In a normal moral calculus, that should be enough. But let's say you are an economist, and you want to know about the dollar impact, because economists are sociopaths. Normal flu costs about $2.6 million for each person it kills due to lost productivity and medical cost. So if you just linearly scale that to 2.2 million people, it's around $5.7 trillion of impact, which is about 30% of GDP. But flu kills far fewer people than corona does, and epidemics have network effects. A better data point is probably the 1918 flu epidemic. Economic data from 1918 is dodgy, but the areas in the United States which were hit by that year's flu reported 40-70% drops in aggregate demand. That's $8.4 to $14.8 trillion in economic cost. **AND TWO POINT TWO MILLION DEATHS.** There. Can I go throw up now?
## Conclusion
Strong social distancing plus strong social safety net _saves_ billions per month and **2 million lives**. There's been a lot of talk of "balance" lately. I propose this: halt non-essential work, and provide a safety net to keep everyone fed, housed, and well. _That's_ the balance we need.
## Sources
- https://www.bls.gov/emp/tables/employment-by-major-industry-sector.htm
- https://www.bls.gov/ooh/production/food-and-tobacco-processing-workers.htm
- https://datausa.io/profile/naics/4451/
- https://www.newyorkfed.org/markets/opolicy/operating_policy_200320a
- https://www.mintpressnews.com/empty-homes-outnumber-the-homeless-6-to-1-so-why-not-give-them-homes/207194/
- https://www.thesimpledollar.com/loans/blog/heres-how-much-the-average-american-pays-in-interest-each-year/
- https://www.washingtonpost.com/health/2020/03/19/coronavirus-projections-us/
- https://www.cnbc.com/2017/10/30/the-flu-costs-the-us-economy-10-point-4-billion.html
- https://www.stlouisfed.org/~/media/files/pdfs/community-development/research-reports/pandemic_flu_report.pdf?la=en
- https://www.latimes.com/business/story/2020-02-14/medicare-for-all-cost
- https://www.imperial.ac.uk/media/imperial-college/medicine/sph/ide/gida-fellowships/Imperial-College-COVID19-NPI-modelling-16-03-2020.pdf

View file

@ -0,0 +1,27 @@
---
title: Notes on Buckminster Fuller's Operating Manual for Spaceship Earth
---
I just finished reading Buckminster Fuller's *Operating Manual for Spaceship Earth*. I've gathered a few of my thoughts on this thought-provoking classic.
My capsule review: The book puts forward a fascinating theory of history and our position in the universe in a concise and engaging package.
As I read the book, my mind kept finding lines between Fuller and eco-socialism or eco-anarchism. While Fuller calls capitalism and socialism "mutually extinct," there are some ideas in his work that wouldn't be out of place in Marx, Proudhon, or certainly Bookchin.
For example: Fuller's Great Pirates theory of history is a first cousin of "the history of man is the history of class struggle." The Great Pirate theory describes the separation of the ruling class from the ruled class based on who labors and who can command labor. Fuller follows this thread through all the mutations of history&mdash; chieftains to kings to industrialists&mdash; until we reach the current epoch. In recent history, the Great Pirates&mdash; the members of the ruling class&mdash; have delegated management of their affairs to states and the scientific-professional class. This is an analysis of our situation that would not be out of place in Piketty.
Fuller correctly concludes that war is a way for the upper class to drive demand and production while controlling the lower classes with xenophobia and violence. He argues that nations and borders are inefficient relics that draw arbitrary unreal lines between people for the purpose of encouraging war and its economic activity. As an engineer, he sees that nations and borders create a tremendous amount of redundancy&mdash; redundancy which is useful if you are a Great Pirate who needs production to continue ceaselessly to ensure your continued wealth and power.
Fuller states that if we are to survive long-term, competition between states needs to cease. And since states are not states unless they compete, if we are to survive, states must go.
[I'm starting to understand why Fuller has a lengthy FBI file.](https://paleofuture.gizmodo.com/we-got-buckminster-fullers-fbi-file-1704777475)
People know of this book as an argument for green energy and regenerative systems. Fuller frames Earth as a spaceship hurtling through space with no lifeboats. If our species is to live, we need to ensure the continued operation of the life support system that makes our oxygen, water, food, and energy. I know that when the book was written, no one had yet articulated things in this way. It shows the effectiveness of Fuller's ideas and his communication style that these ideas are absolutely everywhere in modern ecology and eco-futurism.
As with all futurism, Fuller's predictive track record is mixed. He predicted that wealth would be essentially limitless and _universal_ at this point. Naturally, as a result, we'd be spending 90% of our day in leisure. This is where lacking a true materialist analysis bites him. Fuller thought we'd either be dead or living without borders by now. He underestimated the ability and desire of the ruling class to adapt to preserve their status. States evolved instead of going extinct.
Fuller couldn't understand why a person with functionally limitless wealth wouldn't start sharing at some point. There comes a point in wealth accumulation when all your barriers are lifted: you have all the creature comforts that you could want, you can travel as much as you would like, and you can work as little as you please. It isn't rational to hoard wealth beyond this point. Fuller, a thoroughly rational person, concludes that the thing to do at that point is to stop seeking further wealth, enjoy what you have, and let the rest flow to everyone else who isn't there yet. He failed to consider that for some people, enough is never enough. Those people seek out wealth, and sometimes they get it. Once they have it, they use their wealth to get more wealth. Since their hunger is infinite, they keep using their wealth to capture more wealth until they have as much as possible, and everyone else is driven to the line of subsistence. The system is inexorable. Even if any individual were to choose differently and give away all their hoarded wealth, another yawning pit of greed would open to fill the gap.
I'm surprised that Fuller missed this, considering he had the Great Pirate theory and whole-systems-thinking right there in the same volume. The Great Pirates keep their fleets because the fleets are power and Great Pirates want power. He spent large stretches of *Operating Manual* discussing how the whole physical world&mdash; galaxy upon galaxy&mdash; doesn't constitute a complete system without the metaphysical world of human thought and behavior. The universe acts on us and we act on the universe and then this flow continues round and round forever. If the system of "human ingenuity plus near-limitless solar energy" is generally pointed toward a future of universal wealth and leisure, shouldn't we consider that the human element would nudge the ultimate result one way or the other? We should.
*Operating Manual for Spaceship Earth* gives us a lot to consider in a scant 150 pages. I highly recommend it for anyone interested in considering our position in time and space, in realms physical and metaphysical.

View file

@ -0,0 +1,7 @@
---
title: The Only Way to Wrap an Extension Cord
---
<iframe frameborder="0" scrolling="no" marginheight="0" marginwidth="0" width="788.54" height="443" type="text/html" src="https://www.youtube.com/embed/kda4DPAn3C4?autoplay=0&fs=0&iv_load_policy=3&showinfo=0&rel=0&cc_load_policy=0&start=0&end=0&origin=https://youtubeembedcode.com"></iframe>
You are welcome.

View file

@ -0,0 +1,7 @@
---
title: Developers don't need ping-pong tables.
---
> Unless your goal is to win a ping-pong tournament, remember: when developers change jobs, the last thing they care about is your fancy office and table tennis. Developers need autonomy, mastery, and purpose.
From [Developers Don't Need Ping-Pong Tables](https://sizovs.net/2020/03/26/developers-dont-need-ping-pong-tables/).

View file

@ -0,0 +1,25 @@
---
title: "On \"Time Famine\""
---
I was recently introduced to the concept of ["time famine"](https://www.cnn.com/2017/07/24/health/time-famine-stress-happiness-study/index.html), which is:
> the universal feeling of having too much to do but not enough time to deal with those demands.
I know the feeling. Though the term has been around since 1999 or so, there's been a spike in discussion lately. The focus of that discussion has been on finding ways to save time, mostly via automation products and delivery services. It's almost as if there are people with a vested interest in selling you a "solution" to this problem (whether it works or not). I think most of the discussion and most of the proposed solutions entirely miss the point.
It's natural to look at your todo list and despair. It's _particularly_ natural to despair when you look at your todo list over the span of days, months, and years and never see the numbers tick down. It's _natural_ but it's also _wrong_.
Here's the truth that you don't want to hear: **your todo list will be empty when you are dead**. When are you going to stop doing laundry? When are you going to stop needing to go to the grocery store? When are you going to stop having stuff to do at work? When are you going to stop having to call your landlord or fix things at home? When are you going to stop pursuing your hobbies? When you are dead. Thus, your todo list will be empty when you are dead and not a second before. This is OK.
Your task list isn't a flooded basement, it is a drain pipe. You don't need to be concerned that there is water in there. _That's where the water goes._ You need to be concerned if you are constantly putting more in there than can flow out the other end. Be concerned if the pipes are backing up into the house. Stop having an emotional crisis over the task count in some app. Consider how many things you got done this week, and how many new things you added to the list. These numbers, on a long enough time scale, should be about the same. If you are constantly adding twice as many things to the list as you are checking off, you are going to have a bad time.
By all means, use home automation or a delivery service if it makes you happy. They can be a good way to improve your flow. But if you think of your life as a flooded basement, these things will only ever make you feel like your basement is temporarily less flooded.
How can you stop feeling bad about your task pipeline?
- **Cut yourself some slack.** We're surrounded by pressure to do more. It comes from our bosses, productivity advice sites, our hobbies, and even our families. But you are the only person who can decide that you are doing enough.
- **Say no.** I became much happier after I realized that there were certain hobbies that I can envy from afar, but cannot personally undertake. There are certain social occasions that I wish I could attend, but I simply cannot while keeping my sanity.
- **Embrace an organizational system that is about _flow_, not _zero_.** I'm very partial to David Allen's Getting Things Done methodology, which probably saved me from death by anxiety and depression. That's a big statement and I mean it.
I'm certainly not immune to feeling "time famine." The difference with this mindset and these tools is that I feel it less often. When I do, I notice the thoughts and can quash the anxiety.

View file

@ -0,0 +1,104 @@
---
title: Elixir Patterns for Testing with Mox
---
When I need mocks in my Elixir tests, I prefer to use the [Mox](https://github.com/dashbitco/mox) library. Mox follows the principle of mocking by explicit contract. The Mox README explains it like this:
> 1. No ad-hoc mocks. You can only create mocks based on behaviours
> 2. No dynamic generation of modules during tests. Mocks are preferably defined in your test_helper.exs or in a setup_all block and not per test
> 3. Concurrency support. Tests using the same mock can still use async: true
> 4. Rely on pattern matching and function clauses for asserting on the input instead of complex expectation rules
I've found that this works very well with the overall design of the Elixir language. Elixir developers generally prefer explicitly combining functional pieces over any sort of magical hidden state. Some of the other mocking libraries fiddle with your function definitions behind the scenes. I've found that this makes test code harder to read and debug.
When you are using Mox, I believe that it helps if you follow certain patterns. I've worked on several projects using Mox at this point, and I've seen the pain that can come from not establishing those patterns up front. I will summarize my recommendations briefly here before showing some examples:
1. You should define one facade module which both your application and test code call in almost all cases.
2. The facade module should delegate all its work to either the true adapter or the mock adapter, depending on the environment.
3. One piece of configuration should control which adapter is used, and that configuration should be wrapped up in a nice easy to call function.
To boil that down to only one sentence: your application code should not know or care that you are using Mox. If you find your application code caring that it could be mocked out in a test later, you need a new set of abstractions. In fact, most of your _test_ code shouldn't need to know about the mocks, except where it needs to set function call expectations.
What does the code to do this look like? Let's say your application uses a weather API and you don't want to use the real API in your test suite.
Let's say this is the module your application already has for communicating with the weather API:
```elixir
defmodule Weather do
def current_weather(zip_code) do
# makes a GET request to the API to ask for the current weather in a zip code
end
end
```
We would define a behaviour for a weather adapter:
```elixir
defmodule WeatherBehaviour do
@callback current_weather(binary) :: map
end
```
We would have the Weather module adopt the behaviour, and we would rename it so that it can serve as our live API adapter. Later, we'll tell our application to use the LiveWeather adapter by default.
```elixir
defmodule LiveWeather do
@behaviour WeatherBehaviour
def current_weather(zip_code) do
# makes a GET request to the API to ask for the current weather in a zip code
end
end
```
At this point, we need to create a new Weather module so that all of our application code doesn't explode. Our new Weather module won't do much. It will just delegate down to either LiveWeather, or the mock weather adapter as appropriate for the environment:
```elixir
defmodule Weather do
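# The facade: application code keeps calling Weather and never needs to know which adapter answers.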
defdelegate current_weather(zip), to: WeatherApp.weather_adapter()
end
```
and we'll need to define `weather_adapter/0` in our application module:
```elixir
defmodule WeatherApp do
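# Looks up the adapter module from application config, falling back to the live adapter.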
def weather_adapter do
Application.get_env(:weather_app, :weather_api_adapter, LiveWeather)
end
end
```
By default, the application will use LiveWeather. This is good for development and production. However, in the test environment, we need to tell the system to use the mock. In config/test.exs:
```elixir
config :weather_app, weather_api_adapter: WeatherMock
```
And we need to actually define our mock! In test/test_helper.exs:
```elixir
Mox.defmock(WeatherMock, for: WeatherBehaviour)
```
That's all the setup. **Notice that nothing outside of the Weather module and its configuration changed.** None of our controllers or other contexts were disturbed in the process.
If we want to write a test for a function that uses our Weather API, we need to tell Mox what function calls to expect and what to return:
```elixir
defmodule UserTest do
# standard test boilerplate as before
test \"current_weather/1 gives the current weather for the user\" do
user = %User{zip_code: \"19120\"}
WeatherMock
|> expect(:current_weather, fn \"19120\" ->
# I've heard it is always sunny there
%{\"description\" => \"clear\"}
end)
assert %{\"description\" => \"clear\"} = User.current_weather(user)
end
end
```
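One piece of that "standard test boilerplate" is worth spelling out: for `expect/3` to be in scope, and for uncalled expectations to fail the test, the test module needs to import Mox and register its verification hook. Here's a minimal sketch of what that boilerplate might look like (the module name is just the illustrative one from the example above):
```elixir
defmodule UserTest do
  use ExUnit.Case, async: true

  import Mox

  # Fails the test if an expectation set with expect/3 is never called.
  setup :verify_on_exit!

  # ...tests like the one above...
end
```
And because Mox supports concurrency (point 3 of the README excerpt), tests that use WeatherMock can still run with `async: true`.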

View file

@ -0,0 +1,8 @@
---
title: There Are Only Two Programming Errors
---
There are only two programming errors:
1. Not understanding what your program does.
2. Not understanding what your program is supposed to do.

View file

@ -0,0 +1,5 @@
---
title: Yeah, I Guess You Could Say I'm Into Supplements and Nootropics
---
*pops a children's Flintstones vitamin and a Lexapro, washes it down with black coffee directly out of the carafe*

View file

@ -0,0 +1,11 @@
---
title: On Glass Houses and Stones
---
One of my values, as a person, is to endeavor to never be a person who throws a stone in a glass house. In my experience, I've found that there are two ways to avoid being a person who throws stones in a glass house.
The first is to try, to the best of your ability, to not live in anything nearly so fragile as a glass house.
The second, and perhaps most important, way to avoid finding yourself as a glass-house-stone-thrower is to simply not throw stones. This is especially important if you are not certain, beyond a shadow of a doubt, that you are not living in a glass house. However, I find that there are benefits to not throwing stones even if you are reasonably certain that you find yourself living in a house of brick, stone, concrete, or even conventional stick framing. For one, throwing stones carries a certain risk of injury to one's self and others. For two, there's really no perceptible benefit to throwing stones in most cases. For three, it is simply exhausting.
There are, of course, some glass houses that simply need to be shattered from the outside by a well-thrown stone. This is yet another reason to keep your throwing arm rested and the glass out of your hair.

View file

@ -0,0 +1,29 @@
---
title: The Design of Disasters and the Disaster of Design
---
Yesterday, August 30th, 2021, my wife and I put our 19-month old and our cat onto our kayaks and paddled away from our home and out of our neighborhood. The night before, the rivers around our town exceeded flood stage due to storm surge and rain from Hurricane Ida. We woke to three feet of water surrounding our house on all sides. We paddled up the streets of our neighborhood. We made it out and to our car, which we had stashed on high ground in case just such a thing happened. We handed off the kayaks to strangers who were looking for any way to make it to their homes to assess the damage and retrieve anything that could not be replaced. We made it to Jackson, MS, where we are thankfully high and dry with power and wifi.
Much will be written about Ida in the coming weeks, I'm sure. The levees in New Orleans held against a Category 4 (or maybe 5) storm, a triumph of massive public investment. The power grid utterly failed after decades of looting by our privatized utility companies. Apparently, after billions of dollars of city, state, and federal subsidies and sustained year after year rate hikes, neither of their backup power plants nor any of their eight "modernized" main transmission lines are operational. One wonders who pocketed our money.
But I want to talk about something that is within my own little wheelhouse. Something that hits close to home. I want to talk about how the tech industry utterly failed me and everyone in our region.
At the time I most needed information to know what to do for the safety of my family, it was unavailable to me. And it was unavailable to me for stupid, petty reasons. We had cell service and battery power. However, I was unable to track the path of the storm, receive information about power outages in my area, or determine passable evacuation routes away from the storm.
The outage information page for my electric utility weighs in at 5.1 megabytes. It takes 17 seconds to load over the *broadband* connection from which I write to you. The neighboring electric utility that covers the other half of the region clocks in at 4.8 megabytes and loads a little quicker at 6 seconds. Both refused completely to load over LTE, *which is exactly how you would expect customers to access this information when they need it*.
As of the time of writing, the website of NOLA Ready, the official emergency communication channel for the city of New Orleans, will not load at all. One assumes that it has fallen over under the load of everyone in the New Orleans region trying to access emergency information at the same time— a thing people tend to do during emergencies. Or quite possibly, the server is located somewhere in the city of New Orleans, which has no electricity at the moment. Either way, I feel that there is some lack of foresight here.
NOLA Ready's secondary communication channel, Twitter, is totally useless with anything less than a strong LTE signal. First, you need to load the multiple megabytes of mandatory JavaScript and CSS which Twitter *requires* before loading any content. *Then* you need to load all the cute infographics and videos NOLA Ready has produced to transmit the critical emergency data. It's a hopeless endeavor.
And don't get me started on Slack's app, which is apparently just a very thin wrapper around a very heavy web page— none of which was apparently cached on any device I had with a data connection.
What did work during the storm and the evacuation? An informal solidarity network with zero budget operating in the borrowed bits at the edge of the old cellular voice band. I was able to communicate to friends across the gulf coast and in New York, one 140 byte SMS at a time.
It's not that I miss the days of separate mobile versions of web pages. But those mobile sites were slim and to the point. They were not tarted up with ad trackers and multi-megabyte infographics. We were supposed to replace those with progressive enhancement and progressive web apps. The idea was that you would load the no-frills information first, and then *enhance* it with all the colorful bells and whistles. This completely failed to materialize, at least where it matters most.
"Design" failed us. We need real design. Design that centers users, understands their needs and their context, and builds from there. Instead, we got the kind of mediocre design I despise. Design that centers the most highly paid person in the room and/or advertising networks. Design that centers design for the sake of design. Design as pretty, but vapid, commercial art objects. Design that centers mindless adherence to "best practices" where "best practices" is defined as never-ending user surveillance in the service of "analytics" and "A/B testing."
Ironically, what we need here are *less* sophisticated solutions. Our needs after the storm could have been served by plain old unstyled markup, and clear, concise written emergency communication. Hell, plain text files would work. All served from static file hosting instead of some lumbering beast of an enterprise CMS.
Power is out over here. It works over here. These neighborhoods are flooded. These are dry. These hospitals are operational. These are not. The storm is above Jackson and tracking northeast. Give me some good old `<ul>`s and `<li>`s.

View file

@ -0,0 +1,7 @@
---
title: We Have Armin van Buuren
---
![We Have Armin van Buuren](https://www.dropbox.com/s/adwilchh74aa48d/Screen%20Shot%202021-11-29%20at%209.50.25%20AM.png?dl=1)
So… is this a hostage situation oorrr…?

7
site/posts/2023-01-27.md Normal file
View file

@ -0,0 +1,7 @@
---
title: Three Things I Like This Week
---
1. I've been digging [Oxide and Friends](https://oxide.computer/) from [Oxide Computer Company](https://oxide.computer/). Oxide is doing something very difficult-- building completely custom, completely secure (from the 1st instruction) servers. Oxide and Friends is the team chatting about the interesting parts of that monumental challenge. I only understand a fraction of what they are talking about, but it is still fascinating. It makes me wish I had finished my Computer Engineering degree. My favorite episode so far is [the episode about the various circuit boards they've designed both for the server AND as tools for doing their work in the lab](https://www.youtube.com/watch?v=XmiWIlFvSYs).
2. [omg.lol](https://home.omg.lol/) is a delightful place to "get the best internet address that you've ever had." What that means is that it is a simple and very cute service that provides linktree-style profile pages, email accounts, fediverse accounts, link shortening, and more with one subscription-- all attached to the handle you choose for yourself. You can use their URLs, or attach the service to your own domain.
3. I've been enjoying Becky Chambers's [A Psalm for the Wild-Built](https://bookshop.org/p/books/a-psalm-for-the-wild-built-becky-chambers/15125608?ean=9781250236210). The story follows a tea monk whose life is upended when they meet the first robot anyone has seen since the robots walked off the job and into the wilderness centuries ago. The robot brings with it a seemingly simple and practically impossible-to-answer question: "what do people need?"

View file

@ -0,0 +1,8 @@
---
title: Three Things I Like This Week (2023-02-03)
---
- The Weakerthans' 2003 album _[Reconstruction Site](https://music.youtube.com/playlist?list=OLAK5uy_n4E9NKbNFgsgJQLZczatKfAodn6SXbWr4)_. One of my intentions for this year is to listen to more complete albums instead of hopping from single to single. I've loved this album since it came out, but I rediscovered it this week after the track Plea from a Cat Named Virtute spontaneously appeared in my brain again. Every single track is good, but the album is even better as a whole.
- Consumer Reports released an app called [Permission Slip](https://www.permissionslipcr.com/) that lets you easily opt-out of data collection and sharing with many companies with a few simple taps. In some cases, you can even request that a company delete your data entirely. They estimate that they've saved their users something like 150,000 hours since launch. Amazing and totally free.
- KeokeN Interactive's [Deliver Us the Moon](https://store.epicgames.com/en-US/p/deliver-us-the-moon) is a sci-fi thriller adventure puzzle game. The Earth has been totally depleted of resources, and humanity's only hope for long-term survival seemed to be a Moon-based fusion reactor and the Microwave Power Transmission (MPT) system. Then five years ago, without any warning or explanation, the lunar colonies stopped transmitting and the MPT failed, blacking out the whole world. You play as a lone astronaut sent on a longshot mission to investigate what happened and bring the MPT back on line. I became aware of this game when a sequel (Deliver Us Mars) was released this week.

View file

@ -0,0 +1,23 @@
---
title: "How I Get Fractional Lead Contracts Without \"Doing Sales\""
---
I am often asked how I find the clients for my Fractional Engineering Lead practice. Here's what I do. It has worked for me for about two and a half years now, but it might not work for you and it might not even work for me a month from now.
I don't do sales. I don't do "lead gen" or "biz dev." I don't even really network in the traditional sense. I also don't have someone to do these things for me.
What I do is I help people. I don't help them so that they give me contracts some day. I help them because I like helping people, particularly other Software Engineers, and I help them because it's the right thing to do.
The other thing I do is I ask people to help me. I don't ask for contracts, because either someone has work that's a good fit for me, or they don't. I ask people to give me advice, to help troubleshoot an issue, or to introduce me to their friends.
Every contract I've ever gotten has come from someone I've helped and/or someone who has helped me. It's usually nowhere close in time to that first interaction. They usually email me out of the blue and say "I have / heard about a project, and I think you could help."
When I started working as a Fractional Engineering Lead, I got my first contract from an amazing, kind former coworker of mine. I helped her (in a very small way) to get that job, and she helped me fix some HR snags at that company. A couple years later, she heard that I'd lost my job and she connected me to my first incredible client. I don't think I would have the business I have today without her!
Ok, but you probably want some practical steps you can take. That's very reasonable.
1. Join communities where people who do what you do hang out. When they ask for advice or need help solving problems, help them. Volunteer yourself to get on a call. _You must do this in the spirit of genuinely helping and making the community better, or it won't work._
2. Fix bugs in open source software. Particularly fix the bugs that no one else wants to fix.
3. Re-post jobs and the posts of those seeking work. Proactively connect people you know to jobs that are open, and vice-versa.
4. Ask for calls with people you like and respect. Ask for advice, and ask if there's anything you can offer in exchange. If what you need is a contract, be honest. _Hey, I'm trying my hand at consulting, I'd like to have a short call to (1) get your advice (2) tell you what I'm trying to do and see if you know anyone who could benefit._
5. Don't filter people out just because they're "not the target demographic" or whatever. That's sales. If you say to yourself "I shouldn't talk to this person, because they probably don't need my services" then you are doing sales. _Don't sell._ Everyone on this earth needs help, and in my experience, the vast majority of people on this planet want to help others.

57
tree-sitter/config.json Normal file
View file

@ -0,0 +1,57 @@
{
"parser-directories": [
"/home/prehnra/github",
"/home/prehnra/src",
"/home/prehnra/source",
"/home/prehnra/treesitter"
],
"theme": {
"function": 26,
"variable.parameter": {
"underline": true
},
"constant.builtin": {
"bold": true,
"color": 94
},
"tag": 18,
"constructor": 136,
"keyword": 56,
"punctuation.bracket": 239,
"number": {
"color": 94,
"bold": true
},
"operator": {
"color": 239,
"bold": true
},
"string": 28,
"attribute": {
"italic": true,
"color": 124
},
"string.special": 30,
"variable.builtin": {
"bold": true
},
"constant": 94,
"embedded": null,
"type.builtin": {
"color": 23,
"bold": true
},
"property": 124,
"module": 136,
"function.builtin": {
"color": 26,
"bold": true
},
"comment": {
"color": 245,
"italic": true
},
"punctuation.delimiter": 239,
"type": 23
}
}