This commit is contained in:
Adam 2024-01-25 02:40:50 -05:00
commit 979baf66a1
51 changed files with 4647 additions and 0 deletions

13
.gitignore vendored Normal file
View file

@ -0,0 +1,13 @@
# Generated by Cargo
# will have compiled files and executables
/target/
pkg
# These are backup files generated by rustfmt
**/*.rs.bk
# node e2e test tools and outputs
node_modules/
test-results/
end2end/playwright-report/
playwright/.cache/

3041
Cargo.lock generated Normal file

File diff suppressed because it is too large Load diff

110
Cargo.toml Normal file
View file

@ -0,0 +1,110 @@
[workspace]
resolver = "2"
members = ["app", "frontend", "server"]
# need to be applied only to wasm build
[profile.release]
codegen-units = 1
lto = true
opt-level = 'z'
[workspace.dependencies]
leptos = { version = "0.5", features = ["nightly", "experimental-islands"] }
leptos_meta = { version = "0.5", features = ["nightly"] }
leptos_router = { version = "0.5", features = ["nightly"] }
leptos_axum = { version = "0.5", features = ["experimental-islands"]}
axum = "0.6.20"
cfg-if = "1"
console_error_panic_hook = "0.1.7"
console_log = "1"
http = "0.2.9"
log = "0.4.20"
simple_logger = "4.2.0"
thiserror = "1"
tokio = { version = "1.33.0", features = ["full"] }
tower = { version = "0.4.13", features = ["full"] }
tower-http = { version = "0.4", features = ["full"] }
wasm-bindgen = "0.2.89"
# See https://github.com/akesson/cargo-leptos for documentation of all the parameters.
# A leptos project defines which workspace members
# that are used together frontend (lib) & server (bin)
[[workspace.metadata.leptos]]
# this name is used for the wasm, js and css file names
name = "doordesk"
# the package in the workspace that contains the server binary (binary crate)
bin-package = "server"
# the package in the workspace that contains the frontend wasm binary (library crate)
lib-package = "frontend"
# The site root folder is where cargo-leptos generate all output. WARNING: all content of this folder will be erased on a rebuild. Use it in your server setup.
site-root = "target/site"
# The site-root relative folder where all compiled output (JS, WASM and CSS) is written
# Defaults to pkg
site-pkg-dir = "pkg"
# [Optional] The source CSS file. If it ends with .sass or .scss then it will be compiled by dart-sass into CSS. The CSS is optimized by Lightning CSS before being written to <site-root>/<site-pkg>/app.css
style-file = "style/tailwind.css"
# The tailwind input file.
#
# Optional, Activates the tailwind build
tailwind-input-file = "style/tailwind.css"
# The tailwind config file.
#
# Optional, defaults to "./tailwind.config.js" which if is not present
# is generated for you
tailwind-config-file = "./tailwind.config.js"
# Assets source dir. All files found here will be copied and synchronized to site-root.
# The assets-dir cannot have a sub directory with the same name/path as site-pkg-dir.
#
# Optional. Env: LEPTOS_ASSETS_DIR.
assets-dir = "public"
# The IP and port (ex: 127.0.0.1:3000) where the server serves the content. Use it in your server setup.
site-addr = "0.0.0.0:3000"
# The port to use for automatic reload monitoring
reload-port = 3001
# [Optional] Command to use when running end2end tests. It will run in the end2end dir.
# [Windows] for non-WSL use "npx.cmd playwright test"
# This binary name can be checked in Powershell with Get-Command npx
end2end-cmd = "npx playwright test"
end2end-dir = "end2end"
# The browserlist query used for optimizing the CSS.
browserquery = "defaults"
# Set by cargo-leptos watch when building with that tool. Controls whether autoreload JS will be included in the head
watch = false
# The environment Leptos will run in, usually either "DEV" or "PROD"
env = "DEV"
# The features to use when compiling the bin target
#
# Optional. Can be over-ridden with the command line parameter --bin-features
bin-features = []
# If the --no-default-features flag should be used when compiling the bin target
#
# Optional. Defaults to false.
bin-default-features = false
# The features to use when compiling the lib target
#
# Optional. Can be over-ridden with the command line parameter --lib-features
lib-features = []
# If the --no-default-features flag should be used when compiling the lib target
#
# Optional. Defaults to false.
lib-default-features = false

21
LICENSE Normal file
View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2022 henrik
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

72
README.md Normal file
View file

@ -0,0 +1,72 @@
# doordesk-rs
site-generator-eventually-cms in Rust using Leptos
right now it reads markdown and generates some old blog articles
## Leptos stuff
```bash
cargo install cargo-leptos
```
```bash
cargo leptos watch
```
## Installing Additional Tools
By default, `cargo-leptos` uses `nightly` Rust, `cargo-generate`, and `sass`. If you run into any trouble, you may need to install one or more of these tools.
1. `rustup toolchain install nightly --allow-downgrade` - make sure you have Rust nightly
2. `rustup default nightly` - setup nightly as default, or you can use rust-toolchain file later on
3. `rustup target add wasm32-unknown-unknown` - add the ability to compile Rust to WebAssembly
4. `cargo install cargo-generate` - install `cargo-generate` binary (should be installed automatically in future)
5. `npm install -g sass` - install `dart-sass` (should be optional in future)
## Compiling for Release
```bash
cargo leptos build --release
```
Will generate your server binary in target/server/release and your site package in target/site
## Testing Your Project
```bash
cargo leptos end-to-end
```
```bash
cargo leptos end-to-end --release
```
Cargo-leptos uses Playwright as the end-to-end test tool.
Tests are located in end2end/tests directory.
## Executing a Server on a Remote Machine Without the Toolchain
After running a `cargo leptos build --release` the minimum files needed are:
1. The server binary located in `target/server/release`
2. The `site` directory and all files within located in `target/site`
Copy these files to your remote server. The directory structure should be:
```text
doordesk
site/
```
Set the following environment variables (updating for your project as needed):
```text
LEPTOS_OUTPUT_NAME="doordesk"
LEPTOS_SITE_ROOT="site"
LEPTOS_SITE_PKG_DIR="pkg"
LEPTOS_SITE_ADDR="0.0.0.0:3000"
LEPTOS_RELOAD_PORT="3001"
```
Finally, run the server binary.

34
app/Cargo.toml Normal file
View file

@ -0,0 +1,34 @@
[package]
name = "app"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
leptos.workspace = true
leptos_meta.workspace = true
leptos_router.workspace = true
leptos_axum = { workspace = true, optional = true }
http.workspace = true
cfg-if.workspace = true
thiserror.workspace = true
serde = "1.0.195"
femark = { version = "0.1.5", optional = true }
toml = { version = "0.8.8", optional = true }
[features]
default = []
hydrate = ["leptos/hydrate", "leptos_meta/hydrate", "leptos_router/hydrate"]
ssr = [
"leptos/ssr",
"leptos_meta/ssr",
"leptos_router/ssr",
"dep:leptos_axum",
"dep:femark",
"dep:toml",
]
[package.metadata.cargo-all-features]
denylist = ["femark"]

2
app/src/components.rs Normal file
View file

@ -0,0 +1,2 @@
pub mod article;
pub mod slingshot;

View file

@ -0,0 +1,22 @@
use leptos::*;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
// One parsed article: the TOML frontmatter fields plus the HTML body
// rendered from markdown (see `slingshot` in components::slingshot).
pub struct ArticleData {
    pub content_type: String, // e.g. "blog" — distinguishes article kinds
    pub title: String,
    pub date: String, // make datetime?
    pub content: String, // pre-rendered HTML, injected raw by `Article`
}
#[component]
// Static display card for a single article: title, date, and the
// pre-rendered HTML body.
pub fn Article(data: ArticleData) -> impl IntoView {
    view! {
        <article class="p-7 my-5 mx-auto w-11/12 max-w-screen-xl rounded-lg">
            <h1 class="text-3xl font-light text-orange-600 capitalize max-6-xs">{&data.title}</h1>
            <hr class="opacity-50"/>
            <span class="pt-0 pb-3.5 text-xs opacity-50 m-t">{&data.date}</span>
            // `data.content` is injected as raw HTML; it is produced
            // server-side by femark from local markdown files, not from
            // user input — worth keeping in mind if sources ever change.
            <div inner_html=&data.content></div>
        </article>
    }
}

View file

@ -0,0 +1,54 @@
use crate::components::article::ArticleData;
use leptos::*;
use serde::Deserialize;
#[derive(Deserialize)]
// Shape of the TOML frontmatter code block expected at the top of each
// article markdown file (```toml ... ``` fence).
struct ArticleFrontmatter {
    content_type: String,
    title: String,
    date: String,
}
#[server]
pub async fn slingshot(path: String) -> Result<Vec<ArticleData>, ServerFnError> {
let mut articles = vec![];
for dir in std::fs::read_dir(path)? {
for file in std::fs::read_dir(dir?.path())? {
let fileinfo = file?;
let filepath = fileinfo.path();
if let Some(filetype) = filepath.extension() {
if filetype == "md" {
let file = std::fs::read_to_string(filepath)?;
let html_from_md =
femark::process_markdown_to_html_with_frontmatter(&file.to_string(), true)
.expect("Problem processing markdown");
let content = html_from_md.content;
let _toc = html_from_md.toc;
if let Some(front_raw) = html_from_md.frontmatter {
if let Some(front_code) = front_raw.code_block {
let toml: ArticleFrontmatter =
toml::from_str(&front_code.source)?;
articles.push(ArticleData {
content_type: toml.content_type,
title: toml.title,
date: toml.date,
content,
})
}
}
}
}
}
}
// Simulate lag
// use std::thread::sleep;
// use std::time::Duration;
// sleep(Duration::from_millis(300));
Ok(articles)
}

56
app/src/error_template.rs Normal file
View file

@ -0,0 +1,56 @@
use http::status::StatusCode;
use leptos::*;
use thiserror::Error;
#[derive(Clone, Debug, Error)]
// Application-level errors surfaced to the user through `ErrorTemplate`.
pub enum AppError {
    #[error("Not Found")]
    NotFound,
}
impl AppError {
    /// HTTP status code this error should be reported with.
    pub fn status_code(&self) -> StatusCode {
        // Exhaustive on purpose: adding a variant forces a decision here.
        match *self {
            Self::NotFound => StatusCode::NOT_FOUND,
        }
    }
}
#[component]
// Renders a list of application errors. Accepts errors either as a plain
// `Errors` value (wrapped into a signal here) or as an existing
// `RwSignal<Errors>`; at least one of the two props must be provided.
pub fn ErrorTemplate(
    #[prop(optional)] outside_errors: Option<Errors>,
    #[prop(optional)] errors: Option<RwSignal<Errors>>,
) -> impl IntoView {
    let errors = match outside_errors {
        Some(e) => create_rw_signal(e),
        None => match errors {
            Some(e) => e,
            None => panic!("No Errors found and we expected errors!"),
        },
    };
    // Get Errors from Signal
    let errors = errors.get_untracked();
    // Keep only the error values (keys dropped), collected into a boxed
    // slice; iterating it below yields references (`&_`), so the render
    // closure does not consume the list.
    let errors: Box<_> = errors.into_iter().filter_map(|(_k, v)| v.into()).collect();
    println!("Errors: {errors:#?}");
    view! {
        <article class="p-7 my-5 mx-auto w-11/12 max-w-screen-xl bg-opacity-10 rounded-md bg-zinc-700 shadow-1g">
            <h1 class="text-3xl font-light text-orange-600 capitalize max-6-xs">
                {if errors.len() > 1 { "Errors!" } else { "Error!" }}
            </h1>
            <hr class="opacity-50"/>
            <ul>
                {move || {
                    errors
                        .into_iter()
                        .map(|e: &_| view! { <li>{e.to_string()}</li> })
                        .collect_view()
                }}
            </ul>
        </article>
    }
}

62
app/src/lib.rs Normal file
View file

@ -0,0 +1,62 @@
use crate::error_template::{AppError, ErrorTemplate};
use leptos::*;
use leptos_meta::*;
use leptos_router::*;
pub mod components;
pub mod error_template;
pub mod routes;
// use crate::routes::{blog::*, home::*, projects::*};
use crate::routes::home::Home;
#[component]
// Root application component: global <head> metadata, sticky top
// navigation, and client-side routing.
pub fn App() -> impl IntoView {
    // Required for <Stylesheet>/<Title> and friends to register.
    provide_meta_context();
    view! {
        <Stylesheet id="leptos" href="/pkg/doordesk.css"/>
        <Title text="doordesk"/>
        // Unmatched routes fall back to the NotFound error template.
        <Router fallback=|| {
            let mut outside_errors = Errors::default();
            outside_errors.insert_with_default_key(AppError::NotFound);
            view! { <ErrorTemplate outside_errors/> }.into_view()
        }>
            // Some repetitive nav styling is defined in the main .css file
            <nav class="sticky top-0 z-50 bg-gradient-to-b from-zinc-800 to-zinc-900">
                <ul class="container flex items-center p-3">
                    // Logo
                    <p class="mx-1.5 sm:mx-6">"DoorDesk"</p>
                    <li>
                        <A href="" exact=true>
                            "Home"
                        </A>
                    </li>
                    <li>
                        <A href="/blog">"Blog"</A>
                    </li>
                    <li>
                        <A href="/projects">"Projects"</A>
                    </li>
                    <li>
                        // External link — plain <a>, not a router <A>.
                        <a href="https://git.doordesk.net">"Git"</a>
                    </li>
                </ul>
            </nav>
            <main>
                <Routes>
                    <Route path="" view=Home/>
                    // <Route path="blog" view=Blog/>
                    // <Route path="projects" view=Projects/>
                </Routes>
            </main>
            // Easter egg: spins on hover, links to a playlist.
            <p class="m-auto w-8 text-center duration-200 hover:rotate-180">
                <a href="https://open.spotify.com/playlist/3JRNw9gpt1w5ptsw8uDeYc?si=8f7e4191113f41f9">
                    ":)"
                </a>
            </p>
            <br/>
        </Router>
    }
}

3
app/src/routes.rs Normal file
View file

@ -0,0 +1,3 @@
pub mod home;
// pub mod blog;
// pub mod projects;

7
app/src/routes/blog.rs Normal file
View file

@ -0,0 +1,7 @@
use crate::components::article::*;
use leptos::*;
#[component]
// Placeholder blog page.
// NOTE(review): `Article` requires a `data` prop (see components::article),
// so `<Article/>` likely fails to compile if this route is re-enabled — the
// `blog` module is currently commented out in routes.rs. Confirm intent.
pub fn Blog() -> impl IntoView {
    view! { <Article/> }
}

32
app/src/routes/home.rs Normal file
View file

@ -0,0 +1,32 @@
use crate::components::article::*;
use crate::components::slingshot::*;
use crate::error_template::*;
use leptos::*;
#[component]
#[island]
// NOTE(review): both `#[component]` and `#[island]` are applied here. With
// `experimental-islands` enabled, `#[island]` alone normally marks an
// interactive island — confirm the doubled attribute compiles and is
// intentional.
pub fn Home() -> impl IntoView {
    // Client-side resource fetching articles via the `slingshot` server fn.
    // The `|| ()` source never changes, so this loads exactly once.
    let data_resource = create_local_resource(
        || (),
        |_| async move { slingshot("./public/articles".to_string()).await },
    );
    // One <Article> per fetched item; `and_then` propagates a server-fn
    // error outward so the ErrorBoundary below can render it.
    let articles_view = move || {
        data_resource.and_then(|data| {
            data.iter()
                .map(|article| view! { <Article data=article.clone()/> })
                .collect_view()
        })
    };
    view! {
        // Loading message until the resource resolves.
        <Suspense fallback=move || {
            view! { <p>"Loading..."</p> }
        }>
            <ErrorBoundary fallback=|errors| {
                view! { <ErrorTemplate errors=errors/> }
            }>{articles_view}</ErrorBoundary>
        </Suspense>
    }
}

View file

@ -0,0 +1,7 @@
use crate::components::article::*;
use leptos::*;
#[component]
// Placeholder projects page.
// NOTE(review): `Article` requires a `data` prop, so `<Article/>` likely
// fails to compile if this route is re-enabled — the `projects` module is
// currently commented out in routes.rs. Confirm intent.
pub fn Projects() -> impl IntoView {
    view! { <Article/> }
}

74
end2end/package-lock.json generated Normal file
View file

@ -0,0 +1,74 @@
{
"name": "end2end",
"version": "1.0.0",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "end2end",
"version": "1.0.0",
"license": "ISC",
"devDependencies": {
"@playwright/test": "^1.28.0"
}
},
"node_modules/@playwright/test": {
"version": "1.28.0",
"resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.28.0.tgz",
"integrity": "sha512-vrHs5DFTPwYox5SGKq/7TDn/S4q6RA1zArd7uhO6EyP9hj3XgZBBM12ktMbnDQNxh/fL1IUKsTNLxihmsU38lQ==",
"dev": true,
"dependencies": {
"@types/node": "*",
"playwright-core": "1.28.0"
},
"bin": {
"playwright": "cli.js"
},
"engines": {
"node": ">=14"
}
},
"node_modules/@types/node": {
"version": "18.11.9",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.11.9.tgz",
"integrity": "sha512-CRpX21/kGdzjOpFsZSkcrXMGIBWMGNIHXXBVFSH+ggkftxg+XYP20TESbh+zFvFj3EQOl5byk0HTRn1IL6hbqg==",
"dev": true
},
"node_modules/playwright-core": {
"version": "1.28.0",
"resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.28.0.tgz",
"integrity": "sha512-nJLknd28kPBiCNTbqpu6Wmkrh63OEqJSFw9xOfL9qxfNwody7h6/L3O2dZoWQ6Oxcm0VOHjWmGiCUGkc0X3VZA==",
"dev": true,
"bin": {
"playwright": "cli.js"
},
"engines": {
"node": ">=14"
}
}
},
"dependencies": {
"@playwright/test": {
"version": "1.28.0",
"resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.28.0.tgz",
"integrity": "sha512-vrHs5DFTPwYox5SGKq/7TDn/S4q6RA1zArd7uhO6EyP9hj3XgZBBM12ktMbnDQNxh/fL1IUKsTNLxihmsU38lQ==",
"dev": true,
"requires": {
"@types/node": "*",
"playwright-core": "1.28.0"
}
},
"@types/node": {
"version": "18.11.9",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.11.9.tgz",
"integrity": "sha512-CRpX21/kGdzjOpFsZSkcrXMGIBWMGNIHXXBVFSH+ggkftxg+XYP20TESbh+zFvFj3EQOl5byk0HTRn1IL6hbqg==",
"dev": true
},
"playwright-core": {
"version": "1.28.0",
"resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.28.0.tgz",
"integrity": "sha512-nJLknd28kPBiCNTbqpu6Wmkrh63OEqJSFw9xOfL9qxfNwody7h6/L3O2dZoWQ6Oxcm0VOHjWmGiCUGkc0X3VZA==",
"dev": true
}
}
}

13
end2end/package.json Normal file
View file

@ -0,0 +1,13 @@
{
"name": "end2end",
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {},
"keywords": [],
"author": "",
"license": "ISC",
"devDependencies": {
"@playwright/test": "^1.28.0"
}
}

View file

@ -0,0 +1,107 @@
import type { PlaywrightTestConfig } from "@playwright/test";
import { devices } from "@playwright/test";
/**
* Read environment variables from file.
* https://github.com/motdotla/dotenv
*/
// require('dotenv').config();
/**
* See https://playwright.dev/docs/test-configuration.
*/
/* Playwright configuration for the end2end suite. Run via
 * `cargo leptos end-to-end` (see README / Cargo.toml `end2end-cmd`). */
const config: PlaywrightTestConfig = {
  testDir: "./tests",
  /* Maximum time one test can run for. */
  timeout: 30 * 1000,
  expect: {
    /**
     * Maximum time expect() should wait for the condition to be met.
     * For example in `await expect(locator).toHaveText();`
     */
    timeout: 5000,
  },
  /* Run tests in files in parallel */
  fullyParallel: true,
  /* Fail the build on CI if you accidentally left test.only in the source code. */
  forbidOnly: !!process.env.CI,
  /* Retry on CI only */
  retries: process.env.CI ? 2 : 0,
  /* Opt out of parallel tests on CI. */
  workers: process.env.CI ? 1 : undefined,
  /* Reporter to use. See https://playwright.dev/docs/test-reporters */
  reporter: "html",
  /* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */
  use: {
    /* Maximum time each action such as `click()` can take. Defaults to 0 (no limit). */
    actionTimeout: 0,
    /* Base URL to use in actions like `await page.goto('/')`. */
    // baseURL: 'http://localhost:3000',
    /* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */
    trace: "on-first-retry",
  },
  /* Configure projects for major browsers */
  projects: [
    {
      name: "chromium",
      use: {
        ...devices["Desktop Chrome"],
      },
    },
    {
      name: "firefox",
      use: {
        ...devices["Desktop Firefox"],
      },
    },
    {
      name: "webkit",
      use: {
        ...devices["Desktop Safari"],
      },
    },
    /* Test against mobile viewports. */
    // {
    //   name: 'Mobile Chrome',
    //   use: {
    //     ...devices['Pixel 5'],
    //   },
    // },
    // {
    //   name: 'Mobile Safari',
    //   use: {
    //     ...devices['iPhone 12'],
    //   },
    // },
    /* Test against branded browsers. */
    // {
    //   name: 'Microsoft Edge',
    //   use: {
    //     channel: 'msedge',
    //   },
    // },
    // {
    //   name: 'Google Chrome',
    //   use: {
    //     channel: 'chrome',
    //   },
    // },
  ],
  /* Folder for test artifacts such as screenshots, videos, traces, etc. */
  // outputDir: 'test-results/',
  /* Run your local dev server before starting the tests */
  // webServer: {
  //   command: 'npm run start',
  //   port: 3000,
  // },
};
export default config;

View file

@ -0,0 +1,9 @@
import { test, expect } from "@playwright/test";

// Smoke test: the home page loads and carries the site title.
// The app sets `<Title text="doordesk"/>` (app/src/lib.rs), not the Leptos
// starter template's "Welcome to Leptos" this test originally asserted, so
// the old expectations could never pass against this app. The fixed-heading
// check was dropped because the home page's <h1> elements are article
// titles loaded from markdown, not a static greeting.
test("homepage has title and links to intro page", async ({ page }) => {
  await page.goto("http://localhost:3000/");
  await expect(page).toHaveTitle("doordesk");
});

18
frontend/Cargo.toml Normal file
View file

@ -0,0 +1,18 @@
[package]
name = "frontend"
version = "0.1.0"
edition = "2021"
[lib]
crate-type = ["cdylib", "rlib"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
app = { path = "../app", default-features = false, features = ["hydrate"] }
leptos = { workspace = true, features = [ "hydrate" ] }
console_error_panic_hook.workspace = true
console_log.workspace = true
log.workspace = true
wasm-bindgen.workspace = true

13
frontend/src/lib.rs Normal file
View file

@ -0,0 +1,13 @@
use app::*;
use leptos::*;
use wasm_bindgen::prelude::wasm_bindgen;
#[wasm_bindgen]
// WASM entry point invoked by the generated JS loader once the bundle is
// instantiated in the browser.
pub fn hydrate() {
    // initializes logging using the `log` crate
    _ = console_log::init_with_level(log::Level::Debug);
    console_error_panic_hook::set_once();
    // With experimental-islands, islands hydrate themselves, so no
    // whole-app mount happens here.
    // leptos::mount_to_body(App);
    leptos::leptos_dom::HydrationCtx::stop_hydrating();
}

6
public/about.txt Normal file
View file

@ -0,0 +1,6 @@
This favicon was generated using the following graphics from Twitter Twemoji:
- Graphics Title: 1f37b.svg
- Graphics Author: Copyright 2020 Twitter, Inc and other contributors (https://github.com/twitter/twemoji)
- Graphics Source: https://github.com/twitter/twemoji/blob/master/assets/svg/1f37b.svg
- Graphics License: CC-BY 4.0 (https://creativecommons.org/licenses/by/4.0/)

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 38 KiB

BIN
public/apple-touch-icon.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 10 KiB

View file

@ -0,0 +1,144 @@
```toml
content_type = "blog"
title = "Hume"
date = "2022 2 7"
```
<p style="text-align: right;">
April 22, 1958<br />
57 Perry Street<br />
New York City<br />
</p>
Dear Hume,
You ask advice: ah, what a very human and very dangerous thing to do! For to give advice
to a man who asks what to do with his life implies something very close to egomania. To
presume to point a man to the right and ultimate goal&mdash;to point with a trembling
finger in the RIGHT direction is something only a fool would take upon himself.
I am not a fool, but I respect your sincerity in asking my
advice. I ask you though, in listening to what I say, to remember that all advice can
only be a product of the man who gives it. What is truth to one may be a disaster to
another. I do not see life through your eyes, nor you through mine. If I were to attempt
to give you
*specific* advice, it would be too much like the blind leading the blind.
<p style="text-align: center;">
<i> "To be, or not to be: that is the question: Whether 'tis nobler in the mind to
suffer the slings and arrows of outrageous fortune, or to take arms against a sea of
troubles..." </i>
<br />
(Shakespeare)
</p>
And indeed, that IS the question: whether to float with the tide, or to swim for a goal.
It is a choice we must all make consciously or unconsciously at one time in our lives.
So few people understand this! Think of any decision you've ever made which had a
bearing on your future: I may be wrong, but I don't see how it could have been anything
but a choice however indirect&mdash;between the two things I've mentioned: the floating
or the swimming.
But why not float if you have no goal? That is another question.
It is unquestionably better to enjoy the floating than to swim in uncertainty. So how
does a man find a goal? Not a castle in the stars, but a real and tangible thing. How
can a man be sure he's not after the "big rock candy mountain," the enticing sugar-candy
goal that has little taste and no substance?
The answer&mdash;and, in a sense, the tragedy of life&mdash;is
that we seek to understand the goal and not the man. We set up a goal which demands of
us certain things: and we do these things. We adjust to the demands of a concept which
CANNOT be valid. When you were young, let us say that you wanted to be a fireman. I feel
reasonably safe in saying that you no longer want to be a fireman. Why? Because your
perspective has changed. It's not the fireman who has changed, but you. Every man is the
sum total of his reactions to experience. As your experiences differ and multiply, you
become a different man, and hence your perspective changes. This goes on and on. Every
reaction is a learning process; every significant experience alters your perspective.
So it would seem foolish, would it not, to adjust our lives to
the demands of a goal we see from a different angle every day? How could we ever hope to
accomplish anything other than galloping neurosis?
The answer, then, must not deal with goals at all, or not with
tangible goals, anyway. It would take reams of paper to develop this subject to
fulfillment. God only knows how many books have been written on "the meaning of man" and
that sort of thing, and god only knows how many people have pondered the subject. (I use
the term "god only knows" purely as an expression.) There's very little sense in my
trying to give it up to you in the proverbial nutshell, because I'm the first to admit
my absolute lack of qualifications for reducing the meaning of life to one or two
paragraphs.
I'm going to steer clear of the word "existentialism," but you
might keep it in mind as a key of sorts. You might also try something called
<i>Being and Nothingness</i> by Jean-Paul Sartre, and another little thing called
<i>Existentialism: From Dostoyevsky to Sartre.</i> These are merely suggestions. If
you're genuinely satisfied with what you are and what you're doing, then give those
books a wide berth. (Let sleeping dogs lie.) But back to the answer. As I said, to put
our faith in tangible goals would seem to be, at best, unwise. So we do not strive to be
firemen, we do not strive to be bankers, nor policemen, nor doctors. WE STRIVE TO BE
OURSELVES.
But don't misunderstand me. I don't mean that we can't BE
firemen, bankers, or doctors&mdash;but that we must make the goal conform to the
individual, rather than make the individual conform to the goal. In every man, heredity
and environment have combined to produce a creature of certain abilities and
desires&mdash;including a deeply ingrained need to function in such a way that his life
will be MEANINGFUL. A man has to BE something; he has to matter.
As I see it then, the formula runs something like this: a man
must choose a path which will let his ABILITIES function at maximum efficiency toward
the gratification of his DESIRES. In doing this, he is fulfilling a need (giving himself
identity by functioning in a set pattern toward a set goal) he avoids frustrating his
potential (choosing a path which puts no limit on his self-development), and he avoids
the terror of seeing his goal wilt or lose its charm as he draws closer to it (rather
than bending himself to meet the demands of that which he seeks, he has bent his goal to
conform to his own abilities and desires).
In short, he has not dedicated his life to reaching a
pre-defined goal, but he has rather chosen a way of life he KNOWS he will enjoy. The
goal is absolutely secondary: it is the
<i>functioning toward the goal</i> which is important. And it seems almost ridiculous to
say that a man MUST function in a pattern of his own choosing; for to let another man
define your own goals is to give up one of the most meaningful aspects of life&mdash;the
definitive act of will which makes a man an individual.
Let's assume that you think you have a choice of eight paths to
follow (all pre-defined paths, of course). And let's assume that you can't see any real
purpose in any of the eight. Then&mdash;and here is the essence of all I've
said&mdash;you MUST FIND A NINTH PATH.
Naturally, it isn't as easy as it sounds. You've lived a
relatively narrow life, a vertical rather than a horizontal existence. So it isn't any
too difficult to understand why you seem to feel the way you do. But a man who
procrastinates in his CHOOSING will inevitably have his choice made for him by
circumstance.
So if you now number yourself among the disenchanted, then you
have no choice but to accept things as they are, or to seriously seek something else.
But beware of looking for
<i>goals</i>: look for a way of life. Decide how you want to live and then see what you
can do to make a living WITHIN that way of life. But you say, "I don't know where to
look; I don't know what to look for."
And there's the crux. Is it worth giving up what I have to look
for something better? I don't know&mdash;is it? Who can make that decision but you? But
even by DECIDING TO LOOK, you go a long way toward making the choice.
If I don't call this to a halt, I'm going to find myself writing
a book. I hope it's not as confusing as it looks at first glance. Keep in mind, of
course, that this is MY WAY of looking at things. I happen to think that it's pretty
generally applicable, but you may not. Each of us has to create our own credo&mdash;this
merely happens to be mine.
If any part of it doesn't seem to make sense, by all means call
it to my attention. I'm not trying to send you out "on the road" in search of Valhalla,
but merely pointing out that it is not necessary to accept the choices handed down to
you by life as you know it. There is more to it than that&mdash;no one HAS to do
something he doesn't want to do for the rest of his life. But then again, if that's what
you wind up doing, by all means convince yourself that you HAD to do it. You'll have
lots of company.
And that's it for now. Until I hear from you again, I remain,
<p style="text-align: right;">
your friend...<br />
Hunter
</p>

View file

@ -0,0 +1,53 @@
```toml
content_type = "blog"
title = "Change"
date = "2022 5 6"
```
<p style="text-align: center;">
<i>"Life should not be a journey to the grave with the intention of arriving safely in
a pretty and well preserved body, but rather to skid in broadside in a cloud of
smoke, thoroughly used up, totally worn out, and loudly proclaiming "Wow! What a
Ride!"</i>
<br />
(Hunter S.Thompson)
</p>
There comes a time in one's life, perhaps multiple, when there
is an unquestionable need for change. Maybe you're not sure how, why, or where it came
from, or where even it is you're headed, or how to get there, but here you are taking
your first steps toward a new life. A journey into the unknown. I've just set out on one
of these journeys, and even as I sit here typing this now I can't help but feel a little
bit nervous, but even more excited. I have absolutely no idea where I'm headed to be
quite honest. But I know where I've been.
Growing up I would always be taking things apart, I HAD to see
what was inside. What makes this thing, a thing. What makes it tick? Can it tick faster?
For no particular reason I just had to know every little detail about what made the
thing the thing that it was and why it did what it did. It's a gift and a curse of
sorts. Quickly this led to taking apart things of increasing complexity, our home
computer for instance. Luckily I was able to get it put back together before my parents
got home because it was made clear that this was not allowed, and the CPU didn't seem to
mind the sudden absence of thermal compound either. I must have been around 7 or 8 years
old at that time, and it still puzzles me just what is going on inside there.
I have a better idea now, naturally I had to figure out just
what all those pieces were, what they did, and how they did it. What if I replaced some
of these parts with other parts? As I honed my web searching skills to try to answer the
seemingly endless hows and whys I ended up building myself a little hotrod computer and
then raced it against other peoples' computers because why not, right? And I actually
won! It was an overclocking contest called the winter suicides, a kind of computer drag
race. Highest CPU clock speed wins, you have to boot into Windows XP, open CPU-Z, and
take a screenshot. If it crashes immediately after that (and it did) it still counts. I
got some pretty weird looks from my father as I stuck my computer outside in the snow
but that was a small price to pay for the grand prize which was a RAM kit (2GB of DDR400
I believe) and RAM cooler.
After getting comfortable with hardware I started to study the
software side of things, I tried teaching myself C++ (and didn't get very far), I did
teach myself HTML and CSS, some JavaScript, and started playing around with Linux. It
took until only a year or two ago to finally be completely on Linux full time (gaming
holding me back), I even have a Linux phone now (Pinephone Pro). At this point I reached
high school and my attention moved from computers to cars.
To be continued...

View file

@ -0,0 +1,14 @@
```toml
content_type = "blog"
title = "It's about time, NVIDIA"
date = "2022 5 20"
```
It's about time... NVIDIA has finally released and is starting to
support Open-source software with their new modules released recently for the Linux
kernel. NVIDIA historically has been seemingly against Linux/OSS for whatever reason.
This is a huge step forward both for end users and NVIDIA.
<p style="text-align: center;">
<a href="https://github.com/NVIDIA/open-gpu-kernel-modules">
NVIDIA open-gpu-kernel-modules</a > on github.
</p>

View file

@ -0,0 +1,41 @@
```toml
content_type = "blog"
title = "Back to School"
date = "2022 6 2"
```
### Where the hell have I been!?
Looking back at the past 5 weeks, it's impressive the amount of new things that have
been shoved in my face. A list I'll try to make contains:
- [Python](https://www.python.org)
- [Pandas](https://pandas.pydata.org)
- [Matplotlib](https://matplotlib.org)
- [Seaborn](https://seaborn.pydata.org)
- [Statsmodels](https://www.statsmodels.org)
- [Scikit-Learn](https://scikit-learn.org)
- [Beautiful Soup](https://www.crummy.com/software/BeautifulSoup)
- [Selenium](https://www.selenium.dev)
- [PRAW](https://github.com/praw-dev/praw)
- Plus the math and background to go with it all!
It doesn't seem like much at the time except chaos, but then about a week later it
finally sets in. After tomorrow we'll be halfway through the course and while I guess
you could say that it's half over, or that it signifies progress, I feel it's more like
being halfway up Mount Everest and looking&mdash;trying to squint through the clouds and
make out what looks like the peak. I don't see a peak and maybe it's because I'm
nearsighted but I can also tell you that if I were to look down then I can't see where
I've started either!
It's been quite a ride and I hope to see it to the end. I don't have time to even think
about it further. It's where I perform my best though, on my heels. Probably by
design...
### After?
I would like to use these skills to expand on some of the class projects I've worked on
and I have some other ideas using language processing I think would be fun to play with.
I think it would be fun to create an internet chat bot, we'll start with text but if
speech recognition is practical then I may add and play with that too. I would also like
to make some sort of "Propaganda Detector"

View file

@ -0,0 +1,87 @@
```toml
content_type = "blog"
title = "It's a post about nothing!"
date = "2022 7 1"
```
The progress update
<p style='text-align: center;'>
<img src="https://old.doordesk.net/pics/plates.gif" />
</p>
### Bots
After finding a number of ways not to begin the project formerly known as my capstone,
I've finally settled on a
[dataset](https://www.kaggle.com/datasets/bwandowando/ukraine-russian-crisis-twitter-dataset-1-2-m-rows). The project is about detecting bots, starting with twitter. I've
[studied](https://old.doordesk.net/projects/bots/docs/debot.pdf) a
[few](https://old.doordesk.net/projects/bots/docs/botwalk.pdf)
[different](https://old.doordesk.net/projects/bots/docs/smu.pdf)
[methods](https://old.doordesk.net/projects/bots/docs/div.pdf) of bot detection and particularly like the
[DeBot](https://old.doordesk.net/projects/bots/docs/debot.pdf) and
[BotWalk](https://old.doordesk.net/projects/bots/docs/botwalk.pdf) methods and think I will try to mimic them,
in that order.
Long story short, DeBot uses a fancy method of time correlation to group accounts
together based on their posting habits. By identifying accounts that all have identical
posting habits that are beyond what a human could do by coincidence, this is a great
first step to identifying an initial group of seed bots. This can then be expanded by
using BotWalk's method of checking all the followers of the bot accounts and comparing
anomalous behavior to separate humans from non-humans. Rinse and repeat. I'll begin this
on twitter but hope to make it platform independent.
### The Real Capstone
The bot project is too much to complete in this short amount of time, so instead I'm
working with a
[small dataset](https://archive-beta.ics.uci.edu/ml/datasets/auto+mpg)
containing info about cars with some specs and I'll predict MPG. The problem itself for
me is trivial from past study/experience as an auto mechanic so I should have a nice
playground to focus completely on modeling. It's a very small data set too at < 400
lines, I should be able to test multiple models in depth very quickly. It may or may not
be interesting, expect a write-up anyway.
### Cartman
Well I guess I've adopted an 8 year old. Based on
[this project](https://github.com/RuolinZheng08/twewy-discord-chatbot)
I've trained a chat bot with the personality of Eric Cartman. He's a feature of my
Discord bot living on a Raspberry Pi 4B, which I would say is probably the slowest
computer you would ever want to run something like this on. It takes a somewhat
reasonable amount of time to respond, almost feeling human if you make it think a bit.
The project uses [PyTorch](https://pytorch.org/) to train the model. I'd like
to re-create it using [TensorFlow](https://www.tensorflow.org/) as an
exercise to understand each one better, but that's a project for another night. It also
only responds to one line at a time so it can't carry a conversation with context,
yet...
### Website
I never thought I'd end up having a blog. I had no plans at all actually when I set up
this server, just to host a silly page that I would change from time to time whenever I
was bored. I've been looking at
[Hugo](https://gohugo.io/) as a way to organize what is now just a list of
divs in a single html file slowly growing out of control. Basically you just dump each
post into its own file, create a template of how to render them, and let it do its
thing. I should be able to create a template that recreates exactly what you see right
now, which is beginning to grow on me.
If you haven't noticed yet, (and I don't blame you if you haven't because only a handful
of people even visit this page) each time there is an update there is a completely new
background image, color scheme, a whole new theme. This is because this page is a near
identical representation of terminal windows open on my computer, and each time I update the
page I also update it with my current wallpaper, which generates the color scheme
dynamically using
[Pywal](https://github.com/dylanaraps/pywal).
TODO:
* Code blocks with syntax highlighting
* Develop an easy workflow to dump a jupyter notebook into the website and have it display nicely with minimal effort
* Find a way to hack plots generated with matplotlib to change colors with the page color scheme (or find another way to do the same thing)
* Automate generating the site - probably [Hugo](https://gohugo.io/)
* Separate from blog, projects, etc.
* Add socials, contact, about
* A bunch of stuff I haven't even thought of yet
That's all for now

View file

@ -0,0 +1 @@
cartman

View file

@ -0,0 +1,8 @@
```toml
content_type = "game"
title = "adam"
date = "2022 9 11"
```
[adam](https://old.doordesk.net/games/adam/) is a quick fps demo to test how well WebGL
performs using [Unity](https://unity.com).

View file

@ -0,0 +1,8 @@
```toml
content_type = "game"
title = "balls"
date = "2022 9 13"
```
[balls](https://old.doordesk.net/games/balls/) is another demo to test WebGL performance.
This time using [Godot Engine](https://godotengine.org/).

View file

@ -0,0 +1,8 @@
```toml
content_type = "game"
title = "fps"
date = "2022 10 9"
```
[fps](https://old.doordesk.net/games/fps/) is a Godot/WebGL experiment from scratch with
multiplayer using websockets and a master/slave architecture. Invite a friend or open multiple instances!

View file

@ -0,0 +1,23 @@
<h3>Some games using wasm/webgl</h3>
<p>Browser performance as of January 2023</p>
<p>Tested better:</p>
<ol>
<li>Opera</li>
<li>Firefox Developer Edition</li>
<li>Brave</li>
</ol>
<p>Tested poor or broken:</p>
<ol>
<li>Safari</li>
<li>Chrome stable release or older</li>
<li>Edge, see above^</li>
</ol>
<p>Consider anything else average or let me know otherwise</p>
<ul>
---MY GAMES---
<li><a href="https://old.doordesk.net/games/adam">adam</a> - The first. Unity Demo/Tutorial with some mods</li>
<li><a href="https://old.doordesk.net/games/fps">multiplayer fps</a> - Dive into netcode with Godot (Open two, invite
your friends!)</li>
<li><a href="https://old.doordesk.net/games/../snek">snek</a> - Canvas + JS (the actual first)</li>
<li><a href="https://old.doordesk.net/games/balls">balls</a> - Godot demo engine test</li>
</ul>

View file

@ -0,0 +1,7 @@
```toml
content_type = "game"
title = "snek"
date = "2022 5 20"
```
[snek](https://old.doordesk.net/snek) is a simple snake game made with JS/Canvas.

View file

@ -0,0 +1,114 @@
```toml
content_type = "project"
title = "Predicting Housing Prices"
date = "2022 5 29"
```
A recent project I had for class was to use [scikit-learn](https://scikit-learn.org/stable/index.html) to create a regression model that will predict the price of a house based on some features of that house.
### How?
1. Pick out and analyze certain features from the dataset. Used here is the [Ames Iowa Housing Data](https://www.kaggle.com/datasets/marcopale/housing) set.
2. Do some signal processing to provide a clearer input down the line, improving accuracy
3. Make predictions on sale price
4. Compare the predicted prices to recorded actual sale prices and score the results
### What's important?
Well, I don't know much about appraising houses. But I have heard the term "price per
square foot" so we'll start with that:
<p style="text-align: center;"><img src="https://old.doordesk.net/pics/livarea_no_outliers.png" /></p>
There is a feature for 'Above Grade Living Area' meaning floor area that's not basement.
It looks linear, there were a couple outliers to take care of but this should be a good
signal.
Next I calculated the age of every house at time of sale and plotted it:
<p style="text-align: center;"><img src="https://old.doordesk.net/pics/age.png" /></p>
Exactly what I'd expect to see. Price drops as age goes up, a few outliers. We'll
include that in the model.
Next I chose the area of the lot:
<p style="text-align: center;"><img src="https://old.doordesk.net/pics/lot_area.png" /></p>
Lot area positively affects sale price because land has value. Most of the houses here
have similarly sized lots.
### Pre-Processing
<div>
<p>
Here is an example where using
<a
href="https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html"
target="new"
>StandardScaler()</a
>
just doesn't cut it. The values are all scaled in a way where they can be compared
to one-another, but outliers have a huge effect on the clarity of the signal as a
whole.
</p>
<span>
<center>
<img src="https://old.doordesk.net/pics/age_liv_area_ss.png" />
<img src="https://old.doordesk.net/pics/age_liv_qt.png" />
</center>
</span>
</div>
You should clearly see in the second figure that an old shed represented in the top left
corner will sell for far less than a brand new mansion represented in the bottom right
corner. This is the result of using the [QuantileTransformer()](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.QuantileTransformer.html)
for scaling.
### The Model
A simple [LinearRegression()](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html)
should do just fine, with [QuantileTransformer()](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.QuantileTransformer.html)
scaling of course.
<center>
<img src="https://old.doordesk.net/pics/mod_out.png" />
</center>
Predictions were within about $35-$40k on average.
It's a little fuzzy in the higher end of prices, I believe due to the small sample size.
There are a few outliers that can probably be reduced with some deeper cleaning however
I was worried about going too far and creating a different story. An "ideal" model in
this case would look like a straight line.
### Conclusion
This model was designed with a focus on quality and consistency. With some refinement,
the margin of error should be able to be reduced to a reasonable number and then
reliable, accurate predictions can be made for any application where there is a need to
assess the value of a property.
I think a large limiting factor here is the size of the dataset compared to the quality
of the features provided. There are
<a href="http://jse.amstat.org/v19n3/decock/DataDocumentation.txt">more features</a>
from this dataset that can be included but I think the largest gains will be had from
simply feeding in more data. As you stray from the "low hanging fruit" features, the
quality of your model overall starts to go down.
Here's an interesting case, Overall Condition of Property:
<center>
<img src="https://old.doordesk.net/pics/overall_cond.png" />
</center>
You would expect sale price to increase with quality, no? Yet it goes down.. Why?
I believe it's because a lot of sellers want to say that their house is of highest
quality, no matter the condition. It seems that most normal people (who aren't liars)
don't care to rate their property and just say it's average. Both of these combined
actually create a negative trend for quality which definitely won't help predictions!
I would like to expand this in the future, maybe scraping websites like Zillow to gather
more data.
We'll see.

View file

@ -0,0 +1,109 @@
```toml
content_type = "project"
title = "What goes into a successful Reddit post?"
date = "2022 6 16"
```
In an attempt to find out what about a Reddit post makes it successful I will use some
classification models to try to determine which features have the highest influence on
making a correct prediction. In particular I use
[Random Forest](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html)
and
[KNNeighbors](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html)
classifiers. Then I'll score the results and see what the highest predictors are.
To find what goes into making a successful Reddit post we'll have to do a few things,
first of which is collecting data:
### Introducing Scrapey!
[Scrapey](https://old.doordesk.net/projects/reddit/scrapey.html) is my scraper script that takes a snapshot
of Reddit/r/all hot and saves the data to a .csv file including a calculated age for
each post about every 12 minutes. Run time is about 2 minutes per iteration and each
time adds about 100 unique posts to the list while updating any post it's already seen.
I run this in the background in a terminal and it updates my data set every ~12 minutes.
I have records of all posts within about 12 minutes of them disappearing from /r/all.
### EDA
[Next I take a quick look to see what looks useful](https://old.doordesk.net/projects/reddit/EDA.html), what
doesn't, and check for outliers that will throw off the model. There were a few outliers
to drop from the num_comments column.
Chosen Features:
* Title
* Subreddit
* Over_18
* Is_Original_Content
* Is_Self
* Spoiler
* Locked
* Stickied
* Num_Comments (Target)
Then I split the data I'm going to use into two dataframes (numeric and non) to prepare
for further processing.
### Clean
[Cleaning the data further](https://old.doordesk.net/projects/reddit/clean.html) consists of:
* Scaling numeric features between 0-1
* Converting '_' and '-' to whitespace
* Removing any non a-z or A-Z or whitespace
* Stripping any leftover whitespace
* Deleting any titles that were reduced to empty strings
### Model
If the number of comments of a post is greater than the median total number of comments
then it's assigned a 1, otherwise a 0. This is the target column. I then try some
lemmatizing, it doesn't seem to add much. After that I create and join some dummies,
then split and feed the new dataframe into
[Random Forest](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html)
and [KNNeighbors](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html)
classifiers. Both actually scored the same with
[cross validation](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_validate.html)
so I mainly used the forest.
[Notebook Here](https://old.doordesk.net/projects/reddit/model.html)
### Conclusion
Some Predictors from Top 25:
* Is_Self
* Subreddit_Memes
* OC
* Over_18
* Subreddit_Shitposting
* Is_Original_Content
* Subreddit_Superstonk
Popular words: 'like', 'just', 'time', 'new', 'oc', 'good', 'got', 'day', 'today', 'im',
'dont', and 'love'.
People on Reddit (at least in the past few days) like their memes, porn, and talking
about their day. And it's preferred if the content is original and self posted. So yes,
post your memes to memes and shitposting, tag them NSFW, use some words from the list,
and rake in all that sweet karma!
But it's not that simple, this is a fairly simple model, with simple data. To go beyond
this I think the comments would have to be analyzed.
[Lemmatisation](https://en.wikipedia.org/wiki/Lemmatisation) I thought would
be the most influential piece, and I still think that thinking is correct. But in this
case it doesn't apply because there is no real meaning to be had from reddit post
titles, at least to a computer. (or I did something wrong)
There's a lot more seen by a human than just the text in the title, there's often an
image attached, most posts reference a recent/current event, they could be an inside
joke of sorts. For some posts there could be emojis in the title, and depending on their
combination they can take on a meaning completely different from their individual
meanings. The next step from here I believe is to analyze the comments section of these
posts because in this moment I think that's the easiest way to truly describe the
meaning of a post to a computer. With what was gathered here I'm only able to get 10% above
baseline and I think that's all there is to be had here, I mean we can tweak for a few
percent probably but I don't think there's much left on the table.

View file

@ -0,0 +1,32 @@
```toml
content_type = "project"
title = "Cartman is public!"
date = "2022 10 20"
```
[Cartman](https://old.doordesk.net/cartman) is trained by combining Microsoft's
[DialoGPT-medium](https://huggingface.co/microsoft/DialoGPT-medium)
NLP model (GPT2 model trained on 147M samples of multi-turn dialogue from Reddit) with 17 seasons of
[South Park](https://southparkstudios.com)
transcripts.
Requests are routed from
[Nginx](https://nginx.com)
through
[WireGuard](https://www.wireguard.com)
to a
[Raspberry Pi 4B 8GB](https://www.tomshardware.com/news/raspberry-pi-4-8gb-tested) running
[FastAPI](https://fastapi.tiangolo.com),
and the Cartman model using [PyTorch](https://pytorch.org).
It has enough RAM for more, but the CPU is pretty much at its limit. Expect it to take a few
seconds, I'm cheap. Sorry(kinda).
You can download a Docker image if you'd like to run it on your own hardware for either
[x86_64](https://old.doordesk.net/files/chatbots_api_x86_64.tar.gz)
or
[aarch64](https://old.doordesk.net/files/chatbots_api_aarch64.tar.gz).
More info [here](https://github.com/adoyle0/cartman) as well as
[example scripts](https://github.com/adoyle0/cartman/tree/master/api/test)
to talk to the docker container.

View file

@ -0,0 +1,12 @@
```toml
content_type = "project"
title = "Lightning"
date = "2023 4 27"
```
[Lightning](https://lightning.doordesk.net) is a mapping/data vis project for finding
EV charging stations. It uses [Martin](https://github.com/maplibre/martin) to serve
tiles generated from [OpenStreetMap](https://www.openstreetmap.org) data to a
[MapLibre](https://maplibre.org/) frontend. Additional layers are added on top
via [Deck.gl](https://deck.gl) using data from [EVChargerFinder](https://github.com/kevin-fwu/EVChargerFinder) made by my friend
Kevin.

BIN
public/favicon-16x16.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 685 B

BIN
public/favicon-32x32.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.5 KiB

BIN
public/favicon.ico Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

1
public/site.webmanifest Normal file
View file

@ -0,0 +1 @@
{"name":"","short_name":"","icons":[{"src":"/android-chrome-192x192.png","sizes":"192x192","type":"image/png"},{"src":"/android-chrome-512x512.png","sizes":"512x512","type":"image/png"}],"theme_color":"#ffffff","background_color":"#ffffff","display":"standalone"}

2
rust-toolchain.toml Normal file
View file

@ -0,0 +1,2 @@
[toolchain]
channel = "nightly"

1
rustfmt.toml Normal file
View file

@ -0,0 +1 @@
edition = "2021"

18
server/Cargo.toml Normal file
View file

@ -0,0 +1,18 @@
[package]
name = "server"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
app = { path = "../app", default-features = false, features = ["ssr"] }
leptos = { workspace = true, features = [ "ssr" ]}
leptos_axum.workspace = true
axum.workspace = true
simple_logger.workspace = true
tokio.workspace = true
tower.workspace = true
tower-http.workspace = true
log.workspace = true

46
server/src/fileserv.rs Normal file
View file

@ -0,0 +1,46 @@
// use app::error_template::AppError;
// use app::error_template::ErrorTemplate;
use app::App;
use axum::response::Response as AxumResponse;
use axum::{
body::{boxed, Body, BoxBody},
extract::State,
http::{Request, Response, StatusCode, Uri},
response::IntoResponse,
};
use leptos::*;
use tower::ServiceExt;
use tower_http::services::ServeDir;
pub async fn file_and_error_handler(
uri: Uri,
State(options): State<LeptosOptions>,
req: Request<Body>,
) -> AxumResponse {
let root = options.site_root.clone();
let res = get_static_file(uri.clone(), &root).await.unwrap();
if res.status() == StatusCode::OK {
res.into_response()
} else {
let handler =
leptos_axum::render_app_to_stream(options.to_owned(), move || view! { <App/> });
handler(req).await.into_response()
}
}
async fn get_static_file(uri: Uri, root: &str) -> Result<Response<BoxBody>, (StatusCode, String)> {
let req = Request::builder()
.uri(uri.clone())
.body(Body::empty())
.unwrap();
// `ServeDir` implements `tower::Service` so we can call it with `tower::ServiceExt::oneshot`
// This path is relative to the cargo root
match ServeDir::new(root).oneshot(req).await {
Ok(res) => Ok(res.map(boxed)),
Err(err) => Err((
StatusCode::INTERNAL_SERVER_ERROR,
format!("Something went wrong: {err}"),
)),
}
}

37
server/src/main.rs Normal file
View file

@ -0,0 +1,37 @@
use app::*;
use axum::{routing::post, Router};
use fileserv::file_and_error_handler;
use leptos::*;
use leptos_axum::{generate_route_list, LeptosRoutes};
pub mod fileserv;
#[tokio::main]
async fn main() {
simple_logger::init_with_level(log::Level::Debug).expect("couldn't initialize logging");
// Setting get_configuration(None) means we'll be using cargo-leptos's env values
// For deployment these variables are:
// <https://github.com/leptos-rs/start-axum#executing-a-server-on-a-remote-machine-without-the-toolchain>
// Alternately a file can be specified such as Some("Cargo.toml")
// The file would need to be included with the executable when moved to deployment
let conf = get_configuration(None).await.unwrap();
let leptos_options = conf.leptos_options;
let addr = leptos_options.site_addr;
let routes = generate_route_list(App);
// build our application with a route
let app = Router::new()
.route("/api/*fn_name", post(leptos_axum::handle_server_fns))
.leptos_routes(&leptos_options, routes, App)
.fallback(file_and_error_handler)
.with_state(leptos_options);
// run our app with hyper
// `axum::Server` is a re-export of `hyper::Server`
log::info!("listening on http://{}", &addr);
axum::Server::bind(&addr)
.serve(app.into_make_service())
.await
.unwrap();
}

93
style/tailwind.css Normal file
View file

@ -0,0 +1,93 @@
@tailwind base;
@tailwind components;
@tailwind utilities;
html {
@apply text-orange-50 antialiased font-mono tracking-tighter;
}
body {
@apply bg-gradient-to-br from-zinc-900 to-zinc-950 bg-fixed;
}
nav {
@apply shadow-sm shadow-zinc-950;
}
nav li {
@apply border-b-2 mx-1.5 sm:mx-6;
@apply border-transparent hover:border-orange-700 duration-300;
}
nav li:has(> a[aria-current="page"]) {
@apply border-b-orange-700 border-b-2;
}
/* Shadows don't work inline for some reason */
article {
@apply bg-zinc-900 shadow-inner shadow-zinc-950;
}
/* for innerHTML articles */
article p {
@apply mt-5 indent-4;
}
article h3 {
@apply mt-8 text-2xl text-orange-600;
}
article h3 > a {
@apply text-orange-600;
}
article a {
@apply text-orange-300 hover:underline;
}
article ul,
article ol {
@apply mt-5 list-inside;
}
article ul {
@apply list-disc;
}
article ol {
@apply list-decimal;
}
/* Code blocks */
.hh4 {
@apply text-orange-600;
}
.hh3 {
@apply text-blue-500;
}
.hh13 {
@apply text-orange-800;
}
.hh10 {
@apply text-slate-500;
}
.hh5 {
@apply text-orange-300;
}
/* dunno what this is yet */
.hh18 {
color: red;
}
.code-block {
@apply bg-black bg-opacity-50 p-3 rounded-lg shadow-lg shadow-black max-w-fit m-auto;
}
.code-block-inner {
@apply p-2 mt-2 text-sm border-t-gray-500 overflow-x-scroll border-t-2;
}

12
tailwind.config.js Normal file
View file

@ -0,0 +1,12 @@
/** @type {import('tailwindcss').Config} */
module.exports = {
content: {
relative: true,
files: ["*.html", "./app/**/*.rs"],
},
theme: {
extend: {},
},
plugins: [],
}