Convert the whole site to use Pelican instead of Jekyll

Gergely Polonkai 2019-11-05 06:21:56 +01:00
parent 49961a3007
commit d5c1c942f0
GPG Key ID: 38F402C8471DDE93
534 changed files with 7315 additions and 6642 deletions

@@ -1,3 +0,0 @@
---
BUNDLE_PATH: "vendor"
BUNDLE_DISABLE_SHARED_GEMS: "true"

.gitignore

@@ -1,4 +1,2 @@
/vendor
_site/
/.bundle
/vendor
/output/
/gergelypolonkaieu_site.egg-info/

@@ -1,2 +0,0 @@
(setq hyde/git/remote "origin"
      hyde/git/remote-branch "master")

404.md

@@ -1,11 +0,0 @@
---
layout: page
title: Not Found
permalink: /404.html
---
The page you are looking for is not here. Maybe it was, but I have removed it. Most likely intentionally. If you think I made a mistake, please tell me.
{% if page.url contains '/akarmi' %}
If you are looking for the pictures that used to be here, you should definitely contact me. For reasons.
{% endif %}

CNAME

@@ -1 +0,0 @@
gergely.polonkai.eu

@@ -1,5 +0,0 @@
source 'https://rubygems.org'
gem 'jekyll'
gem 'jekyll-gist'
gem 'jekyll-paginate'

@@ -1,78 +0,0 @@
GEM
  remote: https://rubygems.org/
  specs:
    addressable (2.7.0)
      public_suffix (>= 2.0.2, < 5.0)
    colorator (1.1.0)
    concurrent-ruby (1.1.5)
    em-websocket (0.5.1)
      eventmachine (>= 0.12.9)
      http_parser.rb (~> 0.6.0)
    eventmachine (1.2.7)
    faraday (0.17.0)
      multipart-post (>= 1.2, < 3)
    ffi (1.11.1)
    forwardable-extended (2.6.0)
    http_parser.rb (0.6.0)
    i18n (1.7.0)
      concurrent-ruby (~> 1.0)
    jekyll (4.0.0)
      addressable (~> 2.4)
      colorator (~> 1.0)
      em-websocket (~> 0.5)
      i18n (>= 0.9.5, < 2)
      jekyll-sass-converter (~> 2.0)
      jekyll-watch (~> 2.0)
      kramdown (~> 2.1)
      kramdown-parser-gfm (~> 1.0)
      liquid (~> 4.0)
      mercenary (~> 0.3.3)
      pathutil (~> 0.9)
      rouge (~> 3.0)
      safe_yaml (~> 1.0)
      terminal-table (~> 1.8)
    jekyll-gist (1.5.0)
      octokit (~> 4.2)
    jekyll-paginate (1.1.0)
    jekyll-sass-converter (2.0.1)
      sassc (> 2.0.1, < 3.0)
    jekyll-watch (2.2.1)
      listen (~> 3.0)
    kramdown (2.1.0)
    kramdown-parser-gfm (1.1.0)
      kramdown (~> 2.0)
    liquid (4.0.3)
    listen (3.2.0)
      rb-fsevent (~> 0.10, >= 0.10.3)
      rb-inotify (~> 0.9, >= 0.9.10)
    mercenary (0.3.6)
    multipart-post (2.1.1)
    octokit (4.14.0)
      sawyer (~> 0.8.0, >= 0.5.3)
    pathutil (0.16.2)
      forwardable-extended (~> 2.6)
    public_suffix (4.0.1)
    rb-fsevent (0.10.3)
    rb-inotify (0.10.0)
      ffi (~> 1.0)
    rouge (3.12.0)
    safe_yaml (1.0.5)
    sassc (2.2.1)
      ffi (~> 1.9)
    sawyer (0.8.2)
      addressable (>= 2.3.5)
      faraday (> 0.8, < 2.0)
    terminal-table (1.8.0)
      unicode-display_width (~> 1.1, >= 1.1.1)
    unicode-display_width (1.6.0)

PLATFORMS
  ruby

DEPENDENCIES
  jekyll
  jekyll-gist
  jekyll-paginate

BUNDLED WITH
   2.0.2

Makefile

@@ -0,0 +1,74 @@
PY?=python3
PELICAN?=pelican
PELICANOPTS=

BASEDIR=$(CURDIR)
INPUTDIR=$(BASEDIR)/content
OUTPUTDIR=$(BASEDIR)/output
CONFFILE=$(BASEDIR)/pelicanconf.py
PUBLISHCONF=$(BASEDIR)/publishconf.py

DEBUG ?= 0
ifeq ($(DEBUG), 1)
	PELICANOPTS += -D
endif

RELATIVE ?= 0
ifeq ($(RELATIVE), 1)
	PELICANOPTS += --relative-urls
endif

help:
	@echo 'Makefile for a pelican Web site'
	@echo ''
	@echo 'Usage:'
	@echo '   make html                           (re)generate the web site'
	@echo '   make clean                          remove the generated files'
	@echo '   make regenerate                     regenerate files upon modification'
	@echo '   make publish                        generate using production settings'
	@echo '   make serve [PORT=8000]              serve site at http://localhost:8000'
	@echo '   make serve-global [SERVER=0.0.0.0]  serve (as root) to $(SERVER):80'
	@echo '   make devserver [PORT=8000]          serve and regenerate together'
	@echo '   make ssh_upload                     upload the web site via SSH'
	@echo '   make rsync_upload                   upload the web site via rsync+ssh'
	@echo ''
	@echo 'Set the DEBUG variable to 1 to enable debugging, e.g. make DEBUG=1 html'
	@echo 'Set the RELATIVE variable to 1 to enable relative urls'
	@echo ''

html:
	$(PELICAN) $(INPUTDIR) -o $(OUTPUTDIR) -s $(CONFFILE) $(PELICANOPTS)

clean:
	[ ! -d $(OUTPUTDIR) ] || rm -rf $(OUTPUTDIR)

regenerate:
	$(PELICAN) -r $(INPUTDIR) -o $(OUTPUTDIR) -s $(CONFFILE) $(PELICANOPTS)

serve:
ifdef PORT
	$(PELICAN) -l $(INPUTDIR) -o $(OUTPUTDIR) -s $(CONFFILE) $(PELICANOPTS) -p $(PORT)
else
	$(PELICAN) -l $(INPUTDIR) -o $(OUTPUTDIR) -s $(CONFFILE) $(PELICANOPTS)
endif

serve-global:
ifdef SERVER
	$(PELICAN) -l $(INPUTDIR) -o $(OUTPUTDIR) -s $(CONFFILE) $(PELICANOPTS) -p $(PORT) -b $(SERVER)
else
	$(PELICAN) -l $(INPUTDIR) -o $(OUTPUTDIR) -s $(CONFFILE) $(PELICANOPTS) -p $(PORT) -b 0.0.0.0
endif

devserver:
ifdef PORT
	$(PELICAN) -lr $(INPUTDIR) -o $(OUTPUTDIR) -s $(CONFFILE) $(PELICANOPTS) -p $(PORT)
else
	$(PELICAN) -lr $(INPUTDIR) -o $(OUTPUTDIR) -s $(CONFFILE) $(PELICANOPTS)
endif

publish:
	$(PELICAN) $(INPUTDIR) -o $(OUTPUTDIR) -s $(PUBLISHCONF) $(PELICANOPTS)

.PHONY: html help clean regenerate serve serve-global devserver publish

Pipfile

@@ -0,0 +1,14 @@
[[source]]
name = "pypi"
url = "https://pypi.org/simple"
verify_ssl = true

[dev-packages]

[packages]
pelican = {extras = ["markdown"],version = "*"}
gergelypolonkaieu-site = {editable = true,path = "."}
typogrify = "*"

[requires]
python_version = "3.7"

Pipfile.lock

@@ -0,0 +1,151 @@
{
"_meta": {
"hash": {
"sha256": "3848a327090b82fa6faf252335283a4c4648c0848fcf02cd841428b45a36c238"
},
"pipfile-spec": 6,
"requires": {
"python_version": "3.7"
},
"sources": [
{
"name": "pypi",
"url": "https://pypi.org/simple",
"verify_ssl": true
}
]
},
"default": {
"blinker": {
"hashes": [
"sha256:471aee25f3992bd325afa3772f1063dbdbbca947a041b8b89466dc00d606f8b6"
],
"version": "==1.4"
},
"docutils": {
"hashes": [
"sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0",
"sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827",
"sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99"
],
"version": "==0.15.2"
},
"feedgenerator": {
"hashes": [
"sha256:5ae05daa9cfa47fa406ee4744d0b7fa1c8a05a7a47ee0ad328ddf55327cfb106"
],
"version": "==1.9"
},
"gergelypolonkaieu-site": {
"editable": true,
"path": "."
},
"jinja2": {
"hashes": [
"sha256:74320bb91f31270f9551d46522e33af46a80c3d619f4a4bf42b3164d30b5911f",
"sha256:9fe95f19286cfefaa917656583d020be14e7859c6b0252588391e47db34527de"
],
"version": "==2.10.3"
},
"markdown": {
"hashes": [
"sha256:2e50876bcdd74517e7b71f3e7a76102050edec255b3983403f1a63e7c8a41e7a",
"sha256:56a46ac655704b91e5b7e6326ce43d5ef72411376588afa1dd90e881b83c7e8c"
],
"version": "==3.1.1"
},
"markupsafe": {
"hashes": [
"sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473",
"sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161",
"sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235",
"sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5",
"sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff",
"sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b",
"sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1",
"sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e",
"sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183",
"sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66",
"sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1",
"sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1",
"sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e",
"sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b",
"sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905",
"sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735",
"sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d",
"sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e",
"sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d",
"sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c",
"sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21",
"sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2",
"sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5",
"sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b",
"sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6",
"sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f",
"sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f",
"sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7"
],
"version": "==1.1.1"
},
"pelican": {
"extras": [
"markdown"
],
"hashes": [
"sha256:656d8ff9f778951f4e317c9e6530e92b02698c5961ebf019c583775a30f857f6",
"sha256:be7ea9a09311374322b5579c88975ae003409e40c833e761780a0d0dbd84e756"
],
"index": "pypi",
"version": "==4.2.0"
},
"pygments": {
"hashes": [
"sha256:71e430bc85c88a430f000ac1d9b331d2407f681d6f6aec95e8bcfbc3df5b0127",
"sha256:881c4c157e45f30af185c1ffe8d549d48ac9127433f2c380c24b84572ad66297"
],
"version": "==2.4.2"
},
"python-dateutil": {
"hashes": [
"sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c",
"sha256:75bb3f31ea686f1197762692a9ee6a7550b59fc6ca3a1f4b5d7e32fb98e2da2a"
],
"version": "==2.8.1"
},
"pytz": {
"hashes": [
"sha256:1c557d7d0e871de1f5ccd5833f60fb2550652da6be2693c1e02300743d21500d",
"sha256:b02c06db6cf09c12dd25137e563b31700d3b80fcc4ad23abb7a315f2789819be"
],
"version": "==2019.3"
},
"six": {
"hashes": [
"sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c",
"sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73"
],
"version": "==1.12.0"
},
"smartypants": {
"hashes": [
"sha256:8db97f7cbdf08d15b158a86037cd9e116b4cf37703d24e0419a0d64ca5808f0d"
],
"version": "==2.0.1"
},
"typogrify": {
"hashes": [
"sha256:8be4668cda434163ce229d87ca273a11922cb1614cb359970b7dc96eed13cb38"
],
"index": "pypi",
"version": "==2.0.7"
},
"unidecode": {
"hashes": [
"sha256:1d7a042116536098d05d599ef2b8616759f02985c85b4fef50c78a5aaf10822a",
"sha256:2b6aab710c2a1647e928e36d69c21e76b453cd455f4e2621000e54b2a9b8cce8"
],
"version": "==1.1.1"
}
},
"develop": {}
}

@@ -1,10 +0,0 @@
# gergely.polonkai.eu

## Initial start

```
git clone $REPO
cd $REPO
bundle install --path vendor/bundle
bundle exec jekyll server
```

@@ -1,18 +0,0 @@
# Site settings
title: Gergely Polonkai
email: gergely@polonkai.eu
description: "developer, systems engineer and administrator"
url: "http://gergely.polonkai.eu"
timezone: Europe/Budapest
name: Gergely Polonkai
paginate: 10
paginate_path: "/blog/page/:num"
exclude: ['README.md', 'Gemfile', 'Gemfile.lock', 'CNAME', ".hyde.el", "vendor"]
include: ['.well-known']
plugins:
- jekyll-gist
- jekyll-paginate
# Build settings
markdown: kramdown
permalink: pretty

@@ -1,55 +0,0 @@
- text: E-mail
  link: mailto:gergely@polonkai.eu
  image: email.png
  icon: envelope-o
- text: Stack Exchange
  link: http://stackexchange.com/users/1369500/gergelypolonkai
  image: stackexchange.png
  icon: stack-exchange
- text: LinkedIn
  link: http://www.linkedin.com/in/gergelypolonkai
  image: linkedin.png
  icon: linkedin
- text: Skype
  link: skype:gergely.polonkai
  image: skype.png
  icon: skype
- text: Facebook
  link: http://facebook.com/Polesz
  image: facebook.png
  icon: facebook
- text: Google+
  link: https://plus.google.com/+GergelyPolonkai/about
  image: google_plus.png
  icon: google-plus
- text: Twitter
  link: http://twitter.com/GergelyPolonkai
  image: twitter.png
  icon: twitter
- text: Tumblr
  link: http://gergelypolonkai.tumblr.com
  image: tumblr.png
  icon: tumblr
- text: deviantArt
  link: http://gergelypolonkai.deviantart.com
  image: deviantart.png
  icon: deviantart
- text: Hashnode
  link: https://hashnode.com/@gergelypolonkai
  image: hashnode.png
- text: Keybase
  link: https://keybase.io/gergelypolonkai
  image: keybase.png
  icon: keybase
- text: Liberapay
  link: https://liberapay.com/gergelypolonkai
  image: liberapay.png
  icon: liberapay
- text: Mastodon
  link: https://social.polonkai.eu/@gergely
  image: mastodon.png
  icon: mastodon
- text: Pay me a coffee
  link: https://paypal.me/GergelyPolonkai/250
  image: paypal.png
  icon: paypal

File diff suppressed because it is too large

@@ -1,5 +0,0 @@
``` lisp
;; Break the current line after every tenth comma, e.g. to wrap long
;; argument or data lists.
(defun cut-at-ten ()
  (while (re-search-forward "," (save-excursion (end-of-line) (point)) t 10)
    (newline-and-indent)))
```

@@ -1,15 +0,0 @@
---
layout: post
title: "GtkActionable in action"
author:
  name: "Gergely Polonkai"
  email: "gergely@polonkai.eu"
---
I have seen several people (including myself) struggling with
disabling/enabling menu items, toolbar buttons and similar UI
elements based on different conditions. It gets even worse if there
are multiple representations of the same action in the same
application, e.g. when a menu item and a toolbar button exist for the
same action. But with GTK+ 3.4 we have GtkActionable, which is made
exactly for this kind of situation.
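The post ends here; as a quick illustration of where it was heading, below is
a minimal GJS sketch (my addition, not from the original) in which a single
`GSimpleAction` drives a button through the `GtkActionable` interface, so
flipping the action's `enabled` property updates every widget bound to it.
The application id and action name are made up:

```javascript
// Minimal GtkActionable sketch (assumed names throughout, not from the post)
imports.gi.versions.Gtk = '3.0';
const Gtk = imports.gi.Gtk;
const Gio = imports.gi.Gio;

const app = new Gtk.Application({ application_id: 'eu.polonkai.ActionableDemo' });

app.connect('activate', function() {
    let win = new Gtk.ApplicationWindow({ application: app });

    // One action, registered on the application (reachable as 'app.save')
    let saveAction = new Gio.SimpleAction({ name: 'save' });
    saveAction.connect('activate', function() { print('saving…'); });
    app.add_action(saveAction);

    // GtkButton implements GtkActionable since GTK+ 3.4: naming the
    // action is all the wiring a widget needs
    let button = new Gtk.Button({ label: 'Save' });
    button.set_action_name('app.save');

    // Disabling the action makes every attached widget insensitive;
    // no per-widget bookkeeping required
    saveAction.set_enabled(false);

    win.add(button);
    win.show_all();
});

app.run([]);
```

A menu item pointing at `app.save` would grey out at the same moment, which
is the whole point of the interface.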

@@ -1,17 +0,0 @@
---
layout: post
title: "Measuring code coverage with codecov for libtool projects"
author:
  name: "Gergely Polonkai"
  email: "gergely@polonkai.eu"
---
I have recently found [codecov](https://codecov.io/); they offer free
services for public GitHub projects. As I have recently started writing
tests for my SWE-GLib project, I decided to give it a go. Things are not
that easy if you use GNU Autotools and libtool, though…

The problem here is that these tools generate their output under
`src/.libs/` (given that your sources are under `src/`), and `gcov` has
a hard time finding the coverage data files. Well, at least in the
codecov environment; it works fine on my machine.

@@ -1,326 +0,0 @@
---
layout: post
title: "Writing a GNOME Shell extension"
---
I could not find a good tutorial on how to write a GNOME Shell
extension. There is a so-called step by step
[instruction list](https://wiki.gnome.org/Projects/GnomeShell/Extensions/StepByStepTutorial)
on how to do it, but it has its flaws, including grammar and clarity.
As I wanted to create an extension for my SWE GLib library to display
the current position of some planets, I dug into the source code of
existing (and working) extensions and made up something. Comments welcome!
---
GNOME Shell extensions are written in JavaScript and are interpreted
by [GJS](https://wiki.gnome.org/action/show/Projects/Gjs). Using
introspected libraries from JavaScript is not a problem for me (see
SWE GLib's
[JavaScript example](https://github.com/gergelypolonkai/swe-glib/blob/master/examples/basic.js);
it's not beautiful, but it's working), but wrapping your head around
the Shell's concepts can take some time.
The Shell is a Clutter stage, and all the buttons (including the
top-right “Activities” button) are actors on this stage. You can add
practically anything to the Shell panel that you can add to a Clutter
stage.
The other thing to remember is the lifecycle of a Shell
extension. After calling `init()`, there are two ways forward: you
either use a so-called extension controller, or the plain old JavaScript
functions `enable()` and `disable()`; I will go on with the former
method, for reasons discussed later.
If you are fine with the `enable()`/`disable()` function version, you
can ease your job with the following command:
```
gnome-shell-extension-tool --create-extension
```
This will ask you for a few parameters and create the necessary files
for you. To see what these parameters should look like, please come
with me to the next section.
## Placement and naming
Extensions reside under `$HOME/.local/share/gnome-shell/extensions`,
where each of them has its own directory. The directory name has to be
unique, of course; to achieve this, it is usually the same as the
UUID of the extension.
The UUID is a string of alphanumeric characters, with some extras added.
Generally, it should match this regular expression:
`^[-a-zA-Z0-9@._]+$`. The convention is to use the form
`extension-name@author-id`, e.g. `Planets@gergely.polonkai.eu`. Please
see
[this link](https://wiki.gnome.org/Projects/GnomeShell/Extensions/UUIDGuidelines)
for some more information about this.
## Anatomy of an extension
Extensions consist of two main parts, `metadata.json` and
`extension.js`.
The `metadata.json` file contains compatibility information and, well,
some metadata:
```json
{
    "shell-version": ["3.18"],
    "uuid": "planets@gergely.polonkai.eu",
    "name": "Planets",
    "description": "Display current planet positions"
}
```
Here, `shell-version` must contain all versions of GNOME Shell that are
known to load and display your extension correctly. You can insert minor
versions here, like I did, or exact version numbers, like `3.18.1`.
In the `extension.js` file, which contains the actual extension code,
the only thing you really need is an `init()` function:
```javascript
function init(extensionMeta) {
    // Do whatever it takes to initialize your extension, like
    // initializing the translations. However, never do any widget
    // magic here yet.

    // Then return the controller object
    return new ExtensionController(extensionMeta);
}
```
## Extension controller
So far so good, but what is this extension controller thing? It is an
object which is capable of managing your GNOME Shell extension. Whenever
the extension is loaded, its `enable()` method is called; when the
extension is unloaded, you guessed it, the `disable()` method gets
called.
```javascript
function ExtensionController(extensionMeta) {
    return {
        extensionMeta: extensionMeta,
        extension: null,

        enable: function() {
            this.extension = new PlanetsExtension(this.extensionMeta);

            Main.panel.addToStatusArea("planets",
                                       this.extension,
                                       0, "right");
        },

        disable: function() {
            this.extension.actor.destroy();
            this.extension.destroy();
            this.extension = null;
        }
    };
}
```
This controller will create a new instance of the `PlanetsExtension`
class and add it to the panel's right side when loaded. Upon
unloading, the extension's actor gets destroyed (which, as you will
see later, gets created behind the scenes, not directly by us),
together with the extension itself. Also, as a safety measure, the
extension is set to `null`.
## The extension
The extension is a bit more tricky, as, for convenience reasons, it
should extend an existing panel widget type.
```javascript
function PlanetsExtension(extensionMeta) {
    this._init(extensionMeta);
}

PlanetsExtension.prototype = {
    __proto__: PanelMenu.Button.prototype,

    _init: function(extensionMeta) {
        PanelMenu.Button.prototype._init.call(this, 0.0);

        this.extensionMeta = extensionMeta;

        this.panelContainer = new St.BoxLayout({style_class: 'panel-box'});
        this.actor.add_actor(this.panelContainer);
        this.actor.add_style_class_name('panel-status-button');

        this.panelLabel = new St.Label({
            text: 'Loading',
            y_align: Clutter.ActorAlign.CENTER
        });
        this.panelContainer.add(this.panelLabel);
    }
};
```
Here we extend the Button class of PanelMenu, so we will be able to
perform some action upon activation.

The only parameter passed to the parent's `_init()` function is
`menuAlignment`, with the value `0.0`, which is used to position the
menu arrow. (_Note: I cannot find any documentation on this, but it
seems that with the value `0.0`, a menu arrow is not added._)

The extension class in its current form is capable of creating the
actual panel button, displaying the text “Loading” in its center.
## Loading up the extension
Now with all the necessary import lines added:
```javascript
// The Main module, needed for Main.panel in the controller above
const Main = imports.ui.main;
// The PanelMenu module that contains Button
const PanelMenu = imports.ui.panelMenu;
// The St class that contains lots of UI functions
const St = imports.gi.St;
// Clutter, which is used for displaying everything
const Clutter = imports.gi.Clutter;
```
As soon as this file is ready, you can restart your Shell (press
Alt-F2 and enter the command `r`), and load the extension with
e.g. the GNOME Tweak Tool. You will see the Planets button on the
right. This little label showing a static text, however, is pretty
boring, so let's add some action.
## Adding some periodical change
Since the planets position continuously change, we should update our
widget every minute or so. Lets patch our `_init()` a bit:
```javascript
this.last_update = 0;

MainLoop.timeout_add_seconds(1, Lang.bind(this, function() {
    this.last_update++;
    this.panelLabel.set_text("Update count: " + this.last_update);

    // Returning true keeps the timeout source alive
    return true;
}));
```
This, of course, needs new import lines for `MainLoop` and `Lang` to become available:
```javascript
const MainLoop = imports.mainloop;
const Lang = imports.lang;
```
Now if you restart your Shell, your brand new extension will increase
its counter every second. This, however, presents some problems.

SWE GLib queries can sometimes be expensive, in both CPU and disk
operations, so updating our widget every second may present problems.
Also, planets don't move **that** fast. We could update our timeout value
from `1` to `60` or something, but why don't we just give our users a
chance to set it?
## Introducing settings
Getting settings from `GSettings` is hardly straightforward, especially
for software installed in a non-GNOME directory (which includes
extensions). To make our lives easier, I copied over a
[convenience library](https://github.com/projecthamster/shell-extension/blob/master/convenience.js)
from the [Hamster project](https://projecthamster.wordpress.com/)'s
extension, originally written by Giovanni Campagna. The relevant
function here is `getSettings()`:
```javascript
/**
 * getSettings:
 * @schema: (optional): the GSettings schema id
 *
 * Builds and returns a GSettings schema for @schema, using schema files
 * in extensionsdir/schemas. If @schema is not provided, it is taken from
 * metadata['settings-schema'].
 */
function getSettings(schema) {
    let extension = ExtensionUtils.getCurrentExtension();

    schema = schema || extension.metadata['settings-schema'];

    const GioSSS = Gio.SettingsSchemaSource;

    // check if this extension was built with "make zip-file", and thus
    // has the schema files in a subfolder
    // otherwise assume that extension has been installed in the
    // same prefix as gnome-shell (and therefore schemas are available
    // in the standard folders)
    let schemaDir = extension.dir.get_child('schemas');
    let schemaSource;

    if (schemaDir.query_exists(null))
        schemaSource = GioSSS.new_from_directory(schemaDir.get_path(),
                                                 GioSSS.get_default(),
                                                 false);
    else
        schemaSource = GioSSS.get_default();

    let schemaObj = schemaSource.lookup(schema, true);

    if (!schemaObj)
        throw new Error('Schema ' + schema + ' could not be found for extension '
                        + extension.metadata.uuid + '. Please check your installation.');

    return new Gio.Settings({ settings_schema: schemaObj });
}
```
You can either incorporate this function into your `extension.js` file,
or just use a `convenience.js` file like I (and the Hamster applet) did,
and import it:
```javascript
const ExtensionUtils = imports.misc.extensionUtils;
const Me = ExtensionUtils.getCurrentExtension();
const Convenience = Me.imports.convenience;
```
Now let's create the settings definition. GSettings schema files are XML
files. We want to add only one setting for now, the refresh interval.
```xml
<?xml version="1.0" encoding="utf-8"?>
<schemalist>
  <schema id="org.gnome.shell.extensions.planets" path="/org/gnome/shell/extensions/planets/">
    <key name="refresh-interval" type="i">
      <default>30</default>
      <summary>Refresh interval of planet data</summary>
      <description>Interval in seconds. Sets how often the planet positions are recalculated. Setting this too low (e.g. below 30) may raise performance issues.</description>
    </key>
  </schema>
</schemalist>
```
You need to compile these settings with:

    glib-compile-schemas --strict schemas/
Now let's utilize this new setting. In the extension's `_init()`
function, add the following line:
```javascript
this._settings = Convenience.getSettings();
```
And, for `getSettings()` to work correctly, we also need to extend our
`metadata.json` file:
```json
"settings-schema": "org.gnome.shell.extensions.planets"
```
After another restart (please, GNOME guys, add an option to reload
extensions!), your brand new widget can read this value and refresh
every 30 seconds; the sketch below shows the missing wiring.
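One step the post never shows is connecting the new setting to the update
loop. Here is a minimal sketch of how that could look (my addition; it assumes
the `refresh-interval` key from the schema above, the `this._settings` line
from the previous snippet, and the `MainLoop`/`Lang` imports from earlier —
`updatePlanetPositions()` is a hypothetical helper for the actual SWE GLib
queries):

```javascript
// Sketch only: goes inside _init(), after this._settings is assigned
let interval = this._settings.get_int('refresh-interval');

this._timeoutId = MainLoop.timeout_add_seconds(interval, Lang.bind(this, function() {
    this.updatePlanetPositions();  // hypothetical helper
    return true;                   // keep the source alive; false would stop it
}));
```

When the extension gets disabled, the source should be removed with
`MainLoop.source_remove(this._timeoutId)`, otherwise the callback keeps
firing after the panel button is gone.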
## Displaying the planet positions
## The settings panel
## Start an application

@@ -1,67 +0,0 @@
---
layout: post
title: "Lessons you learn while writing an SDK"
date: 2016-03-19 12:34:56
tags: [development]
published: false
author:
  name: Gergely Polonkai
  email: gergely@polonkai.eu
---
In the last few months I've been working on a GLib based SDK for
client applications that want to communicate with a Matrix.org
homeserver.

For whoever doesn't know it, Matrix is a decentralized network of
servers (homeservers). Clients can connect to them via HTTP and send
messages (events, in Matrix terminology) to each other. They are
called events because these messages can be pretty much anything, from
instant messages through automated notifications to files or, well,
actual events (such as a vCalendar); anything that you can serialize
to JSON can go through this network.
My original intention was to integrate Matrix based chat into
Telepathy, a D-Bus based messaging framework used by e.g. the GNOME
desktop (more specifically Empathy, GNOME's chat client). After
announcing my plans among the Matrix devs, I quickly learned some
things:
1. they are more than open to any development ideas
1. they really wanted to see this working
1. they would have been happy if there were a GLib or Qt based SDK
With my (far from complete) knowledge of GLib I decided to move on
with this last point, hoping that it would help me a lot when I finally
implement the Telepathy plugin.
## Matrix devs are open minded
What I learned very quickly is that Matrix devs are very open minded
folks from different parts of the world. They are all individuals with
their own ideas, experiences and quirks, yet, when it comes to that,
they steer towards their goals as a community. Thus, getting
additional information from them while reading the spec was super
easy.
## The specification is easy to understand
Except when it is not. For these cases, see the previous point.
Jokes aside, anyone who has worked with communication protocols or JSON
APIs before can get along with it fast. The endpoints are all
documented, and if something is unclear, they are happy to help
(especially if you patch up the spec afterwards).
## Copying the SDK for a different language is not (always) what you want
I started my SDK in C, trying to mimic the Python SDK. This was a
double fail: the Python SDK was a volatile work in progress, and C and
Python are fundamentally different.

During the following weeks this became clear, and I switched to the Vala
language. It is much easier to write GObject based stuff in Vala,
although I had to fall back to C to get some features working. I also
planned and implemented a more object oriented API, which is easier to
use in the GObject world.

@@ -1,27 +0,0 @@
<p>
Gergely Polonkai is a systems engineer at a telco company, and
also a freelance software developer.
</p>
<p>
He has been learning about different IT subjects since the late
1990s. These include web development, application building,
systems engineering, IT security and many others. He has also dug
deeply into free software, dealing with different flavours of
Linux and their applications,
while also writing and contributing to some open source projects.
</p>
<p>
On this site he writes posts about different stuff he faces
during work (oh my, yet another IT solutions blog), hoping they
can help others with their job, or just to get along with their
brand new netbook that shipped with Linux.
</p>
<p>
“I believe one can only achieve success if they follow their own
instincts and listen to, but not bend under, others' opinions. If
you change your course just because someone says so, you are
following their instincts, not yours.”
</p>

@@ -1,46 +0,0 @@
<article class="{% if page.post_listing %}col-sm-5 col-md-6 {% endif %}post">
{% if page.post_listing %}
<ul class="list-inline">
<li class="col-md-8">
{% endif %}
<header class="post-header">
{% if page.tag %}
<h5>
{% else %}
<h3>
{% endif %}
{% if page.post_listing %}
<a href="{{ post.url }}">
{% endif %}
{{ post.title }}
{% if page.post_listing %}
</a>
{% endif %}
{% if page.tag %}
</h5>
{% else %}
</h3>
{% endif %}
<div class="meta pull-left">
{{post.author.name}}
</div>
<div class="meta pull-right">
{{post.date | date: "%b %-d, %Y :: %H:%M"}}
</div>
<div class="clearfix"></div>
</header>
<main>
{% if layout.render_post %}
{{content}}
{% else %}
{{post.excerpt}}
{% endif %}
</main>
{% include tag-link.html %}
{% if layout.post_listing %}
</li>
</ul>
{% endif %}
</article>

@@ -1,16 +0,0 @@
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="keywords" content="{{page.keywords}}">
<meta name="description" content="Personal page of Gergely Polonkai">
<title>Gergely Polonkai{% if page.title %}: {{page.title}}{% endif %}</title>
<link rel="icon" type="image/x-icon" href="{% link favicon.ico %}">
<link href="https://fonts.googleapis.com/css?family=Open+Sans:400,300,300italic,400italic,600,600italic,700,700italic,800,800italic" rel="stylesheet" type="text/css">
<link rel="alternate" type="application/rss+xml" title="Gergely Polonkai's Blog - RSS Feed" href="{{site.url}}/blog/atom.xml">
<link rel="stylesheet" type="text/css" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css">
<link rel="stylesheet" href="{% link css/style.sass %}">
<link href="https://cdnjs.cloudflare.com/ajax/libs/jquery.terminal/1.6.3/css/jquery.terminal.min.css" rel="stylesheet"/>
<script type="text/javascript" src="//code.jquery.com/jquery-2.1.3.min.js"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery.terminal/1.6.3/js/jquery.terminal.min.js"></script>

@@ -1,35 +0,0 @@
<div class="navbar navbar-inverse navbar-fixed-top">
<div class="container-fluid">
<div class="navbar-header">
<button type="button" class="navbar-toggle" data-toggle="collapse" data-target="#gp-navbar">
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<a class="navbar-brand" href="{% link index.html %}"><img src="{% link images/profile.svg %}" alt="Gergely Polonkai" style="background-color: white; height: 45px; margin-top: -13px;"></a>
{% if page.url != '/' %}
<a class="navbar-brand" href="{% link index.html %}">Gergely Polonkai</a>
{% endif %}
</div>
<div class="collapse navbar-collapse" id="gp-navbar">
<ul class="nav navbar-nav">
<li><a href="{% link blog/index.html %}">Blog</a></li>
<li><a href="{% link resume.html %}">Resume</a></li>
<li><a href="{% link stories/index.html %}">Stories</a></li>
</ul>
<ul class="nav navbar-nav navbar-right">
<li><a href="https://about.me/gergely.polonkai">about.me</a></li>
<li><a href="{% link disclaimer.md %}">Disclaimer</a></li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown" role="button" aria-expanded="false"><span class="glyphicon glyphicon-pencil"></span> Contact me <span class="caret"></span></a>
<ul class="dropdown-menu" role="menu">
{% for contact in site.data.contacts %}
<li><a href="{{contact.link}}" target="_blank"><i class="fa fa-{{ contact.icon }}"></i> <img src="{% link images/contact/index.html %}{{ contact.image }}" alt="" /> {{contact.text}}</a></li>
{% endfor %}
<li><a href="{% link blog/atom.xml %}"><img src="{% link images/contact/index.html %}feed.png" alt="" /> RSS Feed</a></li>
</ul>
</li>
</ul>
</div>
</div>
</div>

@@ -1,17 +0,0 @@
<nav>
<ul class="pagination">
<li{% if paginator.previous_page == null %} class="disabled"{% endif %}>
<a href="{{ paginator.previous_page_path | replace: '//', '/'}}" aria-label="Previous page">
<span aria-hidden="true">&laquo;</span>
</a>
</li>
{% for page in (1..paginator.total_pages) %}
<li{% if paginator.page == page %} class="active"{% endif %}><a href="{% if page == 1 %}{% link blog/index.html %}{% else %}{{ site.paginate_path | replace: '//', '/' | replace: ':num', page }}{% endif %}">{{page}}</a></li>
{% endfor %}
<li{% if paginator.next_page == null %} class="disabled"{% endif %}>
<a href="{{paginator.next_page_path | replace: '//', '/'}}" aria-label="Next page">
<span aria-hidden="true">&raquo;</span>
</a>
</li>
</ul>
</nav>

@@ -1,9 +0,0 @@
<div class="container-fluid">
{% for post in posts limit: post_limit %}
{% capture counter %}{% cycle 'odd', 'even' %}{% endcapture %}
{% include blog-post.html %}
{% if counter == 'even' %}
<div class="clearfix"></div>
{% endif %}
{% endfor %}
</div>

@@ -1,4 +0,0 @@
<span class="reading time" title="Estimated reading time">
{% assign words = content | number_of_words %}
{% if words < 360 %}1 minute{% else %}{{ words | divided_by:180 }} minutes{% endif %} read
</span>

@@ -1,11 +0,0 @@
{% capture tagsize %}{{post.tags | size}}{% endcapture %}
{% if tagsize != '0' %}
<footer>
<p class="article-tags">
{% for tag in post.tags %}
<a href="{% link blog/tag/index.html %}{{ tag }}" class="tag-label">{{tag}}</a>
{% endfor %}
</p>
<br class="clearfix">
</footer>
{% endif %}

@@ -1,115 +0,0 @@
<!DOCTYPE html>
<html>
<head>
{% include head.html %}
</head>
<body>
{% include header.html %}
<div class="container" id="main-container">
{{content}}
{% if page.name != 'about.html' %}
<div class="well well-sm small">
<div class="pull-left" id="about-well-image">
<a href="{% link about.html %}">
<img src="{% link images/profile.svg %}" alt="">
</a>
</div>
{% include about.html %}
<div class="clearfix"></div>
</div>
{% endif %}
</div>
<script type="text/javascript">
$(document).ready(function() {
$('#tagcloud-button').click(function() {
$('#tag-cloud').toggle('slow');
});
});
jQuery.extend_if_has = function(desc, source, array) {
for (var i=array.length;i--;) {
if (typeof source[array[i]] != 'undefined') {
desc[array[i]] = source[array[i]];
}
}
return desc;
};
(function($) {
$.fn.tilda = function(eval, options) {
if ($('body').data('tilda')) {
return $('body').data('tilda').terminal;
}
this.addClass('tilda');
options = options || {};
eval = eval || function(command, term) {
term.echo("you don't set eval for tilda");
};
var settings = {
prompt: 'guest@gergely.polonkai.eu> ',
name: 'tilda',
height: 400,
enabled: false,
greetings: 'Welcome to my Terminal. Type `help\' to list the available commands.\n\nPowered by http://terminal.jcubic.pl',
keypress: function(e) {
if (e.which == 96) {
return false;
}
}
};
if (options) {
$.extend(settings, options);
}
this.append('<div class="td"></div>');
var self = this;
self.terminal = this.find('.td').terminal(eval, settings);
var focus = false;
$(document.documentElement).keypress(function(e) {
console.log(e);
if (e.which == 96) {
self.slideToggle('fast');
self.terminal.focus(focus = !focus);
self.terminal.attr({
scrollTop: self.terminal.attr("scrollHeight")
});
}
});
$('body').data('tilda', this);
this.hide();
return self;
};
})(jQuery);
String.prototype.strip = function(char) {
return this.replace(new RegExp("^\\s*"), '')
.replace(new RegExp("\\s*$"), '');
}
jQuery(document).ready(function($) {
$('#tilda').tilda(function(command, terminal) {
command = command.strip();
switch (command) {
case 'help':
terminal.echo('about - Go to the about page');
terminal.echo(' ');
terminal.echo('More commands will follow soon!');
break;
case 'about':
location = '{% link about.html %}';
break;
default:
terminal.echo(command + ': command not found');
break;
}
});
});
</script>
<div id="tilda"></div>
</body>
</html>

@@ -1,15 +0,0 @@
---
layout: default
---
<div class="post">
<header class="post-header">
<h2>{{page.title}}</h2>
<div class="clearfix"></div>
</header>
<article class="post-content">
{{content}}
</article>
</div>

@@ -1,16 +0,0 @@
---
layout: default
render_post: true
---
{% assign post = page %}
{% include blog-post.html %}
<nav>
<ul class="pager">
{% if page.previous %}
<li class="previous"><a href="{{ page.previous.url }}">&larr; {{page.previous.title}}</a></li>
{% endif %}
{% if page.next %}
<li class="next"><a href="{{ page.next.url }}">{{page.next.title}} &rarr;</a></li>
{% endif %}
</ul>
</nav>

@@ -1,15 +0,0 @@
---
layout: default
post_listing: true
---
<h3 class="tag">{{ page.tag }}</h3>
{{content}}
<h4>Articles under this tag</h4>
{% if site.tags[page.tag] %}
{% assign posts = site.tags[page.tag] %}
{% include post-list.html %}
{% else %}
No posts with this tag.
{% endif %}

@@ -1,8 +0,0 @@
---
layout: default
---
<h3>
{{ page.title }}<br>
<small>{% include read_time.html %}</small>
</h3>
{{ content }}

@@ -1,43 +0,0 @@
#! /bin/sh
#
# Find all tags in all posts under _posts, and generate a file for
# each under blog/tag. Also, if a tag page does not contain the tag:
# or layout: keywords, the script will include them in the front
# matter.

layout="posts-by-tag"

for tag in `grep -h ^tags: _posts/* | sed -re 's/^tags: +\[//' -e 's/\]$//' -e 's/, /\n/g' | sort | uniq`
do
    tag_file="blog/tag/${tag}.md"

    echo -n "[$tag] "

    if [ ! -f $tag_file ]
    then
        echo "creating ($tag_file)"

        cat <<EOF > $tag_file
---
layout: $layout
tag: $tag
---
EOF
    else
        updated=0

        if ! egrep "^tag: +${tag}$" $tag_file > /dev/null 2>&1; then
            echo "adding tag"
            sed -i "0,/---/! s/---/tag: $tag\\n---/" $tag_file
            updated=1
        fi

        if ! egrep "^layout: +" $tag_file > /dev/null 2>&1; then
            echo "adding layout"
            sed -i "0,/---/! s/---/layout: $layout\\n---/" $tag_file
            updated=1
        fi

        if [ $updated = 0 ]; then
            echo ""
        fi
    fi
done

@@ -1,29 +0,0 @@
---
layout: post
title: "Ethical Hacking 2011"
date: 2011-05-12 20:54:42
tags: [conference]
permalink: /blog/2011/5/12/ethical-hacking-2011
published: true
author:
  name: Gergely Polonkai
  email: gergely@polonkai.eu
---
Today I went to the Ethical Hacking conference with my boss. It was my first
appearance at such conferences, but I hope there will be more. Although we
just started to redesign our IT security infrastructure with a 90% clear goal,
it was nice to hear that everything is vulnerable. I was wondering whether we
should sell all our IT equipment, fire all our colleagues (you know, to
prevent social engineering), and move to South America to herd llamas or
sheep, so the only danger would be some lurking pumas or jaguars. Or I could
simply leave my old background image on my desktop, from the well-known game,
which says: Trust is a weakness.

Anyway, the conference was really nice. We heard about the weaknesses of
Android, Oracle, and even FireWire. They showed some demos of everything, and
exploited some free and commercial software with no problem at all. We saw
how much power the virtualisation admin has (although I think it can be
prevented, but I'm not sure yet). However, in the end, we could see that the
Cloud is secure (or at least it can be, in a few months or so), so I'm not
totally pessimistic. See you next time at Hacktivity!

@@ -1,88 +0,0 @@
---
layout: post
title: "Gentoo hardened desktop with GNOME 3 – Round one"
date: 2011-05-12 20:32:41
tags: [gentoo, gnome3, selinux]
permalink: /blog/2011/5/12/gentoo-hardened-desktop-with-gnome-3-round-one
published: true
author:
  name: Gergely Polonkai
  email: gergely@polonkai.eu
---
After having some hard times with Ubuntu (upgrading from 10.10 to 11.04), I
decided to switch back to my old friend, Gentoo. As I'm currently learning
about Linux hardening, I decided to use the new SELinux profile, which
supports the v2 reference policy.

Installation was pretty easy, using the [Gentoo x86
Handbook](http://www.gentoo.org/doc/hu/handbook/handbook-x86.xml). This profile
automatically turns on the `USE=selinux` flag (so does the old SELinux
profile), but deprecates `FEATURE=loadpolicy` (which is turned on by the
profile, so portage will complain about it until you disable it in
`/etc/make.conf`).

For the kernel, I chose `hardened-sources-2.6.37-r7`. This seems to be recent
enough for my security testing needs. I turned on SELinux, PaX and
grsecurity. So far, I have no problem with it, but I don't have X installed
yet, which will screw things up for sure.

After having those hard times with Ubuntu mentioned before, I decided not to
install Grub2 yet, as it renders things unusable (e.g. my Windows 7
installation, which I sometimes need at the office). So I installed Grub 0.97
(this is the only version marked as stable, as I remember), touched
`/.autorelabel`, and rebooted.

My first mistake was using a UUID as the root device on the kernel parameter
list (I don't want to list all the small mistakes like forgetting to include
the correct SATA driver in my kernel and such). Maybe I was lame, but after
specifying `/dev/sda5` instead of the UUID thing, it worked like…

Well, charm would not be the right word. For example, I forgot to install the
lvm2 package, so nothing was mounted except my root partition. After I
installed it with the install CD, I assumed everything would be all right, but
I was wrong.

udev and LVM are a critical point in a hardened environment. udev itself
doesn't want to work without the `CONFIG_DEVTMPFS=y` kernel option, so I also
had to change that. It seemed that it could be done without the install CD,
as the kernel compiled with no problems. However, when it reached the point
where it compresses the kernel with gzip, it stopped with a `Permission denied`
message (although it was running with root privileges).

The most beautiful thing in the hardened environment (with Mandatory Access
Control enabled) is that root is not a real power user any more by default.
You can get these kinds of messages many times. There are many tools to debug
them; I will talk about these later.

So, my gzip needed a fix. After digging a bit on the Internet, I found that
the guilty thing is text relocation, which can be corrected if gzip is
compiled with PIC enabled. Thus, I turned on the `USE=pic` flag globally, and
tried to remerge gzip. Of course it failed, as it had to use gzip to unpack
the gzip sources. So it did when I tried to install the PaX tools and gradm to
turn these checks off. The install CD came to the rescue again, with which I
successfully recompiled gzip, and with this new gzip, I compressed my new
kernel, with which udev started successfully. So far, so good, let's try to
reboot!

Damn, LVM is still not working. So I decided to finally consult the Gentoo
hardened guide. It says that the LVM startup scripts under `/lib/rcscripts/…`
must be modified, so LVM will put its lock files under `/etc/lvm/lock` instead
of `/dev/.lvm`. After this step and a reboot, LVM worked fine (finally).

The next thing was the file system labelling. SELinux should automatically
relabel the entire file system at boot time whenever it finds the
`/.autorelabel` file. Well, in my case it didn't happen. After checking the
[Gentoo Hardening](http://wiki.gentoo.org/wiki/Hardened_Gentoo) docs, I
realised that the `rlpkg` program does exactly the same (as far as I know, it
is designed specifically for Gentoo). So I ran `rlpkg`, and was kind of
shocked: it said it would relabel ext2, ext3, xfs and JFS partitions. Oh
great, no ext4 support? Well, consulting the forums and adding some extra
lines to `/etc/portage/package.keywords` solved the problem (`rlpkg` and some
dependencies had to have the `~x86` keyword set). Thus, `rlpkg` relabelled my
file systems (I checked some directories with `ls -lZ`; it seemed good to me).

Now it seems that everything is working fine, except for the tons of audit
messages. Tomorrow I will check them with `audit2why` or `audit2allow` to see
if they are related to my SELinux lameness, or to a bug in the policy included
with Gentoo.

@@ -1,35 +0,0 @@
---
layout: post
title: "Zabbix performance tip"
date: 2011-05-13 19:03:31
tags: [zabbix, monitoring]
permalink: /blog/2011/5/13/zabbix-performance-tip
published: true
author:
  name: Gergely Polonkai
  email: gergely@polonkai.eu
---
Recently I have switched from [MRTG](http://oss.oetiker.ch/mrtg/) + [Cacti](http://www.cacti.net/) + [Nagios](http://www.nagios.org/) + [Gnokii](http://www.gnokii.org/) to [Zabbix](http://www.zabbix.com/), and I
must say I'm more than satisfied with it. It can do anything the former tools
did, and much more. First of all, it can do the same monitoring as Nagios did,
but it does it much better. It can check several parameters within one
request, so network traffic is kept down. Also, its web front-end can generate
any kind of graph from the collected data, which took Cacti away. It can also
do SNMP queries (v1-v3), so querying my switches' port states and traffic
became easy, taking MRTG out of the picture (I know Cacti can do it too; for
historical reasons we had both tools installed). And the best part: it can
send SMS messages via a GSM modem natively, while Nagios had to use Gnokii.

The trade-off is that I had to install the Zabbix agent on all my monitored
machines, but I think it's worth the price. Until now I even had to install
NRPE to monitor some parameters, which can be a pain on Windows hosts, while
Zabbix natively supports Windows, Linux and Mac OS X.

So I only had to create a MySQL database (which I already had for NOD32
central management), and install the Zabbix server. Everything went fine until
I reached about 1300 monitored parameters. MySQL seemed to be a bit slow on
disk writes, so my Zabbix “queue” filled up in no time. After reading some
forums, I decided to switch to PostgreSQL instead. Now it works like a charm,
even with the default Debian settings. However, I will have to add several
more parameters, and my boss wants as many graphs as you can imagine, so I'm
more than sure that I will have to fine-tune my database later.

@@ -1,29 +0,0 @@
---
layout: post
title: "Gentoo hardened desktop with GNOME 3 – Round two"
date: 2011-05-18 10:28:14
tags: [gentoo, gnome3, selinux]
permalink: /blog/2011/5/18/gentoo-hardened-desktop-with-gnome-3-round-two
published: true
author:
  name: Gergely Polonkai
  email: gergely@polonkai.eu
---
After several hours of `package.keywords`/`package.use` editing and package
compiling, I managed to install GNOME 3 on my notebook. Well, I mean, the
GNOME 3 packages. Unfortunately the fglrx driver didn't seem to recognise my
ATI Mobility M56P card, and the open source driver didn't want to give me GLX
support. When I finally found some clues on what I should do, I had to use my
notebook for work, so I installed Fedora 14 on it. Then I realised that GNOME
3 is already included in Rawhide (Fedora 15), so I quickly downloaded and
installed that instead. Now I have to keep this machine in a working state for
a few days, so I will learn the SELinux stuff in its native environment.

When I installed Fedora 14, the first AVC message popped up after about ten
minutes. That was a good thing, as I wanted to see `setroubleshoot` in action.
However, in Fedora 15, the AVC bubbles didn't show up even after a day. I
raised my left eyebrow and said that's impossible, SELinux must be disabled.
And it's not! It's even in enforcing mode! And it works just fine. I like it,
and I hope I will be able to get the same results with Gentoo when I can get
back to testing…

@@ -1,41 +0,0 @@
---
layout: post
title: "Citrix XenServer 5.5 vs. Debian 5.0 upgrade to 6.0"
date: 2011-05-27 17:33:41
tags: [citrix-xenserver, debian]
permalink: /blog/2011/5/27/citrix-xenserver-vs-debian-5-0-upgrade-to-6-0
published: true
author:
  name: Gergely Polonkai
  email: gergely@polonkai.eu
---
A few weeks ago I upgraded two of our Debian based application servers from
5.0 to 6.0. Everything went fine, as the upgraded packages worked well with
the 4.2 JBoss instances. For the new kernel we needed a reboot, but as the
network had to be rebuilt, I postponed this reboot until the network changes.

With the network, everything went fine again: we successfully migrated our
mail servers behind a firewall. The Xen server (5.5.0; the upgrade to 5.6
still has to wait for a week or so) also rebooted well with some storage disks
added. But the application servers remained silent…

After checking the console, I realised that they didn't have an active
console. And when I tried to start them manually, XenServer refused with a
message regarding pygrub.

To understand the problem, I had to understand how XenServer boots Debian. It
reads the grub.conf in the first partition's root or `/boot` directory, and
starts the first option, without asking (correct me if I'm mistaken
somewhere). However, this pygrub thing cannot parse the new, grub2 config.
This is kinda frustrating.

As a first step, I quickly installed a new Debian 5.0 system from my
template. Then I attached the disks of the faulty virtual machine, and mounted
all its partitions. This way I could reach my faulty 6.0 system from a chroot
shell, in which I could install the `grub-legacy` package instead of grub,
install the necessary kernel and XenServer tools (which were missing from both
machines somehow), then halt the rescue system, and start up the original
instance.

Next week I will do an upgrade of the XenServer to 5.6.1. I hope no such
problems will occur.

@@ -1,25 +0,0 @@
---
layout: post
title: "Oracle Database “incompatible” with Oracle Linux?"
date: 2011-05-27 17:53:31
tags: [linux, oracle]
permalink: /blog/2011/5/27/oracle-database-incompatible-with-oracle-linux
published: true
author:
  name: Gergely Polonkai
  email: gergely@polonkai.eu
---
Today I gave installing [Oracle
Linux](http://www.oracle.com/us/technologies/linux/overview/index.html) a
shot. I thought I could easily install an Oracle database on it. Well, I was
naive.

As only the 5.2 version is supported by XenServer 5.5, I downloaded that
version of Oracle Linux. Installing it was surprisingly fast and easy; it
asked almost nothing, and booted without any problems.

After this came the database, 10.2, which threw an error message in my face
saying that this is an unsupported version of Linux. Bah.

Is it only me, or is it really strange that Oracle doesn't support their own
distro?

@@ -1,22 +0,0 @@
---
layout: post
title: "Proxy only non-existing files with mod_proxy and mod_rewrite"
date: 2011-06-10 14:20:43
tags: [apache]
permalink: /blog/2011/6/10/proxy-only-non-existing-files-with-mod-proxy-and-mod-rewrite
published: true
author:
  name: Gergely Polonkai
  email: gergely@polonkai.eu
---
Today I got an interesting task: I had to upload some PDF documents to a
site. The domain is ours, but we don't have access to the application server
that is hosting the page yet. Until we get it in our hands, I did a trick.

I enabled `mod_rewrite`, `mod_proxy` and `mod_proxy_http`, then added the
following lines to my Apache config:
{% gist 47680bfa44eb29708f20 redirect-non-existing.conf %}
I'm not totally sure it's actually secure, but it works for now.

@@ -1,30 +0,0 @@
---
layout: post
title: "Inverse of `sort`"
date: 2011-09-18 14:57:31
tags: [linux, command-line]
permalink: /blog/2011/9/18/inverse-of-sort
published: true
author:
  name: Gergely Polonkai
  email: gergely@polonkai.eu
---
I've been using \*NIX systems for about 14 years now, but they can still show
me new things. Today I had to generate a bunch of random names. I created a
small Perl script which generates permutations of some usual Hungarian first
and last names, occasionally prefixing them with a Dr. title or using double
first names. For some reason I forgot to include a uniqueness check in the
script. When I ran it in the command line, I realized the mistake, so I
appended `| sort | uniq` to the command line. So I had around 200 unique
names, but in alphabetical order, which was awful for my final goal. Thus, I
tried shell commands like `rand` to create a random order, and when many of my
tries failed, an idea popped into my mind (me not being a native English
speaker): “I don't have to create a «random order», but «shuffle the list»”.
So I started typing `shu`, pressed Tab in the Bash shell, and voilà! `shuf` is
the winner; it does exactly what I need:
**NAME**
shuf - generate random permutations
Thank you, Linux Core Utils! :)

@@ -1,16 +0,0 @@
---
layout: post
title: "Why you should always test your software with production data"
date: 2011-12-11 12:14:51
tags: [development, testing, ranting]
permalink: /blog/2011/12/11/why-you-should-always-test-your-software-with-production-data
published: true
author:
  name: Gergely Polonkai
  email: gergely@polonkai.eu
---
I'm writing a piece of software for my company in PHP, using the Symfony 2
framework. I finished all the work, created some sample data, and it loaded
perfectly. Then I put the whole thing into production and tried to upload the
production data into it. Guess what… it didn't load.

@@ -1,29 +0,0 @@
---
layout: post
title: "PHP 5.4 released"
date: 2012-03-20 13:31:12
tags: [php]
permalink: /blog/2012/3/20/php-5-4-released
published: true
author:
  name: Gergely Polonkai
  email: gergely@polonkai.eu
---
After a long wait, PHP announced the 5.4 release on 1 March (also, today they
announced that they are finally migrating to Git, which is sweet from my
point of view, but it doesn't really matter).

About a year ago we became very aggressive towards the developer who created
our internal e-learning system. Their database was very insecure, and they
didn't really follow industry standards in many ways. Thus, we forced them to
move from Windows + Apache 2.0 + PHP 5.2 + MySQL 4.0 to Debian Linux 6.0 +
Apache 2.2 + PHP 5.3 + MySQL 5.1. It was fun (well, from our point of view),
as their coders… well… they are not so good. The code that ran “smoothly” on
the old system failed at many points on the new one. So they code and code,
and write more code. And they still didn't finish. And now 5.4 is here. Okay,
I know it will take some time to get into the Debian repositories, but it's
here. And they removed `register_globals`, which will kill that funny code
again at so many points that they will soon have to rewrite the whole thing
to make it work. And I just sit here in my oh-so-comfortable chair, and
laugh. Am I evil?

@@ -1,34 +0,0 @@
---
layout: post
title: "Fast world, fast updates"
date: 2012-03-27 06:18:43
tags: [linux]
permalink: /blog/2012/3/27/fast-world-fast-updates
published: true
author:
  name: Gergely Polonkai
  email: gergely@polonkai.eu
---
We live in a fast world, that's for sure. When I first heard about Ubuntu
Linux and their goals, I was happy: they gave a Debian to everyone, but in
different clothes. It had fresh software in it, and they even gave support of
a kind. It was easy to install and use, even if one had no Linux experience
before. So people liked it. I've even installed it on some of my servers
because of the new package versions that came more often. Thus I got an up to
date system. However, it had a price. After a while, security updates came
more and more often, and when I had a new critical update every two or three
days, I decided to move back to Debian. Fortunately I did this at the time of
a new release, so I didn't really lose any features.

After a few years passed, even Debian is heading this very same way. But as I
see it, the cause is not the same. It seems that upstream software is hitting
these bugs, and even the Debian guys don't have the time to check for them. At
the time of a GNOME version bump (yes, GNOME 3 is a really big one for the
UN\*X-like OSes), when hundreds of packages need to be checked, security bugs
show up more often. On the other hand, Debian is releasing a new security
update every day (I had one on each of the last three days). This, of course,
is good from one point of view, as we get a system that is more secure, but
most administrators don't have maintenance windows this often. I can think of
some alternatives like Fedora, but do I really have to change? Dear fellow
developers, please code more carefully instead!

@@ -1,28 +0,0 @@
---
layout: post
title: "Wordpress madness"
date: 2012-06-14 06:40:12
tags: [wordpress, ranting]
permalink: /blog/2012/6/14/wordpress-madness
published: true
author:
  name: Gergely Polonkai
  email: gergely@polonkai.eu
---
I'm a bit fed up that I had to install [MySQL](http://www.mysql.com/) on my
server to have [Wordpress](http://wordpress.org/) working, so I Googled a
bit to find a solution for my pain. I found
[this](http://codex.wordpress.org/Using_Alternative_Databases). I don't know
when this post was written, but I think it's a bit out of date. I mean come
on, PDO has been part of PHP for ages now, and they say adding a DBAL to the
dependencies would be a project as large as (or larger than) WP itself. Well,
yes, but PHP is already a dependency, isn't it? Remove it guys, it's too
large!

Okay, to be serious… Having a heavily MySQL dependent codebase is a bad
thing in my opinion, and changing it is no easy task. But once it is done, it
would be child's play to keep it up to date, and to port WP to other
database backends. And it would be more than enough to call it 4.0, and
raising version numbers fast is a must nowadays (right, Firefox and Linux
kernel guys?)

View File

@ -1,28 +0,0 @@
---
layout: post
title: "SSH login FAILed on Red Had Enterprise Linux 6.2"
date: 2012-06-18 18:28:45
tags: [linux, selinux, ssh, red-hat]
permalink: /blog/2012/6/18/ssh-login-failed-on-red-hat-enterprise-linux-6-2
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
Now this was a mistake I should not have made…

About a month ago I moved my AWS EC2 machine from Amazon Linux to RHEL
6.2. This was good. I moved all my files and stuff, recreated my own
user, everything was just fine. Then I copied my
[gitosis](https://github.com/tv42/gitosis) account (user `git` and its home
directory). Then I tried to log in. It failed. I was blaming OpenSSH for a week
or so, changed the config file in several ways, tried to change the permissions
on `~git/.ssh/*`, but still nothing. Permissions were denied, and I was unable to
push any of my development changes. After a long time of trying, I
coincidentally `tail -f`-ed `/var/log/audit/audit.log` (I wanted to open `auth.log`
instead) and that was my first good lead. It told me that `sshd` was unable to
read `~git/.ssh/authorized_keys`, which gave me the idea to run `restorecon` on
`/home/git`. It solved the problem.
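In hindsight, the diagnosis and the fix boil down to a few commands (a sketch
of the session; adjust the paths to your setup):

    # watch SELinux denials in one terminal while retrying the login
    tail -f /var/log/audit/audit.log

    # the denial pointed at ~git/.ssh/authorized_keys, so restore the
    # default SELinux contexts on the copied home directory
    restorecon -R -v /home/git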
All hail SELinux and RBAC!

View File

@ -1,35 +0,0 @@
---
layout: post
title: "Upgrades requiring a reboot on Linux? At last!"
date: 2012-06-22 20:04:51
tags: [linux]
permalink: /blog/2012/6/22/upgrades-requiring-a-reboot-on-linux-at-last
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
I've recently received an article on Google+ about Fedora's new idea: package
upgrades that require a reboot. The article said that Linux guys have lost
their primary catchphrase: "Haha! I don't have to reboot my system to install
system upgrades!" My answer was always this: "Well, actually you should…"

I think this can be a great idea if distros implement it well. PackageKit was
a good first step on this road. That software could easily solve such an
issue. However, it is sooo easy to do it wrong. The kernel, of course, can not
be upgraded online (or could it be? I have some theories on this subject,
wonder if they could be implemented…), but other packages are much different.
From the user's point of view the best would be if the packages were
upgraded in the background seamlessly. E.g. PackageKit should check if the
given executable is running. If not, it should upgrade it, while notifying the
user like "Hey dude, don't start Anjuta now, I'm upgrading it!", or simply
refusing to start it. Libraries are a bit different, as PackageKit should check
if any running executables are using the library. Meanwhile, PK should also
keep a notification somewhere telling the users that some packages could be
upgraded, but without stopping this-and-that, it can not be done.
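A minimal sketch of that executable check, assuming a Debian-style package
manager (the package and binary names are only illustrative):

    #!/bin/sh
    # Hypothetical "safe upgrade" helper: upgrade a package only when its
    # main binary is not running; otherwise just notify the user.
    PKG=anjuta
    BIN=/usr/bin/anjuta

    if pgrep -f "$BIN" > /dev/null; then
        echo "$PKG is running; postponing its upgrade"
    else
        apt-get install --only-upgrade "$PKG"
    fi
    # for a library, one would check for open handles instead,
    # e.g. with: lsof /usr/lib/libfoo.so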
I know these things are easier said than done. But I think (a) users should
tell such ideas to the developers and (b) developers (mostly large companies,
like Microsoft or Apple) should listen to them, and at least think of these
ideas. Some users are not as stupid as they think…

View File

@ -1,80 +0,0 @@
---
layout: post
title: "Some thoughts about that dead Linux Desktop"
date: 2012-09-05 09:01:31
tags: [linux]
permalink: /blog/2012/9/5/some-thoughts-about-that-dead-linux-desktop
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
There were some arguments recently on [What Killed the Linux
Desktop](http://tirania.org/blog/archive/2012/Aug-29.html). After reading many
replies, like [Linus
Torvalds's](http://www.zdnet.com/linus-torvalds-on-the-linux-desktops-popularity-problems-7000003641/),
I have my own thoughts, too.

I know my place in the world, especially in the online community. I have been
a Linux user for about 15 years and a Linux administrator for 10 years now,
beginning with WindowMaker and something that I remember as GNOME without a
version number. I have committed some minor code chunks and translations to
some minor projects, so I'm not really into it from the "write" side (well,
until now, since I have begun to write this blog, and much more, but don't
give a penny for my words until you see it).

I have been using Linux since 2.2 and GNOME since 1.whatever. It's nice that a
program compiled years ago still runs on today's Linux kernel, especially if
you see old DOS/Windows software failing to start on a new Windows 7 machine. I
understand Linus's point that breaking external APIs is bad, and I think it can
work well at the kernel's level. But the desktop level is much different. As
the Linux Desktop has serious competitors (like OS X and Windows Aero and
Metro), it has to give something new to the users almost every year to keep up
with them. Eye candy is a must (yes, of course my techy fellows, it is
worthless, but users *need* it), and it can not be created without extending
APIs. And the old API… well, it fades away fast. I don't really understand,
however, why it has to totally disappear, like
[GTK_DIALOG_NO_SEPARATOR](http://developer.gnome.org/gtk/stable/GtkDialog.html#GtkDialogFlags)
did in Gtk3. It could be replaced with a 0 value (e.g. it won't do anything). This
way my old Gtk2 program could compile with Gtk3 nicely. Also, there could be a
small piece of software that goes through your source code and warns you about such
deprecated (and no-op but still working) things. Porting applications between
Gtk (and thus, GNOME) versions became a real pain, which makes less enthusiastic
programmers stop developing for Linux. Since I have been a GNOME guy for years, I can
tell nothing about Qt and KDE, but for the GNOME guys, this is a bad thing. As
for alternatives, there is Java. No, wait… it turned out recently that [it has
several security
bugs](http://www.theregister.co.uk/2012/08/31/critical_flaw_found_in_patched_java).
Also it's not as multiplatform as they say (I can't find the article on
that at the moment, but I have proof). Also, the JVMs out there eat up so many
resources, which makes Java a bit hard and expensive to use.

Also, I see another problem: those blasted package managers. RPM, DPKG,
Portage, whatever. What the hell? Why are there so many? Why do developers
reinvent the wheel? The nave is too small or there are too few spokes? Come on…
we live in an open source world! Contribute to the one and only package manager
(which one that is, I don't actually care)! I'm sure the two (three, many)
bunches of developers could make a deal. Thus, it could become better, and
"outsider" companies would be happier to distribute their software for Linux
platforms.

And now we get to the big companies. I don't really understand them.
nVidia and ATI made their own closed source drivers for Linux. Some other
hardware vendors also write Linux drivers, and as the kernel API doesn't really
change, they will work for a long time. But what about desktop
application vendors? Well, they try to stick to a desktop environment or two,
and if those change too frequently, they stop developing for Linux, like Skype
did (OK, maybe Skype had other reasons, but you see my point). But why? The
main part for Linux programs is the Linux kernel and the basic userland like
libc/stdlib++. If you write graphical software, it will have to use X-Windows.
Yes, it's much different in many ways, mostly because it has a… well… pretty
ugly design by default. But still, it's the same on every Linux distribution,
as it became somewhat of an industry standard, having been on the market
back in the old UN\*X days already. The protocol itself changed just like the
Linux kernel: almost no change at all, just some new features.

So what kills the Linux desktop, in my opinion, is these constant wars inside,
and the lack of support from the outside. Open Source is good, but until these
(mostly the first) problems are resolved, the Linux Desktop can do nothing on
the market. It's a downward spiral that is hard to escape.

View File

@ -1,76 +0,0 @@
---
layout: post
title: "How to start becoming a web developer"
date: 2012-09-07 18:12:12
tags: [development, technology]
permalink: /blog/2012/9/7/how-to-start-becoming-a-web-developer
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
A friend of mine asked me today how to become a web developer. It took me a
while, but I came up with a checklist. It's short, but it's enough for the first
steps.

#### First of all, learn English

Well, if you are reading this, maybe this was a bad first point…

#### Choose a language and stick to it!

For the UN\*X/Linux line, there is PHP. It's free, easy to learn, and has many
free tools and documentations available. It can be used in a functional or an
object-oriented way.

C# is another good way to start, but for the Windows line. It's fully
object-oriented, and the web is full of tutorials, how-tos and other resources.

#### Learn the basics of the system you are working on

To become a good developer, learn at least the basics of the system you are
working on. Basic commands can always come in handy. Debugging (yes, you will
write tons of bugs for sure) can become much easier if you know the huge set of
tools provided by your OS. You should also try to develop in the chosen
environment. Chose PHP? Get a Linux desktop! ASP.NET? Get Windows.
Everything will be much easier!

#### Learn the basics of the web server you are using

PHP can run on [Apache](http://httpd.apache.org/) (as a module), or any
CGI-capable webserver, like [lighttpd](http://www.lighttpd.net/) or
[nginx](http://nginx.org/) (well, it can also run on IIS, but trust me: you
don't want that). ASP.NET is designed for IIS, and although some scripts can
be run under a Mono-capable server, it should still stay there.

Whichever you choose, learn the basics! How to start and stop the service,
basic configuration methods, modules/extensions, and so on. It's more than sure
that you will face some issues while developing, so this knowledge can never hurt.

#### Keep your versions under control

Version control is critical nowadays. It gives you a basic backup solution,
can come in handy with debugging, and if you ever want to work in a team, you
will badly need it.

Subversion is a bit out of date now, and it's kind of hard to set up.
Git is not easy either. You will have to learn a lot of stuff, but basically it's
just another version control system. Just choose whether you want to stick to
merge-then-commit or rebase-then-commit, get a client, and get going.

Microsoft's Team Foundation is another good way if you are working in a team.
It provides several other features besides version control, and is well
integrated into Visual Studio, which is highly recommended for Windows based
development.

#### Choose an environment to work in

There are so many good tools out there. You should choose according to the
language and OS you are working with. [Zend
Studio](http://www.zend.com/en/products/studio) or
[Netbeans](https://netbeans.org/) are both good tools for PHP development,
while [Visual Studio](http://www.visualstudio.com/) is a best buy for Windows
development. Both of these have many ups and downs, but once you get in touch
with their deeper parts, you will like them.

View File

@ -1,19 +0,0 @@
---
layout: post
title: "Do-Not-Track in IE10 vs. Apache"
date: 2012-09-10 20:22:32
tags: [apache, technology]
permalink: /blog/2012/9/10/do-not-track-in-ie10-vs-apache
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
[Apache developers decided not to accept Do-Not-Track headers from IE10
users](http://arstechnica.com/security/2012/09/apache-webserver-updated-to-ignore-do-not-track-settings-in-ie-10/),
because it's enabled by default. So… if I install a plugin that hides the
fact from the web server that I'm using IE10, I become eligible to use
it. But if I do this, I simply become eligible because I consciously installed
that addon, so I could actually use it without hiding the fact. Sorry if
I'm a bit Philosoraptorish…

View File

@ -1,64 +0,0 @@
---
layout: post
title: "Symfony 2 Create role- and class-based ACLs with your roles coming from the ORM"
date: 2012-09-16 18:39:25
tags: [php, symfony]
permalink: /blog/2012/9/16/symfony-2-create-role-and-class-based-acls-with-your-roles-coming-from-the-orm
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
During the last weeks I had some serious issues with one of my private Symfony
2 projects. One of my goals was to create a dynamic security system, e.g. my
administrators wanted to create roles, and grant these roles access to
different object types (classes) and/or objects.

So I created a `User` entity, which implements `UserInterface` and
`AdvancedUserInterface`, the latter for the possibility to enable/disable
accounts and such. It had a `$roles` property, which was a `ManyToMany` relation
to the `Role` entity, which implemented `RoleInterface`. Also I created my
own role hierarchy service that implements `RoleHierarchyInterface`.

So far so good; on to the first tests. It soon turned out that if `User::getRoles()`
returns a `DoctrineCollection` as it does by default, then the standard

{% gist 883ace4f35e440f6fe0f WhatEver.php %}

doesn't work. I know, it should not be hard coded, as my roles and permission
tables are dynamic; I was just testing. So I fixed my `User` entity so
`getRoles()` returns an array of `Role` objects instead of the
`DoctrineCollection`. Also I implemented a `getRolesCollection()` method to
return the original collection, but I think it will never be used.

After that, I had to implement some more features so I put this task away.
Then, I tried to create my first ACL.

{% gist 883ace4f35e440f6fe0f WhatEver2.php %}

I was about to check if the user who is logged in has an `OWNER` permission on
the `User` class.

{% gist 883ace4f35e440f6fe0f WhatEver3.php %}

The ACL was defined based on a role, so everyone who had the `ROLE_ADMIN` role
should gain access to the user listing page. But they didn't. It took several
weeks to find the cause; I have put it on
[stackoverflow](http://stackoverflow.com/questions/12057795/symfony-2-1-this-getsecurity-context-isgrantedrole-admin-returns-fa)
and the Symfony Google Group, but got no usable answers.

Then I went off debugging. Setting up NetBeans for xdebug-based PHP
debugging was real fun under Fedora, but that's another story. After a while I
found that Symfony's basic access decision manager checks for
`$role->getRole()` only if `$role` is an instance of
`Symfony\Component\Security\Core\Role\Role`, instead of checking if the object
implements `Symfony\Component\Security\Core\Role\RoleInterface`. So I
checked if the bug was already reported. It turned out that it was, and my
solution was available in a specific commit about a year ago, but as [Johannes
Schmitt commented, it would introduce a security
issue](https://github.com/symfony/symfony/commit/af70ac8d777873c49347ac828a817a400006cbea),
so it was reverted. Unfortunately neither Johannes Schmitt, nor Fabien
Potencier (nor anyone else) could (or wanted to) tell me more about this issue.
So the final (and somewhat hack-like) solution was to extend
`Symfony\Component\Security\Core\Role\Role`. And boom! It worked.

View File

@ -1,25 +0,0 @@
---
layout: post
title: "SmsGateway and SmsSender"
date: 2012-10-07 00:10:26
tags: [development, php, symfony]
permalink: /blog/2012/10/7/smsgateway-and-smssender
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
I have just uploaded my SmsGateway, SmsSender and SmsSenderBundle packages to
[GitHub](http://github.com/gergelypolonkai) and
[Packagist](http://packagist.org). I hope some of you will find them useful.
* SmsGateway
* [GitHub](https://github.com/gergelypolonkai/smsgateway)
* [Packagist](https://packagist.org/packages/gergelypolonkai/smsgateway)
* SmsSender
* [GitHub](https://github.com/gergelypolonkai/smssender)
* [Packagist](https://packagist.org/packages/gergelypolonkai/smssender)
* SmsSenderBundle
* [GitHub](https://github.com/gergelypolonkai/smssender-bundle)
* [Packagist](https://packagist.org/packages/gergelypolonkai/smssender-bundle)

View File

@ -1,20 +0,0 @@
---
layout: post
title: "Changing the session cookies name in Symfony 2"
date: 2012-10-13 12:49:28
tags: [symfony, development]
permalink: /blog/2012/10/13/changing-the-session-cookie-s-name-in-symfony-2
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
I have a development server on which I have several Symfony 2.x projects under
the same hostname, in different directories. Now I'm facing a funny problem
caused by the fact that the cookies Symfony places for each of my projects have
the same name.

To change this, you will have to modify the `config.yml` file like this:
{% gist c695670ecca2809f7c93 %}

View File

@ -1,22 +0,0 @@
---
layout: post
title: "Symfony 2 Configuration Array of associative arrays"
date: 2012-12-20 12:03:23
tags: [php, symfony]
permalink: /blog/2012/12/20/symfony-2-configuration-array-of-associative-arrays
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
A few days ago I struggled with a problem using the Symfony2 configuration. I
wanted to add the following kind of configuration to `config.yml`:

{% gist 30440e25f7a447730064 config.yml %}

The problem was that the stuff under `transitions` is dynamic, so those
`hc_cba` and `cba_hc` tags can be pretty much anything. After hitting many
errors, I came to this solution:
{% gist 30440e25f7a447730064 DynarrayConfiguration.php %}

View File

@ -1,14 +0,0 @@
---
layout: post
title: "Development man pages on Fedora"
date: 2013-01-05 18:20:41
tags: [development, fedora]
permalink: /blog/2013/1/5/development-man-pages-on-fedora
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
If you use Fedora (like me), and can't find the development manual pages for
e.g. `printf(3)` (like me), just `yum install man-pages` (like me).

View File

@ -1,32 +0,0 @@
---
layout: post
title: "Registering an enum type in GLibs type system"
date: 2013-01-06 02:34:03
tags: [c, development, glib]
permalink: /blog/2013/1/6/registering-an-enum-type-in-glib-s-type-system
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
I faced a problem in my [GLib](https://developer.gnome.org/glib/) self-teaching
project, [wMUD](https://github.com/gergelypolonkai/wmud), today. I wanted to
register a signal for a `GObject`, whose handler should accept two `enum`
parameters, for which I had to register a new `GEnum` type in the `GObject` type
system. However, the [documentation on this
feature](https://developer.gnome.org/gobject/unstable/gtype-non-instantiable.html)
(thanks for pointing this out go to hashem on `#gnome-hackers`) is not… uhm…
obvious. To make a long story short, I checked the `GIO` sources for
an example, and using that, I have created this small, working chunk:

{% gist 47794b6fb94484f8160b client-state.h %}
{% gist 47794b6fb94484f8160b client-state.c %}

Still, it can be made nicer by using the
[glib-mkenums](http://developer.gnome.org/gobject/stable/glib-mkenums.html)
tool. I will read through the GLib Makefiles tomorrow for some hints on
this.
this.
Edit: you can find the glib-mkenums solution [here]({% post_url 2014-08-16-registering-an-enum-type-in-glib-glib-mkenums-magic %}).

View File

@ -1,17 +0,0 @@
---
layout: post
title: "git rm --cached madness"
date: 2013-01-14 21:38:00
tags: [development, git]
permalink: /blog/2013/1/14/git-rm-cached-madness
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
I have recently learned about `git rm --cached`. It's a very good tool, as it
removes a file from tracking without removing your local copy of it. However,
be warned that if you use `git pull` in another working copy, the file will be
removed from there! If you accidentally put the configuration of a production
project under version control and remove it this way on your dev machine, it
can cause a lot of trouble ;)
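A quick illustration of the trap (the file name is made up):

    # machine A: stop tracking the config file, but keep the local copy
    git rm --cached config/parameters.yml
    git commit -m "stop tracking local configuration"
    git push

    # machine B (say, production): this pull removes its copy of the file!
    git pull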

View File

@ -1,52 +0,0 @@
---
layout: post
title: "JMS\\DiExtraBundles GrepPatternFinder grep exits with status code 2 on Fedora 18"
date: 2013-01-17 00:32:12
tags: [fedora, selinux, symfony]
permalink: /blog/2013/1/17/jms-diextrabundle-s-greppatternfinder-grep-exits-with-status-code-2-on-fedora-18
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
Yesterday I upgraded my development machines from Fedora 17 to Fedora
18. Although it went well, my [Symfony](http://symfony.com/) projects stopped
working with a message like this:
RuntimeException: Command "/usr/bin/grep --fixed-strings --directories=recurse --devices=skip --files-with-matches --with-filename --color=never --include=*.php 'JMS\DiExtraBundle\Annotation'
'/var/www/html/gergelypolonkaiweb/app/../src'
'/var/www/html/gergelypolonkaiweb/vendor/symfony/symfony/src/Symfony/Bundle/FrameworkBundle'
'/var/www/html/gergelypolonkaiweb/vendor/symfony/symfony/src/Symfony/Bundle/SecurityBundle'
'/var/www/html/gergelypolonkaiweb/vendor/symfony/symfony/src/Symfony/Bundle/TwigBundle'
'/var/www/html/gergelypolonkaiweb/vendor/symfony/monolog-bundle/Symfony/Bundle/MonologBundle'
'/var/www/html/gergelypolonkaiweb/vendor/symfony/swiftmailer-bundle/Symfony/Bundle/SwiftmailerBundle'
'/var/www/html/gergelypolonkaiweb/vendor/symfony/assetic-bundle/Symfony/Bundle/AsseticBundle'
'/var/www/html/gergelypolonkaiweb/vendor/doctrine/doctrine-bundle/Doctrine/Bundle/DoctrineBundle'
'/var/www/html/gergelypolonkaiweb/vendor/sensio/framework-extra-bundle/Sensio/Bundle/FrameworkExtraBundle'
'/var/www/html/gergelypolonkaiweb/vendor/jms/aop-bundle/JMS/AopBundle'
'/var/www/html/gergelypolonkaiweb/vendor/jms/security-extra-bundle/JMS/SecurityExtraBundle'
'/var/www/html/gergelypolonkaiweb/vendor/doctrine/doctrine-migrations-bundle/Doctrine/Bundle/MigrationsBundle'
'/var/www/html/gergelypolonkaiweb/vendor/friendsofsymfony/jsrouting-bundle/FOS/JsRoutingBundle'
'/var/www/html/gergelypolonkaiweb/vendor/avalanche123/imagine-bundle/Avalanche/Bundle/ImagineBundle'
'/var/www/html/gergelypolonkaiweb/vendor/genemu/form-bundle/Genemu/Bundle/FormBundle'
'/var/www/html/gergelypolonkaiweb/src/GergelyPolonkai/FrontBundle'
'/var/www/html/gergelypolonkaiweb/src/GergelyPolonkai/GeshiBundle'
'/var/www/html/gergelypolonkaiweb/vendor/symfony/symfony/src/Symfony/Bundle/WebProfilerBundle'
'/var/www/html/gergelypolonkaiweb/vendor/sensio/distribution-bundle/Sensio/Bundle/DistributionBundle'
'/var/www/html/gergelypolonkaiweb/vendor/sensio/generator-bundle/Sensio/Bundle/GeneratorBundle'" exited with non-successful status code "2".
After going through my logs and such, I finally found out that the new
SELinux policy was causing the trouble, together with git. As it turns out, my
`.git/logs` directory is tagged as `unconfined_u:object_r:httpd_log_t:s0`. The
`httpd_log_t` type is not readable by the `system_u:system_r:httpd_t:s0` user,
which makes `/usr/bin/grep` throw an access denied error. To fix this, I needed
to do

    semanage fcontext -a -t httpd_sys_content_t '/var/www(/.*)?/\.git/logs(/.*)?'

as root. This makes `.git` directories readable for the httpd process, and thus
for `grep`. The optimal solution would be to tell `GrepPatternFinder` to ignore
version control stuff, so the `httpd` process would have no access to it at
all. Also, in production, removing the `.git` or `.svn` directories could be a
good idea.
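Note that `semanage fcontext` only records the rule; to apply it to files that
already exist, a `restorecon` run is also needed (a sketch):

    semanage fcontext -a -t httpd_sys_content_t '/var/www(/.*)?/\.git/logs(/.*)?'
    restorecon -R -v /var/www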

View File

@ -1,32 +0,0 @@
---
layout: post
title: "mount: device or resource busy after enabling multipath"
date: 2013-02-19 23:09:05
tags: [linux, heartbeat-cluster]
permalink: /blog/2013/2/19/mount-device-or-resource-busy-after-enabling-multipath
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
We have a heartbeat cluster with two nodes. It has been running for several
months without problems. The shared storage is on an IBM DS3400, on which we
have a large volume formatted with ext4.

Today I decided to reboot the active node for security reasons. So I switched
over to the passive node, which failed at the first step: it was unable to
mount the storage (`/dev/sda1`). After whining for a few moments, I tried to
mount it by hand, which told me

    /dev/sda1 already mounted or /data is busy

I quickly made sure that none of that was true. After checking
this-and-that, it turned out that the passive node had `multipathd` running, so
I looked under `/dev/mapper`, and found two symlinks there, `<long-long WWN>`
and `<long-long WWN>-part1`. As the partition table and the disk size were the
same as on `/dev/sda`, I tried to

    mount /dev/mapper/<long-long WWN>-part1 /data

and voilà! It worked like a charm!
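If you run into the same thing, the mapped names are easy to find (a sketch;
the WWN is of course system specific):

    # list the multipath devices and their WWNs
    multipath -ll

    # the partitions appear under /dev/mapper with a -part<N> suffix
    ls -l /dev/mapper/

    mount /dev/mapper/<long-long-WWN>-part1 /data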

View File

@ -1,27 +0,0 @@
---
layout: post
title: "Why I stopped using annotation based routing in Symfony today"
date: 2013-02-27 23:10:24
tags: [development, symfony]
permalink: /blog/2013/2/27/why-i-stopped-using-annotation-based-routing-in-symfony-today
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
I have read several opinions about routing configuration in Symfony. I stayed
with annotation based routing as it was convenient for me to see the URL right
above the controller action. This worked because just by checking the URL, I
could recall the controller code, as it was always fresh in my mind. Well, until
today.

I had to take a look into an old (Sf 2.0, last commit about 3 months ago)
project of mine. In the same run I upgraded the whole project to 2.2 (it was
a fast one; thanks go to [JMikola@GitHub](https://github.com/jmikola) for the
quick reply on my issue with
[JmikolaJsAssetsHelperBundle](https://github.com/jmikola/JmikolaJsAssetsHelperBundle)
again!). After that I went on to the requested change. Now, finding a route in
about 40 controller files spread between 3 bundles can really be a pain! So
I'm finished with annotation based routing. It's still a nice feature, it's
simply not for me.

View File

@ -1,62 +0,0 @@
---
layout: post
title: "Programming, as I see it"
date: 2013-03-01 23:32:35
permalink: /blog/2013/3/1/programming-as-i-see-it
published: false
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
I have been writing code since I was around 11. I began with BASIC, which is,
well, the most basic language I have ever seen. Simply writing
`10 PRINT "Hello World!"` does the job (with Assembly it
would be tens of lines, as I recall). Then I moved to Pascal, then
Delphi (which is basically the same thing). The next step took a bit
longer, as I started learning more languages after this, like Perl
(for dynamic web pages), C (for desktop applications), TCL (for
eggdrop programming. Yes, I might have been a weird kid), and PHP (again,
for dynamic web pages. It was becoming mainstream back then).

Many of my classmates looked down on me, as they thought I was a geek (hell I
was, but I wouldn't have confessed it then), and called me a nerd. For a few
months maybe I was depressed, but after that I realised that this is the thing
I want to do in my life, this is the thing I'm good at.

Most people I ask why they don't code say "it's too hard". I've attended some
courses (both online and offline), and I was like "Whoa! Coding is extremely
hard! What the hell! I will never learn it!", but right after the course I
realised that everything was just fine, I could still write programs, and it was
eeeeasy. So then, what's the problem?

After looking through many course papers, I found that most teachers do it
totally wrong. A programming language is just that: a language. You don't start
learning Spanish by going to a classic literature conference in Madrid and
giving a speech; you learn the basic vocabulary and grammar. The same goes for
coding. You learn the vocabulary (the basic commands or keywords) and grammar
(syntax). I had several ideas on how this could be taught, I just didn't have the
background to do it.

The idea of teaching programming has lingered in my head for years now, and a few
days ago, I bumped into [this
video](https://www.youtube.com/watch?v=dU1xS07N-FA). So it seems that
technology superstars like Bill Gates and Mark Zuckerberg want to do the same.
Maybe they don't have enough high quality coders at hand. Well of course,
if teachers make it awfully hard to learn! So a bunch of guys sat together
and created [code.org](http://www.code.org/) to achieve my old dream. I like
the idea. And although I have almost no visitors on this blog of mine, allow me
to give you a few points on how I see programming.

#### Great learning process

When you write programs, especially during the first years, you adopt a new way
of thinking and learning. If you learn it as an adult, it can be a bit of a
pain, but as a child, it's as easy as learning how the wheels of those little
cars spin.
#### A job
#### Art
#### Magic

View File

@ -1,38 +0,0 @@
---
layout: post
title: "Fedora cant change Active Directory password via kpasswd"
date: 2013-03-05 08:55:04
tags: [fedora, kerberos, active-directory]
permalink: /blog/2013/3/5/fedora-can-t-change-active-directory-password-via-kpasswd
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
I wanted to change my AD password today. As the AD is actually a Kerberos
server, I was pretty sure that `kpasswd` would do the trick. However, the
`kpasswd` output looked like this:
$ kpasswd
Password for polonkai.gergely@EXAMPLE.LOCAL:
Enter new password:
Enter it again:
kpasswd: Cannot find KDC for requested realm changing password
I checked `kinit` and `klist`, everything looked fine. After a while it came
to my mind that password changing is done through the kadmin server, not
through the KDC. It seems that when I set up the Active Directory membership,
the `admin_server` directive did not get written to `krb5.conf`. So all I had to
do was to put
admin_server = ad.example.local
in that file, and voilà!
$ kpasswd
Password for polonkai.gergely@EXAMPLE.LOCAL:
Enter new password:
Enter it again:
Password changed.

View File

@ -1,17 +0,0 @@
---
layout: post
title: "Haversine in MySQL"
date: 2013-03-05 12:49:28
permalink: /blog/2013/3/5/haversine-in-mysql
tags: [mysql, development]
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
Just insert it into your database, feed it two pairs of Google coordinates, and
you get the distance in kilometres. If you happen to need it in miles, change
the constant `12756.200` in the `RETURN` row to `7922.6` instead.
{% gist bdad1cf2d410853bef35 %}
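For illustration, a call from the mysql client could look like this (a sketch:
the function name `haversine`, the database name and the coordinates are all
assumptions here; check the gist for the actual signature):

    mysql mydb -e "SELECT haversine(47.4979, 19.0402, 51.5074, -0.1278) AS distance_km;"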

View File

@ -1,28 +0,0 @@
---
layout: post
title: "Dvorak and me"
date: 2013-03-13 21:20:13
tags: [linux]
permalink: /blog/2013/3/13/dvorak-and-me
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
A few months ago I decided to switch to the Dvorak layout. After using
QWERTY (well, QWERTZ, to be precise) for almost 17 years, it was a hard
decision, but now I think it was worth the try. I started with the UK (Dvorak
with UK punctuation) layout, and in about four weeks, I almost reached my
original typing speed. Today I modified the Hungarian xkb definitions file
to add the Hungarian accented letters like ű to the layout, so I don't have to
use dead keys anymore (which apparently turned out to be a problem, as the
Linux version of Java doesn't support dead keys at all).

The best thing is, as I never learned proper 10-finger typing on QWERTY but
learned Dvorak that way, I can switch between QWERTY and Dvorak more or less
painlessly (about 10 minutes of confusion, so to say).

Conclusion: I don't know yet if this was actually a good decision, but it
wasn't a bad one, after all. And seeing people's faces when they try to type on
my machine totally makes it worth it.

View File

@ -1,28 +0,0 @@
---
layout: post
title: "Renaming a Symfony 2 bundle"
date: 2013-04-09 22:29:48
tags: [development, symfony]
permalink: /blog/2013/4/9/renaming-a-symfony-2-bundle
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
Today I realised that the name I gave to one of my Symfony 2 bundles should
be something else. To rename a bundle, one must do (at least) four things; a
scripted sketch follows the list.

1. Change the namespace from `Vendor\OldBundle` to `Vendor\NewBundle` in every
   PHP class (sounds like pain? It is…)
1. Change the name of files and classes. Some files under
   `src/Vendor/OldBundle` (and the classes in them) contain the name of the
   bundle, like `OldBundle/DependencyInjection/VendorOldBundleExtension.php`
   and `OldBundle/VendorOldBundle.php`. You should rename them, or Symfony
   won't find the classes defined in them! When done, rename the whole bundle
   directory, too.
1. Change the configuration files accordingly, including `AppKernel.php`. These
   config files are usually `routing.yml`, `services.yml`, and in some cases,
   `config.yml`.
1. Change the references in other parts of your code. A `grep OldBundle .` will
   usually help…
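The mechanical parts (steps 1, 2 and 4) can be scripted to a degree; here is a
rough sketch, assuming a Unix shell (always review the result before
committing):

    cd src/Vendor
    # 1. rewrite the namespace and class name references in every PHP file
    grep -rl 'OldBundle' . | xargs sed -i 's/OldBundle/NewBundle/g'
    # 2. rename the affected files, then the bundle directory itself
    mv OldBundle/DependencyInjection/VendorOldBundleExtension.php \
       OldBundle/DependencyInjection/VendorNewBundleExtension.php
    mv OldBundle/VendorOldBundle.php OldBundle/VendorNewBundle.php
    mv OldBundle NewBundle
    # 4. look for any remaining references in config files and code
    cd ../..
    grep -r 'OldBundle' .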

View File

@ -1,111 +0,0 @@
---
layout: post
title: "Installing OTRS in Fedora 18 with SELinux enabled"
date: 2013-05-06 06:01:52
tags: [fedora, selinux, otrs]
permalink: /blog/2013/5/6/installing-otrs-in-fedora-18-with-selinux-enabled
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
I've read somewhere in an OTRS installation howto that if you want to install
OTRS, you will have to disable SELinux. Well, I won't.

During the last few months, I have been using Fedora 18 with SELinux on all of
my desktop machines and on my notebook, and I had no problems at all.
Meanwhile I got familiar with SELinux itself, and got used to solving the
problems it causes. So I started `tail -f /var/log/httpd/error_log` in one
terminal (to see if any Apache related messages appeared),
`tail -f /var/log/audit/audit.log` in another (to see errors caused by
SELinux), opened the admin manual at the installation chapter, took a deep
breath, and went on.

Throughout this article, I will refer to OTRS 3.2.6 as OTRS and Fedora 18
(with only "stock" repositories) as Fedora. I assume that you have already
installed OTRS in a non-SELinux environment before, and that you have at least
some basic knowledge about SELinux, MAC, RBAC, and all the like. I'm
installing OTRS in `/opt/otrs`, so if you install it somewhere else, you will
have to modify the paths below. Also, if you happen to install it under
`/var/www` (I wouldn't recommend it), that directory already has the
`httpd_sys_content_t` type, so you won't have to set it explicitly.

As the first step I unpacked the archive to `/opt/otrs`. This directory
is the OTRS default, many config files have it hardcoded, and changing it is
no easy task.

Running `otrs.CheckModules.pl` gave me a list of missing perl modules. Red Hat
and Fedora make it easy to install these, as you don't have to know the RPM
package name, just the perl module name:
yum install 'perl(Crypt::SSLeay)' \
'perl(DBD::Pg)' \
'perl(GD)' \
'perl(JSON::XS)' \
'perl(GD::Text)' \
'perl(GD::Graph)' \
'perl(Mail::IMAPClient)' \
'perl(Net::DNS)' \
'perl(PDF::API2)' \
'perl(Text::CSV_XS)' \
'perl(YAML::XS)'
I also needed to install `mod_perl`. Although `otrs.CheckModules.pl` didn't
mention it, the default settings use syslog as the logging module, so unless
you change it in `Config.pm`, you will need to install
`'perl(Unix::Syslog)'` as well.

The default SELinux policy doesn't permit any network connection to be
initiated by Apache httpd. As OTRS needs to connect to its database, you
need to enable this explicitly. In older distributions,
`httpd_can_network_connect` was the SELinux boolean for this, but recent
installations also have a `httpd_can_network_connect_db` flag. As far as I
know, this enables all network connections to the well-known database
servers' default ports, but I will have to check on that. For me, with
MySQL listening on its standard port, the
`setsebool httpd_can_network_connect_db=1` command just did it.

With SELinux enabled, Apache won't be able to read anything that's not
marked with the `httpd_sys_content_t` type, nor write anywhere without the
`httpd_sys_rw_content_t` type. The trivial, quick and dirty solution is to
label all the files as `httpd_sys_rw_content_t`, and let everything go.
However, the goal of SELinux is just the opposite of this: grant access
only to what is really needed. After many trial-and-error steps, it finally
turned out that for OTRS to work correctly, you must set
* `httpd_sys_content_t`
* on `/opt/otrs/var/httpd/htdocs`
* `httpd_script_exec_t`
* on `/opt/otrs/bin/cgi-bin`
* `httpd_sys_rw_content_t`
* on `/opt/otrs/Kernel`
* on `/opt/otrs/var/sessions`
* on `/opt/otrs/var/log` (unless you use syslog for logging)
* on `/opt/otrs/var/packages` (this is used only when you download an .opm
package)
* on `/opt/otrs/var/stats`
* on `/opt/otrs/var/tmp`
* on `/opt/otrs/bin` (I wonder why this is required, though)
To do this, use the following command:
# semanage fcontext -a -t <context> <directory regex>
Where `<directory regex>` is something like `/opt/otrs/Kernel(/.*)?`. When
this is done, all you have to do is run `restorecon -vR /opt/otrs` so
it relabels everything with the correct types (you can omit `-v`, I just
like to see what my software does).
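Spelled out for the list above, this is roughly what it comes down to (adjust
the paths if you installed somewhere else):

    semanage fcontext -a -t httpd_sys_content_t '/opt/otrs/var/httpd/htdocs(/.*)?'
    semanage fcontext -a -t httpd_script_exec_t '/opt/otrs/bin/cgi-bin(/.*)?'
    semanage fcontext -a -t httpd_sys_rw_content_t '/opt/otrs/Kernel(/.*)?'
    semanage fcontext -a -t httpd_sys_rw_content_t '/opt/otrs/var/sessions(/.*)?'
    semanage fcontext -a -t httpd_sys_rw_content_t '/opt/otrs/var/log(/.*)?'
    semanage fcontext -a -t httpd_sys_rw_content_t '/opt/otrs/var/packages(/.*)?'
    semanage fcontext -a -t httpd_sys_rw_content_t '/opt/otrs/var/stats(/.*)?'
    semanage fcontext -a -t httpd_sys_rw_content_t '/opt/otrs/var/tmp(/.*)?'
    semanage fcontext -a -t httpd_sys_rw_content_t '/opt/otrs/bin(/.*)?'
    restorecon -vR /opt/otrs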
The last thing I faced is that Fedora is more restrictive on reading
directories other than `/var/www`. It has a `Require all denied` on
`<Directory />`, and a `Require all granted` on `<Directory /var/www>`, so
`/opt/otrs/var/httpd/htdocs` will throw a
`403 Forbidden (client denied by server configuration)` error. To get rid
of this, I had to modify `scripts/apache2-httpd.include.conf` and add
`Require all granted` to both the `cgi-bin` and `htdocs` directories.
As I will have to use OTRS in a production environment soon with SELinux
enabled, it is more than likely that this list will change in the near future.
As there is no official documentation on this (I haven't found any yet), I
have to do it the trial-and-error way, so be patient!

View File

@ -1,30 +0,0 @@
---
layout: post
title: "SWE-GLib final release"
date: 2013-09-16 21:37:17
tags: [development, astrology]
permalink: /blog/2013/9/16/swe-glib-final-release
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
Few of you may know that I'm interested in astrology. About two months ago
I decided to create an astrologer's software for the GNOME desktop.
Since then, I have contacted Jean-André Santoni, who created a software
called [Astrognome](https://code.google.com/p/astrognome/) some years ago.
We exchanged some e-mails, and after several weeks of coding, I'm proud to
present [SWE-GLib](https://github.com/gergelypolonkai/swe-glib) 1.0.1. This
is "just" a library which wraps around [Swiss
Ephemeris](http://www.astro.com/swisseph/), creating a nice GLib-ish
interface around it. See the project page and the built-in GTK-Doc document
for more information.

The astrologer's software I'm writing will be
[Astrognome](https://github.com/gergelypolonkai/astrognome) (thanks go to
Jean-André for letting me use the name). It is currently in pre-alpha
status, but already utilizes SWE-GLib (it just can't display the results
yet). If you happen to be interested in astrology and/or Astrognome, fork
the repository and contribute! You can also contact me (or open an
enhancement issue on GitHub) if you have any ideas.

View File

@ -1,25 +0,0 @@
---
layout: post
title: "From Symfony to Django in two days"
date: 2013-09-24 14:05:22
tags: [development, symfony, django]
permalink: /blog/2013/9/24/from-symfony-to-django-in-two-days
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
I was a Python hater for a long time, although I can't really tell why. It
didn't fit in my mind, maybe. I was programming in BASIC, Pascal (neither of
these would come to my mind now, though), C, PHP, Perl, JavaScript, and
different shell "languages" like awk, sed or bash.

After I could not fit my next Symfony app on my cloud server (it is pretty
low on storage), I decided to move slowly to Django. My first task was
simple: transition my web page (this one) from PHP + Symfony 2 to Python +
Django. The results: the "static" pages are already working, the blog
listing is almost ready (some styling issues are still around), only
tagging remains. And this is after about 6 hours of work. Oh, and the admin
site is included with Django, so I don't have to port that. I have also
decided to finally integrate a comment feature in the Django version.

View File

@ -1,29 +0,0 @@
---
layout: post
title: "First impressions of Windows 8"
date: 2013-11-05 08:14:50
tags: [windows]
permalink: /blog/2013/11/5/first-impressions-of-windows-8
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
Many of you may know my commitment to Linux and Open Source Software. But this
doesn't mean I hate proprietary software like many others do. I think
everything has its own place in the world, and this goes for software as well.

A few days ago I got my hands on a new notebook, thanks to my company. It was
shipped with Windows 8 by default, and although I installed Fedora 19 in an
instant (which went smoothly, even with Secure Boot enabled), I decided
to give this new Windows version a try.

Being a heavy Windows 7 user, my first thought was "What the hell is this?"
But in a day, I got totally used to it. I don't miss the Start button at all.
The applications already installed were almost enough for me (I still need
Office. Maybe I'll also enroll for Office 365 later…), and the games are great
and beautiful too. So overall, this new version may be totally different (by
the looks), but it seems to be almost the same Windows as we know it. So if you
don't freak out from touching something new, go give it a try: don't
instant-remove 8 in favour of 7!

View File

@ -1,33 +0,0 @@
---
layout: post
title: "List Git branches and their remote tracking branches side by side"
date: 2014-07-18 21:46:45
tags: [git]
permalink: /blog/2014/7/18/list-git-branches-and-their-remote-tracking-branches-side-by-side
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
I had a hard time following my own branches in a project. They got pretty
numerous, and I wasn't sure whether I had pushed them all to origin.
`git branch -a` can list all the branches, including remote ones, but as
my list grew too big, it became impossible to follow.
Thus, I created a small script called git-branches-with-remotes, which
does the work for me. Its only requirements are git (of course) and the
`column` command, which is pretty obviously present on every POSIX
compliant system (even OS X).
{% gist 8af6a3e86b57dd4c250e %}
I just put it in my path, and `git branches-with-remotes` does the work!
Edit (16 August): I have added some code to mark the current branch (if any)
with an asterisk. Also, I have put this script [in a
gist](https://gist.github.com/gergelypolonkai/8af6a3e86b57dd4c250e).
Edit (26 February, 2015): It turns out that `git branch -vv` shows the same
information and some more: it also shows whether the branches have diverged, and
the first line of each last commit's message.

View File

@ -1,36 +0,0 @@
---
layout: post
title: "Registering an enum type in GLib, glib-mkenums magic"
date: 2014-08-16 15:10:54
tags: [development, c, glib]
permalink: /blog/2014/8/16/registering-an-enum-type-in-glib-glib-mkenums-magic
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
In [this
post](/blog/2013/1/6/registering-an-enum-type-in-glib-s-type-system) I said
I would go through the GLib Makefiles for a more sophisticated way to add an
enum type to GLib.

In my other project,
[SWE-GLib](https://github.com/gergelypolonkai/swe-glib), I have already used this
method. The following two rules in `Makefile.am` create `gswe-enumtypes.h`
and `gswe-enumtypes.c`.
{% gist 1e2fdedb136de3ca67f0 Makefile %}
`$(GLIB_MKENUMS)` is set in `configure` with
`AC_PATH_PROG([GLIB_MKENUMS], [glib-mkenums])`.
This approach requires the GNU Autotools (you can get rid of that by changing
`$(GLIB_MKENUMS)` to the path of the `glib-mkenums` binary), and two template
files, one for the header and one for the code. `$(gswe_enum_headers)`
contains a list of all the header files that have enum types defined
throughout the project.
{% gist 1e2fdedb136de3ca67f0 gswe-enumtypes.h %}
{% gist 1e2fdedb136de3ca67f0 gswe-enumtypes.c %}

View File

@ -1,16 +0,0 @@
---
layout: post
title: "NyanMacs"
date: 2014-09-17 12:45:42
tags: [emacs]
permalink: /blog/2014/9/17/nyanmacs
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
I was a Vi/ViM user for years. For several reasons I had to change to Emacs
now and then. And then, I found
[this](https://www.emacswiki.org/emacs/NyanMode). I surrender. Emacs is
just better. (And it works even in plain text mode, without graphics.)

View File

@ -1,25 +0,0 @@
---
layout: post
title: "Rounding numbers to N decimals in Emacs"
date: 2014-10-07 10:28:50
tags: [emacs, development]
permalink: /blog/2014/10/7/rounding-numbers-to-n-decimals-in-emacs
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
I recently faced a problem where I had a bunch of SVG files with a
large amount of fraction numbers in the path definitions. These images were
displayed in a small size, so this amount of precision was irrelevant, and
these numbers took up almost half of my SVG images' size. So I created an
Elisp defun to round these numbers to 2 decimals:

{% gist 9c721ceda6d3079b4f05 %}

This finds the first digit of the number under point (the cursor), and
reduces its digits to the given amount (or the number given with `C-u`). It
has some drawbacks, though, as it cannot handle exponential forms (e.g.
`1e-1234`), but these were rare in my case, and it's hard to iterate through
all numbers. I will get around to this latter problem soon(ish).

View File

@ -1,46 +0,0 @@
---
layout: post
title: "Using Git bisect to find the first good commit"
date: 2015-02-26 10:42:56
tags: [git]
permalink: /blog/2015/2/26/using-git-bisect-to-find-the-first-good-commit
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
A few months ago we "implemented" a bug in our software, which got released
to the customers. We had continued development for two weeks when the first
customer ticket arrived about the bug. We successfully reproduced it with
the customer's version, but not with the development sources; it turned out
that one of the developers had unknowingly fixed the bug. The devs spent some
hours finding where the fix lay before coming to me like "There is
`git-bisect` which we can use to find the commit where we messed up things.
Is there a way to find where we fixed it?"

For those who don't know this feature: you have to mark a known "good" and
a known "bad" commit, then git-bisect will go through the commits between the two,
present you with the corresponding snapshots, and you have to mark each of them
as "good" or "bad". At the end, you get the hash of the commit where the bug
first occurred.

As it turned out, our developers' problem was rooted in the naming convention
of git-bisect: they assumed that the "good" commit must be a working one,
while a "bad" one must be buggy. In this case, we did the following:
the commit with the customer's release tag was marked as good (even though
it had the bug), and the latest commit on our development branch was
marked as "bad" (even though the bug was fixed by then). Now with every
snapshot presented by git-bisect we had to do the opposite of what you usually
do: mark commits still having the bug as "good", and commits that don't as
"bad". At the end, we had the hash of the commit that fixed the bug (among
some other things; luckily, the developer who pushed that commit had a
workflow that introduced a lot of cherry-picking and squashing before the
push, so he could easily find the bit that actually fixed the problem in
his local repository with the same technique).
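In practice such an inverted session looks like any other bisect run, only the
meaning of the labels is swapped (a sketch with made-up ref names):

    git bisect start
    git bisect bad HEAD               # the bug is already FIXED here
    git bisect good customer-release  # the bug is still PRESENT here
    # at each step, test the snapshot, then mark it "good" if the bug
    # is still reproducible, and "bad" once it is gone
    git bisect good   # or: git bisect bad
    # ...repeat until git prints the commit that fixed the bug, then
    git bisect reset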
[This StackOverflow answer](http://stackoverflow.com/a/17153598/1305139)
suggests the very same, but with some aliases:
{% gist a98f4aab84659d60364e %}

View File

@ -1,23 +0,0 @@
---
layout: post
title: "Good bye, Digital Ocean! Hello again, GitHub!"
date: 2015-04-25 21:18:56
permalink: /blog/2015/4/25/good-bye-digital-ocean-hello-again-github
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
Few years ago I have signed up for a
[Digital Ocean](https://www.digitalocean.com/) account. I used one
single droplet for my private needs, like hosting my private Git
repositories and my blog. However, as I didn't host anything else there
except my blog, I decided to shut it down. From now on, my blog is
on [GitHub Pages](https://pages.github.com/), as it provides just
everything I need (except automatically converting my resume to
PDF. But I can live without that.)
I'm really sorry, Digital Ocean Guys, your hosting is awesome and I'll
keep recommending you to others, but paying for a droplet for one
single blog is overkill.

View File

@ -1,32 +0,0 @@
---
layout: post
title: "Cross browser border-radius SASS mixin with varargs"
date: 2015-04-27 22:59:56
tags: [css, sass]
permalink: /blog/2015/4/28/cross-browser-border-radius-sass-mixin-with-varargs
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
A few days ago I needed to create style sheets with many rounded boxes,
where different corners had to be rounded differently (think about
Bootstrap's [button
groups](http://getbootstrap.com/components/#btn-groups)).

CSS has this nifty shorthand to specify border width in one line, like
with `border-width: 1px 2px 3px 4px`, but it lacks the same for
`border-radius`. So I decided to create something similar using [Sass
mixins](http://sass-lang.com/guide#topic-6) with dynamic
parameters. Another nice feature you get with the `border-width`
shorthand is that you can specify fewer than four parameters, and the
values will be applied to the different sides of your box so that in the end
every side has its border width set.

I wanted to achieve the same for my `border-radius` mixin, although I
could not start specifically with the `top` side. I decided to go with
the top right corner for the first parameter, while trying to keep a
sane repeating pattern. Here is the result:
{% gist 313b227434ecc5d85d7b border-radius.sass %}

View File

@ -1,39 +0,0 @@
---
layout: post
title: "@ParamConverter à la Django"
date: 2015-06-07 18:14:32
tags: [python, django]
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
One thing I really miss from [Django](https://www.djangoproject.com/)
is [Symfony](http://symfony.com)'s
[@ParamConverter](http://symfony.com/doc/current/bundles/SensioFrameworkExtraBundle/annotations/converters.html). It
made my life so much easier while developing with Symfony. In Django,
of course, there is
[get_object_or_404](https://docs.djangoproject.com/en/dev/topics/http/shortcuts/#get-object-or-404),
but, for example, in one of my projects I had a view that had to resolve 6(!)
objects from the URL, and writing `get_object_or_404` six times is not what a
programmer likes to do (yes, this view got a refactor later on). A quick Google
search gave me one [usable
result](http://openclassrooms.com/forum/sujet/middleware-django-genre-paramconverter-doctrine)
(in French), but it was so generalized that I could not always use it. Also, it was
using a middleware, which may introduce performance issues
sometimes<sup>[citation needed]</sup>. So I decided to go with decorators, and
in the end, I came up with this:
{% gist 498a32297f39b4960ad7 helper.py %}
Now I can decorate my views, either class or function based, with
`@convert_params(User, (Article, 'aid'), (Paragraph, None, 'pid'),
(AnotherObject, None, None, 'obj'))` and all the magic happens in the
background. The `user_id` parameter passed to my function will be
popped off and resolved against the `User` model by using the `id`
field; the result is put in the new `user` parameter. For `Article`, the
`aid` parameter will be matched against the `id` field of the
`Article` model, putting the result into `article`, and finally, the
`another_object_id` will be matched against the `id` field of the
`AnotherObject` model, but in this case, the result is passed to the
original function as `obj`.

View File

@ -1,20 +0,0 @@
---
layout: post
title: "F/OSS Fail meter"
date: 2015-08-19 10:12:19
tags: [development]
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
I have recently bumped into [this
article](http://spot.livejournal.com/308370.html). Naturally, I quickly
calculated the FAIL metrics for all my projects (most of them are pretty high).
To ease calculation, I made up a
[small page]({% link failmeter/index.html %}) based on this list
(although I have divided the points by 5; I really don't understand why spot is
using such big points if all of them can be divided by 5). Feel free to use it,
and if you have any recommendations (point additions/removal, new categories,
etc.), leave me a comment!

View File

@ -1,228 +0,0 @@
---
layout: post
title: "How my e-mail gets to that other guy?"
date: 2015-08-27 21:47:19
tags: [technology]
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
A friend of mine asked me how it is possible that she pushes buttons on her
keyboard and mouse, and in an instant her peer reads the text she had in her
mind. This is a step-by-step introduction to what happens in between.
#### From your mind to your computer
When you decide to write an e-mail to an acquaintance of yours, you open up
your mailing software (this document doesn't cover mail applications
you access through your browser, just plain old Thunderbird, Outlook or
similar programs; however, everything is the same after the mail leaves your
computer), and press the "New Mail" button. What happens during this process
is not covered in this article, but feel free to ask me in a comment! Now
that you have your Mail User Agent (MUA) up and running, you begin typing.

When you press a button on your keyboard or mouse, a bunch of bits gets
through the wire (or through the air, if you went wireless) into your
computer. I guess you learned about Morse during school; imagine two
[Morse operators](http://www.uscupstate.edu/academics/education/aam/lessons/susan_sawyer/morse%20code.jpg),
one in your keyboard/mouse, and one in your computer. Whenever you press a
key, that tiny creature sends a series of short and long beeps (called 0 or
1 bits, respectively) to the operator in your computer (fun fact: have you
ever seen someone typing at an amazing speed of 5 key presses per second?
Now imagine that whenever that guy presses a key on their keyboard, that
tiny little Morse operator presses his button 16 times for each key press,
with perfect timing, so that the receiving operator can decide if each beep
was a short or a long one.)

Now that the code got to the operator inside the machine, it's up to him to
decode it. The funny thing about keyboards and computers is that the
computer doesn't receive the message "Letter Q was pressed", but instead
"The second button on the second row was pressed" (a number called a scan
code). At this time the operator decodes this information (in this example
it is most likely this Morse code: `···-···· -··-····`) and checks one of
his tables titled "Current Keyboard Layout." It says this specific key
corresponds to the letter Q, so it forwards this information (I mean the
letter; after this step your computer doesn't care which plastic slab you
hit, just the letter Q) to your MUA, which inserts it into the mail in its
memory, then displays it happily (more about this step later).

When you finish your letter you press the send button of your MUA. First it
converts all the pretty letters and pictures to something a computer can
understand (yes, those Morse codes, or more precisely, zeros and ones,
again). Then it adds loads of meta data, like your name and e-mail address
and the current date and time including the time zone, and passes it all to
the sending parts of the MUA so the next step can begin.
#### IP addresses, DNS and protocols
The Internet is a huge amount of computers connected with each other, all of
them having at least one address called IP address that looks something like
this: `123.234.112.221`. These are four numbers between 0 and 255 inclusive,
separated by dots. This makes it possible to have 4,294,967,296 computers.
With the rules of address assignment added, this is actually reduced to
3,702,258,432; a huge number, still, but it is not enough, as in the era of
the Internet of Things everything is interconnected, up to and possibly
including your toaster. Thus, we are slowly transitioning to a new
addressing scheme that looks like this:
`1234:5678:90ab:dead:beef:9876:5432:1234`. This gives an enormous amount of
340,282,366,920,938,463,463,374,607,431,768,211,456 addresses, with only
4,325,185,976,917,036,918,000,125,705,034,137,602 of them being reserved,
which gives us only a petty
335,957,180,944,021,426,545,374,481,726,734,073,854 available.
Imagine a large city with
[that many buildings](http://www.digitallifeplus.com/wp-content/uploads/2012/07/new-york-city-aerial-5.jpg),
all of them having only a number: their IP address. No street names, no
company names, no nothing. But people tend to be bad at memorizing numbers,
so they started to give these buildings names. For example, there is a house
with the number `216.58.209.165`, but among themselves, people call it
`gmail.com`. Much better, isn’t it? Unfortunately, when computers talk, they
only understand numbers, so we have to provide them just that.
As remembering this huge number of addresses is a bit inconvenient, we
created the Domain Name System, or DNS for short. A “domain name” usually
(but not always) consists of two strings of letters, separated by dots (e.g.
polonkai.eu, gmail.com, my-very-long-domain.co.uk, etc.), and a hostname is
a domain name occasionally prefixed with something (e.g. **www**.gmail.com,
**my-server**.my-very-long-domain.co.uk, etc.) One of the main jobs of DNS
is to keep records of hostname/address pairs. When you enter `gmail.com`
(which happens to be both a domain name and a hostname) in your browser’s
address bar, your computer asks the DNS service if it knows the actual
address of the building that people call `gmail.com`. If it does, it will
happily tell your computer the number of that building.
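In Python, asking this question is a one-liner; the address you get back will
most likely differ, as Google has a lot of buildings:

```python
import socket

# ask the DNS for the “building number” of gmail.com
print(socket.gethostbyname('gmail.com'))  # e.g. 216.58.209.165
```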
Another job of DNS is to store some meta data about these domain names. For
such meta data there are record types, one of these types being the Mail
eXchanger, or MX. This record of a domain tells the world who handles
incoming mail for the specified domain. For `gmail.com` this is
`gmail-smtp-in.l.google.com` (among others; there can be multiple records of
the same type, in which case they usually have priorities, too.)
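Looking up MX records needs a third-party package, as the Python standard
library only speaks enough DNS for simple address lookups; here is a sketch
that assumes dnspython 2.x is installed:

```python
import dns.resolver  # third-party: the dnspython package

# list the mail exchangers of gmail.com together with their priorities
for record in dns.resolver.resolve('gmail.com', 'MX'):
    print(record.preference, record.exchange)
```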
One more rule: when two computers talk to each other they use so-called
protocols. These protocols define a set of rules on how they should
communicate; this includes message formatting, special code words, and such.
#### From your computer to the mail server
Your MUA has two settings called SMTP server address and SMTP port number
(see about that later). SMTP stands for Simple Mail Transfer Protocol, and
defines the rules on how your MUA, or another mail handling computer, should
communicate with a mail handling computer when *sending* mail. Most probably
your Internet Service Provider gave you an SMTP server name, like
`smtp.aol.com`, and a port number, like `587`.
When you hit that send button of yours, your computer will check with the
DNS service for the address of the SMTP server, which, for `smtp.aol.com`,
is `64.12.88.133`. The computer puts this name/address pair into its memory,
so it doesn’t have to ask the DNS again (this technique is called caching,
and is widely used wherever time-consuming operations happen).
Then it will send your message to the given port number of this newly
fetched address. If you imagined computers as office buildings, you can
imagine port numbers as departments, and there can be 65535 of them in one
building. The port number of SMTP is usually 25, 465 or 587, depending on
many things we don’t cover here. Your MUA prepares your letter, adding your
e-mail address and the recipient’s, together with other information that may
be useful for transferring your mail. It then puts this well-formatted
message in an envelope, writes “to building `64.12.88.133`, dept. `587`” on
it, and puts it on the wire so it gets there (if the wire is broken, the
building does not exist, or there is no such department, you will get an
error message from your MUA). Your address and the recipient’s address are
inside the envelope; other than the MUA, your own computer is not concerned
with them.
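In Python, the whole envelope-writing step looks roughly like this sketch
built on the standard smtplib; the credentials are made up, and the server
name and port are the AOL examples from above:

```python
import smtplib
from email.message import EmailMessage

msg = EmailMessage()
msg['From'] = 'example@aol.com'
msg['To'] = 'gergely@polonkai.eu'
msg['Subject'] = 'Hello'
msg.set_content('Hi there!')

# “to building smtp.aol.com, dept. 587”
with smtplib.SMTP('smtp.aol.com', 587) as server:
    server.starttls()                  # most providers require encryption on 587
    server.login('example', 'secret')  # ...and authentication (made-up credentials)
    server.send_message(msg)
```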
The mailing department (or instead let’s call it the Mail Transfer Agent,
a.k.a. MTA) now opens this envelope and reads the letter. All of it, letter
by letter, checking if your MUA formatted it well. More likely than not it
also runs your message through several filters to decide if you are a bad
guy sending some unwanted letter (also known as spam), but most importantly
it fetches the recipient’s address. It is possible, e.g. when you send an
e-mail within the same organization, that the recipient’s address is handled
by this very same computer. In this case the MTA puts the mail into the
recipient’s mailbox and the next step is skipped.
#### From one server to another
Naturally, it is possible to send an e-mail from one company to another, so
these MTAs don’t just wait for e-mails from you, but also communicate with
each other. When you send a letter from your `example@aol.com` address to me
at `gergely@polonkai.eu`, this is what happens.
In this case, the MTA that initially received the e-mail from you (which
happened to be your Internet Service Provider’s SMTP server) turns to the
DNS again. It will ask for the MX record of the domain name specified by the
e-mail address (the part after the `@` character; in my case,
`polonkai.eu`), because the server mentioned there must be contacted so
they can deliver your mail to me. My domain is configured so its primary MX
record is `aspmx.l.google.com` and the secondary is
`alt1.aspmx.l.google.com` (and 5 more. Google likes to play it safe.) The
MTA then takes the first server name, asks the DNS for its address, and tries
to send a message to `173.194.67.27` (the address of
`aspmx.l.google.com`), same department. But unlike your MUA, MTAs don’t have
a configured port number setting for reaching other MTAs (although there can
be exceptions); instead, they use the well-known port numbers `465` and `25`.
If the MTA on that server cannot be contacted for any reason, it tries the
next one on the list of MX records. If none of the servers can be contacted,
it will retry based on a set of rules defined by the administrators, which
usually means it will retry after 1, 4, 24 and 48 hours. If there is still no
answer after that many attempts, you will get an error message back, in the
form of an e-mail sent directly by the SMTP server.
Once the other MTA has been contacted, your message is sent there. The
original envelope you used is discarded, and a new one is used with the
address and dept. number (port) of the receiving MTA. Also, your message
gets altered a little bit, as most MTAs are kind enough (ie. not sneaky) to
add a clause to your message stating “the MTA at <organization> has checked
and forwarded this message.”
It is possible, though not likely, that your message gets through more than
two MTAs (one at your ISP and one at the receiver’s) before arriving at its
destination. At the end, an MTA will say “OK, this recipient address is
handled by me”; your message stops and stays there, put into your peer’s
mailbox.
##### The mailbox
Now that the MTA has passed your mail to the mailbox team (I call it a team
instead of a department because the tasks described here are usually handled
by the MTA, too), it reads it. (Pesky little guys are these mail handling
departments, aren’t they?) If the mailbox has some filtering rules, like “if
XY sends me a letter, mark it as important” or “if the letter has a specific
word in its subject, put it in the XY folder”, it executes them, but the
main point is to land the message in the actual post box of the recipient.
#### From the post box to the recipient’s computer
When the recipient opens their MUA, it will look at a setting usually called
“Incoming mail server”. Just like the SMTP server, it has a name and a port
number, along with a server type. This type can vary from provider to
provider, and is usually one of POP3 (a pretty old protocol that doesn’t even
support folders on its own), IMAP (a newer one, with folders and message
flags like “important”), MAPI (a dialect of IMAP, created by Microsoft, as
far as I know), or plain old mbox files on the receiving computer (this last
option is pretty rare nowadays, so I don’t cover it. Also, if you
use it, you most probably don’t really need this article to understand
how these things work.) This latter setting defines the protocol, telling
your MUA how to “speak” to the post box.
So your MUA turns to the DNS once more to get the address of your incoming
mail server and contacts it, using the protocol set by the server type. At
the end, the recipient’s computer will receive a bunch of envelopes, including
the one that contains your message. The MUA opens them one by one and reads
them, making a list ordered by their sender or subject, or the date of
sending.
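If the server type happens to be IMAP, that conversation can be sketched with
Python’s standard imaplib (server name and credentials are, again, made up):

```python
import imaplib

with imaplib.IMAP4_SSL('imap.example.com') as mailbox:
    mailbox.login('recipient', 'secret')
    mailbox.select('INBOX')
    # ask for the unread envelopes...
    status, message_numbers = mailbox.search(None, 'UNSEEN')
    # ...then open them one by one
    for num in message_numbers[0].split():
        status, envelope = mailbox.fetch(num, '(RFC822)')
```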
#### From the recipient’s computer to their eyes
When the recipient then clicks on one of these mails, the MUA will fetch all
the relevant bits like the sender, the subject line, the date of sending and
the content itself, and sends them to the “printing” department (I use quotes
as they don’t really print your mail on paper, they just convert it to a
nice image so the recipient can see it. This is sometimes referred to as a
rendering engine). Based on a bunch of rules they pretty-print it and send
it to your display as a new series of Morse codes. Your display then decides
how to present it to the user: draw the pretty pictures if it is a
computer screen, or raise and lower some hard dots that represent
letters on a Braille terminal.

View File

@ -1,53 +0,0 @@
---
layout: post
title: "Emacs: Implement a GObjects virtual function"
date: 2016-01-13 13:31:12
tags: [c, development, emacs]
published: true
author:
name: "Gergely Polonkai"
email: "gergely@polonkai.eu"
---
I have recently started creating a GLib implementation of the
Matrix.org API. For that, I have created a GObject interface,
MatrixAPI, which has as many virtual functions as API calls (which is
a lot, and expanding). This way I ended up with the following scenario.
In `matrix-api.h` I had a struct like this, with a lot more elements:
```c
typedef struct {
    void (*initial_sync)(MatrixAPI *api,
                         MatrixAPICallback callback,
                         gpointer user_data,
                         GError **error);
    void (*sync)(MatrixAPI *api,
                 MatrixAPICallback callback,
                 gpointer user_data,
                 GError **error);
```
And in `matrix-http-api.c`, which implements `MatrixAPI`, I have a
function like this (again, with a lot more elements):
```c
static void
matrix_http_api_matrix_api_init(MatrixAPIInterface *iface)
{
    iface->initial_sync = i_initial_sync;
    iface->sync = i_sync;
}
```
And every time I wanted to implement a new function from the vtable, I
had to copy the prototype, add an `iface->foo_bar = i_foo_bar`
line, and write an actual function header for `i_foo_bar` with the same
parameters. That’s a cumbersome job for more than 40 function
headers. But Emacs comes to the rescue!
{% gist bfd36be8b515edced3d2 implement-gobject-vfunc.el %}
Now all I have to do is to copy the whole vtable entry into
`matrix_http_api_matrix_api_init()`, execute `M-x
implement-gobject-vfunc`, then put the same vtable entry somewhere
before the interface init function, and execute `M-x
implement-gobject-vfunc-prototype`.

View File

@ -1,33 +0,0 @@
---
layout: post
title: "Vala interface madness"
date: 2016-02-26 13:07:52
tags: [vala, development]
published: true
author:
name: "Gergely Polonkai"
email: "gergely@polonkai.eu"
---
Although I have just started writing it in C, I decided to move my
Matrix GLib SDK to Vala: first to learn a new language, and second
because it is much easier to write GObject based stuff with it.
For the first step I created a `.vapi` file from my existing sources,
so the whole SDK prototype was available for me in Vala.
I had a `MatrixEvent` class that implemented the `GInitable`
interface, and many others subclassed `MatrixEvent`. For some
reason I don’t remember, I created the following header for one of the
event classes:
```vala
public class MatrixPresenceEvent : GLib.Object, GLib.Initable {
```
This is nice and everything, but as I didn’t create an `init()` method
for `MatrixPresenceEvent`, it tried to use the one from the parent
class and somehow got into an infinite loop. The Vala compiler
(`valac`), however, doesn’t warn about this.
Lesson learned: if you implement an interface on a subclass that is
already implemented by the parent, don’t forget to add the necessary
methods to the subclass.

View File

@ -1,46 +0,0 @@
---
layout: post
title: "Emacs package to generate GObject boilerplate"
date: 2016-09-28 15:40:15
tags: [gnome, development]
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
Before I started using Vala (and sometimes even after that) I often
needed to generate new classes based
on [GObject](https://developer.gnome.org/gobject/stable/).
If you have ever worked with GObject in C, you know how tedious it can
be. You need a pretty long boilerplate just to register your class,
and, if you want to be introspectable (and readable, actually), your
function names can grow really long.
To overcome this problem back in my ViM days, I used template files,
where I could replace class prefixes and names with a few keyboard
macros. As I never really dug into ViM scripting other than using some
plugins, I never got farther than
that. [Then came Emacs]({% post_url 2014-09-17-nyanmacs %}).
I have been using Emacs very extensively for about two years now, up to and
including GLib-based development. I tried the template approach, but
it felt like a really poor experience, especially given that I had just
gotten my feet wet with Emacs Lisp. So I dug deeper, and created a package
for that.
![A screenshot of GobGen in action]({% link images/screenshot-gobgen.png %})
GobGen has its own buffer with some widgets, a bit similar to
`customize`. You can enter the name of your new object and its parent, and
specify some settings. Then you press Generate, and you are presented
with two new buffers, one for the `.c` and another for the `.h`
boilerplate.
There are a lot of things to do, actually. There is already an open
issue for creating a major mode for this buffer, and there are some
minor switches I’d like to add, but it is already usable. You can grab
it from [MELPA](https://melpa.org/#/gobgen) (my first package there;
woo!) or from
my [GitHub account](https://github.com/gergelypolonkai/gobgen.el).

View File

@ -1,65 +0,0 @@
---
layout: post
title: "git-merge stages"
date: 2016-10-04 12:46:00
tags: [git]
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
This was a mail to my company’s internal Git mailing list, after I
realised many colleagues can’t wrap their heads around merge
conflicts.
>Hello all,
>
>I just saw this on
>the [git-users](https://groups.google.com/forum/#!forum/git-users)
>list and thought it could help you when you bump into a merge
>conflict. It is an excerpt from a mail by Konstantin Khomoutov (one
>of the main contributors on the list), with a few modifications of
>mine. Happy debugging :)
>
>>When a merge conflict is detected for a file, Git:
>>
>>1. Updates the entry for that file in the index to make it contain
>> several so-called “stages”:
>> * `0`: The “ours” version, the one which was there in this index entry
>> before we began to merge. At the beginning of the conflict, like
>> right after the `git merge` or `git rebase` command, this won’t
>> exist (unless you had the file in the index, which you didn’t, did
>> you?). When you resolve the conflict and use `git add
>> my/conflicting/file.cc`, this will be the version added to the
>> staging area (index), thus the resolution of the conflict.
>> * `1`: The version from the common ancestor commit, ie. the version
>> of the file both of you modified.
>> * `2`: The version from `HEAD`. During a merge, this is the current
>> branch. During a rebase, this is the branch or commit you are
>> rebasing onto (which usually will be `origin/develop`).
>> * `3`: The version being merged, or the commit you are rebasing.
>>2. Updates the file in the work tree to contain conflict markers and
>> the conflicting chunks of text between them (and the text from the
>> common ancestor if the `diff3` style of conflict markers was set).
>>
>>Now you can use the numbers in point 1 to access the different stages
>>of the conflicting file. For example, to see the common ancestor (the
>>version both of you modified), use
>>
>>```
>>git show :1:my/conflicting/file.cc
>>```
>>
>>Or, to see the difference between the two conflicting versions, try
>>
>>```
>>git diff :2:my/conflicting/file.cc :3:my/conflicting/file.cc
>>```
>>
>>**Note** that you can’t use the `:0:` stage *before* you stage your
>>resolution with `git add`, and you can’t use the `:2:` and `:3:`
>>stages *after* you staged the resolution.
>>
>>Fun fact: behind the scenes, these are the files (*revisions*) `git mergetool`
>>accesses when it presents the conflict to you visually.

View File

@ -1,98 +0,0 @@
---
layout: post
title: "How I started with Emacs"
date: 2016-11-03 09:58:41
tags: [emacs]
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
Sacha Chua has a nice
[Emacs chat intro](http://sachachua.com/blog/2013/04/emacs-chat-intro/) article
back from 2013. I write this post half because she asks there about my
(OK, anyone’s) first Emacs moments, and half because I have been planning to
do it for months now.
I wanted to start using Emacs 6(ish) years ago, and I was like
“<kbd>C-x</kbd> what”? (Note that back around 1998, I was among the
people who exited `vi` by killing it from another terminal after a
bunch of tries & fails like
[these](http://osxdaily.com/2014/06/12/how-to-quit-vim/).)
I tried to come back to Emacs a lot of times. And I mean a *lot*,
about every two months. I suddenly learned what these cryptic key
chord descriptions mean (`C` is for <kbd>Control</kbd> and `M` is for
<kbd>Meta</kbd>, which is actually <kbd>Alt</kbd>), but somehow it
didn’t *click*. I remained a ViM power user with a huge pile of
3<sup>rd</sup> party plugins.
Then [I found Nyan-macs]({% post_url 2014-09-17-nyanmacs %}),
which converted me to Emacs, and it is final now. Many of my friends
thought I was just kidding about this being the cause, but I’m not. I’m a
huge fan of Nyan cat (did you know there is even a site
called [nyan.cat](http://nyan.cat/)?) and since then I have it in my
mode line:
![Nyan modeline]({% link images/nyan-modeline.png %})
…in my `eshell` prompt:
![eshell prompt with a Nyan cat]({% link images/nyan-eshell.png %})
…and I also [zone out](https://www.emacswiki.org/emacs/ZoneMode) with
Nyan cat:
![a text-based animation with Nyan cat]({% link images/nyan-zone.png %})
Now on to more serious stuff. After browsing through all the packages
provided by [ELPA](http://elpa.gnu.org/), I found tons of useful (and
sometimes, less useful) packages,
like
[Helm](https://github.com/emacs-helm/helm/wiki),
[company](http://company-mode.github.io/),
[gtags](https://www.emacswiki.org/emacs/GnuGlobal) (which introduced
me to GNU Global, removing Exuberant ctags from my
life),
[magit](https://magit.vc/),
[Projectile](http://batsov.com/projectile/),
and [Org](http://orgmode.org/) (OK, its actually part of Emacs for a
while, but still). I still use these few, but in a month or two, I
started
to [version control](https://github.com/gergelypolonkai/my-emacs-d) my
`.emacs.d` directory, so I can easily transfer it between my home and
work machine (and for a few weeks now, even to my phone: Im using
Termux on Android). Then, over these two years I wrote some packages
like [GobGen](https://github.com/gergelypolonkai/gobgen.el), and a
small addon for Calendar
providing
[Hungarian holidays](https://github.com/gergelypolonkai/hungarian-holidays),
and I found a lot more (in no particular
order):
[git-gutter](https://github.com/syohex/emacs-git-gutter),
[multiple-cursors](https://github.com/magnars/multiple-cursors.el),
[origami](https://github.com/gregsexton/origami.el),
[ace-window](https://github.com/abo-abo/ace-window),
[avy](https://github.com/abo-abo/avy),
[beacon](https://github.com/Malabarba/beacon), and many others.
What is more important (to me) is that I started using
the [use-package](https://github.com/jwiegley/use-package) package,
which can automatically download packages that are not installed on my
current local system. Together
with
[auto-package-update](https://github.com/rranelli/auto-package-update.el),
it is *very* practical.
In addition, I started to follow the blogs of a bunch of Emacs
users/gurus. I’ve already
mentioned [Sacha Chua](http://sachachua.com/). She’s a charming,
cheerful person, writing a lot about Emacs and project management
(among other things). Another one
is [Bozhidar Batsov](http://batsov.com/), who, among other things, took
the initiative to lay down the foundation of
a
[common Elisp coding style](https://github.com/bbatsov/emacs-lisp-style-guide). Another
favourite of mine
is [Endless Parentheses](http://endlessparentheses.com/), whence I got
a lot of ideas.

View File

@ -1,27 +0,0 @@
---
layout: post
title: "Edit file as another user in Emacs"
date: 2016-11-10 08:57:12
tags: [development, emacs]
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
I have recently found
[this article](http://emacsredux.com/blog/2013/04/21/edit-files-as-root/) by
Bozhidar Batsov on opening the current file as root. I barely use
[tramp](https://www.gnu.org/software/tramp/) for sudo access, but when I do,
I almost never use root as the target user. So I decided to fix it for my
needs.
{% gist 192c83aa0556d5cdaf4018f57b75a84b %}
If the user is not specified, the default is still root. Also, if the
current buffer is not visiting a file, I prompt for a filename. As I’m not
an `ido` user, I didn’t bother calling
`ido-read-file-name`; [`helm`](https://github.com/emacs-helm/helm/wiki)
overrides `read-file-name` for me anyway.
Unlike Bozhidar, I barely use this feature, so I didn’t bind it to a key.

View File

@ -1,28 +0,0 @@
---
layout: post
title: "Get account data programatically from id-manager"
date: 2016-11-18 12:43:13
tags: [emacs]
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
I recently started
using [`id-manager`](https://github.com/kiwanami/emacs-id-manager). It is a
nice little package that can store your passwords, encrypting them with
GPG. My original reason was to store my GitHub access token
for [`github-notifier`](https://github.com/xuchunyang/github-notifier.el),
but it soon turned out it’s not *that* easy.
`id-manager` is a nice package when it comes to storing your passwords, and
retrieving them for your own eyes. But it cannot retrieve account data
programmatically. Taking a look into its source code, I came up with this
solution:
{% gist 8bad70502ac563864080f754fce726c3 idm.el %}
I currently need only the account ID (ie. the username) and the password,
but it’s pretty easy to add a macro to get the `memo` or `update-time`
fields, too.

View File

@ -1,47 +0,0 @@
---
layout: "post"
title: "Add Python docstring to the beginning of anything in Emacs"
date: 2016-11-30 07:52:37
tags: [development, python, emacs]
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
Now that I write Python code for a living, I write a lot of functions,
classes, and modules. What I still tend to forget, and also find tedious,
is adding docstrings. Unlike many developers, writing documentation is not
an enemy of mine, but it usually comes to my mind only when I finish the
implementation. The procedure, roughly, is this:
* Decide where I am (in a function, in a class but not in one of its
methods, or not inside such a block at all)
* Go to the beginning of the thing
* Insert `"""`
* Leave a blank line
* Insert `"""`
One of my mottos is that if something takes more than one step and you have
to do it more than twice, you should automate it after the first time. This
puts a small(ish) overhead on the second invocation (when you implement the
automation), but it is usually worth the time.
Since I use Emacs for pretty much everything coding-related (and much more,
but that’s the topic of a different post), I wrote a small function to do it
for me.
{% gist 7b062a00d3b8a2555024521273cecfee python-docstring.el %}
There are still a lot of things to improve:
* it always inserts double quotes (although I couldn’t come up with a
  use-case where single quotes are preferred)
* it doesn’t check for an existing docstring, just happily inserts a new one
  (leaving the old one alone, but generating a syntax error this way)
* it would also be nice if I could jump to the beginning of a file even from
  a class method. I guess I will use prefix keys for that, but I’m not sure
  yet.
You can bet I will implement these features, so check back soon for an
updated version!

View File

@ -1,20 +0,0 @@
---
layout: post
title: "Slugify in Python 3"
date: 2016-12-08 12:54:19
tags: [development, python]
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
Today I needed a function to create a slug (an ASCII-only representation of
a string). I went Googling a bit, and found an
excellent [Flask snippet](http://flask.pocoo.org/snippets/5/). The problem
is, it is designed for Python 2, so I came up with a Python 3 version.
{% gist 1866fd363f75f4da5f86103952e387f6 slugify.py %}
As I don’t really like the transliteration done in the first example
(e.g. converting ü to ue), I went with the second example.
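For reference, a minimal sketch in the same spirit (not the exact code from
the gist above):

```python
import re
import unicodedata

_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')


def slugify(text, delim='-'):
    """Generate an ASCII-only slug."""
    result = []
    for word in _punct_re.split(text.lower()):
        # NFKD splits accented letters into base letter + combining mark,
        # and the ASCII encode drops the marks (so ü becomes u, not ue)
        word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('ascii')
        if word:
            result.append(word)
    return delim.join(result)


print(slugify('Hélló, Wörld!'))  # hello-world
```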

View File

@ -1,115 +0,0 @@
---
layout: post
title: "Finding non-translated strings in Python code"
date: 2016-12-22 09:35:11
tags: [development, python]
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
When creating multilingual software, be it on the web, mobile, or desktop,
you will eventually fail to mark strings as translatable. I know, I know,
we developers are superhuman and never do that, but somehow I stopped
trusting myself recently, so I came up with an idea.
Right now I assist in the creation of a multilingual site/web application,
where a small part of the strings come from the Python code instead of HTML
templates. Call it bad practice if you like, but I could not find a better
way yet.
As a start, I tried to parse the source files with simple regular
expressions, so I could find anything between quotation marks or
apostrophes. This attempt quickly failed with strings that had such
characters inside, escaped or not; my regexps became so complex I lost all
hope. Then the magic word “lexer” came to mind.
While searching for ready-made Python lexers, I bumped into the awesome
`ast` module. AST stands for Abstract Syntax Tree, and this module does just
that: it parses a Python file and returns a tree of nodes. For walking through
these nodes there is a `NodeVisitor` class (among other means), which is
meant to be subclassed. You add a bunch of `visit_N` methods (where `N` is
an `ast` class name like `Str` or `Call`), instantiate it, and call its
`visit()` method with the root node. For example, the `visit_Str()` method
will be invoked for every string it finds.
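To make this concrete, here is a tiny, self-contained visitor (an
illustration only, not the checker from the gist below) that prints every raw
string in a file; `some_module.py` is a made-up name:

```python
import ast

class StringFinder(ast.NodeVisitor):
    """Print every raw string literal found in the tree."""

    def visit_Str(self, node):
        # invoked for every string the parser found
        print(f'string {node.s!r} at line {node.lineno}')

with open('some_module.py') as source:
    StringFinder().visit(ast.parse(source.read()))
```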
#### How does it work?
Before getting into the details, let me present the code I made:
{% gist 1a16a47e5a1971ca33e58bdfd88c5059 string-checker.py %}
The class initialization does two things: creates an empty `in_call` list
(this will hold our primitive backtrace), and saves the filename, if
provided.
`visit_Call()`, again, has two tasks. First, it checks if we are inside a
translation function. If so, it reports the fact that we are translating
something that is not a raw string. Although it is not necessarily a bad
thing, I consider it bad practice, as it may result in undefined behaviour.
Its second task is to walk through the positional and keyword arguments of
the function call. For each argument it calls the `visit_with_trace()`
method.
This method updates the `in_call` property with the current function name
and the position of the call. The latter is needed because `ast` doesn’t
store position information for every node (operators are a notable example).
Then it simply visits the argument node, which is needed because
`NodeVisitor.visit()` is not recursive. When the visit is done (which, with
really deeply nested calls like `visit(this(call(iff(you(dare)))))`, will be
recursive), the current function name is removed from `in_call`, so
subsequent calls on the same level see the same “backtrace”.
The `generic_visit()` method is called for every node that doesn’t have a
named visitor (like `visit_Call()` or `visit_Str()`). For the same reason we
generate a warning in `visit_Call()`, we do the same here. If there is
anything but a raw string inside a translation function call, developers
should know about it.
The last, and I think the most important, method is `visit_Str()`. All it
does is check the last element of the `in_call` list and generate a warning
if a raw string is found somewhere that is not inside a translation function
call.
For accurate reports, there is a `get_func_name()` function that takes an
`ast` node as an argument. As a function call can be anything from an actual
function to an object method, this goes all the way down the node’s
properties, and recursively reconstructs the name of the actual function.
Finally, there are some test functions in this code. `tst` and
`actual_tests` are there so that if I run a self-check on this script, it
will find these strings and report all the untranslated strings and all the
potential problems, like string concatenation.
#### Drawbacks
There are several drawbacks here. First, the translation function names are
hard-coded in the `TRANSLATION_FUNCTIONS` property of the `ShowString` class.
You must change this if you use other translation functions like
`dngettext`, or if you use a translation library other than `gettext`.
Second, it cannot ignore untranslated strings right now. It would be great
if a pragma like `flake8`’s `# noqa` or `coverage.py`’s `# pragma: no cover`
could be added. However, `ast` doesn’t parse comment blocks, so this proves
to be challenging.
Third, it reports docstrings as untranslated. Clearly, this is wrong, as
docstrings generally don’t have to be translated. Ignoring them, again, is
a nice challenge I couldn’t yet overcome.
The `get_func_name()` helper is anything but done. As long as I cannot
remove that final `else` clause, there may be error reports. If that
happens, the reported class should be treated in a new `elif` branch.
Finally (and most easily fixed), the warnings are simply printed on the
console. That is nice, but it should be optional; the problems identified
should be stored so the caller can obtain them as an array.
#### Bottom line
Finding strings in Python sources is not as hard as I imagined. It was fun
to learn using the `ast` module, and it does a great job. Once I can
overcome the drawbacks above, this script will be a fantastic piece of code
that can assist me in my future tasks.

View File

@ -1,34 +0,0 @@
---
layout: post
title: "Rename automatically named foreign keys with Alembic"
date: 2017-01-02 09:41:23
tags: [mysql, development, flask, python]
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
I have recently messed up my Alembic migrations while modifying my
SQLAlchemy models. To start with, I didn’t update the auto-generated
migration files to give the indexes/foreign keys a name, so Alembic used its
own naming scheme. This is not an actual problem until you have to modify
columns that have such constraints. I have since fixed this problem, but
first I had to find out which column references what (I had no indexes other
than primary keys back then, so I could go with foreign keys only). Here is
a query I put together, mostly using
[this article](http://www.binarytides.com/list-foreign-keys-in-mysql/).
``` sql
SELECT constraint_name,
CONCAT(table_name, '.', column_name) AS 'foreign key',
CONCAT(referenced_table_name, '.', referenced_column_name) AS 'references'
FROM information_schema.key_column_usage
WHERE referenced_table_name IS NOT NULL AND
table_schema = 'my_app';
```
Now I could easily drop such constraints using
`alembic.op.drop_constraint('users_ibfk1', 'users', type_='foreignkey')` and
recreate them with `alembic.op.create_foreign_key('fk_user_client', 'users',
'clients', ['client_id'], ['id'])`.
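Wrapped into a migration, the rename step might look like this sketch (the
constraint names are the examples from above):

```python
from alembic import op


def upgrade():
    # drop the auto-named constraint, then recreate it with an explicit name
    op.drop_constraint('users_ibfk1', 'users', type_='foreignkey')
    op.create_foreign_key('fk_user_client', 'users',
                          'clients', ['client_id'], ['id'])


def downgrade():
    op.drop_constraint('fk_user_client', 'users', type_='foreignkey')
    op.create_foreign_key('users_ibfk1', 'users',
                          'clients', ['client_id'], ['id'])
```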

View File

@ -1,91 +0,0 @@
---
layout: post
title: "Category-based logging with Flask"
date: 2017-03-26 22:00:52
tags: [development, python, flask]
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
I’m on a team that is developing a Flask-based web application which uses
logging extensively. For a while now it has been spewing out a lot of lines,
so the need arose to index them in ElasticSearch and, more importantly, to
search through them for auditing purposes. This latter user story brought up
one more question: why don’t we categorize our log messages? I quickly came
up with an extended log format:
[2017-01-14 00:55:42,554] [8286] [INFO] [auth] invalid password for john@example.com [at __init__.py:12, in function utils.validate_login]
Here, `[auth]` is the category name. In the ideal solution, all I’d have to
do is add `%(category)s` to my formatter, and I could call
`app.logger.info('auth', 'invalid password')` to achieve this output.
Unfortunately, `Flask.logger` (and, in the background, the `logging` module)
is not that easy to tame.
As it turns out, a Flask application’s `logger` property is an instance of a
`logging.Logger` subclass, so my first idea was to monkey patch that class.
When the app’s logger is initialised, it subclasses `logging.Logger` and
tweaks the log level so it goes down to `logging.DEBUG` if the app is running
in debug mode. This is done by using a different logger class depending on
the app config. Fortunately it doesn’t directly subclass `logging.Logger`;
it calls `logging.getLoggerClass()` to find which class it should extend. To
achieve my goals, all I had to do was subclass the original logger class and
pass it to `logging.setLoggerClass()` *before* I initialise my app, and I
have a fail-safe(ish) solution. So far so good; on to the extra category
parameter.
Now if you add a new variable to the formatter like my new `%(category)s`,
you get a nifty `KeyError` saying there is no `category` in the format
expansion dictionary. If you add `category='auth'` to the
`app.logger.info()` calls and its cousins, it’s fine, because these methods
use the magic `**kwargs` argument to swallow it. Everything goes well until
control arrives at the `_log()` method: it complains about that extra
`category` keyword argument. Taking a peek at Python’s internals, I found
two things: `info()`, `error()`, and co. pass `*args` and `**kwargs` to
`_log()` unmodified, and the `_log()` method doesn’t have `**kwargs`
in its definition to swallow it. A little doc reading later I found
that if I want to pass extra arguments to such a formatter, I should do it
via the `extra` keyword argument of `_log()`. A call like
`app.logger.info('invalid password', extra={'category': 'auth'})` solved the
problem. Now *that* is tedious.
My first idea was to override all the standard logging methods like `info()`
and `error()` and handle `category` there, but this resulted in lots of
repeated code. I changed the specification a bit, so my calls would look
like `info('message', category='auth')` instead of the original plan of
`info('auth', 'message')`: as the logging methods pass all keyword arguments
to `_log()`, I can handle it there. So in the end, my new logger class
only patches `_log()`, by picking `category` out of the kwarg list and
inserting it into `extra` before calling `super`.
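A minimal sketch of the idea (the real thing lives in the package linked at
the end of this post):

```python
import logging

class CategoryLogger(logging.getLoggerClass()):
    """A sketch: pick `category` out of the kwargs and put it into `extra`."""

    def _log(self, level, msg, args, exc_info=None, extra=None,
             category='general', **kwargs):
        extra = dict(extra or {})
        # this is what a %(category)s formatter variable will pick up
        extra.setdefault('category', category)
        super()._log(level, msg, args, exc_info=exc_info, extra=extra, **kwargs)

# must happen *before* the Flask app is created
logging.setLoggerClass(CategoryLogger)
```

With a formatter containing `%(category)s`, a call like
`app.logger.info('invalid password', category='auth')` should produce output
like the line above.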
As you can see, this is a somewhat ugly solution. It requires me, the app
author, to know about Flask’s internals (that I can set my own logging class
before the app is created, and so the app will use it.) This means if Flask’s
developers change the way logging is done, I have to adapt and find a
workaround for the new version (well, unless they let me directly set the
logging class. That would make it easy.)
What is worse, I must know about Python internals. I know the `extra` kwarg
is well documented (I just failed to notice it), but this made adding a new
formatter variable quite hard. The Python version doesn’t change as often as
the Flask version in this project, and I think the standard library won’t
really change until 4.0, so I don’t think my tampering with a “protected”
method will cause any trouble in the future. Still, this makes me feel a bit
uneasy.
All of the above can be mitigated if this class, and the whole solution,
have some tests. As my class uses the same method as Flask (using
`logging.getLoggerClass()` as a base class instead of using
`logging.Logger` directly), if the base logger class changes in Python or
in the running environment, my app won’t care. By checking that the app
logger can use my special `category` variable (ie. it doesn’t raise an
exception *and* the category actually gets into the log output), I make sure
my class is used as a base in Flask, so if they change the way they
construct `app.logger`, I will know about it when I first run my tests after
upgrading Flask.
If you are interested in such functionality (and more), you can grab it
from [GitHub](https://github.com/gergelypolonkai/flask-logging-extras), or
via [PyPI](https://pypi.python.org/pypi/Flask-Logging-Extras/).

View File

@ -1,27 +0,0 @@
---
layout: post
title: "Add SysAdmin day to Emacs Calendar"
date: 2017-10-02 09:37:52
tags: [emacs]
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
I’ve been a sysadmin since 1998. Maybe a bit earlier, if you count managing our home computer. This
means [SysAdmin Day](http://sysadminday.com/) also celebrates me. However, my Emacs Calendar
doesn’t show it for some reason.
The solution is pretty easy:
``` lisp
(add-to-list 'holiday-other-holidays '(holiday-float 7 5 -1 "SysAdmin Day") t)
```
Now invoke `list-holidays` for any year, choosing “Other” as the category, and there you go:
```
Friday, July 28, 2017: SysAdmin Day
```

View File

@ -1,37 +0,0 @@
---
layout: post
title: "Recurring events are hard"
date: 2018-07-19 13:22:00
tags: [development]
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
It was almost a month ago that I
[announced]({% post_url 2018-06-26-please-welcome-calendar-social %}) the development of
Calendar.social. Since then I have gotten through some interesting and some less interesting stuff;
(web) development, after all, is just a recurrence of patterns. Speaking of recurrence, I arrived
at a really interesting topic: recurring events.
My initial thought was like “oh, that’s easy! Let’s insert all future occurrences as separate
`Event` objects, linking to the original one for the details. That makes handling exceptions easy,
as I just have to update/delete that specific instance.” Well, not really. I mean, an event
repeating daily *forever* would fill up the database quickly, wouldn’t it? That’s when I decided to
look at how other projects do it.
As it turns out, my first thought was about the same as what everyone else has in mind, for about
the same reasons. Then they usually turn down the idea, just like I did. And instead, they
implement recurrence patterns and exception patterns.
My favourite is
[this article](https://github.com/bmoeskau/Extensible/blob/master/recurrence-overview.md) so far.
The author suggests using the recurrence patterns specced by
[RFC2445](http://www.ietf.org/rfc/rfc2445.txt) (the spec for the iCalendar format). The
interesting part of this solution is how to query recurring events: you simply store the timestamp
of the last occurrence of the event (or, if the event repeats forever, the greatest timestamp your
database supports.)
Choosing the maximum date seemed to be the tricky part, but it turned out that both Python and
popular SQL backends support dates up to the end of year 9999.
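In Python, the value to store can be computed along these lines (a sketch assuming the
python-dateutil package):

```python
from datetime import datetime
from dateutil.rrule import rrule, WEEKLY

# finite rule: enumerate it and store the real last occurrence
weekly = rrule(WEEKLY, count=10, dtstart=datetime(2018, 7, 19, 13, 0))
last_occurrence = list(weekly)[-1]

# endless rule: store the greatest timestamp the backend supports instead
last_occurrence = datetime.max  # 9999-12-31 23:59:59.999999
```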

View File

@ -1,214 +0,0 @@
---
layout: post
title: "Check if the last Git commit has test coverage"
date: 2018-07-26 12:49:52
tags: [python,development,testing]
published: true
author:
name: Gergely Polonkai
email: gergely@polonkai.eu
---
I use Python at work and for private projects. I also aim to write tests for my code, especially
recently. And as I usually don’t start from 100% code coverage (TDD is not my game), I at least
want to know whether the code I just wrote has full coverage.
The trick is to collect all the lines that changed and all the lines that have no coverage, then
compare the two, and you have the changed lines that lack coverage!
### Getting the list of changed lines
Recently, I bumped into
[this article](https://adam.younglogic.com/2018/07/testing-patch-has-test/). It is a great awk
script that lists the lines that changed in the latest commit. I have really no problem with awk,
but I’m pretty sure it can be done in Python, as that is my main language nowadays.
```python
import re          # imports added here for completeness;
import subprocess  # the snippets below use them, too

def get_changed_lines():
    """Get the line numbers that changed in the last commit
    """
    git_output = subprocess.check_output('git show', shell=True).decode('utf-8')
    current_file = None
    lines = {}
    left = 0
    right = 0
    for line in git_output.split('\n'):
        match = re.match(r'^@@ -([0-9]+),[0-9]+ [+]([0-9]+),[0-9]+ @@', line)
        if match:
            left = int(match.groups()[0])
            right = int(match.groups()[1])
            continue
        if re.match(r'^\+\+\+', line):
            current_file = line[6:]
            continue
        if re.match(r'^-', line):
            left += 1
            continue
        if re.match(r'^[+]', line):
            # Save this line number as changed
            lines.setdefault(current_file, [])
            lines[current_file].append(right)
            right += 1
            continue
        left += 1
        right += 1
    return lines
```
OK, not as short as the awk script, but works just fine.
### Getting the uncovered lines
Coverage.py can list the uncovered lines with `coverage report --show-missing`. For Calendar.social, this looks something like this:
```
Name                                     Stmts    Miss   Cover   Missing
----------------------------------------------------------------------
calsocial/__init__.py                      173      62     64%   44, 138-148, 200, 239-253, 261-280, 288-295, 308-309, 324-346, 354-363
calsocial/__main__.py                        3       3      0%   4-9
calsocial/account.py                       108      51     53%   85-97, 105-112, 125, 130-137, 148-160, 169-175, 184-200, 209-212, 221-234
calsocial/app_state.py                      10       0    100%
calsocial/cache.py                          73      11     85%   65-70, 98, 113, 124, 137, 156-159
calsocial/calendar_system/__init__.py       10       3     70%   32, 41, 48
calsocial/calendar_system/gregorian.py      77       0    100%
calsocial/config_development.py             11      11      0%   4-17
calsocial/config_testing.py                 12       0    100%
calsocial/forms.py                         198      83     58%   49, 59, 90, 136-146, 153, 161-169, 188-195, 198-206, 209-212, 228-232, 238-244, 252-253, 263-267, 273-277, 317-336, 339-342, 352-354, 362-374, 401-413
calsocial/models.py                        372      92     75%   49-51, 103-106, 177, 180-188, 191-200, 203, 242-248, 257-268, 289, 307, 349, 352-359, 378, 392, 404-409, 416, 444, 447, 492-496, 503, 510, 516, 522, 525, 528, 535-537, 545-551, 572, 606-617, 620, 652, 655, 660, 700, 746-748, 762-767, 774-783, 899, 929, 932
calsocial/security.py                       15       3     80%   36, 56-58
calsocial/utils.py                          42       5     88%   45-48, 52-53
----------------------------------------------------------------------
TOTAL                                     1104     324     71%
```
All we have to do is convert these ranges into a list of numbers and compare them with the
result of the previous function:
```python
def get_uncovered_lines(changed_lines):
    """Get the full list of lines that have not been covered by tests
    """
    column_widths = []
    uncovered_lines = {}
    for line in sys.stdin:
        line = line.strip()
        if line.startswith('---'):
            continue
        if line.startswith('Name '):
            match = re.match(r'^(Name +)(Stmts +)(Miss +)(Cover +)Missing$', line)
            assert match
            column_widths = [len(col) for col in match.groups()]
            continue
        name = [
            line[sum(column_widths[0:idx]):sum(column_widths[0:idx]) + width].strip()
            for idx, width in enumerate(column_widths)][0]
        missing = line[sum(column_widths):].strip()
        for value in missing.split(', '):
            if not value:
                continue
            try:
                number = int(value)
            except ValueError:
                first, last = value.split('-')
                lines = range(int(first), int(last) + 1)
            else:
                lines = range(number, number + 1)
            for lineno in lines:
                # keep only the lines that changed *and* lack coverage
                if name in changed_lines and lineno in changed_lines[name]:
                    uncovered_lines.setdefault(name, [])
                    uncovered_lines[name].append(lineno)
    return uncovered_lines
```
At the end we have a dictionary with filenames as keys and lists of changed but uncovered lines
as values.
### Converting back to ranges
To make the final result more readable, let’s convert them back to nice `from_line-to_line`
range lists first:
```python
def line_numbers_to_ranges():
    """List the lines that have not been covered
    """
    changed_lines = get_changed_lines()
    uncovered_lines = get_uncovered_lines(changed_lines)
    line_list = []
    for filename, lines in uncovered_lines.items():
        lines = sorted(lines)
        last_value = None
        ranges = []
        for lineno in lines:
            if last_value and last_value + 1 == lineno:
                ranges[-1].append(lineno)
            else:
                ranges.append([lineno])
            last_value = lineno
        range_list = []
        for range_ in ranges:
            first = range_.pop(0)
            if range_:
                range_list.append(f'{first}-{range_[-1]}')
            else:
                range_list.append(str(first))
        line_list.append((filename, ', '.join(range_list)))
    return line_list
```
### Printing the result
Now all that is left is to print the result on the screen in a format digestible by a human being:
```python
def tabular_print(uncovered_lines):
    """Print the list of uncovered lines on the screen in a tabbed format
    """
    max_filename_len = max(len(data[0]) for data in uncovered_lines)
    for filename, lines in uncovered_lines:
        print(filename.ljust(max_filename_len + 2) + lines)
```
And we are done.
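For completeness, a hypothetical `__main__` block to wire the pieces together
(the script reads the coverage report on its standard input):

```python
# usage sketch: coverage report --show-missing | python check-last-commit.py
if __name__ == '__main__':
    tabular_print(line_numbers_to_ranges())
```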
### Conclusion
This task never seemed hard to accomplish, but somehow I never put enough energy into it to make
it happen. Kudos to Adam Young for doing some of the legwork for me!

View File

@ -1,58 +0,0 @@
#! /usr/bin/env python3

import re
import sys

import yaml

RENAME_REGEX = re.compile(r'\.(markdown|md)')
FRONT_MATTER_REGEX = re.compile(r'^---\n([\s\S]+?)\n---', re.MULTILINE)


def convert(filename):
    org_filename = re.sub(RENAME_REGEX, '.org', filename)

    with open(filename) as md_file:
        markdown = md_file.read()

    filename = re.sub(RENAME_REGEX, '', filename)

    # Get the front matter
    m = re.match(FRONT_MATTER_REGEX, markdown)

    if not m:
        print(f'No front matter found in {filename}')

        return

    front_matter = m.groups()[0]
    markdown = markdown[len(front_matter) + 10:]
    front_matter = yaml.load(front_matter)
    markdown = '\n'.join(' ' + line if line else '' for line in markdown.split('\n'))

    tags = ''
    permalink = ''
    date = front_matter['date'].isoformat().replace('T', ' ')

    if 'tags' in front_matter:
        tags = ' :' + ':'.join(front_matter['tags']) + ':'

    if 'permalink' in front_matter:
        permalink = f' :PERMALINK: {front_matter["permalink"]}\n'

    markdown = markdown.replace('`', '~')

    org_content = f'''* {front_matter['title']}{tags}
CLOSED: [{date}]
:PROPERTIES:
:EXPORT_FILENAME: {filename}
{permalink} :END:
{markdown}'''

    with open(org_filename, 'w') as org_file:
        org_file.write(org_content)


if __name__ == '__main__':
    for filename in sys.argv[1:]:
        convert(filename)

View File

@ -1,30 +0,0 @@
---
layout: page
title: About the author
permalink: /about/
---
{% include about.html %}
<h3>Some fun stats and links</h3>
<h4>I’m a registered Linux User :)</h4>
<p>
<a href="https://www.linuxcounter.net/user/416972">
<img src="https://www.linuxcounter.net/cert/416972.png"
alt="I am Linux user #416972">
</a>
</p>
<h4>I have my own GeekCode</h4>
<pre>
-----BEGIN GEEK CODE BLOCK-----
Version: 3.1
GCM/CS/IT/O d--(+) s++:- a C++$ UB++L++++$ P++ L+++$ E++>$ W+++$ N o? K w+ O M- V PS+ PE Y+ PGP+(++) t+ 5 X R !tv b+ DI++ D+ G e h----() r+++ y++++
-----END GEEK CODE BLOCK-----
</pre>
<h4>And I have a <a href="{% link gergely@polonkai.eu.asc %}">public PGP key</a></h4>
Its fingerprint is <code>13D6 0476 B35A FCA8 BC01 3A32 F42B BA58 B074 0C4C</code>.
<pre class="code-block">
{% include pubkey.asc %}
</pre>

View File

@ -1,30 +0,0 @@
---
---
<?xml version="1.0" encoding="utf-8"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
<channel>
<title>{{site.title | xml_escape}}</title>
<description>{{site.description | xml_escape}}</description>
<link>{{site.baseurl | prepend: site.url}}</link>
<atom:link href="{{'/blog/atom.xml' | prepend: site.baseurl | prepend: site.url}}" rel="self" type="application/rss+xml" />
<lastBuildDate>{{site.posts.first.date | date: "%a, %d %b %Y %H:%M:%S %z"}}</lastBuildDate>
<pubDate>{{site.posts.first.date | date: "%a, %d %b %Y %H:%M:%S %z"}}</pubDate>
<ttl>7200</ttl>
<language>en</language>
<image>
<title>Gergely Polonkai</title>
<url>{{'/images/profile.png' | prepend: site.baseurl | prepend: site.url}}</url>
<link>{{'/' | prepend: site.baseurl | prepend: site.url}}</link>
</image>
{% for post in site.posts limit:10 %}
<item>
<title>{{post.title | xml_escape}}</title>
<link>{{post.url | prepend: site.baseurl | prepend: site.url}}</link>
<comments>{{post.url | prepend: site.baseurl | prepend: site.url}}#comments</comments>
<pubDate>{{post.date | date: "%a, %d %b %Y %H:%M:%S %z"}}</pubDate>
<description>{{post.excerpt | xml_escape}}</description>
<guid isPermaLink="true">{{post.url | prepend: site.baseurl | prepend: site.url}}</guid>
</item>
{% endfor %}
</channel>
</rss>

View File

@ -1,12 +0,0 @@
---
layout: page
title: Blog posts
post_listing: true
---
{% include pagination.html %}
{% assign posts = paginator.posts %}
{% include post-list.html %}
{% include pagination.html %}

View File

@ -1,4 +0,0 @@
---
layout: posts-by-tag
tag: active-directory
---

View File

@ -1,4 +0,0 @@
---
layout: posts-by-tag
tag: apache
---

Some files were not shown because too many files have changed in this diff