Add Fish. (Update)

Jorge Bucaran
2014-12-07 15:57:46 +09:00
75 changed files with 8957 additions and 364 deletions

.gitignore

@@ -4,3 +4,4 @@ benchmark/
lib/linguist/samples.json
/grammars
/node_modules
test/fixtures/ace_modes.json

Rakefile

@@ -3,13 +3,14 @@ require 'rake/clean'
require 'rake/testtask'
require 'yaml'
require 'yajl'
require 'open-uri'
task :default => :test
Rake::TestTask.new
# Extend test task to check for samples
task :test => :check_samples
# Extend test task to check for samples and fetch latest Ace modes
task :test => [:check_samples, :fetch_ace_modes]
desc "Check that we have samples.json generated"
task :check_samples do
@@ -18,6 +19,20 @@ task :check_samples do
  end
end

desc "Fetch the latest Ace modes from its GitHub repository"
task :fetch_ace_modes do
  ACE_FIXTURE_PATH = File.join('test', 'fixtures', 'ace_modes.json')
  File.delete(ACE_FIXTURE_PATH) if File.exist?(ACE_FIXTURE_PATH)

  begin
    ace_github_modes = open("https://api.github.com/repos/ajaxorg/ace/contents/lib/ace/mode").read
    File.write(ACE_FIXTURE_PATH, ace_github_modes)
  rescue OpenURI::HTTPError, SocketError
    # no internet? no problem.
  end
end

task :samples do
  require 'linguist/samples'
  json = Yajl.dump(Linguist::Samples.data, :pretty => true)
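
The new fetch_ace_modes task writes the raw GitHub contents-API response straight to test/fixtures/ace_modes.json and silently skips the download when there is no network, so anything that consumes the fixture has to tolerate a missing file. A minimal sketch of such a consumer, illustrative only (it uses the stdlib JSON module; the helper name is made up, not part of this commit):

    require 'json'

    # Hypothetical helper: Ace mode names from the fetched fixture,
    # or nil when `rake fetch_ace_modes` could not download it.
    def ace_mode_names
      path = File.join('test', 'fixtures', 'ace_modes.json')
      return nil unless File.exist?(path)
      # The contents API returns an array of objects, each with a "name" field.
      JSON.parse(File.read(path)).map { |entry| entry['name'] }
    end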

grammars.yml

@@ -61,8 +61,14 @@ https://github.com/Varriount/NimLime:
- source.nimrod
- source.nimrod_filter
- source.nimrodcfg
https://github.com/alkemist/gradle.tmbundle:
- source.groovy.gradle
https://github.com/ambethia/Sublime-Loom:
- source.loomscript
https://github.com/angryant0007/VBDotNetSyntax:
- source.vbnet
https://github.com/anunayk/cool-tmbundle:
- source.cool
https://github.com/aroben/ada.tmbundle/raw/c45eed4d5f98fe3bcbbffbb9e436601ab5bbde4b/Syntaxes/Ada.plist:
- source.ada
https://github.com/aroben/ruby.tmbundle@4636a3023153c3034eb6ffc613899ba9cf33b41f:
@@ -163,6 +169,8 @@ https://github.com/lavrton/sublime-better-typescript:
- source.ts
https://github.com/leafo/moonscript-tmbundle:
- source.moonscript
https://github.com/lsf37/Isabelle.tmbundle:
- source.isabelle.theory
https://github.com/lunixbochs/x86-assembly-textmate-bundle:
- source.asm.x86
https://github.com/macekond/Alloy.tmbundle:
@@ -408,5 +416,11 @@ https://github.com/vmg/zephir-sublime:
- source.php.zephir
https://github.com/whitequark/llvm.tmbundle:
- source.llvm
https://github.com/wmertens/sublime-nix:
- source.nix
https://raw.githubusercontent.com/eregon/oz-tmbundle/master/Syntaxes/Oz.tmLanguage:
- source.oz
https://raw.githubusercontent.com/tenbits/sublime-mask/release/Syntaxes/mask.tmLanguage:
- source.mask
https://github.com/l15n/fish-tmbundle:
- source.fish

lib/linguist.rb

@@ -4,4 +4,5 @@ require 'linguist/heuristics'
require 'linguist/language'
require 'linguist/repository'
require 'linguist/samples'
require 'linguist/shebang'
require 'linguist/version'

lib/linguist/classifier.rb

@@ -3,6 +3,25 @@ require 'linguist/tokenizer'
module Linguist
  # Language bayesian classifier.
  class Classifier
    # Public: Use the classifier to detect language of the blob.
    #
    # blob - An object that quacks like a blob.
    # possible_languages - Array of Language objects
    #
    # Examples
    #
    #   Classifier.call(FileBlob.new("path/to/file"), [
    #     Language["Ruby"], Language["Python"]
    #   ])
    #
    # Returns an Array of Language objects, most probable first.
    def self.call(blob, possible_languages)
      language_names = possible_languages.map(&:name)
      classify(Samples.cache, blob.data, language_names).map do |name, _|
        Language[name] # Return the actual Language objects
      end
    end

    # Public: Train classifier that data is a certain language.
    #
    # db - Hash classifier database object

lib/linguist/heuristics.rb

@@ -1,173 +1,177 @@
module Linguist
# A collection of simple heuristics that can be used to better analyze languages.
class Heuristics
ACTIVE = true
# Public: Given an array of String language names,
# apply heuristics against the given data and return an array
# of matching languages, or nil.
# Public: Use heuristics to detect language of the blob.
#
# data - Array of tokens or String data to analyze.
# languages - Array of language name Strings to restrict to.
# blob - An object that quacks like a blob.
# possible_languages - Array of Language objects
#
# Returns an array of Languages or []
def self.find_by_heuristics(data, languages)
if active?
result = []
# Examples
#
# Heuristics.call(FileBlob.new("path/to/file"), [
# Language["Ruby"], Language["Python"]
# ])
#
# Returns an Array of languages, or empty if none matched or were inconclusive.
def self.call(blob, languages)
data = blob.data
if languages.all? { |l| ["Perl", "Prolog"].include?(l) }
result = disambiguate_pl(data)
end
if languages.all? { |l| ["ECL", "Prolog"].include?(l) }
result = disambiguate_ecl(data)
end
if languages.all? { |l| ["IDL", "Prolog"].include?(l) }
result = disambiguate_pro(data)
end
if languages.all? { |l| ["Common Lisp", "OpenCL"].include?(l) }
result = disambiguate_cl(data)
end
if languages.all? { |l| ["Hack", "PHP"].include?(l) }
result = disambiguate_hack(data)
end
if languages.all? { |l| ["Scala", "SuperCollider"].include?(l) }
result = disambiguate_sc(data)
end
if languages.all? { |l| ["AsciiDoc", "AGS Script"].include?(l) }
result = disambiguate_asc(data)
end
if languages.all? { |l| ["FORTRAN", "Forth"].include?(l) }
result = disambiguate_f(data)
end
if languages.all? { |l| ["F#", "Forth", "GLSL"].include?(l) }
result = disambiguate_fs(data)
end
return result
@heuristics.each do |heuristic|
return Array(heuristic.call(data)) if heuristic.matches?(languages)
end
[] # No heuristics matched
end
# Internal: Define a new heuristic.
#
# languages - String names of languages to disambiguate.
# heuristic - Block which takes data as an argument and returns a Language or nil.
#
# Examples
#
# disambiguate "Perl", "Prolog" do |data|
# if data.include?("use strict")
# Language["Perl"]
# elsif data.include?(":-")
# Language["Prolog"]
# end
# end
#
def self.disambiguate(*languages, &heuristic)
@heuristics << new(languages, &heuristic)
end
# Internal: Array of defined heuristics
@heuristics = []
# Internal
def initialize(languages, &heuristic)
@languages = languages
@heuristic = heuristic
end
# Internal: Check if this heuristic matches the candidate languages.
def matches?(candidates)
candidates.any? && candidates.all? { |l| @languages.include?(l.name) }
end
# Internal: Perform the heuristic
def call(data)
@heuristic.call(data)
end
disambiguate "Objective-C", "C++", "C" do |data|
if (/@(interface|class|protocol|property|end|synchronised|selector|implementation)\b/.match(data))
Language["Objective-C"]
elsif (/^\s*#\s*include <(cstdint|string|vector|map|list|array|bitset|queue|stack|forward_list|unordered_map|unordered_set|(i|o|io)stream)>/.match(data) ||
/^\s*template\s*</.match(data) || /^[^@]class\s+\w+/.match(data) || /^[^@](private|public|protected):$/.match(data) || /std::.+$/.match(data))
Language["C++"]
end
end
# .h extensions are ambiguous between C, C++, and Objective-C.
# We want to shortcut look for Objective-C _and_ now C++ too!
#
# Returns an array of Languages or []
def self.disambiguate_c(data)
matches = []
if data.include?("@interface")
matches << Language["Objective-C"]
elsif data.include?("#include <cstdint>")
matches << Language["C++"]
end
matches
end
def self.disambiguate_pl(data)
matches = []
if data.include?("use strict")
matches << Language["Perl"]
disambiguate "Perl", "Perl6", "Prolog" do |data|
if data.include?("use v6")
Language["Perl6"]
elsif data.include?("use strict")
Language["Perl"]
elsif data.include?(":-")
matches << Language["Prolog"]
Language["Prolog"]
end
matches
end
def self.disambiguate_ecl(data)
matches = []
disambiguate "ECL", "Prolog" do |data|
if data.include?(":-")
matches << Language["Prolog"]
Language["Prolog"]
elsif data.include?(":=")
matches << Language["ECL"]
Language["ECL"]
end
matches
end
def self.disambiguate_pro(data)
matches = []
if (data.include?(":-"))
matches << Language["Prolog"]
disambiguate "IDL", "Prolog" do |data|
if data.include?(":-")
Language["Prolog"]
else
matches << Language["IDL"]
Language["IDL"]
end
matches
end
def self.disambiguate_ts(data)
matches = []
if (data.include?("</translation>"))
matches << Language["XML"]
else
matches << Language["TypeScript"]
end
matches
end
def self.disambiguate_cl(data)
matches = []
disambiguate "Common Lisp", "OpenCL", "Cool" do |data|
if data.include?("(defun ")
matches << Language["Common Lisp"]
Language["Common Lisp"]
elsif /^class/x.match(data)
Language["Cool"]
elsif /\/\* |\/\/ |^\}/.match(data)
matches << Language["OpenCL"]
Language["OpenCL"]
end
matches
end
def self.disambiguate_r(data)
matches = []
matches << Language["Rebol"] if /\bRebol\b/i.match(data)
matches << Language["R"] if data.include?("<-")
matches
end
def self.disambiguate_hack(data)
matches = []
disambiguate "Hack", "PHP" do |data|
if data.include?("<?hh")
matches << Language["Hack"]
Language["Hack"]
elsif /<?[^h]/.match(data)
matches << Language["PHP"]
Language["PHP"]
end
matches
end
def self.disambiguate_sc(data)
matches = []
if (/\^(this|super)\./.match(data) || /^\s*(\+|\*)\s*\w+\s*{/.match(data) || /^\s*~\w+\s*=\./.match(data))
matches << Language["SuperCollider"]
disambiguate "Scala", "SuperCollider" do |data|
if /\^(this|super)\./.match(data) || /^\s*(\+|\*)\s*\w+\s*{/.match(data) || /^\s*~\w+\s*=\./.match(data)
Language["SuperCollider"]
elsif /^\s*import (scala|java)\./.match(data) || /^\s*val\s+\w+\s*=/.match(data) || /^\s*class\b/.match(data)
Language["Scala"]
end
if (/^\s*import (scala|java)\./.match(data) || /^\s*val\s+\w+\s*=/.match(data) || /^\s*class\b/.match(data))
matches << Language["Scala"]
end
matches
end
def self.disambiguate_asc(data)
matches = []
matches << Language["AsciiDoc"] if /^=+(\s|\n)/.match(data)
matches
disambiguate "AsciiDoc", "AGS Script" do |data|
Language["AsciiDoc"] if /^=+(\s|\n)/.match(data)
end
def self.disambiguate_f(data)
matches = []
disambiguate "FORTRAN", "Forth" do |data|
if /^: /.match(data)
matches << Language["Forth"]
Language["Forth"]
elsif /^([c*][^a-z]| subroutine\s)/i.match(data)
matches << Language["FORTRAN"]
Language["FORTRAN"]
end
matches
end
def self.disambiguate_fs(data)
matches = []
disambiguate "F#", "Forth", "GLSL" do |data|
if /^(: |new-device)/.match(data)
matches << Language["Forth"]
elsif /^(#light|import|let|module|namespace|open|type)/.match(data)
matches << Language["F#"]
elsif /^(#include|#pragma|precision|uniform|varying|void)/.match(data)
matches << Language["GLSL"]
Language["Forth"]
elsif /^\s*(#light|import|let|module|namespace|open|type)/.match(data)
Language["F#"]
elsif /^\s*(#include|#pragma|precision|uniform|varying|void)/.match(data)
Language["GLSL"]
end
matches
end
def self.active?
!!ACTIVE
disambiguate "Gosu", "JavaScript" do |data|
Language["Gosu"] if /^uses java\./.match(data)
end
disambiguate "LoomScript", "LiveScript" do |data|
if /^\s*package\s*[\w\.\/\*\s]*\s*{/.match(data)
Language["LoomScript"]
else
Language["LiveScript"]
end
end
disambiguate "Frege", "Forth", "text" do |data|
if /^(: |also |new-device|previous )/.match(data)
Language["Forth"]
elsif /\s*(import|module|package|data|type) /.match(data)
Language["Frege"]
else
Language["text"]
end
end
disambiguate "TypeScript", "XML" do |data|
if data.include?("<TS ")
Language["XML"]
else
Language["TypeScript"]
end
end
end
end
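
In short, a rule fires only when the candidate set is non-empty and every candidate appears in the rule's language list; the first matching rule's block runs against the blob data and its result is wrapped in an Array. A small usage sketch from outside the Linguist namespace (the path is illustrative):

    candidates = [Linguist::Language["Perl"], Linguist::Language["Prolog"]]
    Linguist::Heuristics.call(Linguist::FileBlob.new("script.pl"), candidates)
    # => [Linguist::Language["Perl"]]   when the data contains "use strict"
    # => [Linguist::Language["Prolog"]] when it contains ":-" instead
    # => []                             when the matching rule is inconclusive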

lib/linguist/language.rb

@@ -10,6 +10,8 @@ require 'linguist/heuristics'
require 'linguist/samples'
require 'linguist/file_blob'
require 'linguist/blob_helper'
require 'linguist/strategy/filename'
require 'linguist/shebang'
module Linguist
# Language names that are recognizable by GitHub. Defined languages
@@ -91,6 +93,13 @@ module Linguist
language
end
STRATEGIES = [
Linguist::Strategy::Filename,
Linguist::Shebang,
Linguist::Heuristics,
Linguist::Classifier
]
# Public: Detects the Language of the blob.
#
# blob - an object that includes the Linguist `BlobHelper` interface;
@@ -98,61 +107,22 @@ module Linguist
#
# Returns Language or nil.
def self.detect(blob)
name = blob.name.to_s
# Bail early if the blob is binary or empty.
return nil if blob.likely_binary? || blob.binary? || blob.empty?
# A bit of an elegant hack. If the file is executable but extensionless,
# append a "magic" extension so it can be classified with other
# languages that have shebang scripts.
extensions = FileBlob.new(name).extensions
if extensions.empty? && blob.mode && (blob.mode.to_i(8) & 05) == 05
name += ".script!"
end
# Find languages that match based on filename.
possible_languages = find_by_filename(name)
if possible_languages.length == 1
# Simplest and most common case, we can just return the one match based
# on extension
possible_languages.first
# If there is more than one possible language with that extension (or no
# extension at all, in the case of extensionless scripts), we need to
# continue our detection work
else
# Matches possible_languages.length == 0 || possible_languages.length > 0
data = blob.data
# Check if there's a shebang line and use that as authoritative
if (result = find_by_shebang(data)) && !result.empty?
return result.first
# More than one language with that extension. We need to make a choice.
elsif possible_languages.length > 1
# First try heuristics
possible_language_names = possible_languages.map(&:name)
heuristic_languages = Heuristics.find_by_heuristics(data, possible_language_names)
# If there are multiple possible languages returned from heuristics
# then reduce language candidates for Bayesian classifier here.
if heuristic_languages.size > 1
possible_language_names = heuristic_languages.map(&:name)
end
if heuristic_languages.size == 1
return heuristic_languages.first
# Lastly, fall back to the probabilistic classifier.
elsif classified = Classifier.classify(Samples.cache, data, possible_language_names).first
# Return the actual Language object based of the string language name (i.e., first element of `#classify`)
return Language[classified[0]]
end
# Call each strategy until one candidate is returned.
STRATEGIES.reduce([]) do |languages, strategy|
candidates = strategy.call(blob, languages)
if candidates.size == 1
return candidates.first
elsif candidates.size > 1
# More than one candidate was found, pass them to the next strategy.
candidates
else
# No candidates were found, pass on languages from the previous strategy.
languages
end
end
end.first
end
# Public: Get all Languages
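
The STRATEGIES pipeline only relies on a duck type: each strategy responds to call(blob, candidates) and returns an Array of Languages, where an empty Array means "no opinion" (the previous candidates are carried forward), a single element ends detection, and several elements narrow the field for the next strategy. A hypothetical extra strategy, not part of this commit, just to illustrate that contract:

    module Linguist
      module Strategy
        # Hypothetical: pick the language named in an Emacs-style modeline.
        class Modeline
          EMACS_MODELINE = /-\*-\s*mode:\s*(\w+)\s*-\*-/i

          def self.call(blob, _candidates)
            mode = blob.data[EMACS_MODELINE, 1]
            return [] unless mode
            Array(Language[mode])   # [] when no language goes by that name
          end
        end
      end
    end

Plugging it in would only require adding the class to the STRATEGIES Array at the position where it should be consulted.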
@@ -229,20 +199,26 @@ module Linguist
@extension_index[extname]
end
# Public: Look up Languages by shebang line.
# DEPRECATED
def self.find_by_shebang(data)
@interpreter_index[Shebang.interpreter(data)]
end
# Public: Look up Languages by interpreter.
#
# data - Array of tokens or String data to analyze.
# interpreter - String of interpreter name
#
# Examples
#
# Language.find_by_shebang("#!/bin/bash\ndate;")
# Language.find_by_interpreter("bash")
# # => [#<Language name="Bash">]
#
# Returns the matching Language
def self.find_by_shebang(data)
@interpreter_index[Linguist.interpreter_from_shebang(data)]
def self.find_by_interpreter(interpreter)
@interpreter_index[interpreter]
end
# Public: Look up Language by its name or lexer.
#
# name - The String name of the Language

File diff suppressed because it is too large.

lib/linguist/samples.rb

@@ -6,6 +6,7 @@ end
require 'linguist/md5'
require 'linguist/classifier'
require 'linguist/shebang'
module Linguist
# Model for accessing classifier training data.
@@ -61,7 +62,7 @@ module Linguist
yield({
:path => path,
:language => category,
:interpreter => Linguist.interpreter_from_shebang(File.read(path)),
:interpreter => Shebang.interpreter(File.read(path)),
:extname => File.extname(filename)
})
end
@@ -114,41 +115,4 @@ module Linguist
db
end
end
# Used to retrieve the interpreter from the shebang line of a file's
# data.
def self.interpreter_from_shebang(data)
lines = data.lines.to_a
if lines.any? && (match = lines[0].match(/(.+)\n?/)) && (bang = match[0]) =~ /^#!/
bang.sub!(/^#! /, '#!')
tokens = bang.split(' ')
pieces = tokens.first.split('/')
if pieces.size > 1
script = pieces.last
else
script = pieces.first.sub('#!', '')
end
script = script == 'env' ? tokens[1] : script
# If script has an invalid shebang, we might get here
return unless script
# "python2.6" -> "python2"
script.sub! $1, '' if script =~ /(\.\d+)$/
# Check for multiline shebang hacks that call `exec`
if script == 'sh' &&
lines[0...5].any? { |l| l.match(/exec (\w+).+\$0.+\$@/) }
script = $1
end
File.basename(script)
else
nil
end
end
end

lib/linguist/shebang.rb (new file)

@@ -0,0 +1,44 @@
module Linguist
  class Shebang
    # Public: Use shebang to detect language of the blob.
    #
    # blob - An object that quacks like a blob.
    #
    # Examples
    #
    #   Shebang.call(FileBlob.new("path/to/file"))
    #
    # Returns an Array with one Language if the blob has a shebang with a valid
    # interpreter, or empty if there is no shebang.
    def self.call(blob, _ = nil)
      Language.find_by_interpreter interpreter(blob.data)
    end

    # Public: Get the interpreter from the shebang
    #
    # Returns a String or nil
    def self.interpreter(data)
      lines = data.lines
      return unless match = /^#! ?(.+)$/.match(lines.first)

      tokens = match[1].split(' ')
      script = tokens.first.split('/').last
      script = tokens[1] if script == 'env'

      # If script has an invalid shebang, we might get here
      return unless script

      # "python2.6" -> "python2"
      script.sub! $1, '' if script =~ /(\.\d+)$/

      # Check for multiline shebang hacks that call `exec`
      if script == 'sh' &&
          lines.first(5).any? { |l| l.match(/exec (\w+).+\$0.+\$@/) }
        script = $1
      end

      File.basename(script)
    end
  end
end
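
For reference, a few inputs and what the parsing rules above yield (expected results read off the code, not test output from this commit):

    Linguist::Shebang.interpreter("#!/usr/bin/ruby\nputs 1\n")          # => "ruby"
    Linguist::Shebang.interpreter("#!/usr/bin/env python2.6\npass\n")   # => "python2"  (version suffix stripped)
    Linguist::Shebang.interpreter("plain text, no shebang\n")           # => nil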

lib/linguist/strategy/filename.rb (new file)

@@ -0,0 +1,20 @@
module Linguist
  module Strategy
    # Detects language based on filename and/or extension
    class Filename
      def self.call(blob, _)
        name = blob.name.to_s

        # A bit of an elegant hack. If the file is executable but extensionless,
        # append a "magic" extension so it can be classified with other
        # languages that have shebang scripts.
        extensions = FileBlob.new(name).extensions
        if extensions.empty? && blob.mode && (blob.mode.to_i(8) & 05) == 05
          name += ".script!"
        end

        Language.find_by_filename(name)
      end
    end
  end
end
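
The only subtle part here is the executable test: blob.mode is a string of octal digits (for a FileBlob, something like "100755"), so to_i(8) & 05 checks the world read and execute bits. Worked through on two typical mode strings (a sketch under that assumption):

    ("100755".to_i(8) & 05) == 05   # => true   world r-x bits set, so ".script!" is appended
    ("100644".to_i(8) & 05) == 05   # => false  ordinary non-executable file

As the comment above says, the appended ".script!" lets an extensionless executable be classified alongside the other languages that have shebang scripts.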

lib/linguist/vendor.yml

@@ -232,9 +232,6 @@
# .DS_Store's
- .[Dd][Ss]_[Ss]tore$
# Mercury --use-subdirs
- Mercury/
# R packages
- ^vignettes/
- ^inst/extdata/

lib/linguist/version.rb

@@ -1,3 +1,3 @@
module Linguist
VERSION = "4.0.3"
VERSION = "4.2.3"
end

samples/C++/16F88.h (new file)

@@ -0,0 +1,86 @@
/*
* This file is part of PIC
* Copyright © 2012 Rachel Mant (dx-mon@users.sourceforge.net)
*
* PIC is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* PIC is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
enum PIC16F88Instruction
{
ADDWF,
ANDWF,
CLRF,
CLRW,
COMF,
DECF,
DECFSZ,
INCF,
INCFSZ,
IORWF,
MOVF,
MOVWF,
NOP,
RLF,
RRF,
SUBWF,
SWAPF,
XORWF,
BCF,
BSF,
BTFSC,
BTFSS,
ADDLW,
ANDLW,
CALL,
CLRWDT,
GOTO,
IORLW,
MOVLW,
RETFIE,
RETLW,
RETURN,
SLEEP,
SUBLW,
XORLW
};
class PIC16F88
{
public:
PIC16F88(ROM *ProgramMemory);
void Step();
private:
uint8_t q;
bool nextIsNop, trapped;
Memory *memory;
ROM *program;
Stack<uint16_t, 8> *CallStack;
Register<uint16_t> *PC;
Register<> *WREG, *PCL, *STATUS, *PCLATCH;
PIC16F88Instruction inst;
uint16_t instrWord;
private:
void DecodeInstruction();
void ProcessInstruction();
uint8_t GetBank();
uint8_t GetMemoryContents(uint8_t partialAddress);
void SetMemoryContents(uint8_t partialAddress, uint8_t newVal);
void CheckZero(uint8_t value);
void StoreValue(uint8_t value, bool updateZero);
uint8_t SetCarry(bool val);
uint16_t GetPCHFinalBits();
};

samples/C++/Memory16F88.h (new file)

@@ -0,0 +1,32 @@
/*
* This file is part of PIC
* Copyright © 2012 Rachel Mant (dx-mon@users.sourceforge.net)
*
* PIC is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* PIC is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "Memory.h"
class Memory16F88 : public Memory
{
private:
uint8_t memory[512];
std::map<uint32_t, MemoryLocation *> memoryMap;
public:
Memory16F88();
uint8_t Dereference(uint8_t bank, uint8_t partialAddress);
uint8_t *Reference(uint8_t bank, uint8_t partialAddress);
uint8_t *operator [](uint32_t ref);
};


@@ -0,0 +1,76 @@
/*
* This file is part of IRCBot
* Copyright © 2014 Rachel Mant (dx-mon@users.sourceforge.net)
*
* IRCBot is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* IRCBot is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __THREADED_QUEUE_H__
#define __THREADED_QUEUE_H__
#include <pthread.h>
#include <queue>
template<class T>
class ThreadedQueue : public std::queue<T>
{
private:
pthread_mutex_t queueMutex;
pthread_cond_t queueCond;
public:
ThreadedQueue()
{
pthread_mutexattr_t mutexAttrs;
pthread_condattr_t condAttrs;
pthread_mutexattr_init(&mutexAttrs);
pthread_mutexattr_settype(&mutexAttrs, PTHREAD_MUTEX_ERRORCHECK);
pthread_mutex_init(&queueMutex, &mutexAttrs);
pthread_mutexattr_destroy(&mutexAttrs);
pthread_condattr_init(&condAttrs);
pthread_condattr_setpshared(&condAttrs, PTHREAD_PROCESS_PRIVATE);
pthread_cond_init(&queueCond, &condAttrs);
pthread_condattr_destroy(&condAttrs);
}
~ThreadedQueue()
{
pthread_cond_destroy(&queueCond);
pthread_mutex_destroy(&queueMutex);
}
void waitItems()
{
pthread_mutex_lock(&queueMutex);
pthread_cond_wait(&queueCond, &queueMutex);
pthread_mutex_unlock(&queueMutex);
}
void signalItems()
{
pthread_mutex_lock(&queueMutex);
pthread_cond_broadcast(&queueCond);
pthread_mutex_unlock(&queueMutex);
}
void push(T item)
{
std::queue<T>::push(item);
signalItems();
}
};
#endif /*__THREADED_QUEUE_H__*/

samples/C/2D.C (new file)

@@ -0,0 +1,145 @@
#include "2D.h"
#include <math.h>
void set_vgabasemem(void)
{
ULONG vgabase;
SELECTOR tmp;
asm mov [tmp], ds
dpmi_get_sel_base(&vgabase, tmp);
vgabasemem = (char *)(-vgabase + 0xa0000);
}
void drw_chdis(int mode) // change the display!
{
regs.b.ah = 0x00; // seet theh display moode
regs.b.al = mode; // change it to the mode like innit
regs.h.flags = 0x72;// Set the dingoes kidneys out of FLAGS eh?
regs.h.ss = 0; // Like, totally set the stack segment
regs.h.sp = 0; // Set tha stack pointaaaaahhhhh!!!
dpmi_simulate_real_interrupt(0x10, &regs);
}
void drw_pix(int x, int y, enum COLORS col)
{
*VGAPIX(x, y) = col;
}
void drw_line(int x0, int y0, int x1, int y1, enum COLORS col)
{
// Going for the optimized version of bresenham's line algo.
int stp = (abs(y0 - y1) > abs(x0 - x1));
int tmp, dx, dy, err, yi, i, j; // yi = y excrement
if (stp) {
// swappity swap
tmp = y0;
y0 = x0;
x0 = tmp;
tmp = y1;
y1 = x1;
x1 = tmp;
}
// AAAAND NOW WE MUST DO ZEES AGAIN :(
// I'm sure there was a func somewhere that does this? :P
if (x0 > x1) {
tmp = x0;
x0 = x1;
x1 = tmp;
tmp = y0;
y0 = y1;
y1 = tmp;
}
dx = (x1 - x0);
dy = (abs(y1 - y0));
err = (dx / 2);
if (y0 < y1)
yi = 1;
else
yi = -1;
j = y0;
for (i = x0; i < x1; i++)
{
if (stp)
*VGAPIX(j, i) = col;
else
*VGAPIX(i, j) = col;
err -= dy;
if (err < 0) {
j += yi;
err += dx;
}
}
}
void drw_rectl(int x, int y, int w, int h, enum COLORS col)
{
drw_line(x, y, x+w, y, col);
drw_line(x+w, y, x+w, y+h, col);
drw_line(x, y, x, y+h, col);
drw_line(x, y+h, x+w+1, y+h, col);
}
void drw_rectf(int x, int y, int w, int h, enum COLORS col)
{
int i, j;
for (j = y; j < x+h; j++) {
for (i = x; i < y+w; i++) {
*VGAPIX(i, j) = col;
}
}
}
void drw_circl(int x, int y, int rad, enum COLORS col)
{
int mang, i; // max angle, haha
int px, py;
mang = 360; // Yeah yeah I'll switch to rad later
for (i = 0; i <= mang; i++)
{
px = cos(i)*rad + x; // + px; // causes some really cools effects! :D
py = sin(i)*rad + y; // + py;
*VGAPIX(px, py) = col;
}
}
void drw_tex(int x, int y, int w, int h, enum COLORS tex[])
{ // i*w+j
int i, j;
for (i = 0; i < w; i++)
{
for (j = 0; j < h; j++)
{
*VGAPIX(x+i, y+j) = tex[j*w+i];
}
}
}
void 2D_init(void)
{
set_vgabasemem();
drw_chdis(0x13);
}
void 2D_exit(void)
{
drw_chdis(3);
}
/*
int main()
{
set_vgabasemem();
drw_chdis(0x13);
while(!kbhit()) {
if ((getch()) == 0x1b) // escape
break;
}
drw_chdis(3);
return 0;
}
*/

samples/C/2D.H (new file)

@@ -0,0 +1,29 @@
#ifndef __2DGFX
#define __2DGFX
// Includes
#include <stdio.h>
#include <math.h>
#include <conio.h>
#include <dpmi.h>
// Defines
#define VGAPIX(x,y) (vgabasemem + (x) + (y) * 320)
// Variables
char * vgabasemem;
DPMI_REGS regs;
// Drawing functions:
//void setvgabasemem(void);
void drw_chdis(int mode); // draw_func_change_display
void drw_pix(int x, int y, enum COLORS col);
void drw_line(int x0, int y0, int x1, int y1, enum COLORS col);
void drw_rectl(int x, int y, int w, int h, enum COLORS col);
void drw_rectf(int x, int y, int w, int h, enum COLORS col);
void drw_cirl(int x, int y, int rad, enum COLORS col);
void drw_tex(int x, int y, int w, int h, enum COLORS tex[]);
void 2D_init(void);
void 2D_exit(void);
#endif

samples/C/ArrowLeft.h (new file)

@@ -0,0 +1,93 @@
/*
* This file is part of GTK++ (libGTK++)
* Copyright © 2012 Rachel Mant (dx-mon@users.sourceforge.net)
*
* GTK++ is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GTK++ is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* GdkPixbuf RGBA C-Source image dump */
#ifdef __SUNPRO_C
#pragma align 4 (ArrowLeft)
#endif
#ifdef __GNUC__
static const uint8_t ArrowLeft[] __attribute__ ((__aligned__ (4))) =
#else
static const uint8_t ArrowLeft[] =
#endif
{ ""
/* Pixbuf magic (0x47646b50) */
"GdkP"
/* length: header (24) + pixel_data (1600) */
"\0\0\6X"
/* pixdata_type (0x1010002) */
"\1\1\0\2"
/* rowstride (80) */
"\0\0\0P"
/* width (20) */
"\0\0\0\24"
/* height (20) */
"\0\0\0\24"
/* pixel_data: */
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\377\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\377\0\0\0\377\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\377\0\0\0\377\0\0\0\377"
"\0\0\0\377\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377"
"\0\0\0\377\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\377\0\0\0\377\0\0\0\377\0"
"\0\0\377\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\377\0\0\0\377\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\377\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
"\0\0"};

samples/C/GLKMatrix4.h (new file)

@@ -0,0 +1,903 @@
//
// GLKMatrix4.h
// GLKit
//
// Copyright (c) 2011, Apple Inc. All rights reserved.
//
#ifndef __GLK_MATRIX_4_H
#define __GLK_MATRIX_4_H
#include <stddef.h>
#include <stdbool.h>
#include <math.h>
#if defined(__ARM_NEON__)
#include <arm_neon.h>
#endif
#include <GLKit/GLKMathTypes.h>
#include <GLKit/GLKVector3.h>
#include <GLKit/GLKVector4.h>
#include <GLKit/GLKQuaternion.h>
#ifdef __cplusplus
extern "C" {
#endif
#pragma mark -
#pragma mark Prototypes
#pragma mark -
extern const GLKMatrix4 GLKMatrix4Identity;
/*
m30, m31, and m32 correspond to the translation values tx, ty, tz, respectively.
*/
static __inline__ GLKMatrix4 GLKMatrix4Make(float m00, float m01, float m02, float m03,
float m10, float m11, float m12, float m13,
float m20, float m21, float m22, float m23,
float m30, float m31, float m32, float m33);
/*
m03, m13, and m23 correspond to the translation values tx, ty, tz, respectively.
*/
static __inline__ GLKMatrix4 GLKMatrix4MakeAndTranspose(float m00, float m01, float m02, float m03,
float m10, float m11, float m12, float m13,
float m20, float m21, float m22, float m23,
float m30, float m31, float m32, float m33);
/*
m[12], m[13], and m[14] correspond to the translation values tx, ty, and tz, respectively.
*/
static __inline__ GLKMatrix4 GLKMatrix4MakeWithArray(float values[16]);
/*
m[3], m[7], and m[11] correspond to the translation values tx, ty, and tz, respectively.
*/
static __inline__ GLKMatrix4 GLKMatrix4MakeWithArrayAndTranspose(float values[16]);
/*
row0, row1, and row2's last component should correspond to the translation values tx, ty, and tz, respectively.
*/
static __inline__ GLKMatrix4 GLKMatrix4MakeWithRows(GLKVector4 row0,
GLKVector4 row1,
GLKVector4 row2,
GLKVector4 row3);
/*
column3's first three components should correspond to the translation values tx, ty, and tz.
*/
static __inline__ GLKMatrix4 GLKMatrix4MakeWithColumns(GLKVector4 column0,
GLKVector4 column1,
GLKVector4 column2,
GLKVector4 column3);
/*
The quaternion will be normalized before conversion.
*/
static __inline__ GLKMatrix4 GLKMatrix4MakeWithQuaternion(GLKQuaternion quaternion);
static __inline__ GLKMatrix4 GLKMatrix4MakeTranslation(float tx, float ty, float tz);
static __inline__ GLKMatrix4 GLKMatrix4MakeScale(float sx, float sy, float sz);
static __inline__ GLKMatrix4 GLKMatrix4MakeRotation(float radians, float x, float y, float z);
static __inline__ GLKMatrix4 GLKMatrix4MakeXRotation(float radians);
static __inline__ GLKMatrix4 GLKMatrix4MakeYRotation(float radians);
static __inline__ GLKMatrix4 GLKMatrix4MakeZRotation(float radians);
/*
Equivalent to gluPerspective.
*/
static __inline__ GLKMatrix4 GLKMatrix4MakePerspective(float fovyRadians, float aspect, float nearZ, float farZ);
/*
Equivalent to glFrustum.
*/
static __inline__ GLKMatrix4 GLKMatrix4MakeFrustum(float left, float right,
float bottom, float top,
float nearZ, float farZ);
/*
Equivalent to glOrtho.
*/
static __inline__ GLKMatrix4 GLKMatrix4MakeOrtho(float left, float right,
float bottom, float top,
float nearZ, float farZ);
/*
Equivalent to gluLookAt.
*/
static __inline__ GLKMatrix4 GLKMatrix4MakeLookAt(float eyeX, float eyeY, float eyeZ,
float centerX, float centerY, float centerZ,
float upX, float upY, float upZ);
/*
Returns the upper left 3x3 portion of the 4x4 matrix.
*/
static __inline__ GLKMatrix3 GLKMatrix4GetMatrix3(GLKMatrix4 matrix);
/*
Returns the upper left 2x2 portion of the 4x4 matrix.
*/
static __inline__ GLKMatrix2 GLKMatrix4GetMatrix2(GLKMatrix4 matrix);
/*
GLKMatrix4GetRow returns vectors for rows 0, 1, and 2 whose last component will be the translation value tx, ty, and tz, respectively.
Valid row values range from 0 to 3, inclusive.
*/
static __inline__ GLKVector4 GLKMatrix4GetRow(GLKMatrix4 matrix, int row);
/*
GLKMatrix4GetColumn returns a vector for column 3 whose first three components will be the translation values tx, ty, and tz.
Valid column values range from 0 to 3, inclusive.
*/
static __inline__ GLKVector4 GLKMatrix4GetColumn(GLKMatrix4 matrix, int column);
/*
GLKMatrix4SetRow expects that the vector for row 0, 1, and 2 will have a translation value as its last component.
Valid row values range from 0 to 3, inclusive.
*/
static __inline__ GLKMatrix4 GLKMatrix4SetRow(GLKMatrix4 matrix, int row, GLKVector4 vector);
/*
GLKMatrix4SetColumn expects that the vector for column 3 will contain the translation values tx, ty, and tz as its first three components, respectively.
Valid column values range from 0 to 3, inclusive.
*/
static __inline__ GLKMatrix4 GLKMatrix4SetColumn(GLKMatrix4 matrix, int column, GLKVector4 vector);
static __inline__ GLKMatrix4 GLKMatrix4Transpose(GLKMatrix4 matrix);
GLKMatrix4 GLKMatrix4Invert(GLKMatrix4 matrix, bool *isInvertible);
GLKMatrix4 GLKMatrix4InvertAndTranspose(GLKMatrix4 matrix, bool *isInvertible);
static __inline__ GLKMatrix4 GLKMatrix4Multiply(GLKMatrix4 matrixLeft, GLKMatrix4 matrixRight);
static __inline__ GLKMatrix4 GLKMatrix4Add(GLKMatrix4 matrixLeft, GLKMatrix4 matrixRight);
static __inline__ GLKMatrix4 GLKMatrix4Subtract(GLKMatrix4 matrixLeft, GLKMatrix4 matrixRight);
static __inline__ GLKMatrix4 GLKMatrix4Translate(GLKMatrix4 matrix, float tx, float ty, float tz);
static __inline__ GLKMatrix4 GLKMatrix4TranslateWithVector3(GLKMatrix4 matrix, GLKVector3 translationVector);
/*
The last component of the GLKVector4, translationVector, is ignored.
*/
static __inline__ GLKMatrix4 GLKMatrix4TranslateWithVector4(GLKMatrix4 matrix, GLKVector4 translationVector);
static __inline__ GLKMatrix4 GLKMatrix4Scale(GLKMatrix4 matrix, float sx, float sy, float sz);
static __inline__ GLKMatrix4 GLKMatrix4ScaleWithVector3(GLKMatrix4 matrix, GLKVector3 scaleVector);
/*
The last component of the GLKVector4, scaleVector, is ignored.
*/
static __inline__ GLKMatrix4 GLKMatrix4ScaleWithVector4(GLKMatrix4 matrix, GLKVector4 scaleVector);
static __inline__ GLKMatrix4 GLKMatrix4Rotate(GLKMatrix4 matrix, float radians, float x, float y, float z);
static __inline__ GLKMatrix4 GLKMatrix4RotateWithVector3(GLKMatrix4 matrix, float radians, GLKVector3 axisVector);
/*
The last component of the GLKVector4, axisVector, is ignored.
*/
static __inline__ GLKMatrix4 GLKMatrix4RotateWithVector4(GLKMatrix4 matrix, float radians, GLKVector4 axisVector);
static __inline__ GLKMatrix4 GLKMatrix4RotateX(GLKMatrix4 matrix, float radians);
static __inline__ GLKMatrix4 GLKMatrix4RotateY(GLKMatrix4 matrix, float radians);
static __inline__ GLKMatrix4 GLKMatrix4RotateZ(GLKMatrix4 matrix, float radians);
/*
Assumes 0 in the w component.
*/
static __inline__ GLKVector3 GLKMatrix4MultiplyVector3(GLKMatrix4 matrixLeft, GLKVector3 vectorRight);
/*
Assumes 1 in the w component.
*/
static __inline__ GLKVector3 GLKMatrix4MultiplyVector3WithTranslation(GLKMatrix4 matrixLeft, GLKVector3 vectorRight);
/*
Assumes 1 in the w component and divides the resulting vector by w before returning.
*/
static __inline__ GLKVector3 GLKMatrix4MultiplyAndProjectVector3(GLKMatrix4 matrixLeft, GLKVector3 vectorRight);
/*
Assumes 0 in the w component.
*/
static __inline__ void GLKMatrix4MultiplyVector3Array(GLKMatrix4 matrix, GLKVector3 *vectors, size_t vectorCount);
/*
Assumes 1 in the w component.
*/
static __inline__ void GLKMatrix4MultiplyVector3ArrayWithTranslation(GLKMatrix4 matrix, GLKVector3 *vectors, size_t vectorCount);
/*
Assumes 1 in the w component and divides the resulting vector by w before returning.
*/
static __inline__ void GLKMatrix4MultiplyAndProjectVector3Array(GLKMatrix4 matrix, GLKVector3 *vectors, size_t vectorCount);
static __inline__ GLKVector4 GLKMatrix4MultiplyVector4(GLKMatrix4 matrixLeft, GLKVector4 vectorRight);
static __inline__ void GLKMatrix4MultiplyVector4Array(GLKMatrix4 matrix, GLKVector4 *vectors, size_t vectorCount);
#pragma mark -
#pragma mark Implementations
#pragma mark -
static __inline__ GLKMatrix4 GLKMatrix4Make(float m00, float m01, float m02, float m03,
float m10, float m11, float m12, float m13,
float m20, float m21, float m22, float m23,
float m30, float m31, float m32, float m33)
{
GLKMatrix4 m = { m00, m01, m02, m03,
m10, m11, m12, m13,
m20, m21, m22, m23,
m30, m31, m32, m33 };
return m;
}
static __inline__ GLKMatrix4 GLKMatrix4MakeAndTranspose(float m00, float m01, float m02, float m03,
float m10, float m11, float m12, float m13,
float m20, float m21, float m22, float m23,
float m30, float m31, float m32, float m33)
{
GLKMatrix4 m = { m00, m10, m20, m30,
m01, m11, m21, m31,
m02, m12, m22, m32,
m03, m13, m23, m33 };
return m;
}
static __inline__ GLKMatrix4 GLKMatrix4MakeWithArray(float values[16])
{
GLKMatrix4 m = { values[0], values[1], values[2], values[3],
values[4], values[5], values[6], values[7],
values[8], values[9], values[10], values[11],
values[12], values[13], values[14], values[15] };
return m;
}
static __inline__ GLKMatrix4 GLKMatrix4MakeWithArrayAndTranspose(float values[16])
{
#if defined(__ARM_NEON__)
float32x4x4_t m = vld4q_f32(values);
return *(GLKMatrix4 *)&m;
#else
GLKMatrix4 m = { values[0], values[4], values[8], values[12],
values[1], values[5], values[9], values[13],
values[2], values[6], values[10], values[14],
values[3], values[7], values[11], values[15] };
return m;
#endif
}
static __inline__ GLKMatrix4 GLKMatrix4MakeWithRows(GLKVector4 row0,
GLKVector4 row1,
GLKVector4 row2,
GLKVector4 row3)
{
GLKMatrix4 m = { row0.v[0], row1.v[0], row2.v[0], row3.v[0],
row0.v[1], row1.v[1], row2.v[1], row3.v[1],
row0.v[2], row1.v[2], row2.v[2], row3.v[2],
row0.v[3], row1.v[3], row2.v[3], row3.v[3] };
return m;
}
static __inline__ GLKMatrix4 GLKMatrix4MakeWithColumns(GLKVector4 column0,
GLKVector4 column1,
GLKVector4 column2,
GLKVector4 column3)
{
#if defined(__ARM_NEON__)
float32x4x4_t m;
m.val[0] = vld1q_f32(column0.v);
m.val[1] = vld1q_f32(column1.v);
m.val[2] = vld1q_f32(column2.v);
m.val[3] = vld1q_f32(column3.v);
return *(GLKMatrix4 *)&m;
#else
GLKMatrix4 m = { column0.v[0], column0.v[1], column0.v[2], column0.v[3],
column1.v[0], column1.v[1], column1.v[2], column1.v[3],
column2.v[0], column2.v[1], column2.v[2], column2.v[3],
column3.v[0], column3.v[1], column3.v[2], column3.v[3] };
return m;
#endif
}
static __inline__ GLKMatrix4 GLKMatrix4MakeWithQuaternion(GLKQuaternion quaternion)
{
quaternion = GLKQuaternionNormalize(quaternion);
float x = quaternion.q[0];
float y = quaternion.q[1];
float z = quaternion.q[2];
float w = quaternion.q[3];
float _2x = x + x;
float _2y = y + y;
float _2z = z + z;
float _2w = w + w;
GLKMatrix4 m = { 1.0f - _2y * y - _2z * z,
_2x * y + _2w * z,
_2x * z - _2w * y,
0.0f,
_2x * y - _2w * z,
1.0f - _2x * x - _2z * z,
_2y * z + _2w * x,
0.0f,
_2x * z + _2w * y,
_2y * z - _2w * x,
1.0f - _2x * x - _2y * y,
0.0f,
0.0f,
0.0f,
0.0f,
1.0f };
return m;
}
static __inline__ GLKMatrix4 GLKMatrix4MakeTranslation(float tx, float ty, float tz)
{
GLKMatrix4 m = GLKMatrix4Identity;
m.m[12] = tx;
m.m[13] = ty;
m.m[14] = tz;
return m;
}
static __inline__ GLKMatrix4 GLKMatrix4MakeScale(float sx, float sy, float sz)
{
GLKMatrix4 m = GLKMatrix4Identity;
m.m[0] = sx;
m.m[5] = sy;
m.m[10] = sz;
return m;
}
static __inline__ GLKMatrix4 GLKMatrix4MakeRotation(float radians, float x, float y, float z)
{
GLKVector3 v = GLKVector3Normalize(GLKVector3Make(x, y, z));
float cos = cosf(radians);
float cosp = 1.0f - cos;
float sin = sinf(radians);
GLKMatrix4 m = { cos + cosp * v.v[0] * v.v[0],
cosp * v.v[0] * v.v[1] + v.v[2] * sin,
cosp * v.v[0] * v.v[2] - v.v[1] * sin,
0.0f,
cosp * v.v[0] * v.v[1] - v.v[2] * sin,
cos + cosp * v.v[1] * v.v[1],
cosp * v.v[1] * v.v[2] + v.v[0] * sin,
0.0f,
cosp * v.v[0] * v.v[2] + v.v[1] * sin,
cosp * v.v[1] * v.v[2] - v.v[0] * sin,
cos + cosp * v.v[2] * v.v[2],
0.0f,
0.0f,
0.0f,
0.0f,
1.0f };
return m;
}
static __inline__ GLKMatrix4 GLKMatrix4MakeXRotation(float radians)
{
float cos = cosf(radians);
float sin = sinf(radians);
GLKMatrix4 m = { 1.0f, 0.0f, 0.0f, 0.0f,
0.0f, cos, sin, 0.0f,
0.0f, -sin, cos, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f };
return m;
}
static __inline__ GLKMatrix4 GLKMatrix4MakeYRotation(float radians)
{
float cos = cosf(radians);
float sin = sinf(radians);
GLKMatrix4 m = { cos, 0.0f, -sin, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
sin, 0.0f, cos, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f };
return m;
}
static __inline__ GLKMatrix4 GLKMatrix4MakeZRotation(float radians)
{
float cos = cosf(radians);
float sin = sinf(radians);
GLKMatrix4 m = { cos, sin, 0.0f, 0.0f,
-sin, cos, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f };
return m;
}
static __inline__ GLKMatrix4 GLKMatrix4MakePerspective(float fovyRadians, float aspect, float nearZ, float farZ)
{
float cotan = 1.0f / tanf(fovyRadians / 2.0f);
GLKMatrix4 m = { cotan / aspect, 0.0f, 0.0f, 0.0f,
0.0f, cotan, 0.0f, 0.0f,
0.0f, 0.0f, (farZ + nearZ) / (nearZ - farZ), -1.0f,
0.0f, 0.0f, (2.0f * farZ * nearZ) / (nearZ - farZ), 0.0f };
return m;
}
static __inline__ GLKMatrix4 GLKMatrix4MakeFrustum(float left, float right,
float bottom, float top,
float nearZ, float farZ)
{
float ral = right + left;
float rsl = right - left;
float tsb = top - bottom;
float tab = top + bottom;
float fan = farZ + nearZ;
float fsn = farZ - nearZ;
GLKMatrix4 m = { 2.0f * nearZ / rsl, 0.0f, 0.0f, 0.0f,
0.0f, 2.0f * nearZ / tsb, 0.0f, 0.0f,
ral / rsl, tab / tsb, -fan / fsn, -1.0f,
0.0f, 0.0f, (-2.0f * farZ * nearZ) / fsn, 0.0f };
return m;
}
static __inline__ GLKMatrix4 GLKMatrix4MakeOrtho(float left, float right,
float bottom, float top,
float nearZ, float farZ)
{
float ral = right + left;
float rsl = right - left;
float tab = top + bottom;
float tsb = top - bottom;
float fan = farZ + nearZ;
float fsn = farZ - nearZ;
GLKMatrix4 m = { 2.0f / rsl, 0.0f, 0.0f, 0.0f,
0.0f, 2.0f / tsb, 0.0f, 0.0f,
0.0f, 0.0f, -2.0f / fsn, 0.0f,
-ral / rsl, -tab / tsb, -fan / fsn, 1.0f };
return m;
}
static __inline__ GLKMatrix4 GLKMatrix4MakeLookAt(float eyeX, float eyeY, float eyeZ,
float centerX, float centerY, float centerZ,
float upX, float upY, float upZ)
{
GLKVector3 ev = { eyeX, eyeY, eyeZ };
GLKVector3 cv = { centerX, centerY, centerZ };
GLKVector3 uv = { upX, upY, upZ };
GLKVector3 n = GLKVector3Normalize(GLKVector3Add(ev, GLKVector3Negate(cv)));
GLKVector3 u = GLKVector3Normalize(GLKVector3CrossProduct(uv, n));
GLKVector3 v = GLKVector3CrossProduct(n, u);
GLKMatrix4 m = { u.v[0], v.v[0], n.v[0], 0.0f,
u.v[1], v.v[1], n.v[1], 0.0f,
u.v[2], v.v[2], n.v[2], 0.0f,
GLKVector3DotProduct(GLKVector3Negate(u), ev),
GLKVector3DotProduct(GLKVector3Negate(v), ev),
GLKVector3DotProduct(GLKVector3Negate(n), ev),
1.0f };
return m;
}
static __inline__ GLKMatrix3 GLKMatrix4GetMatrix3(GLKMatrix4 matrix)
{
GLKMatrix3 m = { matrix.m[0], matrix.m[1], matrix.m[2],
matrix.m[4], matrix.m[5], matrix.m[6],
matrix.m[8], matrix.m[9], matrix.m[10] };
return m;
}
static __inline__ GLKMatrix2 GLKMatrix4GetMatrix2(GLKMatrix4 matrix)
{
GLKMatrix2 m = { matrix.m[0], matrix.m[1],
matrix.m[4], matrix.m[5] };
return m;
}
static __inline__ GLKVector4 GLKMatrix4GetRow(GLKMatrix4 matrix, int row)
{
GLKVector4 v = { matrix.m[row], matrix.m[4 + row], matrix.m[8 + row], matrix.m[12 + row] };
return v;
}
static __inline__ GLKVector4 GLKMatrix4GetColumn(GLKMatrix4 matrix, int column)
{
#if defined(__ARM_NEON__)
float32x4_t v = vld1q_f32(&(matrix.m[column * 4]));
return *(GLKVector4 *)&v;
#else
GLKVector4 v = { matrix.m[column * 4 + 0], matrix.m[column * 4 + 1], matrix.m[column * 4 + 2], matrix.m[column * 4 + 3] };
return v;
#endif
}
static __inline__ GLKMatrix4 GLKMatrix4SetRow(GLKMatrix4 matrix, int row, GLKVector4 vector)
{
matrix.m[row] = vector.v[0];
matrix.m[row + 4] = vector.v[1];
matrix.m[row + 8] = vector.v[2];
matrix.m[row + 12] = vector.v[3];
return matrix;
}
static __inline__ GLKMatrix4 GLKMatrix4SetColumn(GLKMatrix4 matrix, int column, GLKVector4 vector)
{
#if defined(__ARM_NEON__)
float *dst = &(matrix.m[column * 4]);
vst1q_f32(dst, vld1q_f32(vector.v));
return matrix;
#else
matrix.m[column * 4 + 0] = vector.v[0];
matrix.m[column * 4 + 1] = vector.v[1];
matrix.m[column * 4 + 2] = vector.v[2];
matrix.m[column * 4 + 3] = vector.v[3];
return matrix;
#endif
}
static __inline__ GLKMatrix4 GLKMatrix4Transpose(GLKMatrix4 matrix)
{
#if defined(__ARM_NEON__)
float32x4x4_t m = vld4q_f32(matrix.m);
return *(GLKMatrix4 *)&m;
#else
GLKMatrix4 m = { matrix.m[0], matrix.m[4], matrix.m[8], matrix.m[12],
matrix.m[1], matrix.m[5], matrix.m[9], matrix.m[13],
matrix.m[2], matrix.m[6], matrix.m[10], matrix.m[14],
matrix.m[3], matrix.m[7], matrix.m[11], matrix.m[15] };
return m;
#endif
}
static __inline__ GLKMatrix4 GLKMatrix4Multiply(GLKMatrix4 matrixLeft, GLKMatrix4 matrixRight)
{
#if defined(__ARM_NEON__)
float32x4x4_t iMatrixLeft = *(float32x4x4_t *)&matrixLeft;
float32x4x4_t iMatrixRight = *(float32x4x4_t *)&matrixRight;
float32x4x4_t m;
m.val[0] = vmulq_n_f32(iMatrixLeft.val[0], vgetq_lane_f32(iMatrixRight.val[0], 0));
m.val[1] = vmulq_n_f32(iMatrixLeft.val[0], vgetq_lane_f32(iMatrixRight.val[1], 0));
m.val[2] = vmulq_n_f32(iMatrixLeft.val[0], vgetq_lane_f32(iMatrixRight.val[2], 0));
m.val[3] = vmulq_n_f32(iMatrixLeft.val[0], vgetq_lane_f32(iMatrixRight.val[3], 0));
m.val[0] = vmlaq_n_f32(m.val[0], iMatrixLeft.val[1], vgetq_lane_f32(iMatrixRight.val[0], 1));
m.val[1] = vmlaq_n_f32(m.val[1], iMatrixLeft.val[1], vgetq_lane_f32(iMatrixRight.val[1], 1));
m.val[2] = vmlaq_n_f32(m.val[2], iMatrixLeft.val[1], vgetq_lane_f32(iMatrixRight.val[2], 1));
m.val[3] = vmlaq_n_f32(m.val[3], iMatrixLeft.val[1], vgetq_lane_f32(iMatrixRight.val[3], 1));
m.val[0] = vmlaq_n_f32(m.val[0], iMatrixLeft.val[2], vgetq_lane_f32(iMatrixRight.val[0], 2));
m.val[1] = vmlaq_n_f32(m.val[1], iMatrixLeft.val[2], vgetq_lane_f32(iMatrixRight.val[1], 2));
m.val[2] = vmlaq_n_f32(m.val[2], iMatrixLeft.val[2], vgetq_lane_f32(iMatrixRight.val[2], 2));
m.val[3] = vmlaq_n_f32(m.val[3], iMatrixLeft.val[2], vgetq_lane_f32(iMatrixRight.val[3], 2));
m.val[0] = vmlaq_n_f32(m.val[0], iMatrixLeft.val[3], vgetq_lane_f32(iMatrixRight.val[0], 3));
m.val[1] = vmlaq_n_f32(m.val[1], iMatrixLeft.val[3], vgetq_lane_f32(iMatrixRight.val[1], 3));
m.val[2] = vmlaq_n_f32(m.val[2], iMatrixLeft.val[3], vgetq_lane_f32(iMatrixRight.val[2], 3));
m.val[3] = vmlaq_n_f32(m.val[3], iMatrixLeft.val[3], vgetq_lane_f32(iMatrixRight.val[3], 3));
return *(GLKMatrix4 *)&m;
#else
GLKMatrix4 m;
m.m[0] = matrixLeft.m[0] * matrixRight.m[0] + matrixLeft.m[4] * matrixRight.m[1] + matrixLeft.m[8] * matrixRight.m[2] + matrixLeft.m[12] * matrixRight.m[3];
m.m[4] = matrixLeft.m[0] * matrixRight.m[4] + matrixLeft.m[4] * matrixRight.m[5] + matrixLeft.m[8] * matrixRight.m[6] + matrixLeft.m[12] * matrixRight.m[7];
m.m[8] = matrixLeft.m[0] * matrixRight.m[8] + matrixLeft.m[4] * matrixRight.m[9] + matrixLeft.m[8] * matrixRight.m[10] + matrixLeft.m[12] * matrixRight.m[11];
m.m[12] = matrixLeft.m[0] * matrixRight.m[12] + matrixLeft.m[4] * matrixRight.m[13] + matrixLeft.m[8] * matrixRight.m[14] + matrixLeft.m[12] * matrixRight.m[15];
m.m[1] = matrixLeft.m[1] * matrixRight.m[0] + matrixLeft.m[5] * matrixRight.m[1] + matrixLeft.m[9] * matrixRight.m[2] + matrixLeft.m[13] * matrixRight.m[3];
m.m[5] = matrixLeft.m[1] * matrixRight.m[4] + matrixLeft.m[5] * matrixRight.m[5] + matrixLeft.m[9] * matrixRight.m[6] + matrixLeft.m[13] * matrixRight.m[7];
m.m[9] = matrixLeft.m[1] * matrixRight.m[8] + matrixLeft.m[5] * matrixRight.m[9] + matrixLeft.m[9] * matrixRight.m[10] + matrixLeft.m[13] * matrixRight.m[11];
m.m[13] = matrixLeft.m[1] * matrixRight.m[12] + matrixLeft.m[5] * matrixRight.m[13] + matrixLeft.m[9] * matrixRight.m[14] + matrixLeft.m[13] * matrixRight.m[15];
m.m[2] = matrixLeft.m[2] * matrixRight.m[0] + matrixLeft.m[6] * matrixRight.m[1] + matrixLeft.m[10] * matrixRight.m[2] + matrixLeft.m[14] * matrixRight.m[3];
m.m[6] = matrixLeft.m[2] * matrixRight.m[4] + matrixLeft.m[6] * matrixRight.m[5] + matrixLeft.m[10] * matrixRight.m[6] + matrixLeft.m[14] * matrixRight.m[7];
m.m[10] = matrixLeft.m[2] * matrixRight.m[8] + matrixLeft.m[6] * matrixRight.m[9] + matrixLeft.m[10] * matrixRight.m[10] + matrixLeft.m[14] * matrixRight.m[11];
m.m[14] = matrixLeft.m[2] * matrixRight.m[12] + matrixLeft.m[6] * matrixRight.m[13] + matrixLeft.m[10] * matrixRight.m[14] + matrixLeft.m[14] * matrixRight.m[15];
m.m[3] = matrixLeft.m[3] * matrixRight.m[0] + matrixLeft.m[7] * matrixRight.m[1] + matrixLeft.m[11] * matrixRight.m[2] + matrixLeft.m[15] * matrixRight.m[3];
m.m[7] = matrixLeft.m[3] * matrixRight.m[4] + matrixLeft.m[7] * matrixRight.m[5] + matrixLeft.m[11] * matrixRight.m[6] + matrixLeft.m[15] * matrixRight.m[7];
m.m[11] = matrixLeft.m[3] * matrixRight.m[8] + matrixLeft.m[7] * matrixRight.m[9] + matrixLeft.m[11] * matrixRight.m[10] + matrixLeft.m[15] * matrixRight.m[11];
m.m[15] = matrixLeft.m[3] * matrixRight.m[12] + matrixLeft.m[7] * matrixRight.m[13] + matrixLeft.m[11] * matrixRight.m[14] + matrixLeft.m[15] * matrixRight.m[15];
return m;
#endif
}
static __inline__ GLKMatrix4 GLKMatrix4Add(GLKMatrix4 matrixLeft, GLKMatrix4 matrixRight)
{
#if defined(__ARM_NEON__)
float32x4x4_t iMatrixLeft = *(float32x4x4_t *)&matrixLeft;
float32x4x4_t iMatrixRight = *(float32x4x4_t *)&matrixRight;
float32x4x4_t m;
m.val[0] = vaddq_f32(iMatrixLeft.val[0], iMatrixRight.val[0]);
m.val[1] = vaddq_f32(iMatrixLeft.val[1], iMatrixRight.val[1]);
m.val[2] = vaddq_f32(iMatrixLeft.val[2], iMatrixRight.val[2]);
m.val[3] = vaddq_f32(iMatrixLeft.val[3], iMatrixRight.val[3]);
return *(GLKMatrix4 *)&m;
#else
GLKMatrix4 m;
m.m[0] = matrixLeft.m[0] + matrixRight.m[0];
m.m[1] = matrixLeft.m[1] + matrixRight.m[1];
m.m[2] = matrixLeft.m[2] + matrixRight.m[2];
m.m[3] = matrixLeft.m[3] + matrixRight.m[3];
m.m[4] = matrixLeft.m[4] + matrixRight.m[4];
m.m[5] = matrixLeft.m[5] + matrixRight.m[5];
m.m[6] = matrixLeft.m[6] + matrixRight.m[6];
m.m[7] = matrixLeft.m[7] + matrixRight.m[7];
m.m[8] = matrixLeft.m[8] + matrixRight.m[8];
m.m[9] = matrixLeft.m[9] + matrixRight.m[9];
m.m[10] = matrixLeft.m[10] + matrixRight.m[10];
m.m[11] = matrixLeft.m[11] + matrixRight.m[11];
m.m[12] = matrixLeft.m[12] + matrixRight.m[12];
m.m[13] = matrixLeft.m[13] + matrixRight.m[13];
m.m[14] = matrixLeft.m[14] + matrixRight.m[14];
m.m[15] = matrixLeft.m[15] + matrixRight.m[15];
return m;
#endif
}
static __inline__ GLKMatrix4 GLKMatrix4Subtract(GLKMatrix4 matrixLeft, GLKMatrix4 matrixRight)
{
#if defined(__ARM_NEON__)
float32x4x4_t iMatrixLeft = *(float32x4x4_t *)&matrixLeft;
float32x4x4_t iMatrixRight = *(float32x4x4_t *)&matrixRight;
float32x4x4_t m;
m.val[0] = vsubq_f32(iMatrixLeft.val[0], iMatrixRight.val[0]);
m.val[1] = vsubq_f32(iMatrixLeft.val[1], iMatrixRight.val[1]);
m.val[2] = vsubq_f32(iMatrixLeft.val[2], iMatrixRight.val[2]);
m.val[3] = vsubq_f32(iMatrixLeft.val[3], iMatrixRight.val[3]);
return *(GLKMatrix4 *)&m;
#else
GLKMatrix4 m;
m.m[0] = matrixLeft.m[0] - matrixRight.m[0];
m.m[1] = matrixLeft.m[1] - matrixRight.m[1];
m.m[2] = matrixLeft.m[2] - matrixRight.m[2];
m.m[3] = matrixLeft.m[3] - matrixRight.m[3];
m.m[4] = matrixLeft.m[4] - matrixRight.m[4];
m.m[5] = matrixLeft.m[5] - matrixRight.m[5];
m.m[6] = matrixLeft.m[6] - matrixRight.m[6];
m.m[7] = matrixLeft.m[7] - matrixRight.m[7];
m.m[8] = matrixLeft.m[8] - matrixRight.m[8];
m.m[9] = matrixLeft.m[9] - matrixRight.m[9];
m.m[10] = matrixLeft.m[10] - matrixRight.m[10];
m.m[11] = matrixLeft.m[11] - matrixRight.m[11];
m.m[12] = matrixLeft.m[12] - matrixRight.m[12];
m.m[13] = matrixLeft.m[13] - matrixRight.m[13];
m.m[14] = matrixLeft.m[14] - matrixRight.m[14];
m.m[15] = matrixLeft.m[15] - matrixRight.m[15];
return m;
#endif
}
static __inline__ GLKMatrix4 GLKMatrix4Translate(GLKMatrix4 matrix, float tx, float ty, float tz)
{
GLKMatrix4 m = { matrix.m[0], matrix.m[1], matrix.m[2], matrix.m[3],
matrix.m[4], matrix.m[5], matrix.m[6], matrix.m[7],
matrix.m[8], matrix.m[9], matrix.m[10], matrix.m[11],
matrix.m[0] * tx + matrix.m[4] * ty + matrix.m[8] * tz + matrix.m[12],
matrix.m[1] * tx + matrix.m[5] * ty + matrix.m[9] * tz + matrix.m[13],
matrix.m[2] * tx + matrix.m[6] * ty + matrix.m[10] * tz + matrix.m[14],
matrix.m[15] };
return m;
}
static __inline__ GLKMatrix4 GLKMatrix4TranslateWithVector3(GLKMatrix4 matrix, GLKVector3 translationVector)
{
GLKMatrix4 m = { matrix.m[0], matrix.m[1], matrix.m[2], matrix.m[3],
matrix.m[4], matrix.m[5], matrix.m[6], matrix.m[7],
matrix.m[8], matrix.m[9], matrix.m[10], matrix.m[11],
matrix.m[0] * translationVector.v[0] + matrix.m[4] * translationVector.v[1] + matrix.m[8] * translationVector.v[2] + matrix.m[12],
matrix.m[1] * translationVector.v[0] + matrix.m[5] * translationVector.v[1] + matrix.m[9] * translationVector.v[2] + matrix.m[13],
matrix.m[2] * translationVector.v[0] + matrix.m[6] * translationVector.v[1] + matrix.m[10] * translationVector.v[2] + matrix.m[14],
matrix.m[15] };
return m;
}
static __inline__ GLKMatrix4 GLKMatrix4TranslateWithVector4(GLKMatrix4 matrix, GLKVector4 translationVector)
{
GLKMatrix4 m = { matrix.m[0], matrix.m[1], matrix.m[2], matrix.m[3],
matrix.m[4], matrix.m[5], matrix.m[6], matrix.m[7],
matrix.m[8], matrix.m[9], matrix.m[10], matrix.m[11],
matrix.m[0] * translationVector.v[0] + matrix.m[4] * translationVector.v[1] + matrix.m[8] * translationVector.v[2] + matrix.m[12],
matrix.m[1] * translationVector.v[0] + matrix.m[5] * translationVector.v[1] + matrix.m[9] * translationVector.v[2] + matrix.m[13],
matrix.m[2] * translationVector.v[0] + matrix.m[6] * translationVector.v[1] + matrix.m[10] * translationVector.v[2] + matrix.m[14],
matrix.m[15] };
return m;
}
static __inline__ GLKMatrix4 GLKMatrix4Scale(GLKMatrix4 matrix, float sx, float sy, float sz)
{
#if defined(__ARM_NEON__)
float32x4x4_t iMatrix = *(float32x4x4_t *)&matrix;
float32x4x4_t m;
m.val[0] = vmulq_n_f32(iMatrix.val[0], (float32_t)sx);
m.val[1] = vmulq_n_f32(iMatrix.val[1], (float32_t)sy);
m.val[2] = vmulq_n_f32(iMatrix.val[2], (float32_t)sz);
m.val[3] = iMatrix.val[3];
return *(GLKMatrix4 *)&m;
#else
GLKMatrix4 m = { matrix.m[0] * sx, matrix.m[1] * sx, matrix.m[2] * sx, matrix.m[3] * sx,
matrix.m[4] * sy, matrix.m[5] * sy, matrix.m[6] * sy, matrix.m[7] * sy,
matrix.m[8] * sz, matrix.m[9] * sz, matrix.m[10] * sz, matrix.m[11] * sz,
matrix.m[12], matrix.m[13], matrix.m[14], matrix.m[15] };
return m;
#endif
}
static __inline__ GLKMatrix4 GLKMatrix4ScaleWithVector3(GLKMatrix4 matrix, GLKVector3 scaleVector)
{
#if defined(__ARM_NEON__)
float32x4x4_t iMatrix = *(float32x4x4_t *)&matrix;
float32x4x4_t m;
m.val[0] = vmulq_n_f32(iMatrix.val[0], (float32_t)scaleVector.v[0]);
m.val[1] = vmulq_n_f32(iMatrix.val[1], (float32_t)scaleVector.v[1]);
m.val[2] = vmulq_n_f32(iMatrix.val[2], (float32_t)scaleVector.v[2]);
m.val[3] = iMatrix.val[3];
return *(GLKMatrix4 *)&m;
#else
GLKMatrix4 m = { matrix.m[0] * scaleVector.v[0], matrix.m[1] * scaleVector.v[0], matrix.m[2] * scaleVector.v[0], matrix.m[3] * scaleVector.v[0],
matrix.m[4] * scaleVector.v[1], matrix.m[5] * scaleVector.v[1], matrix.m[6] * scaleVector.v[1], matrix.m[7] * scaleVector.v[1],
matrix.m[8] * scaleVector.v[2], matrix.m[9] * scaleVector.v[2], matrix.m[10] * scaleVector.v[2], matrix.m[11] * scaleVector.v[2],
matrix.m[12], matrix.m[13], matrix.m[14], matrix.m[15] };
return m;
#endif
}
static __inline__ GLKMatrix4 GLKMatrix4ScaleWithVector4(GLKMatrix4 matrix, GLKVector4 scaleVector)
{
#if defined(__ARM_NEON__)
float32x4x4_t iMatrix = *(float32x4x4_t *)&matrix;
float32x4x4_t m;
m.val[0] = vmulq_n_f32(iMatrix.val[0], (float32_t)scaleVector.v[0]);
m.val[1] = vmulq_n_f32(iMatrix.val[1], (float32_t)scaleVector.v[1]);
m.val[2] = vmulq_n_f32(iMatrix.val[2], (float32_t)scaleVector.v[2]);
m.val[3] = iMatrix.val[3];
return *(GLKMatrix4 *)&m;
#else
GLKMatrix4 m = { matrix.m[0] * scaleVector.v[0], matrix.m[1] * scaleVector.v[0], matrix.m[2] * scaleVector.v[0], matrix.m[3] * scaleVector.v[0],
matrix.m[4] * scaleVector.v[1], matrix.m[5] * scaleVector.v[1], matrix.m[6] * scaleVector.v[1], matrix.m[7] * scaleVector.v[1],
matrix.m[8] * scaleVector.v[2], matrix.m[9] * scaleVector.v[2], matrix.m[10] * scaleVector.v[2], matrix.m[11] * scaleVector.v[2],
matrix.m[12], matrix.m[13], matrix.m[14], matrix.m[15] };
return m;
#endif
}
static __inline__ GLKMatrix4 GLKMatrix4Rotate(GLKMatrix4 matrix, float radians, float x, float y, float z)
{
GLKMatrix4 rm = GLKMatrix4MakeRotation(radians, x, y, z);
return GLKMatrix4Multiply(matrix, rm);
}
static __inline__ GLKMatrix4 GLKMatrix4RotateWithVector3(GLKMatrix4 matrix, float radians, GLKVector3 axisVector)
{
GLKMatrix4 rm = GLKMatrix4MakeRotation(radians, axisVector.v[0], axisVector.v[1], axisVector.v[2]);
return GLKMatrix4Multiply(matrix, rm);
}
static __inline__ GLKMatrix4 GLKMatrix4RotateWithVector4(GLKMatrix4 matrix, float radians, GLKVector4 axisVector)
{
GLKMatrix4 rm = GLKMatrix4MakeRotation(radians, axisVector.v[0], axisVector.v[1], axisVector.v[2]);
return GLKMatrix4Multiply(matrix, rm);
}
static __inline__ GLKMatrix4 GLKMatrix4RotateX(GLKMatrix4 matrix, float radians)
{
GLKMatrix4 rm = GLKMatrix4MakeXRotation(radians);
return GLKMatrix4Multiply(matrix, rm);
}
static __inline__ GLKMatrix4 GLKMatrix4RotateY(GLKMatrix4 matrix, float radians)
{
GLKMatrix4 rm = GLKMatrix4MakeYRotation(radians);
return GLKMatrix4Multiply(matrix, rm);
}
static __inline__ GLKMatrix4 GLKMatrix4RotateZ(GLKMatrix4 matrix, float radians)
{
GLKMatrix4 rm = GLKMatrix4MakeZRotation(radians);
return GLKMatrix4Multiply(matrix, rm);
}
static __inline__ GLKVector3 GLKMatrix4MultiplyVector3(GLKMatrix4 matrixLeft, GLKVector3 vectorRight)
{
GLKVector4 v4 = GLKMatrix4MultiplyVector4(matrixLeft, GLKVector4Make(vectorRight.v[0], vectorRight.v[1], vectorRight.v[2], 0.0f));
return GLKVector3Make(v4.v[0], v4.v[1], v4.v[2]);
}
static __inline__ GLKVector3 GLKMatrix4MultiplyVector3WithTranslation(GLKMatrix4 matrixLeft, GLKVector3 vectorRight)
{
GLKVector4 v4 = GLKMatrix4MultiplyVector4(matrixLeft, GLKVector4Make(vectorRight.v[0], vectorRight.v[1], vectorRight.v[2], 1.0f));
return GLKVector3Make(v4.v[0], v4.v[1], v4.v[2]);
}
static __inline__ GLKVector3 GLKMatrix4MultiplyAndProjectVector3(GLKMatrix4 matrixLeft, GLKVector3 vectorRight)
{
GLKVector4 v4 = GLKMatrix4MultiplyVector4(matrixLeft, GLKVector4Make(vectorRight.v[0], vectorRight.v[1], vectorRight.v[2], 1.0f));
return GLKVector3MultiplyScalar(GLKVector3Make(v4.v[0], v4.v[1], v4.v[2]), 1.0f / v4.v[3]);
}
static __inline__ void GLKMatrix4MultiplyVector3Array(GLKMatrix4 matrix, GLKVector3 *vectors, size_t vectorCount)
{
size_t i;
for (i=0; i < vectorCount; i++)
vectors[i] = GLKMatrix4MultiplyVector3(matrix, vectors[i]);
}
static __inline__ void GLKMatrix4MultiplyVector3ArrayWithTranslation(GLKMatrix4 matrix, GLKVector3 *vectors, size_t vectorCount)
{
size_t i;
for (i=0; i < vectorCount; i++)
vectors[i] = GLKMatrix4MultiplyVector3WithTranslation(matrix, vectors[i]);
}
static __inline__ void GLKMatrix4MultiplyAndProjectVector3Array(GLKMatrix4 matrix, GLKVector3 *vectors, size_t vectorCount)
{
size_t i;
for (i=0; i < vectorCount; i++)
vectors[i] = GLKMatrix4MultiplyAndProjectVector3(matrix, vectors[i]);
}
static __inline__ GLKVector4 GLKMatrix4MultiplyVector4(GLKMatrix4 matrixLeft, GLKVector4 vectorRight)
{
#if defined(__ARM_NEON__)
float32x4x4_t iMatrix = *(float32x4x4_t *)&matrixLeft;
float32x4_t v;
iMatrix.val[0] = vmulq_n_f32(iMatrix.val[0], (float32_t)vectorRight.v[0]);
iMatrix.val[1] = vmulq_n_f32(iMatrix.val[1], (float32_t)vectorRight.v[1]);
iMatrix.val[2] = vmulq_n_f32(iMatrix.val[2], (float32_t)vectorRight.v[2]);
iMatrix.val[3] = vmulq_n_f32(iMatrix.val[3], (float32_t)vectorRight.v[3]);
iMatrix.val[0] = vaddq_f32(iMatrix.val[0], iMatrix.val[1]);
iMatrix.val[2] = vaddq_f32(iMatrix.val[2], iMatrix.val[3]);
v = vaddq_f32(iMatrix.val[0], iMatrix.val[2]);
return *(GLKVector4 *)&v;
#else
GLKVector4 v = { matrixLeft.m[0] * vectorRight.v[0] + matrixLeft.m[4] * vectorRight.v[1] + matrixLeft.m[8] * vectorRight.v[2] + matrixLeft.m[12] * vectorRight.v[3],
matrixLeft.m[1] * vectorRight.v[0] + matrixLeft.m[5] * vectorRight.v[1] + matrixLeft.m[9] * vectorRight.v[2] + matrixLeft.m[13] * vectorRight.v[3],
matrixLeft.m[2] * vectorRight.v[0] + matrixLeft.m[6] * vectorRight.v[1] + matrixLeft.m[10] * vectorRight.v[2] + matrixLeft.m[14] * vectorRight.v[3],
matrixLeft.m[3] * vectorRight.v[0] + matrixLeft.m[7] * vectorRight.v[1] + matrixLeft.m[11] * vectorRight.v[2] + matrixLeft.m[15] * vectorRight.v[3] };
return v;
#endif
}
static __inline__ void GLKMatrix4MultiplyVector4Array(GLKMatrix4 matrix, GLKVector4 *vectors, size_t vectorCount)
{
size_t i;
for (i=0; i < vectorCount; i++)
vectors[i] = GLKMatrix4MultiplyVector4(matrix, vectors[i]);
}
#ifdef __cplusplus
}
#endif
#endif /* __GLK_MATRIX_4_H */

99
samples/C/NWMan.h Normal file

@@ -0,0 +1,99 @@
#ifndef _NME_WMAN_H
#define _NME_WMAN_H
// Internal window manager API
#include "NCompat.h"
START_HEAD
#include "NPos.h"
#include "NUtil.h"
#include "NTypes.h"
NTS(NWMan_event);
NSTRUCT(NWMan, {
// Init stuff
bool (*init)();
bool (*destroy)();
// Window stuff
bool (*create_window)();
bool (*destroy_window)();
void (*swap_buffers)();
// Event stuff
bool (*next_event)(NWMan_event* event);
// Time stuff
uint (*get_millis)();
void (*sleep)(uint millis);
// Info
int rshift_key;
int lshift_key;
int left_key;
int right_key;
});
NENUM(NWMan_event_type, {
N_WMAN_MOUSE_MOVE = 0,
N_WMAN_MOUSE_BUTTON = 1,
N_WMAN_MOUSE_WHEEL = 2,
N_WMAN_KEYBOARD = 10,
N_WMAN_QUIT = 20,
N_WMAN_RESIZE = 21,
N_WMAN_FOCUS = 22
});
#define N_WMAN_MOUSE_LEFT 0
#define N_WMAN_MOUSE_RIGHT 1
#define N_WMAN_MOUSE_MIDDLE 2
NSTRUCT(NWMan_event, {
NWMan_event_type type;
union {
// Mouse
NPos2i mouse_pos;
struct {
short id;
bool state;
} mouse_button;
signed char mouse_wheel; // 1 if up, -1 if down
// Keyboard
struct {
int key;
bool state;
} keyboard;
// Window
bool window_quit; // Will always be true if WM_QUIT
NPos2i window_size;
bool window_focus;
};
});
NWMan_event NWMan_event_new(NWMan_event_type type);
bool NWMan_init();
bool NWMan_destroy();
extern NWMan N_WMan;
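/*
 * Illustrative usage sketch, not part of the original header; it assumes the
 * backend has already been brought up with NWMan_init() and create_window():
 *
 *     NWMan_event ev;
 *     while (N_WMan.next_event(&ev)) {
 *         if (ev.type == N_WMAN_QUIT)
 *             break;
 *     }
 */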
END_HEAD
#endif

27
samples/C/Nightmare.h Normal file

@@ -0,0 +1,27 @@
#ifndef _NMEX_NIGHTMARE_H
#define _NMEX_NIGHTMARE_H
//#define NMEX
#include "../src/NCompat.h"
START_HEAD
#include "../src/NTypes.h"
#include "../src/NUtil.h"
#include "../src/NPorting.h"
#include "../src/NGlobals.h"
#include "../src/NLog.h"
#include "../src/NWMan.h"
#include "../src/NRsc.h"
#include "../src/NShader.h"
#include "../src/NSquare.h"
#include "../src/NImage.h"
#include "../src/NSprite.h"
#include "../src/NSpritesheet.h"
#include "../src/NEntity.h"
#include "../src/Game.h"
END_HEAD
#endif

89
samples/C/ntru_encrypt.h Normal file

@@ -0,0 +1,89 @@
/*
* Copyright (C) 2014 FH Bielefeld
*
* This file is part of a FH Bielefeld project.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301 USA
*/
/**
* @file ntru_encrypt.h
* Header for the internal API of ntru_encrypt.c.
* @brief header for encrypt.c
*/
#ifndef PQC_ENCRYPT_H
#define PQC_ENCRYPT_H
#include "ntru_params.h"
#include "ntru_poly.h"
#include "ntru_string.h"
#include <fmpz_poly.h>
#include <fmpz.h>
/**
* encrypt the msg, using the math:
* e = (h r) + m (mod q)
*
* e = the encrypted poly
*
* h = the public key
*
* r = the random poly
*
* m = the message poly
*
* q = large mod
*
* @param msg_tern the message to encrypt, in ternary format
* @param pub_key the public key
* @param rnd the random poly (should have relatively small
* coefficients, but not restricted to {-1, 0, 1})
* @param out the output poly which is in the range {0, q-1}
* (not ternary!) [out]
* @param params ntru_params the ntru context
*/
void
ntru_encrypt_poly(
const fmpz_poly_t msg_tern,
const fmpz_poly_t pub_key,
const fmpz_poly_t rnd,
fmpz_poly_t out,
const ntru_params *params);
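/*
 * Illustrative call sketch, not part of the original header; it assumes the
 * message, key and random polynomials as well as the params struct have been
 * initialised elsewhere:
 *
 *     fmpz_poly_t e;
 *     fmpz_poly_init(e);
 *     ntru_encrypt_poly(msg_tern, pub_key, rnd, e, params);
 *
 * e then holds (h * r) + m mod q, with coefficients in {0, ..., q-1}.
 */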
/**
* Encrypt a message in the form of a null-terminated char array and
* return a string.
*
* @param msg the message
* @param pub_key the public key
* @param rnd the random poly (should have relatively small
* coefficients, but not restricted to {-1, 0, 1})
* @param params ntru_params the ntru context
* @return the newly allocated encrypted string
*/
string *
ntru_encrypt_string(
const string *msg,
const fmpz_poly_t pub_key,
const fmpz_poly_t rnd,
const ntru_params *params);
#endif /* PQC_ENCRYPT_H */


@@ -0,0 +1,12 @@
cmake_minimum_required(VERSION 2.8)
project(Foo)
set(CMAKE_SKIP_RPATH TRUE)
set(CMAKE_INSTALL_PREFIX "/usr/local")
add_subdirectory(bar)
add_executable(foo foo.c)
target_link_libraries(foo pthread)
install(TARGETS foo DESTINATION bin)

26
samples/Cool/list.cl Normal file

@@ -0,0 +1,26 @@
(* This simple example of a list class is adapted from an example in the
Cool distribution. *)
class List {
isNil() : Bool { true };
head() : Int { { abort(); 0; } };
tail() : List { { abort(); self; } };
cons(i : Int) : List {
(new Cons).init(i, self)
};
};
class Cons inherits List {
car : Int; -- The element in this list cell
cdr : List; -- The rest of the list
isNil() : Bool { false };
head() : Int { car };
tail() : List { cdr };
init(i : Int, rest : List) : List {
{
car <- i;
cdr <- rest;
self;
}
};
};

71
samples/Cool/sample.cl Normal file

@@ -0,0 +1,71 @@
(* Refer to Alex Aiken, "The Cool Reference Manual":
http://theory.stanford.edu/~aiken/software/cool/cool-manual.pdf
for language specification.
*)
-- Exhibit various language constructs
class Sample {
testCondition(x: Int): Bool {
if x = 0
then false
else
if x < (1 + 2) * 3
then true
else false
fi
fi
};
testLoop(y: Int): Bool {
while y > 0 loop
{
if not testCondition(y)
then y <- y / 2
else y <- y - 1
fi;
}
pool
};
testAssign(z: Int): Bool {
i : Int;
i <- ~z;
};
testCase(var: Sample): SELF_TYPE {
io : IO <- new IO;
case var of
a : A => io.out_string("Class type is A\n");
b : B => io.out_string("Class type is B\n");
s : Sample => io.out_string("Class type is Sample\n");
o : Object => io.out_string("Class type is object\n");
esac
};
testLet(i: Int): Int {
let (a: Int in
let(b: Int <- 3, c: Int <- 4 in
{
a <- 2;
a * b * 2 / c;
}
)
)
};
};
-- Used to test subclasses
class A inherits Sample {};
class B inherits A {};
class C {
main() : Int {
(new Sample).testLet(1)
};
};
-- "Hello, world" example
class Main inherits IO {
main(): SELF_TYPE {
out_string("Hello, World.\n")
};
};

49
samples/F#/Combinators.fs Normal file

@@ -0,0 +1,49 @@
namespace Nessos.FsPickler.Combinators
open Nessos.FsPickler
open Nessos.FsPickler.Json
/// Json pickling methods
[<RequireQualifiedAccess>]
module Json =
let private jsonSerializer = lazy(FsPickler.CreateJson(omitHeader = true))
/// <summary>
/// Pickles a value to Json.
/// </summary>
/// <param name="pickler">utilized pickler.</param>
/// <param name="value">input value.</param>
let pickle (pickler : Pickler<'T>) (value : 'T) : string =
jsonSerializer.Value.PickleToString (pickler, value)
/// <summary>
/// Unpickles a value from Json.
/// </summary>
/// <param name="pickler">utilized pickler.</param>
/// <param name="pickle">input pickle.</param>
let unpickle (pickler : Pickler<'T>) (pickle : string) : 'T =
jsonSerializer.Value.UnPickleOfString (pickler, pickle)
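// Illustrative usage sketch, not part of the original file; it assumes a
// pickler : Pickler<int list> value obtained elsewhere from the FsPickler API:
//   let json = Json.pickle pickler [1; 2; 3]
//   let values : int list = Json.unpickle pickler json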
/// Bson pickling methods
[<RequireQualifiedAccess>]
module Bson =
let private bsonPickler = lazy(FsPickler.CreateBson())
/// <summary>
/// Pickles a value to Bson.
/// </summary>
/// <param name="pickler">utilized pickler.</param>
/// <param name="value">input value.</param>
let pickle (pickler : Pickler<'T>) (value : 'T) : byte [] =
bsonPickler.Value.Pickle (pickler, value)
/// <summary>
/// Unpickles a value from bson.
/// </summary>
/// <param name="pickler">utilized pickler.</param>
/// <param name="pickle">input pickle.</param>
let unpickle (pickler : Pickler<'T>) (pickle : byte []) : 'T =
bsonPickler.Value.UnPickle (pickler, pickle)

65
samples/F#/JsonFormat.fs Normal file

@@ -0,0 +1,65 @@
namespace Nessos.FsPickler.Json
open System
open System.IO
open System.Text
open Newtonsoft.Json
open Nessos.FsPickler
/// <summary>
/// Factory methods for the Json serialization format.
/// </summary>
type JsonPickleFormatProvider internal (indent, omitHeader) as self =
let isCustomSeq isTopLevelSequence =
isTopLevelSequence && self.OmitHeader && self.UseCustomTopLevelSequenceSeparator
let mutable sequenceSeparator = " "
member val Indent = indent with get,set
member val OmitHeader = omitHeader with get,set
member val UseCustomTopLevelSequenceSeparator = false with get,set
member __.SequenceSeparator
with get () = sequenceSeparator
and set sep =
if sep <> null && String.IsNullOrWhiteSpace sep then
sequenceSeparator <- sep
else
invalidArg "SequenceSeparator" "should be non-null whitespace."
interface ITextPickleFormatProvider with
member __.Name = "Json"
// see discussion : https://github.com/nessos/FsPickler/issues/17
member __.DefaultEncoding = new UTF8Encoding(false) :> Encoding
member __.CreateWriter (stream, encoding, isTopLevelSequence, leaveOpen) =
#if NET40
if leaveOpen then raise <| new NotSupportedException("'leaveOpen' not supported in .NET 40.")
let sw = new StreamWriter(stream, encoding)
#else
let sw = new StreamWriter(stream, encoding, 1024, leaveOpen)
#endif
let jw = new JsonTextWriter(sw)
new JsonPickleWriter(jw, __.OmitHeader, __.Indent, isCustomSeq isTopLevelSequence, sequenceSeparator, leaveOpen) :> _
member __.CreateReader (stream, encoding, isTopLevelSequence, leaveOpen) =
#if NET40
if leaveOpen then raise <| new NotSupportedException("'leaveOpen' not supported in .NET 40.")
let sr = new StreamReader(stream, encoding)
#else
let sr = new StreamReader(stream, encoding, true, 1024, leaveOpen)
#endif
let jr = new JsonTextReader(sr)
new JsonPickleReader(jr, __.OmitHeader, isCustomSeq isTopLevelSequence, leaveOpen) :> _
member __.CreateWriter (textWriter, isTopLevelSequence, leaveOpen) =
let jw = new JsonTextWriter(textWriter)
new JsonPickleWriter(jw, __.OmitHeader, __.Indent, isCustomSeq isTopLevelSequence, sequenceSeparator, leaveOpen) :> _
member __.CreateReader (textReader, isTopLevelSequence, leaveOpen) =
let jr = new JsonTextReader(textReader)
new JsonPickleReader(jr, __.OmitHeader, isCustomSeq isTopLevelSequence, leaveOpen) :> _

202
samples/F#/JsonReader.fs Normal file

@@ -0,0 +1,202 @@
namespace Nessos.FsPickler.Json
open System
open System.Collections.Generic
open System.Globalization
open System.IO
open System.Numerics
open System.Text
open Newtonsoft.Json
open Nessos.FsPickler
/// <summary>
/// Json format deserializer
/// </summary>
type internal JsonPickleReader (jsonReader : JsonReader, omitHeader, isTopLevelSequence, leaveOpen) =
do
jsonReader.CloseInput <- not leaveOpen
jsonReader.SupportMultipleContent <- isTopLevelSequence
let isBsonReader = match jsonReader with :? Bson.BsonReader -> true | _ -> false
let mutable depth = 0
let arrayStack = new Stack<int> ()
do arrayStack.Push Int32.MinValue
// do not write tag if omitting header or array element
let omitTag () = (omitHeader && depth = 0) || arrayStack.Peek() = depth - 1
interface IPickleFormatReader with
member __.BeginReadRoot (tag : string) =
do jsonReader.Read() |> ignore
if omitHeader then () else
if jsonReader.TokenType <> JsonToken.StartObject then raise <| new FormatException("invalid json root object.")
else
do jsonReader.MoveNext()
let version = jsonReader.ReadPrimitiveAs<string> false "FsPickler"
if version <> jsonFormatVersion then
let v = Version(version)
raise <| new FormatException(sprintf "Invalid FsPickler format version %O." v)
let sTag = jsonReader.ReadPrimitiveAs<string> false "type"
if tag <> sTag then
raise <| new InvalidPickleTypeException(tag, sTag)
member __.EndReadRoot () =
if not omitHeader then jsonReader.Read() |> ignore
member __.BeginReadObject (tag : string) =
if not <| omitTag () then
jsonReader.ReadProperty tag
jsonReader.MoveNext ()
if isTopLevelSequence && depth = 0 then
arrayStack.Push depth
depth <- depth + 1
ObjectFlags.IsSequenceHeader
else
match jsonReader.TokenType with
| JsonToken.Null -> ObjectFlags.IsNull
| JsonToken.StartArray ->
jsonReader.MoveNext()
arrayStack.Push depth
depth <- depth + 1
ObjectFlags.IsSequenceHeader
| JsonToken.StartObject ->
do jsonReader.MoveNext()
depth <- depth + 1
if jsonReader.ValueAs<string> () = "_flags" then
jsonReader.MoveNext()
let csvFlags = jsonReader.ValueAs<string>()
jsonReader.MoveNext()
parseFlagCsv csvFlags
else
ObjectFlags.None
| token -> raise <| new FormatException(sprintf "expected start of Json object but was '%O'." token)
member __.EndReadObject () =
if isTopLevelSequence && depth = 1 then
arrayStack.Pop () |> ignore
depth <- depth - 1
jsonReader.Read() |> ignore
else
match jsonReader.TokenType with
| JsonToken.Null -> ()
| JsonToken.EndObject -> depth <- depth - 1
| JsonToken.EndArray ->
arrayStack.Pop() |> ignore
depth <- depth - 1
| token -> raise <| new FormatException(sprintf "expected end of Json object but was '%O'." token)
if omitHeader && depth = 0 then ()
else jsonReader.Read() |> ignore
member __.SerializeUnionCaseNames = true
member __.PreferLengthPrefixInSequences = false
member __.ReadNextSequenceElement () =
if isTopLevelSequence && depth = 1 then
jsonReader.TokenType <> JsonToken.None
else
jsonReader.TokenType <> JsonToken.EndArray
member __.ReadCachedObjectId () = jsonReader.ReadPrimitiveAs<int64> false "id"
member __.ReadBoolean tag = jsonReader.ReadPrimitiveAs<bool> (omitTag ()) tag
member __.ReadByte tag = jsonReader.ReadPrimitiveAs<int64> (omitTag ()) tag |> byte
member __.ReadSByte tag = jsonReader.ReadPrimitiveAs<int64> (omitTag ()) tag |> sbyte
member __.ReadInt16 tag = jsonReader.ReadPrimitiveAs<int64> (omitTag ()) tag |> int16
member __.ReadInt32 tag = jsonReader.ReadPrimitiveAs<int64> (omitTag ()) tag |> int
member __.ReadInt64 tag = jsonReader.ReadPrimitiveAs<int64> (omitTag ()) tag
member __.ReadUInt16 tag = jsonReader.ReadPrimitiveAs<int64> (omitTag ()) tag |> uint16
member __.ReadUInt32 tag = jsonReader.ReadPrimitiveAs<int64> (omitTag ()) tag |> uint32
member __.ReadUInt64 tag = jsonReader.ReadPrimitiveAs<int64> (omitTag ()) tag |> uint64
member __.ReadSingle tag =
if not <| omitTag () then
jsonReader.ReadProperty tag
jsonReader.MoveNext()
let value =
match jsonReader.TokenType with
| JsonToken.Float -> jsonReader.ValueAs<double> () |> single
| JsonToken.String -> Single.Parse(jsonReader.ValueAs<string>(), CultureInfo.InvariantCulture)
| _ -> raise <| new FormatException("not a float.")
jsonReader.Read() |> ignore
value
member __.ReadDouble tag =
if not <| omitTag () then
jsonReader.ReadProperty tag
jsonReader.MoveNext()
let value =
match jsonReader.TokenType with
| JsonToken.Float -> jsonReader.ValueAs<double> ()
| JsonToken.String -> Double.Parse(jsonReader.ValueAs<string>(), CultureInfo.InvariantCulture)
| _ -> raise <| new FormatException("not a float.")
jsonReader.Read() |> ignore
value
member __.ReadChar tag = let value = jsonReader.ReadPrimitiveAs<string> (omitTag ()) tag in value.[0]
member __.ReadString tag = jsonReader.ReadPrimitiveAs<string> (omitTag ()) tag
member __.ReadBigInteger tag = jsonReader.ReadPrimitiveAs<string> (omitTag ()) tag |> BigInteger.Parse
member __.ReadGuid tag =
if isBsonReader then
jsonReader.ReadPrimitiveAs<Guid> (omitTag ()) tag
else
jsonReader.ReadPrimitiveAs<string> (omitTag ()) tag |> Guid.Parse
member __.ReadTimeSpan tag = jsonReader.ReadPrimitiveAs<string> (omitTag ()) tag |> TimeSpan.Parse
member __.ReadDecimal tag = jsonReader.ReadPrimitiveAs<string> (omitTag ()) tag |> decimal
// BSON spec mandates the use of Unix time;
// this has millisecond precision which results in loss of accuracy w.r.t. ticks
// since the goal of FsPickler is to offer faithful representations of .NET objects
// we choose to override the spec and serialize ticks outright.
// see also https://json.codeplex.com/discussions/212067
member __.ReadDate tag =
if isBsonReader then
let ticks = jsonReader.ReadPrimitiveAs<int64> (omitTag ()) tag
DateTime(ticks)
else
jsonReader.ReadPrimitiveAs<DateTime> (omitTag ()) tag
member __.ReadBytes tag =
if not <| omitTag () then
jsonReader.ReadProperty tag
jsonReader.Read() |> ignore
let bytes =
if jsonReader.TokenType = JsonToken.Null then null
elif isBsonReader then jsonReader.ValueAs<byte []> ()
else
let base64 = jsonReader.ValueAs<string> ()
Convert.FromBase64String base64
jsonReader.Read() |> ignore
bytes
member __.IsPrimitiveArraySerializationSupported = false
member __.ReadPrimitiveArray _ _ = raise <| new NotImplementedException()
member __.Dispose () = (jsonReader :> IDisposable).Dispose()


@@ -0,0 +1,85 @@
namespace Nessos.FsPickler.Json
open System
open Nessos.FsPickler
type internal OAttribute = System.Runtime.InteropServices.OptionalAttribute
type internal DAttribute = System.Runtime.InteropServices.DefaultParameterValueAttribute
/// <summary>
/// Json pickler instance.
/// </summary>
type JsonSerializer =
inherit FsPicklerTextSerializer
val private format : JsonPickleFormatProvider
/// <summary>
/// Initializes a new Json pickler instance.
/// </summary>
/// <param name="indent">indent out Json pickles.</param>
/// <param name="omitHeader">omit FsPickler header in Json pickles.</param>
/// <param name="typeConverter">specify a custom type name converter.</param>
new ([<O;D(null)>] ?indent, [<O;D(null)>] ?omitHeader, [<O;D(null)>] ?typeConverter) =
let indent = defaultArg indent false
let omitHeader = defaultArg omitHeader false
let json = new JsonPickleFormatProvider(indent, omitHeader)
{
inherit FsPicklerTextSerializer(json, ?typeConverter = typeConverter)
format = json
}
/// <summary>
/// Gets or sets whether Json output should be indented.
/// </summary>
member x.Indent
with get () = x.format.Indent
and set b = x.format.Indent <- b
/// <summary>
/// Gets or sets whether FsPickler headers should be ignored in pickle format.
/// </summary>
member x.OmitHeader
with get () = x.format.OmitHeader
and set b = x.format.OmitHeader <- b
/// <summary>
/// Gets or sets a non-null whitespace string that serves as a custom, top-level sequence separator.
/// </summary>
member x.SequenceSeparator
with get () = x.format.SequenceSeparator
and set sep = x.format.SequenceSeparator <- sep
/// <summary>
/// Gets or sets whether top-level sequences should be serialized using the custom separator.
/// </summary>
member x.UseCustomTopLevelSequenceSeparator
with get () = x.format.UseCustomTopLevelSequenceSeparator
and set e = x.format.UseCustomTopLevelSequenceSeparator <- e
/// <summary>
/// BSON pickler instance.
/// </summary>
type BsonSerializer([<O;D(null)>] ?typeConverter) =
inherit FsPicklerSerializer(new BsonPickleFormatProvider(), ?typeConverter = typeConverter)
/// FsPickler static methods.
type FsPickler =
/// <summary>
/// Initializes a new Json pickler instance.
/// </summary>
/// <param name="indent">indent out Json pickles.</param>
/// <param name="omitHeader">omit FsPickler header in Json pickles.</param>
/// <param name="typeConverter">specify a custom type name converter.</param>
static member CreateJson([<O;D(null)>] ?indent, [<O;D(null)>] ?omitHeader, [<O;D(null)>] ?typeConverter) =
new JsonSerializer(?indent = indent, ?omitHeader = omitHeader, ?typeConverter = typeConverter)
/// <summary>
/// Initializes a new Bson pickler instance.
/// </summary>
/// <param name="typeConverter">specify a custom type name converter.</param>
static member CreateBson([<O;D(null)>] ?typeConverter) =
new BsonSerializer(?typeConverter = typeConverter)
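// Illustrative usage sketch, not part of the original file; the settings shown
// are arbitrary:
//   let serializer = FsPickler.CreateJson(indent = true, omitHeader = true)
//   serializer.UseCustomTopLevelSequenceSeparator <- true
//   serializer.SequenceSeparator <- "\n"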

142
samples/F#/JsonWriter.fs Normal file

@@ -0,0 +1,142 @@
namespace Nessos.FsPickler.Json
open System
open System.IO
open System.Collections.Generic
open Newtonsoft.Json
open Nessos.FsPickler
/// <summary>
/// Json format serializer.
/// </summary>
type internal JsonPickleWriter (jsonWriter : JsonWriter, omitHeader, indented, isTopLevelSequence, separator, leaveOpen) =
do
jsonWriter.Formatting <- if indented then Formatting.Indented else Formatting.None
jsonWriter.CloseOutput <- not leaveOpen
let isBsonWriter = match jsonWriter with :? Bson.BsonWriter -> true | _ -> false
let mutable depth = 0
let mutable isTopLevelSequenceHead = false
let mutable currentValueIsNull = false
let arrayStack = new Stack<int> ()
do arrayStack.Push Int32.MinValue
// do not write tag if omitting header or array element
let omitTag () = (omitHeader && depth = 0) || arrayStack.Peek() = depth - 1
interface IPickleFormatWriter with
member __.BeginWriteRoot (tag : string) =
if omitHeader then () else
jsonWriter.WriteStartObject()
writePrimitive jsonWriter false "FsPickler" jsonFormatVersion
writePrimitive jsonWriter false "type" tag
member __.EndWriteRoot () =
if not omitHeader then jsonWriter.WriteEnd()
member __.BeginWriteObject (tag : string) (flags : ObjectFlags) =
if not <| omitTag () then
jsonWriter.WritePropertyName tag
if flags.HasFlag ObjectFlags.IsNull then
currentValueIsNull <- true
jsonWriter.WriteNull()
elif flags.HasFlag ObjectFlags.IsSequenceHeader then
if isTopLevelSequence && depth = 0 then
isTopLevelSequenceHead <- true
else
jsonWriter.WriteStartArray()
arrayStack.Push depth
depth <- depth + 1
else
jsonWriter.WriteStartObject()
depth <- depth + 1
if flags = ObjectFlags.None then ()
else
let flagCsv = mkFlagCsv flags
writePrimitive jsonWriter false "_flags" flagCsv
member __.EndWriteObject () =
if currentValueIsNull then
currentValueIsNull <- false
else
depth <- depth - 1
if arrayStack.Peek () = depth then
if isTopLevelSequence && depth = 0 then ()
else
jsonWriter.WriteEndArray()
arrayStack.Pop () |> ignore
else
jsonWriter.WriteEndObject()
member __.SerializeUnionCaseNames = true
member __.PreferLengthPrefixInSequences = false
member __.WriteNextSequenceElement hasNext =
if isTopLevelSequence && depth = 1 then
if isTopLevelSequenceHead then
isTopLevelSequenceHead <- false
else
jsonWriter.WriteWhitespace separator
member __.WriteCachedObjectId id = writePrimitive jsonWriter false "id" id
member __.WriteBoolean (tag : string) value = writePrimitive jsonWriter (omitTag ()) tag value
member __.WriteByte (tag : string) value = writePrimitive jsonWriter (omitTag ()) tag value
member __.WriteSByte (tag : string) value = writePrimitive jsonWriter (omitTag ()) tag value
member __.WriteInt16 (tag : string) value = writePrimitive jsonWriter (omitTag ()) tag value
member __.WriteInt32 (tag : string) value = writePrimitive jsonWriter (omitTag ()) tag value
member __.WriteInt64 (tag : string) value = writePrimitive jsonWriter (omitTag ()) tag value
member __.WriteUInt16 (tag : string) value = writePrimitive jsonWriter (omitTag ()) tag value
member __.WriteUInt32 (tag : string) value = writePrimitive jsonWriter (omitTag ()) tag value
member __.WriteUInt64 (tag : string) value = writePrimitive jsonWriter (omitTag ()) tag value
member __.WriteSingle (tag : string) value = writePrimitive jsonWriter (omitTag ()) tag value
member __.WriteDouble (tag : string) value = writePrimitive jsonWriter (omitTag ()) tag value
member __.WriteDecimal (tag : string) value = writePrimitive jsonWriter (omitTag ()) tag (string value)
member __.WriteChar (tag : string) value = writePrimitive jsonWriter (omitTag ()) tag value
member __.WriteString (tag : string) value = writePrimitive jsonWriter (omitTag ()) tag value
member __.WriteBigInteger (tag : string) value = writePrimitive jsonWriter (omitTag ()) tag (string value)
member __.WriteGuid (tag : string) value = writePrimitive jsonWriter (omitTag ()) tag value
member __.WriteTimeSpan (tag : string) value = writePrimitive jsonWriter (omitTag ()) tag (string value)
// BSON spec mandates the use of Unix time;
// this has millisecond precision which results in loss of accuracy w.r.t. ticks
// since the goal of FsPickler is to offer faithful representations of .NET objects
// we choose to override the spec and serialize ticks outright.
// see also https://json.codeplex.com/discussions/212067
member __.WriteDate (tag : string) value =
if isBsonWriter then
writePrimitive jsonWriter (omitTag ()) tag value.Ticks
else
writePrimitive jsonWriter (omitTag ()) tag value
member __.WriteBytes (tag : string) (value : byte []) =
if not <| omitTag () then
jsonWriter.WritePropertyName tag
if obj.ReferenceEquals(value, null) then
jsonWriter.WriteNull()
else
jsonWriter.WriteValue value
member __.IsPrimitiveArraySerializationSupported = false
member __.WritePrimitiveArray _ _ = raise <| NotSupportedException()
member __.Dispose () = jsonWriter.Flush()


@@ -0,0 +1,68 @@
namespace Nessos.FsPickler.Tests
open PerfUtil
open PerfUtil.NUnit
open NUnit.Framework
open Nessos.FsPickler
open Nessos.FsPickler.Json
[<AbstractClass>]
type PerfTester () =
inherit NUnitPerf<Serializer> ()
let tests = PerfTest.OfModuleMarker<PerformanceTests.Marker> ()
override __.PerfTests = tests
type ``Serializer Comparison`` () =
inherit PerfTester()
let fsp = FsPickler.initBinary()
let bfs = new BinaryFormatterSerializer() :> Serializer
let ndc = new NetDataContractSerializer() :> Serializer
let jdn = new JsonDotNetSerializer() :> Serializer
let bdn = new JsonDotNetBsonSerializer () :> Serializer
let pbn = new ProtoBufSerializer() :> Serializer
let ssj = new ServiceStackJsonSerializer() :> Serializer
let sst = new ServiceStackTypeSerializer() :> Serializer
let comparer = new WeightedComparer(spaceFactor = 0.2, leastAcceptableImprovementFactor = 1.)
let tester = new ImplementationComparer<_>(fsp, [bfs;ndc;jdn;bdn;pbn;ssj;sst], throwOnError = true, warmup = true, comparer = comparer)
override __.PerfTester = tester :> _
type ``FsPickler Formats Comparison`` () =
inherit PerfTester ()
let binary = FsPickler.initBinary()
let json = FsPickler.initJson()
let bson = FsPickler.initBson()
let xml = FsPickler.initXml()
let tester = new ImplementationComparer<_>(binary, [json ; bson; xml], warmup = true, throwOnError = false)
override __.PerfTester = tester :> _
type ``Past FsPickler Versions Comparison`` () =
inherit PerfTester ()
let persistResults = true
let persistenceFile = "fspPerf.xml"
let fsp = FsPickler.initBinary()
let version = typeof<FsPickler>.Assembly.GetName().Version
let comparer = new WeightedComparer(spaceFactor = 0.2, leastAcceptableImprovementFactor = 0.8)
let tester =
new PastImplementationComparer<Serializer>(
fsp, version, historyFile = persistenceFile, throwOnError = true, warmup = true, comparer = comparer)
override __.PerfTester = tester :> _
[<TestFixtureTearDown>]
member __.Persist() =
if persistResults then tester.PersistCurrentResults ()


@@ -0,0 +1,207 @@
namespace Nessos.FsPickler.Tests
open System
open System.Collections.Generic
open PerfUtil
open Nessos.FsPickler
open Nessos.FsPickler.Tests.Serializer
open Nessos.FsPickler.Tests.TestTypes
module PerformanceTests =
type Marker = class end
let guid = Guid.NewGuid()
[<PerfTest(1000)>]
let ``Value: Guid`` s = roundtrip guid s
let date = DateTime.Now
[<PerfTest(1000)>]
let ``Value: DateTime`` s = roundtrip date s
[<PerfTest(10000)>]
let ``Value: String`` s = roundtrip stringValue s
let boxed = box ([| 1 .. 1000 |], "lorem ipsum")
[<PerfTest(1000)>]
let ``Boxed Object`` s = roundtrip boxed s
let fsClass = new Class(42, stringValue)
[<PerfTest(10000)>]
let ``Class: Simple F# Class`` s = roundtrip fsClass s
let serializableClass = new SerializableClass<_>(42, stringValue, [|1..1000|])
[<PerfTest(10000)>]
let ``Class: ISerializable`` s = roundtrip serializableClass s
let boxedClass = box(Some 42)
[<PerfTest(10000)>]
let ``Subtype Resolution`` s = roundtrip boxedClass s
let floatArray = Array.init 100000 (fun i -> float i)
[<PerfTest(10)>]
let ``Array: Float`` s = roundtrip floatArray s
let intArray = Array.init 100000 id
[<PerfTest(10)>]
let ``Array: Int`` s = roundtrip intArray s
let stringArray = Array.init 10000 (fun i -> stringValue + string i)
[<PerfTest(100)>]
let ``Array: String`` s = roundtrip stringArray s
let kvarr = [|1..10000|] |> Array.map (fun i -> i, string i)
[<PerfTest(100)>]
let ``Array: Key-Value Pairs`` s = roundtrip kvarr s
let duArray = [| for i in 1 .. 10000 -> (Something ("asdasdasdas", i)) |]
[<PerfTest(100)>]
let ``Array: Discriminated Unions`` s = roundtrip duArray s
let objArray =
[|
box 2; box 3; box "hello" ; box <| Some 3; box(2,3) ;
box <| new Class(2, stringValue) ; box <| new SerializableClass<int option>(2, stringValue, Some 12);
box stringValue
|]
[<PerfTest(1000)>]
let ``Array: Objects`` s = roundtrip objArray s
let array3D = Array3D.init 100 100 100 (fun i j k -> float (i * j + k))
[<PerfTest(10)>]
let ``Array: Rank-3 Float`` s = roundtrip array3D s
let bclDict = dict [ for i in 1 .. 1000 -> (string i, i)]
[<PerfTest(100)>]
let ``.NET Dictionary`` s = roundtrip bclDict s
let bclStack = new Stack<string>([for i in 1 .. 1000 -> string i])
[<PerfTest(100)>]
let ``.NET Stack`` s = roundtrip bclStack s
let bclList = new List<string * int>([for i in 1 .. 1000 -> string i, i])
[<PerfTest(100)>]
let ``.NET List`` s = roundtrip bclList s
let bclSet = new SortedSet<_>([for i in 1 .. 1000 -> string i])
[<PerfTest(100)>]
let ``.NET Set`` s = roundtrip bclSet s
let smallTuple = (1, DateTime.Now,"hello")
[<PerfTest(10000)>]
let ``FSharp: Tuple Small`` s = roundtrip smallTuple s
let largeTuple = (stringValue, 1, 2, 3, true, "", Some(3.14, [2]), 3, 2, 1, stringValue)
[<PerfTest(10000)>]
let ``FSharp: Tuple Large`` s =
roundtrip largeTuple s
let intList = [1..1000]
[<PerfTest(1000)>]
let ``FSharp: List Int`` s = roundtrip intList s
let stringList = [ for i in 1 .. 1000 -> stringValue + string i ]
[<PerfTest(1000)>]
let ``FSharp: List String`` s = roundtrip stringList s
let pairList = [ for i in 1 .. 1000 -> (string i, i) ]
[<PerfTest(1000)>]
let ``FSharp: List Key-Value`` s = roundtrip pairList s
let nestedLst = let n = [1..1000] in [for _ in 1 .. 100 -> n]
[<PerfTest(1000)>]
let ``FSharp: List Nested`` s = roundtrip nestedLst s
let union = SomethingElse(stringValue, 42, box (Some 42))
[<PerfTest(10000)>]
let ``FSharp: Union`` s = roundtrip union s
let record = { Int = 42 ; String = stringValue ; Tuple = (13, "") }
[<PerfTest(10000)>]
let ``FSharp: Record`` s = roundtrip record s
let peano = int2Peano 100
[<PerfTest(100)>]
let ``FSharp: Peano Rectype`` s = roundtrip peano s
let closure = (@) [ Some([1..100], Set.ofList [1..100]) ]
[<PerfTest(1000)>]
let ``FSharp: Curried Function`` s = roundtrip closure s
let binTree = mkTree 10
[<PerfTest(100)>]
let ``FSharp: Binary Tree`` s = roundtrip binTree s
let intSet = [1..1000] |> List.map string |> set
[<PerfTest(1000)>]
let ``FSharp: Set`` s = roundtrip intSet s
let fsMap = [1..1000] |> Seq.map (fun i -> (string i,i)) |> Map.ofSeq
[<PerfTest(1000)>]
let ``FSharp: Map`` s = roundtrip fsMap s
let testType = typeof<int * string option * Map<int * string [], string ref option>>
[<PerfTest(1000)>]
let ``Reflection: Type`` s = roundtrip testType s
let quotationSmall = <@ fun x -> pown 2 x @>
let quotationLarge =
<@
async {
let rec fibAsync n =
async {
match n with
| _ when n < 0 -> return invalidArg "negative" "n"
| _ when n < 2 -> return n
| n ->
let! fn = fibAsync (n-1)
let! fnn = fibAsync (n-2)
return fn + fnn
}
let! values = [1..100] |> Seq.map fibAsync |> Async.Parallel
return Seq.sum values
}
@>
[<PerfTest(10000)>]
let ``FSharp: Quotation Small`` s = roundtrip quotationSmall s
[<PerfTest(1000)>]
let ``FSharp: Quotation Large`` s = roundtrip quotationLarge s

244
samples/Forth/asm.fr Normal file

@@ -0,0 +1,244 @@
\ Copyright 2013-2014 Lars Brinkhoff
\ Assembler for x86.
\ Adds to FORTH vocabulary: ASSEMBLER CODE ;CODE.
\ Creates ASSEMBLER vocabulary with: END-CODE and x86 opcodes.
\ Conventional prefix syntax: "<source> <destination> <opcode>,".
\ Addressing modes:
\ - immediate: "n #"
\ - direct: n
\ - register: <reg>
\ - indirect: "<reg> )"
\ - indirect with displacement: "n <reg> )#"
\ - indexed: not supported yet
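\ Illustrative examples of the prefix syntax above; these lines are not from
\ the original file and are shown only as comments (they would be valid
\ between CODE and END-CODE once the mnemonics below are defined):
\   ebx eax mov,        register EBX -> register EAX
\   3 # eax mov,        immediate 3 -> EAX
\   8 ebp )# ecx mov,   memory at [EBP+8] -> ECX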
require lib/common.fth
require search.fth
vocabulary assembler
also assembler definitions
\ Access to the target image.
' header, defer header, is header,
' cell defer cell is cell
' dp defer dp is dp
0 value delta
: aligned cell + 1 - cell negate nand invert ;
: align dp @ aligned dp ! ;
: allot dp +! ;
: here dp @ ;
: cells cell * ;
: c! delta + c! ;
: c, here c! 1 allot ;
: h, dup c, 8 rshift c, ;
: , dup h, 16 rshift h, ;
base @ hex
\ This constant signals that an operand is not a direct address.
deadbeef constant -addr
\ Assembler state.
variable opcode
variable d
variable s
variable dir?
variable mrrm defer ?mrrm,
variable sib defer ?sib,
variable disp defer ?disp,
variable imm defer ?imm,
defer imm,
defer immediate-opcode
defer reg
defer ?opsize
\ Set opcode. And destination: register or memory.
: opcode! 3@ is immediate-opcode >r opcode ! ;
: !reg dir? @ if 2 d ! then dir? off ;
: !mem dir? off ;
\ Set bits in mod/reg/rm byte.
: -mrrm ['] nop is ?mrrm, ;
: mod! mrrm c0 !bits ;
: reg@ mrrm 38 @bits ;
: reg! mrrm 38 !bits ;
: rm@ mrrm 7 @bits ;
: rm! rm@ 3 lshift reg! mrrm 7 !bits ;
: reg>opcode rm@ opcode 07 !bits ;
: opcode>reg opcode @ dup 3 rshift rm! 8 rshift opcode ! ;
\ Write parts of instruction to memory.
: ds d @ s @ + ;
: ?twobyte dup FF > if dup 8 rshift c, then ;
: opcode, opcode @ ?twobyte ds + c, ;
: mrrm, mrrm @ c, ;
: sib, sib @ c, ;
: imm8, imm @ c, ;
: imm16, imm @ h, ;
: imm32, imm @ , ;
: disp8, disp @ c, ;
: disp32, disp @ , ;
\ Set operand size.
: -opsize 2drop r> drop ;
: opsize! is imm, s ! ['] -opsize is ?opsize ;
: !op8 0 ['] imm8, ?opsize ;
: !op32 1 ['] imm32, ?opsize ;
: !op16 1 ['] imm16, ?opsize 66 c, ;
\ Set SIB byte.
: !sib ['] sib, is ?sib, ;
: sib! 3 lshift + sib ! !sib ;
\ Set displacement.
: byte? -80 80 within ;
: disp! is ?disp, disp ! ;
: !disp8 ['] disp8, disp! ;
: !disp32 ['] disp32, disp! ;
: !disp ( a -- u ) dup byte? if !disp8 40 else !disp32 80 then ;
: -pc here 5 + negate ;
: relative -pc disp +! ;
\ Set immediate operand.
: imm! imm ! ['] imm, is ?imm, ;
\ Implements addressing modes: register, indirect, indexed, and direct.
: reg1 rm! !reg ;
: reg2 3 lshift reg! ;
: !reg2 ['] reg2 is reg ;
: ind dup mod! rm! !mem !reg2 ;
: ind# swap !disp + ind ;
: idx 04 ind sib! ;
: idx# rot !disp 04 + ind sib! ;
: addr !disp32 05 ind ;
\ Reset assembler state.
: 0opsize ['] opsize! is ?opsize ;
: 0ds d off s off ;
: 0reg ['] reg1 is reg ;
: 0mrrm c0 mrrm ! ['] mrrm, is ?mrrm, ;
: 0sib ['] nop is ?sib, ;
: 0disp ['] nop is ?disp, ;
: 0imm imm off ['] nop is ?imm, 0 is imm, ;
: 0asm 0imm 0disp 0reg 0ds 0mrrm 0sib 0opsize dir? on ;
\ Enter and exit assembler mode.
: start-code also assembler 0asm ;
: end-code align previous ;
\ Implements addressing mode: immediate.
: imm8? imm @ byte? ;
: ?sign-extend d off imm8? if 2 d ! ['] imm8, is ?imm, then ;
: alu# opcode @ reg! 80 opcode ! ?sign-extend ;
: mov# B0 s @ 3 lshift + rm@ + opcode ! 0ds -mrrm ;
: push# imm8? if ['] imm8, 6A else ['] imm32, 68 then dup opcode ! rm! is ?imm, ;
: test# F6 opcode ! ;
: imm-op imm! immediate-opcode ;
\ Process one operand. All operands except a direct address
\ have the stack picture ( n*x xt -addr ).
: addr? dup -addr <> ;
: op addr? if addr else drop execute then ;
\ Define instruction formats.
: instruction, opcode! opcode, ?mrrm, ?sib, ?disp, ?imm, 0asm ;
: mnemonic ( u a "name" -- ) create ['] nop 3, does> instruction, ;
: format: create ] !csp does> mnemonic ;
: immediate: ' latestxt >body ! ;
\ Instruction formats.
format: 0op -mrrm ;
format: 1reg op reg>opcode 0ds -mrrm ;
format: 1op opcode>reg op d off ;
format: 2op op op ;
format: 2op-d op op d off ;
format: 2op-ds op op 0ds ;
format: 1addr op relative -mrrm ;
format: 1imm8 !op8 op -mrrm ;
\ Instruction mnemonics.
00 2op add, immediate: alu#
08 2op or, immediate: alu#
0F44 2op-ds cmove, \ Todo: other condition codes.
0FB6 2op-ds movzx,
0FBE 2op-ds movsx,
10 2op adc, immediate: alu#
18 2op sbb, immediate: alu#
20 2op and, immediate: alu#
26 0op es,
28 2op sub, immediate: alu#
2E 0op cs,
30 2op xor, immediate: alu#
36 0op ss,
38 2op cmp, immediate: alu#
3E 0op ds,
50 1reg push, immediate: push#
58 1reg pop,
64 0op fs,
65 0op gs,
\ 70 jcc
84 2op-d test, immediate: test#
86 2op-d xchg,
88 2op mov, immediate: mov#
8D 2op-ds lea,
\ 8F/0 pop, rm
90 0op nop,
C3 0op ret,
\ C6/0 immediate mov to r/m
\ C7/0 immediate mov to r/m
CD 1imm8 int,
E8 1addr call,
E9 1addr jmp,
\ EB jmp rel8
F0 0op lock,
F2 0op rep,
F3 0op repz,
F4 0op hlt,
F5 0op cmc,
F610 1op not,
F618 1op neg,
F8 0op clc,
F9 0op stc,
FA 0op cli,
FB 0op sti,
FC 0op cld,
FD 0op std,
\ FE 0 inc rm
\ FF 1 dec rm
\ FF 2 call rm
\ FF 4 jmp rm
\ FF 6 push rm
: sp? dup 4 = ;
\ Addressing mode syntax: immediate, indirect, and displaced indirect.
: # ['] imm-op -addr ;
: ) 2drop sp? if 4 ['] idx else ['] ind then -addr 0reg 0opsize ;
: )# 2drop sp? if 4 ['] idx# else ['] ind# then -addr 0reg 0opsize ;
\ Define registers.
: reg8 create , does> @ ['] reg -addr !op8 ;
: reg16 create , does> @ ['] reg -addr !op16 ;
: reg32 create , does> @ ['] reg -addr !op32 ;
: reg: dup reg8 dup reg16 dup reg32 1+ ;
\ Register names.
0
reg: al ax eax reg: cl cx ecx reg: dl dx edx reg: bl bx ebx
reg: ah sp esp reg: ch bp ebp reg: dh si esi reg: bh di edi
drop
\ Runtime for ;CODE. CODE! is defined elsewhere.
: (;code) r> code! ;
base ! only forth definitions also assembler
\ Standard assembler entry points.
: code parse-name header, ?code, start-code ;
: ;code postpone (;code) reveal postpone [ ?csp start-code ; immediate
0asm
previous

238
samples/Gosu/Ronin.gs Normal file

@@ -0,0 +1,238 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
package ronin
uses gw.util.concurrent.LockingLazyVar
uses gw.lang.reflect.*
uses java.lang.*
uses java.io.*
uses ronin.config.*
uses org.slf4j.*
/**
* The central location for Ronin utility methods. Controllers and templates should generally access the
* methods and properties they inherit from {@link ronin.IRoninUtils} instead of using the methods and
* properties here.
*/
class Ronin {
// One static field to rule the all...
static var _CONFIG : IRoninConfig as Config
// And one thread local to bind them
static var _CURRENT_REQUEST = new ThreadLocal<RoninRequest>();
// That's inconstructable
private construct() {}
internal static function init(servlet : RoninServlet, m : ApplicationMode, src : File) {
if(_CONFIG != null) {
throw "Cannot initialize a Ronin application multiple times!"
}
var cfg = TypeSystem.getByFullNameIfValid("config.RoninConfig")
var defaultWarning = false
if(cfg != null) {
var ctor = cfg.TypeInfo.getConstructor({ronin.config.ApplicationMode, ronin.RoninServlet})
if(ctor == null) {
throw "config.RoninConfig must have a constructor with the same signature as ronin.config.RoninConfig"
}
_CONFIG = ctor.Constructor.newInstance({m, servlet}) as IRoninConfig
} else {
_CONFIG = new DefaultRoninConfig(m, servlet)
defaultWarning = true
}
var roninLogger = TypeSystem.getByFullNameIfValid("ronin.RoninLoggerFactory")
if(roninLogger != null) {
roninLogger.TypeInfo.getMethod("init", {ronin.config.LogLevel}).CallHandler.handleCall(null, {LogLevel})
}
if(defaultWarning) {
log("No configuration was found at config.RoninConfig, using the default configuration...", :level=WARN)
}
Quartz.maybeStart()
ReloadManager.setSourceRoot(src)
}
internal static property set CurrentRequest(req : RoninRequest) {
_CURRENT_REQUEST.set(req)
}
//============================================
// Public API
//============================================
/**
* The trace handler for the current request.
*/
static property get CurrentTrace() : Trace {
return CurrentRequest?.Trace
}
/**
* Ronin's representation of the current request.
*/
static property get CurrentRequest() : RoninRequest {
return _CURRENT_REQUEST.get()
}
/**
* The mode in which this application is running.
*/
static property get Mode() : ApplicationMode {
return _CONFIG?.Mode ?: TESTING
}
/**
* The log level at and above which log messages should be displayed.
*/
static property get LogLevel() : LogLevel {
return _CONFIG?.LogLevel ?: DEBUG
}
/**
* Whether or not to display detailed trace information on each request.
*/
static property get TraceEnabled() : boolean {
return _CONFIG != null ? _CONFIG.TraceEnabled : true
}
/**
* The default controller method to call when no method name is present in the request URL.
*/
static property get DefaultAction() : String {
return _CONFIG?.DefaultAction
}
/**
* The default controller to call when no controller name is present in the request URL.
*/
static property get DefaultController() : Type {
return _CONFIG?.DefaultController
}
/**
* The servlet responsible for handling Ronin requests.
*/
static property get RoninServlet() : RoninServlet {
return _CONFIG?.RoninServlet
}
/**
* The handler for request processing errors.
*/
static property get ErrorHandler() : IErrorHandler {
return _CONFIG?.ErrorHandler
}
/**
* The custom handler for logging messages.
*/
static property get LogHandler() : ILogHandler {
return _CONFIG?.LogHandler
}
/**
* Logs a message using the configured log handler.
* @param msg The text of the message to log, or a block which returns said text.
* @param level (Optional) The level at which to log the message.
* @param component (Optional) The logical component from whence the message originated.
* @param exception (Optional) An exception to associate with the message.
*/
static function log(msg : Object, level : LogLevel = null, component : String = null, exception : java.lang.Throwable = null) {
if(level == null) {
level = INFO
}
if(LogLevel <= level) {
var msgStr : String
if(msg typeis block():String) {
msgStr = (msg as block():String)()
} else {
msgStr = msg as String
}
if(_CONFIG?.LogHandler != null) {
_CONFIG.LogHandler.log(msgStr, level, component, exception)
} else {
switch(level) {
case TRACE:
LoggerFactory.getLogger(component?:Logger.ROOT_LOGGER_NAME).trace(msgStr, exception)
break
case DEBUG:
LoggerFactory.getLogger(component?:Logger.ROOT_LOGGER_NAME).debug(msgStr, exception)
break
case INFO:
LoggerFactory.getLogger(component?:Logger.ROOT_LOGGER_NAME).info(msgStr, exception)
break
case WARN:
LoggerFactory.getLogger(component?:Logger.ROOT_LOGGER_NAME).warn(msgStr, exception)
break
case ERROR:
case FATAL:
LoggerFactory.getLogger(component?:Logger.ROOT_LOGGER_NAME).error(msgStr, exception)
break
}
}
}
}
/**
* The caches known to Ronin.
*/
static enum CacheStore {
REQUEST,
SESSION,
APPLICATION
}
/**
* Retrieves a value from a cache, or computes and stores it if it is not in the cache.
* @param value A block which will compute the desired value.
* @param name (Optional) A unique identifier for the value. Default is null, which means one will be
* generated from the type of the value.
* @param store (Optional) The cache store used to retrieve or store the value. Default is the request cache.
* @return The retrieved or computed value.
*/
static function cache<T>(value : block():T, name : String = null, store : CacheStore = null) : T {
if(store == null or store == REQUEST) {
return _CONFIG.RequestCache.getValue(value, name)
} else if (store == SESSION) {
return _CONFIG.SessionCache.getValue(value, name)
} else if (store == APPLICATION) {
return _CONFIG.ApplicationCache.getValue(value, name)
} else {
throw "Don't know about CacheStore ${store}"
}
}
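// Illustrative sketch, not part of the original class: a hypothetical helper
// showing one way cache() might be called; the key and the block body are
// made up for illustration.
static function cacheUsageExample(key : String) : String {
var expensiveLookup = \ -> "value for ${key}"
return cache(expensiveLookup, :name = "example-${key}", :store = CacheStore.SESSION)
}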
/**
* Invalidates a cached value in a cache.
* @param name The unique identifier for the value.
* @param store The cache store in which to invalidate the value.
*/
static function invalidate<T>(name : String, store : CacheStore) {
if(store == null or store == REQUEST) {
_CONFIG.RequestCache.invalidate(name)
} else if (store == SESSION) {
_CONFIG.SessionCache.invalidate(name)
} else if (store == APPLICATION) {
_CONFIG.ApplicationCache.invalidate(name)
} else {
throw "Don't know about CacheStore ${store}"
}
}
/**
* Detects changes made to resources in the Ronin application and
* reloads them. This function should only be called when Ronin is
* in development mode.
*/
static function loadChanges() {
ReloadManager.detectAndReloadChangedResources()
}
}


@@ -0,0 +1,18 @@
apply plugin: GreetingPlugin
greeting.message = 'Hi from Gradle'
class GreetingPlugin implements Plugin<Project> {
void apply(Project project) {
// Add the 'greeting' extension object
project.extensions.create("greeting", GreetingPluginExtension)
// Add a task that uses the configuration
project.task('hello') << {
println project.greeting.message
}
}
}
class GreetingPluginExtension {
def String message = 'Hello from GreetingPlugin'
}


@@ -0,0 +1,20 @@
apply plugin: GreetingPlugin
greeting {
message = 'Hi'
greeter = 'Gradle'
}
class GreetingPlugin implements Plugin<Project> {
void apply(Project project) {
project.extensions.create("greeting", GreetingPluginExtension)
project.task('hello') << {
println "${project.greeting.message} from ${project.greeting.greeter}"
}
}
}
class GreetingPluginExtension {
String message
String greeter
}


@@ -0,0 +1,78 @@
/*
License
Copyright [2013] [Farruco Sanjurjo Arcay]
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
var TagsTotalPerMonth;
TagsTotalPerMonth = (function(){
function TagsTotalPerMonth(){};
TagsTotalPerMonth.getDatasource = function (category, months, values){
return new CategoryMonthlyExpenseBarChartDataSource(category, months, values);
};
TagsTotalPerMonth.getType = function (){ return Charts.ChartType.COLUMN};
return TagsTotalPerMonth;
})();
var TagsTotalPerMonthWithMean;
TagsTotalPerMonthWithMean = (function(){
function TagsTotalPerMonthWithMean(){};
TagsTotalPerMonthWithMean.getDatasource = function (category, months, values){
return new CategoryMonthlyWithMeanExpenseDataSource(category, months, values);
};
TagsTotalPerMonthWithMean.getType = function (){ return Charts.ChartType.LINE};
return TagsTotalPerMonthWithMean;
})();
var TagsAccumulatedPerMonth;
TagsAccumulatedPerMonth = (function(){
function TagsAccumulatedPerMonth(){};
TagsAccumulatedPerMonth.getDatasource = function (category, months, values){
return new CategoryMonthlyAccumulated(category, months, values);
};
TagsAccumulatedPerMonth.getType = function (){ return Charts.ChartType.AREA};
return TagsAccumulatedPerMonth;
})();
var MonthTotalsPerTags;
MonthTotalsPerTags = (function(){
function MonthTotalsPerTags(){};
MonthTotalsPerTags.getDatasource = function (month, tags, values){
return new CategoryExpenseDataSource(tags, month, values);
};
MonthTotalsPerTags.getType = function (){ return Charts.ChartType.PIE; };
return MonthTotalsPerTags;
})();
var SavingsFlowChartComposer = (function(){
function SavingsFlowChartComposer(){};
SavingsFlowChartComposer.getDatasource = function(months, values){
return new SavingsFlowDataSource(months, values);
};
SavingsFlowChartComposer.getType = function(){ return Charts.ChartType.COLUMN; };
return SavingsFlowChartComposer;
})();

150
samples/JavaScript/itau.gs Normal file

@@ -0,0 +1,150 @@
/*
The MIT License (MIT)
Copyright (c) 2014 Thiago Brandão Damasceno
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
// based on http://ctrlq.org/code/19053-send-to-google-drive
function sendToGoogleDrive() {
var gmailLabels = 'inbox';
var driveFolder = 'Itaú Notifications';
var spreadsheetName = 'itau';
var archiveLabel = 'itau.processed';
var itauNotificationEmail = 'comunicacaodigital@itau-unibanco.com.br';
var filter = "from: " +
itauNotificationEmail +
" -label:" +
archiveLabel +
" label:" +
gmailLabels;
// Create label for 'itau.processed' if it doesn't exist
var moveToLabel = GmailApp.getUserLabelByName(archiveLabel);
if (!moveToLabel) {
moveToLabel = GmailApp.createLabel(archiveLabel);
}
// Create folder 'Itaú Notifications' if it doesn't exist
var folders = DriveApp.getFoldersByName(driveFolder);
var folder;
if (folders.hasNext()) {
folder = folders.next();
} else {
folder = DriveApp.createFolder(driveFolder);
}
// Create spreadsheet file 'itau' if it doesn't exist
var files = folder.getFilesByName(spreadsheetName);
// File is in DriveApp
// Doc is in SpreadsheetApp
// They are not interchangeable
var file, doc;
// Confusing :\
// As per: https://code.google.com/p/google-apps-script-issues/issues/detail?id=3578
if (files.hasNext()){
file = files.next();
doc = SpreadsheetApp.openById(file.getId());
} else {
doc = SpreadsheetApp.create(spreadsheetName);
file = DriveApp.getFileById(doc.getId());
folder.addFile(file);
DriveApp.removeFile(file);
}
var sheet = doc.getSheets()[0];
// Append header if first line
if(sheet.getLastRow() == 0){
sheet.appendRow(['Conta', 'Operação', 'Valor', 'Data', 'Hora', 'Email ID']);
}
var message, messages, account, operation, value, date, hour, emailID, plainBody;
var accountRegex = /Conta: (XXX[0-9\-]+)/;
var operationRegex = /Tipo de operação: ([A-Z]+)/;
var paymentRegex = /Pagamento de ([0-9A-Za-z\-]+)\ ?([0-9]+)?/;
var valueRegex = /Valor: R\$ ([0-9\,\.]+)/;
var dateRegex = /Data: ([0-9\/]+)/;
var hourRegex = /Hora: ([0-9\:]+)/;
var emailIDRegex = /E-mail nº ([0-9]+)/;
var threads = GmailApp.search(filter, 0, 100);
for (var x = 0; x < threads.length; x++) {
messages = threads[x].getMessages();
for (var i = 0; i < messages.length; i++) {
// reset the extracted fields for this message
account = operation = value = date = hour = emailID = '';
message = messages[i];
plainBody = message.getPlainBody();
if(accountRegex.test(plainBody)) {
account = RegExp.$1;
}
if(operationRegex.test(plainBody)) {
operation = RegExp.$1;
}
if(valueRegex.test(plainBody)) {
value = RegExp.$1;
}
if(dateRegex.test(plainBody)) {
date = RegExp.$1;
}
if(hourRegex.test(plainBody)) {
hour = RegExp.$1;
}
if(emailIDRegex.test(plainBody)){
emailID = RegExp.$1;
}
if(paymentRegex.test(plainBody)){
operation = RegExp.$1;
if(RegExp.$2){
operation += ' ' + RegExp.$2
}
date = hour = ' - ';
}
if(account && operation && value && date && hour){
sheet.appendRow([account, operation, value, date, hour, emailID]);
}
// Logger.log(account);
// Logger.log(operation);
// Logger.log(value);
// Logger.log(date);
// Logger.log(hour);
}
threads[x].addLabel(moveToLabel);
}
}

View File

@@ -0,0 +1,93 @@
(function(root, factory) {
if (typeof define === 'function' && define.amd) {
define(['lodash'], factory);
} else if (typeof exports !== 'undefined') {
module.exports = factory(require('lodash'));
} else {
root.Namespace = factory(root._);
}
})(this, function(_) {
'use strict';
/**
* @module namespace
* @class namespace
*/
function Namespace() {}
/**
* Regex for splitting keypaths into arrays.
*
* @private
* @const
* @type {RegExp}
*/
var KEYPATH_SPLITTER = /\./g;
/**
* An internal cache to avoid calculating a keypath more than once.
*
* @private
* @type {Object}
*/
var _keypaths = {};
_.extend(Namespace.prototype, {
/**
* Adds a definition to the namespace object.
*
* @public
* @instance
* @method add
* @param {String} keypath - The keypath for the definition to be added at.
* @param {Function|Object} definition - The definition to be added.
* @return {Function|Object} - The definition.
*/
add: function(keypath, definition) {
return this._walk(keypath, function(memo, name, index, keypath) {
if (index + 1 === keypath.length) {
memo[name] = _.extend(definition, memo[name]);
}
return memo[name] || (memo[name] = {});
});
},
/**
* Retrieves a definition from the namespace safely.
*
* @public
* @instance
* @method get
* @param {String} keypath - The keypath to lookup a definition for.
* @returns {Function|Object|undefined} - The definition if it exists, otherwise `undefined`.
*/
get: function(keypath) {
return this._walk(keypath);
},
/**
* An internal function for walking a keypath.
*
* @private
* @instance
* @method _walk
* @param {String} keypath - The keypath to walk through.
* @param {Function} [callback] - An optional callback to be called at each item in the path.
* @returns {function|Object|undefined} - The reduced keypath.
*/
_walk: function(keypath, callback) {
return _.reduce(
_keypaths[keypath] || (_keypaths[keypath] = keypath.split(KEYPATH_SPLITTER)),
callback || function(memo, name) {
return memo && memo[name];
},
this
);
}
});
return Namespace;
});
//# sourceMappingURL=namespace.js.map
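// Illustrative usage sketch (not part of the original sample); the keypath API above
// could be exercised roughly like this:
//   var ns = new Namespace();
//   ns.add('app.models.User', { role: 'admin' });
//   ns.get('app.models.User');   // => { role: 'admin' }
//   ns.get('app.missing.Path');  // => undefined (safe lookup, no TypeError)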

View File

@@ -0,0 +1,38 @@
package
{
import loom.Application;
import loom2d.display.StageScaleMode;
import loom2d.ui.SimpleLabel;
/**
The HelloWorld app renders a label with its name on it,
and traces 'hello' to the log.
*/
public class HelloWorld extends Application
{
override public function run():void
{
stage.scaleMode = StageScaleMode.LETTERBOX;
centeredMessage(simpleLabel, this.getFullTypeName());
trace("hello");
}
// a convenience getter that generates a label and adds it to the stage
private function get simpleLabel():SimpleLabel
{
return stage.addChild(new SimpleLabel("assets/Curse-hd.fnt")) as SimpleLabel;
}
// a utility to set the label's text and then center it on the stage
private function centeredMessage(label:SimpleLabel, msg:String):void
{
label.text = msg;
label.center();
label.x = stage.stageWidth / 2;
label.y = (stage.stageHeight / 2) - (label.height / 2);
}
}
}

View File

@@ -0,0 +1,137 @@
package
{
import loom.Application;
public interface I {}
public class C {}
public class B extends C implements I {}
final public class A extends B {}
delegate ToCompute(s:String, o:Object):Number;
public enum Enumeration
{
foo,
baz,
cat,
}
struct P {
public var x:Number = 0;
public var y:Number = 0;
public static operator function =(a:P, b:P):P
{
a.x = b.x;
a.y = b.y;
return a;
}
}
// single-line comment
/*
Multi-line comment
*/
/**
Doc comment
*/
public class SyntaxExercise extends Application
{
static public var classVar:String = 'class variable';
public const CONST:String = 'constant';
private var _a:A = new A();
public var _d:ToCompute;
override public function run():void
{
trace("hello");
}
private function get a():A { return _a; }
private function set a(value:A):void { _a = value; }
private function variousTypes(defaultValue:String = ''):void
{
var nil:Object = null;
var b1:Boolean = true;
var b2:Boolean = false;
var n1:Number = 0.123;
var n2:Number = 12345;
var n3:Number = 0xfed;
var s1:String = 'single-quotes with "quotes" inside';
var s2:String = "double-quotes with 'quotes' inside";
var f1:Function = function (life:String, universe:Object, ...everything):Number { return 42; };
var v1:Vector.<Number> = [1, 2];
var d1:Dictionary.<String, Number> = { 'three': 3, 'four': 4 };
_d += f1;
_d -= f1;
}
private function variousOps():void
{
var a = ((100 + 200 - 0) / 300) % 2;
var b = 100 * 30;
var d = true && (b > 301);
var e = 0x10 | 0x01;
b++; b--;
a += 300; a -= 5; a *= 4; a /= 2; a %= 7;
var castable1:Boolean = (a is B);
var castable2:Boolean = (a as B) != null;
var cast:String = B(a).toString();
var instanced:Boolean = (_a instanceof A);
}
private function variousFlow():void
{
var n:Number = Math.random();
if (n > 0.6)
trace('top 40!');
else if(n > 0.3)
trace('mid 30!');
else
trace('bottom 30');
var flip:String = (Math.random() > 0.5) ? 'heads' : 'tails';
for (var i = 0; i < 100; i++)
trace(i);
var v:Vector.<String> = ['a', 'b', 'c'];
for each (var s:String in v)
trace(s);
var d:Dictionary.<String, Number> = { 'one': 1 };
for (var key1:String in d)
trace(key1);
for (var key2:Number in v)
trace(key2);
while (i > 0)
{
i--;
if (i == 13) continue;
trace(i);
}
do
{
i++;
}
while (i < 10);
switch (Math.floor(Math.random() * 3) + 1)
{
case 1 : trace('rock'); break;
case 2 : trace('paper'); break;
default: trace('scissors'); break;
}
}
}
}

52
samples/Oz/example.oz Normal file
View File

@@ -0,0 +1,52 @@
% You can get a lot of information about Oz by following these links:
% - http://mozart.github.io/
% - http://en.wikipedia.org/wiki/Oz_(programming_language)
% There is also a well-known book that uses Oz for pedagogical reasons:
% - http://mitpress.mit.edu/books/concepts-techniques-and-models-computer-programming
% And there are two courses on edX about 'Paradigms of Computer Programming' that also use Oz for pedagogical reasons:
% - https://www.edx.org/node/2751#.VHijtfl5OSo
% - https://www.edx.org/node/4436#.VHijzfl5OSo
%
% Here is an example of some code written with Oz.
declare
% Computes the sum of the squares of the first N integers.
fun {Sum N}
local SumAux in
fun {SumAux N Acc}
if N==0 then Acc
else
{SumAux N-1 Acc+N*N}
end
end
{SumAux N 0}
end
end
% Returns true if N is prime and false otherwise
fun {Prime N}
local PrimeAcc in
fun {PrimeAcc N Acc}
if(N == 1) then false
elseif(Acc == 1) then true
else
if (N mod Acc) == 0 then false
else
{PrimeAcc N Acc-1}
end
end
end
{PrimeAcc N (N div 2)}
end
end
% Reverses a list using a cell and a for loop (instead of recursion)
fun {Reverse L}
local RevList in
RevList = {NewCell nil}
for E in L do
RevList := E|@RevList
end
@RevList
end
end
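% Illustrative usage (not part of the original sample; assumes the standard Browse tool):
%   {Browse {Sum 3}}            % shows 14, i.e. 1*1 + 2*2 + 3*3
%   {Browse {Prime 7}}          % shows true
%   {Browse {Reverse [1 2 3]}}  % shows [3 2 1]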

View File

@@ -0,0 +1,97 @@
use v6;
use Test;
=begin pod
Test handling of -I.
Multiple C<-I> switches are supposed to
prepend left-to-right:
-Ifoo -Ibar
should make C<@*INC> look like:
foo
bar
...
Duplication of directories on the command line is mirrored
in the C<@*INC> variable, so C<pugs -Ilib -Ilib> will have B<two>
entries C<lib/> in C<@*INC>.
=end pod
# L<S19/Reference/"Prepend directories to">
my $fragment = '-e "@*INC.perl.say"';
my @tests = (
'foo',
'foo$bar',
'foo bar$baz',
'foo$foo',
);
plan @tests*2;
diag "Running under $*OS";
my ($pugs,$redir) = ($*EXECUTABLE_NAME, ">");
if $*OS eq any <MSWin32 mingw msys cygwin> {
$pugs = 'pugs.exe';
$redir = '>';
};
sub nonce () { return (".{$*PID}." ~ (1..1000).pick) }
sub run_pugs ($c) {
my $tempfile = "temp-ex-output" ~ nonce;
my $command = "$pugs $c $redir $tempfile";
diag $command;
run $command;
my $res = slurp $tempfile;
unlink $tempfile;
return $res;
}
for @tests -> $t {
my @dirs = split('$',$t);
my $command;
# This should be smarter about quoting
# (currently, this should work for WinNT and Unix shells)
$command = join " ", map { qq["-I$_"] }, @dirs;
my $got = run_pugs( $command ~ " $fragment" );
$got .= chomp;
if (substr($got,0,1) ~~ "[") {
# Convert from arrayref to array
$got = substr($got, 1, -1);
};
my @got = EVAL $got;
@got = @got[ 0..@dirs-1 ];
my @expected = @dirs;
is @got, @expected, "'" ~ @dirs ~ "' works";
$command = join " ", map { qq[-I "$_"] }, @dirs;
$got = run_pugs( $command ~ " $fragment" );
$got .= chomp;
if (substr($got,0,1) ~~ "[") {
# Convert from arrayref to array
$got = substr($got, 1, -1);
};
@got = EVAL $got;
@got = @got[ 0..@dirs-1 ];
@expected = @dirs;
is @got, @expected, "'" ~ @dirs ~ "' works (with a space delimiting -I)";
}
# vim: ft=perl6

223
samples/Perl6/01-parse.t Normal file
View File

@@ -0,0 +1,223 @@
use v6;
BEGIN { @*INC.push('lib') };
use JSON::Tiny::Grammar;
use Test;
my @t =
'{}',
'{ }',
' { } ',
'{ "a" : "b" }',
'{ "a" : null }',
'{ "a" : true }',
'{ "a" : false }',
'{ "a" : { } }',
'[]',
'[ ]',
' [ ] ',
# stolen from JSON::XS, 18_json_checker.t, and adapted a bit
Q<<[
"JSON Test Pattern pass1",
{"object with 1 member":["array with 1 element"]},
{},
[]
]>>,
Q<<[1]>>,
Q<<[true]>>,
Q<<[-42]>>,
Q<<[-42,true,false,null]>>,
Q<<{ "integer": 1234567890 }>>,
Q<<{ "real": -9876.543210 }>>,
Q<<{ "e": 0.123456789e-12 }>>,
Q<<{ "E": 1.234567890E+34 }>>,
Q<<{ "": 23456789012E66 }>>,
Q<<{ "zero": 0 }>>,
Q<<{ "one": 1 }>>,
Q<<{ "space": " " }>>,
Q<<{ "quote": "\""}>>,
Q<<{ "backslash": "\\"}>>,
Q<<{ "controls": "\b\f\n\r\t"}>>,
Q<<{ "slash": "/ & \/"}>>,
Q<<{ "alpha": "abcdefghijklmnopqrstuvwyz"}>>,
Q<<{ "ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ"}>>,
Q<<{ "digit": "0123456789"}>>,
Q<<{ "0123456789": "digit"}>>,
Q<<{"special": "`1~!@#$%^&*()_+-={':[,]}|;.</>?"}>>,
Q<<{"hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A"}>>,
Q<<{"true": true}>>,
Q<<{"false": false}>>,
Q<<{"null": null}>>,
Q<<{"array":[ ]}>>,
Q<<{"object":{ }}>>,
Q<<{"address": "50 St. James Street"}>>,
Q<<{"url": "http://www.JSON.org/"}>>,
Q<<{"comment": "// /* <!-- --"}>>,
Q<<{"# -- --> */": " "}>>,
Q<<{ " s p a c e d " :[1,2 , 3
,
4 , 5 , 6 ,7 ],"compact":[1,2,3,4,5,6,7]}>>,
Q<<{"jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}"}>>,
Q<<{"quotes": "&#34; \u0022 %22 0x22 034 &#x22;"}>>,
Q<<{ "\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?"
: "A key can be any string"
}>>,
Q<<[ 0.5 ,98.6
,
99.44
,
1066,
1e1,
0.1e1
]>>,
Q<<[1e-1]>>,
Q<<[1e00,2e+00,2e-00,"rosebud"]>>,
Q<<[[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]]>>,
Q<<{
"JSON Test Pattern pass3": {
"The outermost value": "must be an object or array.",
"In this test": "It is an object."
}
}
>>,
# from http://www.json.org/example.html
Q<<{
"glossary": {
"title": "example glossary",
"GlossDiv": {
"title": "S",
"GlossList": {
"GlossEntry": {
"ID": "SGML",
"SortAs": "SGML",
"GlossTerm": "Standard Generalized Markup Language",
"Acronym": "SGML",
"Abbrev": "ISO 8879:1986",
"GlossDef": {
"para": "A meta-markup language, used to create markup languages such as DocBook.",
"GlossSeeAlso": ["GML", "XML"]
},
"GlossSee": "markup"
}
}
}
}
}
>>,
Q<<{"menu": {
"id": "file",
"value": "File",
"popup": {
"menuitem": [
{"value": "New", "onclick": "CreateNewDoc()"},
{"value": "Open", "onclick": "OpenDoc()"},
{"value": "Close", "onclick": "CloseDoc()"}
]
}
}}>>,
Q<<{"widget": {
"debug": "on",
"window": {
"title": "Sample Konfabulator Widget",
"name": "main_window",
"width": 500,
"height": 500
},
"image": {
"src": "Images/Sun.png",
"name": "sun1",
"hOffset": 250,
"vOffset": 250,
"alignment": "center"
},
"text": {
"data": "Click Here",
"size": 36,
"style": "bold",
"name": "text1",
"hOffset": 250,
"vOffset": 100,
"alignment": "center",
"onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;"
}
}}>>,
;
my @n =
'{ ',
'{ 3 : 4 }',
'{ 3 : tru }', # not quite true
'{ "a : false }', # missing quote
# stolen from JSON::XS, 18_json_checker.t
Q<<"A JSON payload should be an object or array, not a string.">>,
Q<<{"Extra value after close": true} "misplaced quoted value">>,
Q<<{"Illegal expression": 1 + 2}>>,
Q<<{"Illegal invocation": alert()}>>,
Q<<{"Numbers cannot have leading zeroes": 013}>>,
Q<<{"Numbers cannot be hex": 0x14}>>,
Q<<["Illegal backslash escape: \x15"]>>,
Q<<[\naked]>>,
Q<<["Illegal backslash escape: \017"]>>,
# skipped: we don't implement no stinkin' artificial limits.
# Q<<[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]>>,
Q<<{"Missing colon" null}>>,
Q<<["Unclosed array">>,
Q<<{"Double colon":: null}>>,
Q<<{"Comma instead of colon", null}>>,
Q<<["Colon instead of comma": false]>>,
Q<<["Bad value", truth]>>,
Q<<['single quote']>>,
qq<["\ttab\tcharacter in string "]>,
Q<<["line
break"]>>,
Q<<["line\
break"]>>,
Q<<[0e]>>,
Q<<{unquoted_key: "keys must be quoted"}>>,
Q<<[0e+]>>,
Q<<[0e+-1]>>,
Q<<{"Comma instead if closing brace": true,>>,
Q<<["mismatch"}>>,
Q<<["extra comma",]>>,
Q<<["double extra comma",,]>>,
Q<<[ , "<-- missing value"]>>,
Q<<["Comma after the close"],>>,
Q<<["Extra close"]]>>,
Q<<{"Extra comma": true,}>>,
;
plan (+@t) + (+@n);
my $i = 0;
for @t -> $t {
my $desc = $t;
if $desc ~~ m/\n/ {
$desc .= subst(/\n.*$/, "\\n...[$i]");
}
my $parsed = 0;
try {
JSON::Tiny::Grammar.parse($t)
and $parsed = 1;
}
ok $parsed, "JSON string «$desc» parsed";
$i++;
}
for @n -> $t {
my $desc = $t;
if $desc ~~ m/\n/ {
$desc .= subst(/\n.*$/, "\\n...[$i]");
}
my $parsed = 0;
try { JSON::Tiny::Grammar.parse($t) and $parsed = 1 };
nok $parsed, "NOT parsed «$desc»";
$i++;
}
# vim: ft=perl6

9
samples/Perl6/A.pm Normal file
View File

@@ -0,0 +1,9 @@
# used in t/spec/S11-modules/nested.t
BEGIN { @*INC.push('t/spec/packages') };
module A::A {
use A::B;
}
# vim: ft=perl6

148
samples/Perl6/ANSIColor.pm Normal file
View File

@@ -0,0 +1,148 @@
use v6;
module Term::ANSIColor;
# these will be macros one day, yet macros can't be exported so far
sub RESET is export { "\e[0m" }
sub BOLD is export { "\e[1m" }
sub UNDERLINE is export { "\e[4m" }
sub INVERSE is export { "\e[7m" }
sub BOLD_OFF is export { "\e[22m" }
sub UNDERLINE_OFF is export { "\e[24m" }
sub INVERSE_OFF is export { "\e[27m" }
my %attrs =
reset => "0",
bold => "1",
underline => "4",
inverse => "7",
black => "30",
red => "31",
green => "32",
yellow => "33",
blue => "34",
magenta => "35",
cyan => "36",
white => "37",
default => "39",
on_black => "40",
on_red => "41",
on_green => "42",
on_yellow => "43",
on_blue => "44",
on_magenta => "45",
on_cyan => "46",
on_white => "47",
on_default => "49";
sub color (Str $what) is export {
my @res;
my @a = $what.split(' ');
for @a -> $attr {
if %attrs.exists($attr) {
@res.push: %attrs{$attr}
} else {
die("Invalid attribute name '$attr'")
}
}
return "\e[" ~ @res.join(';') ~ "m";
}
sub colored (Str $what, Str $how) is export {
color($how) ~ $what ~ color('reset');
}
sub colorvalid (*@a) is export {
for @a -> $el {
return False unless %attrs.exists($el)
}
return True;
}
sub colorstrip (*@a) is export {
my @res;
for @a -> $str {
@res.push: $str.subst(/\e\[ <[0..9;]>+ m/, '', :g);
}
return @res.join;
}
sub uncolor (Str $what) is export {
my @res;
my @list = $what.comb(/\d+/);
for @list -> $elem {
if %attrs.reverse.exists($elem) {
@res.push: %attrs.reverse{$elem}
} else {
die("Bad escape sequence: {'\e[' ~ $elem ~ 'm'}")
}
}
return @res.join(' ');
}
=begin pod
=head1 NAME
Term::ANSIColor - Color screen output using ANSI escape sequences
=head1 SYNOPSIS
use Term::ANSIColor;
say color('bold'), "this is in bold", color('reset');
say colored('what lovely colours!', 'underline red on_green');
say BOLD, 'good to be fat!', BOLD_OFF;
say 'ok' if colorvalid('magenta', 'on_black', 'inverse');
say '\e[36m is ', uncolor('\e[36m');
say colorstrip("\e[1mThis is bold\e[0m");
=head1 DESCRIPTION
Term::ANSIColor provides an interface for using colored output
in terminals. The following functions are available:
=head2 C<color()>
Given a string with color names, the output produced by C<color()>
sets the terminal output so the text printed after it will be colored
as specified. The following color names are recognised:
reset bold underline inverse black red green yellow blue
magenta cyan white default on_black on_red on_green on_yellow
on_blue on_magenta on_cyan on_white on_default
The on_* family of colors corresponds to the background colors.
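For example (an illustrative addition, not part of the original module
documentation), C<color('bold red on_white')> returns the escape
sequence C<\e[1;31;47m>.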
=head2 C<colored()>
C<colored()> is similar to C<color()>. It takes two Str arguments:
the first is the string to be colored, and the second is the color
specification to apply. The C<reset> sequence is automagically placed
after the string.
=head2 C<colorvalid()>
C<colorvalid()> gets an array of color specifications (like those
passed to C<color()>) and returns true if all of them are valid,
false otherwise.
=head2 C<colorstrip()>
C<colorstrip>, given a string, removes all the escape sequences
in it, leaving the plain text without effects.
=head2 C<uncolor()>
Given escape sequences, C<uncolor()> returns a string with readable
color names. E.g. passing "\e[36;44m" will result in "cyan on_blue".
=head1 Constants
C<Term::ANSIColor> provides constants which are just strings of
appropriate escape sequences. The following constants are available:
RESET BOLD UNDERLINE INVERSE BOLD_OFF UNDERLINE_OFF INVERSE_OFF
=end pod
# vim: ft=perl6

102
samples/Perl6/Bailador.pm Normal file
View File

@@ -0,0 +1,102 @@
use Bailador::App;
use Bailador::Request;
use Bailador::Response;
use Bailador::Context;
use HTTP::Easy::PSGI;
module Bailador;
my $app = Bailador::App.current;
our sub import {
my $file = callframe(1).file;
my $slash = $file.rindex('/');
if $slash {
$app.location = $file.substr(0, $file.rindex('/'));
} else {
$app.location = '.';
}
}
sub route_to_regex($route) {
$route.split('/').map({
my $r = $_;
if $_.substr(0, 1) eq ':' {
$r = q{(<-[\/\.]>+)};
}
$r
}).join("'/'");
}
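# Illustrative example (not part of the original sample):
#   route_to_regex('/hello/:name') returns the string q{'/'hello'/'(<-[\/\.]>+)},
#   which parse_route below wraps in "/ ^ ... $ /" and evaluates into a Regex, so
#   each ':param' segment becomes a capture group matching anything except '/' and '.'.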
multi parse_route(Str $route) {
my $r = route_to_regex($route);
return "/ ^ $r \$ /".eval;
}
multi parse_route($route) {
# do nothing
$route
}
sub get(Pair $x) is export {
my $p = parse_route($x.key) => $x.value;
$app.add_route: 'GET', $p;
return $x;
}
sub post(Pair $x) is export {
my $p = parse_route($x.key) => $x.value;
$app.add_route: 'POST', $p;
return $x;
}
sub request is export { $app.context.request }
sub content_type(Str $type) is export {
$app.response.headers<Content-Type> = $type;
}
sub header(Str $name, Cool $value) is export {
$app.response.headers{$name} = ~$value;
}
sub status(Int $code) is export {
$app.response.code = $code;
}
sub template(Str $tmpl, *@params) is export {
$app.template($tmpl, @params);
}
our sub dispatch_request(Bailador::Request $r) {
return dispatch($r.env);
}
sub dispatch($env) {
$app.context.env = $env;
my ($r, $match) = $app.find_route($env);
if $r {
status 200;
if $match {
$app.response.content = $r.value.(|$match.list);
} else {
$app.response.content = $r.value.();
}
}
return $app.response;
}
sub dispatch-psgi($env) {
return dispatch($env).psgi;
}
sub baile is export {
given HTTP::Easy::PSGI.new(port => 3000) {
.app(&dispatch-psgi);
say "Entering the development dance floor: http://0.0.0.0:3000";
.run;
}
}

View File

@@ -0,0 +1,7 @@
module ContainsUnicode {
sub uc-and-join(*@things, :$separator = ', ') is export {
@things».uc.join($separator)
}
}
# vim: ft=perl6

1431
samples/Perl6/Exception.pm Normal file

File diff suppressed because it is too large Load Diff

146
samples/Perl6/Model.pm Normal file
View File

@@ -0,0 +1,146 @@
use v6;
class Math::Model;
use Math::RungeKutta;
# TODO: only load when needed
use SVG;
use SVG::Plot;
has %.derivatives;
has %.variables;
has %.initials;
has @.captures is rw;
has %!inv = %!derivatives.invert;
# in Math::Model all variables are accessible by name
# in contrast Math::RungeKutta uses vectors, so we need
# to define an (arbitrary) ordering
# @!deriv-names holds the names of the derivatives in a fixed
# order, so @!deriv-names[$number] turns the number into a name
# %!deriv-keying{$name} translates a name into the corresponding index
has @!deriv-names = %!inv.keys;
has %!deriv-keying = @!deriv-names Z=> 0..Inf;
# snapshot of all variables in the current model
has %!current-values;
has %.results;
has @.time;
has $.numeric-error is rw = 0.0001;
my sub param-names(&c) {
&c.signature.params».name».substr(1).grep({ $_ ne '_'});
}
method !params-for(&c) {
param-names(&c).map( {; $_ => %!current-values{$_} } ).hash;
}
method topo-sort(*@vars) {
my %seen;
my @order;
sub topo(*@a) {
for @a {
next if %!inv.exists($_) || %seen{$_} || $_ eq 'time';
die "Undeclared variable '$_' used in model"
unless %.variables.exists($_);
topo(param-names(%.variables{$_}));
@order.push: $_;
%seen{$_}++;
}
}
topo(@vars);
# say @order.perl;
@order;
}
method integrate(:$from = 0, :$to = 10, :$min-resolution = ($to - $from) / 20, :$verbose) {
for %.derivatives -> $d {
die "There must be a variable defined for each derivative, missing for '$d.key()'"
unless %.variables.exists($d.key) || %!inv.exists($d.key);
die "There must be an initial value defined for each derivative target, missing for '$d.value()'"
unless %.initials.exists($d.value);
}
%!current-values = %.initials;
%!current-values<time> = $from;
my @vars-topo = self.topo-sort(%.variables.keys);
sub update-current-values($time, @values) {
%!current-values<time> = $time;
%!current-values{@!deriv-names} = @values;
for @vars-topo {
my $c = %.variables{$_};
%!current-values{$_} = $c.(|self!params-for($c));
}
}
my @initial = %.initials{@!deriv-names};
sub derivatives($time, @values) {
update-current-values($time, @values);
my @r;
for %!inv{@!deriv-names} {
my $v = %.variables{$_};
@r.push: $v.defined
?? $v(|self!params-for($v))
!! %!current-values{$_};
}
@r;
}
@!time = ();
for @.captures {
%!results{$_} = [];
}
sub record($time, @values) {
update-current-values($time, @values);
@!time.push: $time;
say $time if $verbose;
for @.captures {
%!results{$_}.push: %!current-values{$_};
}
}
record($from, %.initials{@!deriv-names});
adaptive-rk-integrate(
:$from,
:$to,
:@initial,
:derivative(&derivatives),
:max-stepsize($min-resolution),
:do(&record),
:epsilon($.numeric-error),
);
%!results;
}
method render-svg(
$filename,
:$x-axis = 'time',
:$width = 800,
:$height = 600,
:$title = 'Model output') {
my $f = open $filename, :w
or die "Can't open file '$filename' for writing: $!";
my @values = map { %!results{$_} }, @.captures.grep({ $_ ne $x-axis});
my @x = $x-axis eq 'time' ?? @!time !! %!results{$x-axis}.flat;
my $svg = SVG::Plot.new(
:$width,
:$height,
:@x,
:@values,
:$title,
).plot(:xy-lines);
$f.say(SVG.serialize($svg));
$f.close;
say "Wrote ouput to '$filename'";
}
# vim: ft=perl6

317
samples/Perl6/Simple.pm Normal file
View File

@@ -0,0 +1,317 @@
# ----------------------
# LWP::Simple for Perl 6
# ----------------------
use v6;
use MIME::Base64;
use URI;
class LWP::Simple:auth<cosimo>:ver<0.085>;
our $VERSION = '0.085';
enum RequestType <GET POST>;
has Str $.default_encoding = 'utf-8';
our $.class_default_encoding = 'utf-8';
# these were intended to be constants but that hit a pre-compilation issue
my Buf $crlf = Buf.new(13, 10);
my Buf $http_header_end_marker = Buf.new(13, 10, 13, 10);
my Int constant $default_stream_read_len = 2 * 1024;
method base64encode ($user, $pass) {
my MIME::Base64 $mime .= new();
my $encoded = $mime.encode_base64($user ~ ':' ~ $pass);
return $encoded;
}
method get (Str $url) {
self.request_shell(RequestType::GET, $url)
}
method post (Str $url, %headers = {}, Any $content?) {
self.request_shell(RequestType::POST, $url, %headers, $content)
}
method request_shell (RequestType $rt, Str $url, %headers = {}, Any $content?) {
return unless $url;
my ($scheme, $hostname, $port, $path, $auth) = self.parse_url($url);
%headers{'Connection'} = 'close';
%headers{'User-Agent'} //= "LWP::Simple/$VERSION Perl6/$*PERL<compiler><name>";
if $auth {
$hostname = $auth<host>;
my $user = $auth<user>;
my $pass = $auth<password>;
my $base64enc = self.base64encode($user, $pass);
%headers<Authorization> = "Basic $base64enc";
}
%headers<Host> = $hostname;
if ($rt ~~ RequestType::POST && $content.defined) {
# Attach Content-Length header
# as recommended in RFC2616 section 14.3.
# Note: Empty content is also a content,
# header value equals to zero is valid.
%headers{'Content-Length'} = $content.encode.bytes;
}
my ($status, $resp_headers, $resp_content) =
self.make_request($rt, $hostname, $port, $path, %headers, $content);
given $status {
when / 30 <[12]> / {
my %resp_headers = $resp_headers.hash;
my $new_url = %resp_headers<Location>;
if ! $new_url {
die "Redirect $status without a new URL?";
}
# Watch out for too many redirects.
# Need to find a way to store a class member
#if $redirects++ > 10 {
# say "Too many redirects!";
# return;
#}
return self.request_shell($rt, $new_url, %headers, $content);
}
when /200/ {
# should be fancier about charset decoding application - someday
if $resp_headers<Content-Type> &&
$resp_headers<Content-Type> ~~
/ $<media-type>=[<-[/;]>+]
[ <[/]> $<media-subtype>=[<-[;]>+] ]? / &&
( $<media-type> eq 'text' ||
( $<media-type> eq 'application' &&
$<media-subtype> ~~ /[ ecma | java ]script | json/
)
)
{
my $charset =
($resp_headers<Content-Type> ~~ /charset\=(<-[;]>*)/)[0];
$charset = $charset ?? $charset.Str !!
self ?? $.default_encoding !! $.class_default_encoding;
return $resp_content.decode($charset);
}
else {
return $resp_content;
}
}
# Response failed
default {
return;
}
}
}
method parse_chunks(Blob $b is rw, IO::Socket::INET $sock) {
my Int ($line_end_pos, $chunk_len, $chunk_start) = (0) xx 3;
my Blob $content = Blob.new();
# smallest valid chunked line is 0CRLFCRLF (ascii or other 8bit like EBCDIC)
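# Illustrative example (not part of the original sample): a chunked body such as
#   4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n
# decodes to "Wikipedia" -- each chunk is a hex length line, that many bytes of data,
# then CRLF, and a zero-length chunk marks the end of the body.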
while ($line_end_pos + 5 <= $b.bytes) {
while ( $line_end_pos +4 <= $b.bytes &&
$b.subbuf($line_end_pos, 2) ne $crlf
) {
$line_end_pos++
}
# say "got here x0x pos ", $line_end_pos, ' bytes ', $b.bytes, ' start ', $chunk_start, ' some data ', $b.subbuf($chunk_start, $line_end_pos +2 - $chunk_start).decode('ascii');
if $line_end_pos +4 <= $b.bytes &&
$b.subbuf(
$chunk_start, $line_end_pos + 2 - $chunk_start
).decode('ascii') ~~ /^(<.xdigit>+)[";"|"\r\n"]/
{
# deal with the case where chunk_len is 0
$chunk_len = :16($/[0].Str);
# say 'got chunk len ', $/[0].Str;
# test if at end of buf??
if $chunk_len == 0 {
# this is a "normal" exit from the routine
return True, $content;
}
# think 1CRLFxCRLF
if $line_end_pos + $chunk_len + 4 <= $b.bytes {
# say 'inner chunk';
$content ~= $b.subbuf($line_end_pos +2, $chunk_len);
$line_end_pos = $chunk_start = $line_end_pos + $chunk_len +4;
}
else {
# say 'last chunk';
# remaining chunk part len is chunk_len with CRLF
# minus the length of the chunk piece at end of buffer
my $last_chunk_end_len =
$chunk_len +2 - ($b.bytes - $line_end_pos -2);
$content ~= $b.subbuf($line_end_pos +2);
if $last_chunk_end_len > 2 {
$content ~= $sock.read($last_chunk_end_len -2);
}
# clean up CRLF after chunk
$sock.read(min($last_chunk_end_len, 2));
# this is a "normal" exit from the routine
return False, $content;
}
}
else {
# say 'extend bytes ', $b.bytes, ' start ', $chunk_start, ' data ', $b.subbuf($chunk_start).decode('ascii');
# maybe the odd case where the buffer has just part of a header at the end
$b ~= $sock.read(20);
}
}
# say join ' ', $b[0 .. 100];
# say $b.subbuf(0, 100).decode('utf-8');
die "Could not parse chunk header";
}
method make_request (
RequestType $rt, $host, $port as Int, $path, %headers, $content?
) {
my $headers = self.stringify_headers(%headers);
my IO::Socket::INET $sock .= new(:$host, :$port);
my Str $req_str = $rt.Stringy ~ " {$path} HTTP/1.1\r\n"
~ $headers
~ "\r\n";
# attach $content if given
# (string context is forced by concatenation)
$req_str ~= $content if $content.defined;
$sock.send($req_str);
my Blob $resp = $sock.read($default_stream_read_len);
my ($status, $resp_headers, $resp_content) = self.parse_response($resp);
if (($resp_headers<Transfer-Encoding> || '') eq 'chunked') {
my Bool $is_last_chunk;
my Blob $resp_content_chunk;
($is_last_chunk, $resp_content) =
self.parse_chunks($resp_content, $sock);
while (not $is_last_chunk) {
($is_last_chunk, $resp_content_chunk) =
self.parse_chunks(
my Blob $next_chunk_start = $sock.read(1024),
$sock
);
$resp_content ~= $resp_content_chunk;
}
}
elsif ( $resp_headers<Content-Length> &&
$resp_content.bytes < $resp_headers<Content-Length>
) {
$resp_content ~= $sock.read(
$resp_headers<Content-Length> - $resp_content.bytes
);
}
else { # a bit hacky for now but should be ok
while ($resp.bytes > 0) {
$resp = $sock.read($default_stream_read_len);
$resp_content ~= $resp;
}
}
$sock.close();
return ($status, $resp_headers, $resp_content);
}
method parse_response (Blob $resp) {
my %header;
my Int $header_end_pos = 0;
while ( $header_end_pos < $resp.bytes &&
$http_header_end_marker ne $resp.subbuf($header_end_pos, 4) ) {
$header_end_pos++;
}
if ($header_end_pos < $resp.bytes) {
my @header_lines = $resp.subbuf(
0, $header_end_pos
).decode('ascii').split(/\r\n/);
my Str $status_line = @header_lines.shift;
for @header_lines {
my ($name, $value) = .split(': ');
%header{$name} = $value;
}
return $status_line, %header.item, $resp.subbuf($header_end_pos +4).item;
}
die "could not parse headers";
# if %header.exists('Transfer-Encoding') && %header<Transfer-Encoding> ~~ m/:i chunked/ {
# @content = self.decode_chunked(@content);
# }
}
method getprint (Str $url) {
my $out = self.get($url);
if $out ~~ Buf { $*OUT.write($out) } else { say $out }
}
method getstore (Str $url, Str $filename) {
return unless defined $url;
my $content = self.get($url);
if ! $content {
return
}
my $fh = open($filename, :bin, :w);
if $content ~~ Buf {
$fh.write($content)
}
else {
$fh.print($content)
}
$fh.close;
}
method parse_url (Str $url) {
my URI $u .= new($url);
my $path = $u.path_query;
my $user_info = $u.grammar.parse_result<URI_reference><URI><hier_part><authority><userinfo>;
return (
$u.scheme,
$user_info ?? "{$user_info}@{$u.host}" !! $u.host,
$u.port,
$path eq '' ?? '/' !! $path,
$user_info ?? {
host => $u.host,
user => ~ $user_info<likely_userinfo_component>[0],
password => ~ $user_info<likely_userinfo_component>[1]
} !! Nil
);
}
method stringify_headers (%headers) {
my Str $str = '';
for sort %headers.keys {
$str ~= $_ ~ ': ' ~ %headers{$_} ~ "\r\n";
}
return $str;
}

207
samples/Perl6/Win32.pm Normal file
View File

@@ -0,0 +1,207 @@
my class IO::Spec::Win32 is IO::Spec::Unix {
# Some regexes we use for path splitting
my $slash = regex { <[\/ \\]> }
my $notslash = regex { <-[\/ \\]> }
my $driveletter = regex { <[A..Z a..z]> ':' }
my $UNCpath = regex { [<$slash> ** 2] <$notslash>+ <$slash> [<$notslash>+ | $] }
my $volume_rx = regex { <$driveletter> | <$UNCpath> }
method canonpath ($path, :$parent) {
$path eq '' ?? '' !! self!canon-cat($path, :$parent);
}
method catdir(*@dirs) {
return "" unless @dirs;
return self!canon-cat( "\\", |@dirs ) if @dirs[0] eq "";
self!canon-cat(|@dirs);
}
method splitdir($dir) { $dir.split($slash) }
method catfile(|c) { self.catdir(|c) }
method devnull { 'nul' }
method rootdir { '\\' }
method tmpdir {
first( { .defined && .IO.d && .IO.w },
%*ENV<TMPDIR>,
%*ENV<TEMP>,
%*ENV<TMP>,
'SYS:/temp',
'C:\system\temp',
'C:/temp',
'/tmp',
'/')
|| self.curdir;
}
method path {
my @path = split(';', %*ENV<PATH>);
@path».=subst(:global, q/"/, '');
@path = grep *.chars, @path;
unshift @path, ".";
return @path;
}
method is-absolute ($path) {
# As of right now, this returns 2 if the path is absolute with a
# volume, 1 if it's absolute with no volume, 0 otherwise.
given $path {
when /^ [<$driveletter> <$slash> | <$UNCpath>]/ { 2 }
when /^ <$slash> / { 1 }
default { 0 }
} #/
}
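# Illustrative examples (not part of the original sample):
#   self.is-absolute('C:\foo')  # 2 (absolute, with a volume)
#   self.is-absolute('\foo')    # 1 (absolute, no volume)
#   self.is-absolute('foo\bar') # 0 (relative)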
method split ($path as Str is copy) {
$path ~~ s[ <$slash>+ $] = '' #=
unless $path ~~ /^ <$driveletter>? <$slash>+ $/;
$path ~~
m/^ ( <$volume_rx> ? )
( [ .* <$slash> ]? )
(.*)
/;
my ($volume, $directory, $basename) = (~$0, ~$1, ~$2);
$directory ~~ s/ <?after .> <$slash>+ $//;
if all($directory, $basename) eq '' && $volume ne '' {
$directory = $volume ~~ /^<$driveletter>/
?? '.' !! '\\';
}
$basename = '\\' if $directory eq any('/', '\\') && $basename eq '';
$directory = '.' if $directory eq '' && $basename ne '';
return (:$volume, :$directory, :$basename);
}
method join ($volume, $directory is copy, $file is copy) {
$directory = '' if $directory eq '.' && $file.chars;
if $directory.match( /^<$slash>$/ ) && $file.match( /^<$slash>$/ ) {
$file = '';
$directory = '' if $volume.chars > 2; #i.e. UNC path
}
self.catpath($volume, $directory, $file);
}
method splitpath($path as Str, :$nofile = False) {
my ($volume,$directory,$file) = ('','','');
if ( $nofile ) {
$path ~~
/^ (<$volume_rx>?) (.*) /;
$volume = ~$0;
$directory = ~$1;
}
else {
$path ~~
m/^ ( <$volume_rx> ? )
( [ .* <$slash> [ '.' ** 1..2 $]? ]? )
(.*)
/;
$volume = ~$0;
$directory = ~$1;
$file = ~$2;
}
return ($volume,$directory,$file);
}
method catpath($volume is copy, $directory, $file) {
# Make sure the glue separator is present
# unless it's a relative path like A:foo.txt
if $volume.chars and $directory.chars
and $volume !~~ /^<$driveletter>/
and $volume !~~ /<$slash> $/
and $directory !~~ /^ <$slash>/
{ $volume ~= '\\' }
if $file.chars and $directory.chars
and $directory !~~ /<$slash> $/
{ $volume ~ $directory ~ '\\' ~ $file; }
else { $volume ~ $directory ~ $file; }
}
method rel2abs ($path is copy, $base? is copy) {
my $is_abs = self.is-absolute($path);
# Check for volume (should probably document the '2' thing...)
return self.canonpath( $path ) if $is_abs == 2;
if $is_abs {
# It's missing a volume, add one
my $vol;
$vol = self.splitpath($base)[0] if $base.defined;
$vol ||= self.splitpath($*CWD)[0];
return self.canonpath( $vol ~ $path );
}
if not defined $base {
# TODO: implement _getdcwd call ( Windows maintains separate CWD for each volume )
# See: http://msdn.microsoft.com/en-us/library/1e5zwe0c%28v=vs.80%29.aspx
#$base = Cwd::getdcwd( (self.splitpath: $path)[0] ) if defined &Cwd::getdcwd ;
#$base //= $*CWD ;
$base = $*CWD;
}
elsif ( !self.is-absolute( $base ) ) {
$base = self.rel2abs( $base );
}
else {
$base = self.canonpath( $base );
}
my ($path_directories, $path_file) = self.splitpath( $path )[1..2] ;
my ($base_volume, $base_directories) = self.splitpath( $base, :nofile ) ;
$path = self.catpath(
$base_volume,
self.catdir( $base_directories, $path_directories ),
$path_file
) ;
return self.canonpath( $path ) ;
}
method !canon-cat ( $first, *@rest, :$parent --> Str) {
$first ~~ /^ ([ <$driveletter> <$slash>?
| <$UNCpath>
| [<$slash> ** 2] <$notslash>+
| <$slash> ]?)
(.*)
/;
my Str ($volume, $path) = ~$0, ~$1;
$volume.=subst(:g, '/', '\\');
if $volume ~~ /^<$driveletter>/ {
$volume.=uc;
}
elsif $volume.chars && $volume !~~ / '\\' $/ {
$volume ~= '\\';
}
$path = join "\\", $path, @rest.flat;
$path ~~ s:g/ <$slash>+ /\\/; # /xx\\yy --> \xx\yy
$path ~~ s:g/[ ^ | '\\'] '.' '\\.'* [ '\\' | $ ]/\\/; # xx/././yy --> xx/yy
if $parent {
while $path ~~ s:g { [^ | <?after '\\'>] <!before '..\\'> <-[\\]>+ '\\..' ['\\' | $ ] } = '' { };
}
$path ~~ s/^ '\\'+ //; # \xx --> xx NOTE: this is *not* root
$path ~~ s/ '\\'+ $//; # xx\ --> xx
if $volume ~~ / '\\' $ / { # <vol>\.. --> <vol>\
$path ~~ s/ ^ '..' '\\..'* [ '\\' | $ ] //;
}
if $path eq '' { # \\HOST\SHARE\ --> \\HOST\SHARE
$volume ~~ s/<?after '\\\\' .*> '\\' $ //;
$volume || '.';
}
else {
$volume ~ $path;
}
}
}

View File

@@ -0,0 +1,75 @@
# http://perl6advent.wordpress.com/2009/12/16/day-16-we-call-it-the-old-switcheroo/
use v6;
use Test;
sub weather($weather) {
given $weather {
when 'sunny' { return 'Aah! ☀' }
when 'cloudy' { return 'Meh. ☁' }
when 'rainy' { return 'Where is my umbrella? ☂' }
when 'snowy' { return 'Yippie! ☃' }
default { return 'Looks like any other day.' }
}
}
is weather(Any), 'Looks like any other day.', 'Weather given/when';
{
sub probability($probability) {
given $probability {
when 1.00 { return 'A certainty' }
when * > 0.75 { return 'Quite likely' }
when * > 0.50 { return 'Likely' }
when * > 0.25 { return 'Unlikely' }
when * > 0.00 { return 'Very unlikely' }
when 0.00 { return 'Fat chance' }
}
}
is probability(0.80), 'Quite likely', 'Probability given/when';
sub fib(Int $_) {
when * < 2 { 1 }
default { fib($_ - 1) + fib($_ - 2) }
}
is fib(5), 8, '6th fibonacci number';
}
class Card {
method bend() { return "Card bent" }
method fold() { return "Card folded" }
method mutilate() { return "Card mutilated" }
}
my Card $punch-card .= new;
my $actions;
given $punch-card {
$actions ~= .bend;
$actions ~= .fold;
$actions ~= .mutilate;
}
is $actions, 'Card bentCard foldedCard mutilated', 'Given as a sort of once-only for loop.';
my @list = 1, 2, 3, 4, 5;
my $castle = 'phantom';
my $full-of-vowels = 'aaaooouuuiiee';
is (.[0] + .[1] + .[2] given @list), 6, 'Statement ending given';
{
is ("My God, it's full of vowels!" when $full-of-vowels ~~ /^ <[aeiou]>+ $/), "My God, it's full of vowels!", 'Statement ending when';
is ('Boo!' when /phantom/ given $castle), 'Boo!', 'Nesting when inside given';
}
{
#Test DNA one liner at the end
my $result;
for ^20 {my ($a,$b)=<AT CG>.pick.comb.pick(*); my ($c,$d)=sort map({6+4*sin($_/2)},($_,$_+4)); $result ~= sprintf "%{$c}s%{$d-$c}s\n",$a,$b}
is $result.chars , 169 , 'We got a bunch of DNA';
is $result.split("\n").Int , 21 , 'On 20 line';
is $result.subst(/\s/ , '' , :g).chars , 40 , 'Containing 20 pairs';
}
eval_lives_ok 'for ^20 {my ($a,$b)=<AT CG>.pick.comb.pick(*); my ($c,$d)=sort map {6+4*sin($_/2)},$_,$_+4; sprintf "%{$c}s%{$d-$c}s\n",$a,$b}' , 'Can handle "map {...} ,$x,$y"';
done;

View File

@@ -0,0 +1,48 @@
use v6;
use Test;
plan 9;
sub test_lines(@lines) {
#?rakudo todo 'line counts'
is @lines.elems, 3, 'Three lines read';
is @lines[0],
"Please do not remove this file, used by S16-io/basic-open.t",
'Retrieved first line';
is @lines[2],
"This is a test line.",
'Retrieved last line';
}
#?niecza skip 'TextReader.eof NYI'
{
my $fh = open('t/spec/S16-io/test-data');
my $count = 0;
while !$fh.eof {
my $x = $fh.get;
$count++ if $x.defined;
}
is $count, 3, 'Read three lines with while !$handle.eof';
}
# test that we can iterate over $fh.lines
{
my $fh = open('t/spec/S16-io/test-data');
ok defined($fh), 'Could open test file';
my @lines;
for $fh.lines -> $x {
push @lines, $x;
}
test_lines(@lines);
}
# test that we can get all items in list context:
{
my $fh = open('t/spec/S16-io/test-data');
ok defined($fh), 'Could open test file (again)';
my @lines = $fh.lines;
test_lines(@lines);
}
# vim: ft=perl6

209
samples/Perl6/calendar.t Normal file
View File

@@ -0,0 +1,209 @@
use v6;
use Test;
# calendar.t: tests some calendar-related methods common to
# Date and DateTime
plan 130;
sub date($year, $month, $day) {
Date.new(:$year, :$month, :$day)
}
sub dtim($year, $month, $day) {
DateTime.new(:$year, :$month, :$day,
:hour(17), :minute(33), :second(2.9))
}
# --------------------------------------------------------------------
# L<S32::Temporal/C<DateTime>/'truncated-to'>
# --------------------------------------------------------------------
is ~date(1969, 7, 20).truncated-to(month), '1969-07-01', 'Date.truncated-to(month)';
is ~dtim(1969, 7, 20).truncated-to(month), '1969-07-01T00:00:00Z', 'DateTime.truncated-to(month)';
is ~date(1969, 7, 20).truncated-to(year), '1969-01-01', 'Date.truncated-to(year)';
is ~dtim(1969, 7, 20).truncated-to(year), '1969-01-01T00:00:00Z', 'DateTime.truncated-to(year)';
is ~date(1999, 1, 18).truncated-to(week), '1999-01-18', 'Date.truncated-to(week) (no change in day)';
is ~date(1999, 1, 19).truncated-to(week), '1999-01-18', 'Date.truncated-to(week) (short jump)';
is ~date(1999, 1, 17).truncated-to(week), '1999-01-11', 'Date.truncated-to(week) (long jump)';
is ~dtim(1999, 1, 17).truncated-to(week), '1999-01-11T00:00:00Z', 'DateTime.truncated-to(week) (long jump)';
is ~date(1999, 4, 2).truncated-to(week), '1999-03-29', 'Date.truncated-to(week) (changing month)';
is ~date(1999, 1, 3).truncated-to(week), '1998-12-28', 'Date.truncated-to(week) (changing year)';
is ~dtim(1999, 1, 3).truncated-to(week), '1998-12-28T00:00:00Z', 'DateTime.truncated-to(week) (changing year)';
is ~date(2000, 3, 1).truncated-to(week), '2000-02-28', 'Date.truncated-to(week) (skipping over Feb 29)';
is ~dtim(2000, 3, 1).truncated-to(week), '2000-02-28T00:00:00Z', 'DateTime.truncated-to(week) (skipping over Feb 29)';
is ~date(1988, 3, 3).truncated-to(week), '1988-02-29', 'Date.truncated-to(week) (landing on Feb 29)';
is ~dtim(1988, 3, 3).truncated-to(week), '1988-02-29T00:00:00Z', 'DateTime.truncated-to(week) (landing on Feb 29)';
# Verify .gist
# Example taken from S32 specs documentation.
#?niecza skip 'Undeclared routine: hour'
{
my $dt = DateTime.new('2005-02-01T15:20:35Z');
my $truncated = $dt.truncated-to(hour);
is $truncated.gist, "2005-02-01T15:00:00Z", "validate .gist output";
}
# --------------------------------------------------------------------
# L<S32::Temporal/Accessors/'the synonym day-of-month'>
# --------------------------------------------------------------------
is date(2003, 3, 18).day-of-month, 18, 'Date.day can be spelled as Date.day-of-month';
is dtim(2003, 3, 18).day-of-month, 18, 'DateTime.day can be spelled as DateTime.day-of-month';
# --------------------------------------------------------------------
# L<S32::Temporal/Accessors/'day-of-week method'>
# --------------------------------------------------------------------
# much of this is blatantly stolen from the Date::Simple test suite
# and redistributed under the terms of the Artistic License 2.0 with
# permission of the original authors (John Tobey, Marty Pauly).
is date(1966, 10, 15).day-of-week, 6, 'Date.day-of-week (1966-10-15)';
is dtim(1966, 10, 15).day-of-week, 6, 'DateTime.day-of-week (1966-10-15)';
is date(2401, 3, 1).day-of-week, 4, 'Date.day-of-week (2401-03-01)';
is date(2401, 2, 28).day-of-week, 3, 'Date.day-of-week (2401-02-28)';
is date(2400, 3, 1).day-of-week, 3, 'Date.day-of-week (2400-03-01)';
is date(2400, 2, 29).day-of-week, 2, 'Date.day-of-week (2400-02-29)';
is date(2400, 2, 28).day-of-week, 1, 'Date.day-of-week (2400-02-28)';
is date(2101, 3, 1).day-of-week, 2, 'Date.day-of-week (2101-03-01)';
is date(2101, 2, 28).day-of-week, 1, 'Date.day-of-week (2101-02-28)';
is date(2100, 3, 1).day-of-week, 1, 'Date.day-of-week (2100-03-01)';
is dtim(2100, 3, 1).day-of-week, 1, 'DateTime.day-of-week (2100-03-01)';
is date(2100, 2, 28).day-of-week, 7, 'Date.day-of-week (2100-02-28)';
is dtim(2100, 2, 28).day-of-week, 7, 'DateTime.day-of-week (2100-02-28)';
is date(2001, 3, 1).day-of-week, 4, 'Date.day-of-week (2001-03-01)';
is date(2001, 2, 28).day-of-week, 3, 'Date.day-of-week (2001-02-28)';
is date(2000, 3, 1).day-of-week, 3, 'Date.day-of-week (2000-03-01)';
is date(2000, 2, 29).day-of-week, 2, 'Date.day-of-week (2000-02-29)';
is date(2000, 2, 28).day-of-week, 1, 'Date.day-of-week (2000-02-28)';
is date(1901, 3, 1).day-of-week, 5, 'Date.day-of-week (1901-03-01)';
is date(1901, 2, 28).day-of-week, 4, 'Date.day-of-week (1901-02-28)';
is date(1900, 3, 1).day-of-week, 4, 'Date.day-of-week (1900-03-01)';
is date(1900, 2, 28).day-of-week, 3, 'Date.day-of-week (1900-02-28)';
is date(1801, 3, 1).day-of-week, 7, 'Date.day-of-week (1801-03-01)';
is date(1801, 2, 28).day-of-week, 6, 'Date.day-of-week (1801-02-28)';
is date(1800, 3, 1).day-of-week, 6, 'Date.day-of-week (1800-03-01)';
is dtim(1800, 3, 1).day-of-week, 6, 'DateTime.day-of-week (1800-03-01)';
is date(1800, 2, 28).day-of-week, 5, 'Date.day-of-week (1800-02-28)';
is dtim(1800, 2, 28).day-of-week, 5, 'DateTime.day-of-week (1800-02-28)';
is date(1701, 3, 1).day-of-week, 2, 'Date.day-of-week (1701-03-01)';
is date(1701, 2, 28).day-of-week, 1, 'Date.day-of-week (1701-02-28)';
is date(1700, 3, 1).day-of-week, 1, 'Date.day-of-week (1700-03-01)';
is date(1700, 2, 28).day-of-week, 7, 'Date.day-of-week (1700-02-28)';
is date(1601, 3, 1).day-of-week, 4, 'Date.day-of-week (1601-03-01)';
is dtim(1601, 3, 1).day-of-week, 4, 'DateTime.day-of-week (1601-03-01)';
is date(1601, 2, 28).day-of-week, 3, 'Date.day-of-week (1601-02-28)';
is dtim(1601, 2, 28).day-of-week, 3, 'DateTime.day-of-week (1601-02-28)';
is date(1600, 3, 1).day-of-week, 3, 'Date.day-of-week (1600-03-01)';
is date(1600, 2, 29).day-of-week, 2, 'Date.day-of-week (1600-02-29)';
is date(1600, 2, 28).day-of-week, 1, 'Date.day-of-week (1600-02-28)';
# --------------------------------------------------------------------
# L<S32::Temporal/Accessors/'The method week'>
# --------------------------------------------------------------------
is date(1977, 8, 20).week.join(' '), '1977 33', 'Date.week (1977-8-20)';
is dtim(1977, 8, 20).week.join(' '), '1977 33', 'DateTime.week (1977-8-20)';
is date(1977, 8, 20).week-year, 1977, 'Date.week (1977-8-20)';
is dtim(1977, 8, 20).week-year, 1977, 'DateTime.week (1977-8-20)';
is date(1977, 8, 20).week-number, 33, 'Date.week-number (1977-8-20)';
is dtim(1977, 8, 20).week-number, 33, 'DateTime.week-number (1977-8-20)';
is date(1987, 12, 18).week.join(' '), '1987 51', 'Date.week (1987-12-18)';
is date(2020, 5, 4).week.join(' '), '2020 19', 'Date.week (2020-5-4)';
# From http://en.wikipedia.org/w/index.php?title=ISO_week_dtim&oldid=370553706#Examples
is date(2005, 01, 01).week.join(' '), '2004 53', 'Date.week (2005-01-01)';
is date(2005, 01, 02).week.join(' '), '2004 53', 'Date.week (2005-01-02)';
is date(2005, 12, 31).week.join(' '), '2005 52', 'Date.week (2005-12-31)';
is date(2007, 01, 01).week.join(' '), '2007 1', 'Date.week (2007-01-01)';
is date(2007, 12, 30).week.join(' '), '2007 52', 'Date.week (2007-12-30)';
is dtim(2007, 12, 30).week.join(' '), '2007 52', 'DateTime.week (2007-12-30)';
is date(2007, 12, 30).week-year, 2007, 'Date.week (2007-12-30)';
is dtim(2007, 12, 30).week-year, 2007, 'DateTime.week (2007-12-30)';
is date(2007, 12, 30).week-number, 52, 'Date.week-number (2007-12-30)';
is dtim(2007, 12, 30).week-number, 52, 'DateTime.week-number (2007-12-30)';
is date(2007, 12, 31).week.join(' '), '2008 1', 'Date.week (2007-12-31)';
is date(2008, 01, 01).week.join(' '), '2008 1', 'Date.week (2008-01-01)';
is date(2008, 12, 29).week.join(' '), '2009 1', 'Date.week (2008-12-29)';
is date(2008, 12, 31).week.join(' '), '2009 1', 'Date.week (2008-12-31)';
is date(2009, 01, 01).week.join(' '), '2009 1', 'Date.week (2009-01-01)';
is date(2009, 12, 31).week.join(' '), '2009 53', 'Date.week (2009-12-31)';
is date(2010, 01, 03).week.join(' '), '2009 53', 'Date.week (2010-01-03)';
is dtim(2010, 01, 03).week.join(' '), '2009 53', 'DateTime.week (2010-01-03)';
is date(2010, 01, 03).week-year, 2009, 'Date.week-year (2010-01-03)';
is dtim(2010, 01, 03).week-year, 2009, 'DateTime.week-year (2010-01-03)';
is date(2010, 01, 03).week-number, 53, 'Date.week-number (2010-01-03)';
is dtim(2010, 01, 03).week-number, 53, 'DateTime.week-number (2010-01-03)';
# day-of-week is tested each time show-dt is called.
# --------------------------------------------------------------------
# L<S32::Temporal/Accessors/'The weekday-of-month method'>
# --------------------------------------------------------------------
is date(1982, 2, 1).weekday-of-month, 1, 'Date.weekday-of-month (1982-02-01)';
is dtim(1982, 2, 1).weekday-of-month, 1, 'DateTime.weekday-of-month (1982-02-01)';
is date(1982, 2, 7).weekday-of-month, 1, 'Date.weekday-of-month (1982-02-07)';
is date(1982, 2, 8).weekday-of-month, 2, 'Date.weekday-of-month (1982-02-08)';
is date(1982, 2, 18).weekday-of-month, 3, 'Date.weekday-of-month (1982-02-18)';
is date(1982, 2, 28).weekday-of-month, 4, 'Date.weekday-of-month (1982-02-28)';
is dtim(1982, 2, 28).weekday-of-month, 4, 'DateTime.weekday-of-month (1982-02-28)';
is date(1982, 4, 4).weekday-of-month, 1, 'Date.weekday-of-month (1982-04-04)';
is date(1982, 4, 7).weekday-of-month, 1, 'Date.weekday-of-month (1982-04-07)';
is date(1982, 4, 8).weekday-of-month, 2, 'Date.weekday-of-month (1982-04-08)';
is date(1982, 4, 13).weekday-of-month, 2, 'Date.weekday-of-month (1982-04-13)';
is date(1982, 4, 30).weekday-of-month, 5, 'Date.weekday-of-month (1982-04-30)';
is dtim(1982, 4, 30).weekday-of-month, 5, 'DateTime.weekday-of-month (1982-04-30)';
# --------------------------------------------------------------------
# L<S32::Temporal/Accessors/'The days-in-month method'>
# --------------------------------------------------------------------
is date(1999, 5, 5).days-in-month, 31, 'Date.days-in-month (May 1999)';
is date(1999, 6, 5).days-in-month, 30, 'Date.days-in-month (Jun 1999)';
is date(1999, 2, 5).days-in-month, 28, 'Date.days-in-month (Feb 1999)';
is dtim(1999, 2, 5).days-in-month, 28, 'DateTime.days-in-month (Feb 1999)';
is date(2000, 2, 5).days-in-month, 29, 'Date.days-in-month (Feb 2000)';
is dtim(2000, 2, 5).days-in-month, 29, 'DateTime.days-in-month (Feb 2000)';
# --------------------------------------------------------------------
# L<S32::Temporal/Accessors/'The day-of-year method'>
# --------------------------------------------------------------------
is date(1975, 1, 1).day-of-year, 1, 'Date.day-of-year (1975-01-01)';
is dtim(1975, 1, 1).day-of-year, 1, 'DateTime.day-of-year (1975-01-01)';
is date(1977, 5, 5).day-of-year, 125, 'Date.day-of-year (1977-05-05)';
is date(1983, 11, 27).day-of-year, 331, 'Date.day-of-year (1983-11-27)';
is date(1999, 2, 28).day-of-year, 59, 'Date.day-of-year (1999-02-28)';
is dtim(1999, 2, 28).day-of-year, 59, 'DateTime.day-of-year (1999-02-28)';
is date(1999, 3, 1).day-of-year, 60, 'Date.day-of-year (1999-03-01)';
is dtim(1999, 3, 1).day-of-year, 60, 'DateTime.day-of-year (1999-03-01)';
is date(1999, 12, 31).day-of-year, 365, 'Date.day-of-year (1999-12-31)';
is date(2000, 2, 28).day-of-year, 59, 'Date.day-of-year (2000-02-28)';
is dtim(2000, 2, 28).day-of-year, 59, 'DateTime.day-of-year (2000-02-28)';
is date(2000, 2, 29).day-of-year, 60, 'Date.day-of-year (2000-02-29)';
is dtim(2000, 2, 29).day-of-year, 60, 'DateTime.day-of-year (2000-02-29)';
is date(2000, 3, 1).day-of-year, 61, 'Date.day-of-year (2000-03-01)';
is date(2000, 12, 31).day-of-year, 366, 'Date.day-of-year (2000-12-31)';
# --------------------------------------------------------------------
# L<S32::Temporal/Accessors/'The method is-leap-year'>
# --------------------------------------------------------------------
nok date(1800, 1, 1).is-leap-year, 'Date.is-leap-year (1800)';
nok date(1801, 1, 1).is-leap-year, 'Date.is-leap-year (1801)';
ok date(1804, 1, 1).is-leap-year, 'Date.is-leap-year (1804)';
nok date(1900, 1, 1).is-leap-year, 'Date.is-leap-year (1900)';
nok dtim(1900, 1, 1).is-leap-year, 'DateTime.is-leap-year (1900)';
ok date(1996, 1, 1).is-leap-year, 'Date.is-leap-year (1996)';
nok date(1999, 1, 1).is-leap-year, 'Date.is-leap-year (1999)';
ok date(2000, 1, 1).is-leap-year, 'Date.is-leap-year (2000)';
ok dtim(2000, 1, 1).is-leap-year, 'DateTime.is-leap-year (2000)';
done;
# vim: ft=perl6

586
samples/Perl6/for.t Normal file
View File

@@ -0,0 +1,586 @@
use v6;
#?pugs emit #
use MONKEY_TYPING;
use Test;
=begin description
Tests the "for" statement
This attempts to test as many variations of the
for statement as possible
=end description
plan 77;
## No foreach
# L<S04/The C<for> statement/"no foreach statement any more">
{
my $times_run = 0;
eval_dies_ok 'foreach 1..10 { $times_run++ }; 1', "foreach is gone";
eval_dies_ok 'foreach (1..10) { $times_run++}; 1',
"foreach is gone, even with parens";
is $times_run, 0, "foreach doesn't work";
}
## for with plain old range operator w/out parens
{
my $a = "";
for 0 .. 5 { $a = $a ~ $_; };
is($a, '012345', 'for 0..5 {} works');
}
# ... with pointy blocks
{
my $b = "";
for 0 .. 5 -> $_ { $b = $b ~ $_; };
is($b, '012345', 'for 0 .. 5 -> {} works');
}
#?pugs todo 'slice context'
#?niecza skip 'slice context'
{
my $str;
my @a = 1..3;
my @b = 4..6;
for zip(@a; @b) -> $x, $y {
$str ~= "($x $y)";
}
is $str, "(1 4)(2 5)(3 6)", 'for zip(@a; @b) -> $x, $y works';
}
# ... with referential sub
{
my $d = "";
for -2 .. 2 { $d ~= .sign };
is($d, '-1-1011', 'for 0 .. 5 { .some_sub } works');
}
## and now with parens around the range operator
{
my $e = "";
for (0 .. 5) { $e = $e ~ $_; };
is($e, '012345', 'for () {} works');
}
# ... with pointy blocks
{
my $f = "";
for (0 .. 5) -> $_ { $f = $f ~ $_; };
is($f, '012345', 'for () -> {} works');
}
# ... with implicit topic
{
$_ = "GLOBAL VALUE";
for "INNER VALUE" {
is( .lc, "inner value", "Implicit default topic is seen by lc()");
};
is($_,"GLOBAL VALUE","After the loop the implicit topic gets restored");
}
{
# as statement modifier
$_ = "GLOBAL VALUE";
is( .lc, "inner value", "Implicit default topic is seen by lc()" )
for "INNER VALUE";
#?pugs todo
is($_,"GLOBAL VALUE","After the loop the implicit topic gets restored");
}
## and now for with 'topical' variables
# ... w/out parens
my $i = "";
for 0 .. 5 -> $topic { $i = $i ~ $topic; };
is($i, '012345', 'for 0 .. 5 -> $topic {} works');
# ... with parens
my $j = "";
for (0 .. 5) -> $topic { $j = $j ~ $topic; };
is($j, '012345', 'for () -> $topic {} works');
## for with @array operator w/out parens
my @array_k = (0 .. 5);
my $k = "";
for @array_k { $k = $k ~ $_; };
is($k, '012345', 'for @array {} works');
# ... with pointy blocks
my @array_l = (0 .. 5);
my $l = "";
for @array_l -> $_ { $l = $l ~ $_; };
is($l, '012345', 'for @array -> {} works');
## and now with parens around the @array
my @array_o = (0 .. 5);
my $o = "";
for (@array_o) { $o = $o ~ $_; };
is($o, '012345', 'for (@array) {} works');
# ... with pointy blocks
{
my @array_p = (0 .. 5);
my $p = "";
for (@array_p) -> $_ { $p = $p ~ $_; };
is($p, '012345', 'for (@array) -> {} works');
}
my @elems = <a b c d e>;
{
my @a;
for (@elems) {
push @a, $_;
}
my @e = <a b c d e>;
is(@a, @e, 'for (@a) { ... $_ ... } iterates all elems');
}
{
my @a;
for (@elems) -> $_ { push @a, $_ };
my @e = @elems;
is(@a, @e, 'for (@a)->$_ { ... $_ ... } iterates all elems' );
}
{
my @a;
for (@elems) { push @a, $_, $_; }
my @e = <a a b b c c d d e e>;
is(@a, @e, 'for (@a) { ... $_ ... $_ ... } iterates all elems, not just odd');
}
# "for @a -> $var" is ro by default.
#?pugs skip 'parsefail'
{
my @a = <1 2 3 4>;
eval_dies_ok('for @a -> $elem {$elem = 5}', '-> $var is ro by default');
for @a <-> $elem {$elem++;}
is(@a, <2 3 4 5>, '<-> $var is rw');
for @a <-> $first, $second {$first++; $second++}
is(@a, <3 4 5 6>, '<-> $var, $var2 works');
}
# for with "is rw"
{
my @array_s = (0..2);
my @s = (1..3);
for @array_s { $_++ };
is(@array_s, @s, 'for @array { $_++ }');
}
{
my @array = <a b c d>;
for @array { $_ ~= "c" }
is ~@array, "ac bc cc dc",
'mutating $_ in for works';
}
{
my @array_t = (0..2);
my @t = (1..3);
for @array_t -> $val is rw { $val++ };
is(@array_t, @t, 'for @array -> $val is rw { $val++ }');
}
#?pugs skip "Can't modify const item"
{
my @array_v = (0..2);
my @v = (1..3);
for @array_v.values -> $val is rw { $val++ };
is(@array_v, @v, 'for @array.values -> $val is rw { $val++ }');
}
#?pugs skip "Can't modify const item"
{
my @array_kv = (0..2);
my @kv = (1..3);
for @array_kv.kv -> $key, $val is rw { $val++ };
is(@array_kv, @kv, 'for @array.kv -> $key, $val is rw { $val++ }');
}
#?pugs skip "Can't modify const item"
{
my %hash_v = ( a => 1, b => 2, c => 3 );
my %v = ( a => 2, b => 3, c => 4 );
for %hash_v.values -> $val is rw { $val++ };
is(%hash_v, %v, 'for %hash.values -> $val is rw { $val++ }');
}
#?pugs todo
{
my %hash_kv = ( a => 1, b => 2, c => 3 );
my %kv = ( a => 2, b => 3, c => 4 );
try { for %hash_kv.kv -> $key, $val is rw { $val++ }; };
is( %hash_kv, %kv, 'for %hash.kv -> $key, $val is rw { $val++ }');
}
# .key //= ++$i for @array1;
class TestClass{ has $.key is rw };
{
my @array1 = (TestClass.new(:key<1>),TestClass.new());
my $i = 0;
for @array1 { .key //= ++$i }
my $sum1 = [+] @array1.map: { $_.key };
is( $sum1, 2, '.key //= ++$i for @array1;' );
}
# .key = 1 for @array1;
{
my @array1 = (TestClass.new(),TestClass.new(:key<2>));
.key = 1 for @array1;
my $sum1 = [+] @array1.map: { $_.key };
is($sum1, 2, '.key = 1 for @array1;');
}
# $_.key = 1 for @array1;
{
my @array1 = (TestClass.new(),TestClass.new(:key<2>));
$_.key = 1 for @array1;
my $sum1 = [+] @array1.map: { $_.key };
is( $sum1, 2, '$_.key = 1 for @array1;');
}
# rw scalars
#L<S04/The C<for> statement/implicit parameter to block read/write "by default">
{
my ($a, $b, $c) = 0..2;
try { for ($a, $b, $c) { $_++ } };
is( [$a,$b,$c], [1,2,3], 'for ($a,$b,$c) { $_++ }');
($a, $b, $c) = 0..2;
try { for ($a, $b, $c) -> $x is rw { $x++ } };
is( [$a,$b,$c], [1,2,3], 'for ($a,$b,$c) -> $x is rw { $x++ }');
}
# list context
{
my $a = '';
my $b = '';
for 1..3, 4..6 { $a ~= $_.WHAT.gist ; $b ~= Int.gist };
is($a, $b, 'List context');
$a = '';
for [1..3, 4..6] { $a ~= $_.WHAT.gist };
is($a, Array.gist, 'List context');
$a = '';
$b = '';
for [1..3], [4..6] { $a ~= $_.WHAT.gist ; $b ~= Array.gist };
is($a, $b, 'List context');
}
{
# this was a rakudo bug with mixed 'for' and recursion, which seems to
# confuse some lexical pads or the like, see RT #58392
my $gather = '';
sub f($l) {
if $l <= 0 {
return $l;
}
$gather ~= $l;
for 1..3 {
f($l-1);
$gather ~= '.';
}
}
f(2);
is $gather, '21....1....1....', 'Can mix recursion and for';
}
# another variation
{
my $t = '';
my $c;
sub r($x) {
my $h = $c++;
r $x-1 if $x;
for 1 { $t ~= $h };
};
r 3;
is $t, '3210', 'can mix recursion and for (RT 103332)';
}
# grep and sort in for - these were pugs bugs once, so let's
# keep them as regression tests
{
my @array = <1 2 3 4>;
my $output = '';
for (grep { 1 }, @array) -> $elem {
$output ~= "$elem,";
}
is $output, "1,2,3,4,", "grep works in for";
}
{
my @array = <1 2 3 4>;
my $output = '';
for @array.sort -> $elem {
$output ~= "$elem,";
}
is $output, "1,2,3,4,", "sort works in for";
}
{
my @array = <1 2 3 4>;
my $output = '';
for (grep { 1 }, @array.sort) -> $elem {
$output ~= "$elem,";
}
is $output, "1,2,3,4,", "grep and sort work in for";
}
# L<S04/Statement parsing/keywords require whitespace>
eval_dies_ok('for(0..5) { }','keyword needs at least one whitespace after it');
# looping with more than one loop variables
{
my @a = <1 2 3 4>;
my $str = '';
for @a -> $x, $y {
$str ~= $x+$y;
}
is $str, "37", "for loop with two variables";
}
{
#my $str = '';
eval_dies_ok('for 1..5 -> $x, $y { $str ~= "$x$y" }', 'Should throw exception, no value for parameter $y');
#is $str, "1234", "loop ran before throwing exception";
#diag ">$str<";
}
#?rakudo skip 'optional variable in for loop (RT #63994)'
#?niecza 2 todo 'NYI'
{
my $str = '';
for 1..5 -> $x, $y? {
$str ~= " " ~ $x*$y;
}
is $str, " 2 12 0";
}
{
my $str = '';
for 1..5 -> $x, $y = 7 {
$str ~= " " ~ $x*$y;
}
is $str, " 2 12 35", 'default values in for-loops';
}
#?pugs todo
{
my @a = <1 2 3>;
my @b = <4 5 6>;
my $res = '';
for @a Z @b -> $x, $y {
$res ~= " " ~ $x * $y;
}
is $res, " 4 10 18", "Z -ed for loop";
}
#?pugs todo
{
my @a = <1 2 3>;
my $str = '';
for @a Z @a Z @a Z @a Z @a -> $q, $w, $e, $r, $t {
$str ~= " " ~ $q*$w*$e*$r*$t;
}
is $str, " 1 {2**5} {3**5}", "Z-ed for loop with 5 arrays";
}
{
eval_dies_ok 'for 1.. { };', "Please use ..* for indefinite range";
eval_dies_ok 'for 1... { };', "1... does not exist";
}
{
my $c;
for 1..8 {
$c = $_;
last if $_ == 6;
}
is $c, 6, 'for loop ends in time using last';
}
{
my $c;
for 1..* {
$c = $_;
last if $_ == 6;
}
is $c, 6, 'infinite for loop ends in time using last';
}
{
my $c;
for 1..Inf {
$c = $_;
last if $_ == 6;
}
is $c, 6, 'infinite for loop ends in time using last';
}
# RT #62478
#?pugs todo
{
try { EVAL('for (my $ii = 1; $ii <= 3; $ii++) { say $ii; }') };
ok "$!" ~~ /C\-style/, 'mentions C-style';
ok "$!" ~~ /for/, 'mentions for';
ok "$!" ~~ /loop/, 'mentions loop';
}
# RT #65212
#?pugs todo
{
my $parsed = 0;
try { EVAL '$parsed = 1; for (1..3)->$n { last }' };
ok ! $parsed, 'for (1..3)->$n fails to parse';
}
# RT #71268
{
sub rt71268 { for ^1 {} }
#?pugs todo
lives_ok { ~(rt71268) }, 'can stringify "for ^1 {}" without death';
#?pugs skip 'Cannot cast from VList to VCode'
ok rt71268() ~~ (), 'result of "for ^1 {}" is ()';
}
# RT 62478
{
eval_dies_ok 'for (my $i; $i <=3; $i++) { $i; }', 'Unsupported use of C-style "for (;;)" loop; in Perl 6 please use "loop (;;)"';
}
#?pugs todo
{
try { EVAL 'for (my $x; $x <=3; $x++) { $i; }'; diag($!) };
ok $! ~~ / 'C-style' /, 'Sensible error message';
}
# RT #64886
#?rakudo skip 'maybe bogus, for loops are not supposed to be lazy?'
{
my $a = 0;
for 1..10000000000 {
$a++;
last;
}
is $a, 1, 'for on Range with huge max value is lazy and enters block';
}
# RT #60780
lives_ok {
for 1 .. 5 -> $x, $y? { }
}, 'Iteration variables do not need to add up if one is optional';
# RT #78232
{
my $a = 0;
for 1, 2, 3 { sub foo {}; $a++ }
is $a, 3, 'RT #78232';
}
# http://irclog.perlgeek.de/perl6/2011-12-29#i_4892285
# (Niecza bug)
{
my $x = 0;
for 1 .. 2 -> $a, $b { $x = $b } #OK not used
is $x, 2, 'Lazy lists interact properly with multi-element for loops';
}
# RT #71270
# list comprehension
#?pugs skip 'Cannot cast from VList to VCode'
{
sub f() { for ^1 { } };
is ~f(), '', 'empty for-loop returns empty list';
}
# RT #74060
# more list comprehension
#?pugs skip 'parsefail'
#?niecza todo "https://github.com/sorear/niecza/issues/180"
{
my @s = ($_ * 2 if $_ ** 2 > 3 for 0 .. 5);
is ~@s, '4 6 8 10', 'Can use statement-modifying "for" in list comprehension';
}
# RT 113026
#?rakudo todo 'RT 113026 array iterator does not track a growing array'
#?niecza todo 'array iterator does not track a growing array'
#?pugs todo
{
my @rt113026 = 1 .. 10;
my $iter = 0;
for @rt113026 -> $n {
$iter++;
if $iter % 2 {
@rt113026.push: $n;
}
}
is $iter, 20, 'iterating over an expanding list';
is @rt113026, <1 2 3 4 5 6 7 8 9 10 1 3 5 7 9 1 5 9 5 5>,
'array expanded in for loop is expanded';
}
# RT #78406
{
my $c = 0;
dies_ok { for ^8 { .=fmt('%03b'); $c++ } }, '$_ is read-only here';
is $c, 0, '... and $_ is *always* read-only here';
}
dies_ok
{
my class Foo {
has @.items;
method check_items { for @.items -> $item { die "bad" if $item == 2 } }
method foo { self.check_items; .say for @.items }
}
Foo.new(items => (1, 2, 3, 4)).foo
}, 'for in called method runs (was a sink context bug)';
# RT #77460
#?pugs todo
{
my @a = 1;
for 1..10 {
my $last = @a[*-1];
push @a, (sub ($s) { $s + 1 })($last)
};
is @a, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11];
}
# vim: ft=perl6

76
samples/Perl6/hash.t Normal file
View File

@@ -0,0 +1,76 @@
use v6;
use Test;
plan(5);
unless EVAL 'EVAL("1", :lang<perl5>)' {
skip_rest;
exit;
}
die unless
EVAL(q/
package My::Hash;
use strict;
sub new {
my ($class, $ref) = @_;
bless \$ref, $class;
}
sub hash {
my $self = shift;
return $$self;
}
sub my_keys {
my $self = shift;
return keys %{$$self};
}
sub my_exists {
my ($self, $idx) = @_;
return exists $$self->{$idx};
}
sub fetch {
my ($self, $idx) = @_;
return $$self->{$idx};
}
sub store {
my ($self, $idx, $val) = @_;
$$self->{$idx} = $val;
}
sub push {
my ($self, $val) = @_;
}
1;
/, :lang<perl5>);
my $p5ha = EVAL('sub { My::Hash->new($_[0]) }', :lang<perl5>);
my %hash = (5 => 'a', 6 => 'b', 7 => 'c', 8 => 'd');
my $p5hash = $p5ha(\%hash);
my $rethash = $p5hash.hash;
my @keys = %hash.keys.sort;
my @p5keys;
try {
@p5keys = $p5hash.my_keys; # this doesn't even pass lives_ok ??
@p5keys .= sort;
};
is("{ @keys }", "{ @p5keys }");
ok($p5hash.store(9, 'e'), 'can store');
is(%hash{9}, 'e', 'store result');
is($p5hash.fetch(5), 'a', 'fetch result');
is($p5hash.my_exists(5), %hash<5>:exists, 'exists');
#?pugs todo 'bug'
is($p5hash.my_exists(12), %hash<12>:exists, 'nonexists fail');
# vim: ft=perl6

630
samples/Perl6/htmlify.pl Executable file
View File

@@ -0,0 +1,630 @@
#!/usr/bin/env perl6
use v6;
# This script isn't in bin/ because it's not meant to be installed.
BEGIN say 'Initializing ...';
use Pod::To::HTML;
use URI::Escape;
use lib 'lib';
use Perl6::TypeGraph;
use Perl6::TypeGraph::Viz;
use Perl6::Documentable::Registry;
my $*DEBUG = False;
my $tg;
my %methods-by-type;
my $footer = footer-html;
my $head = q[
<link rel="icon" href="/favicon.ico" type="favicon.ico" />
<link rel="stylesheet" type="text/css" href="/style.css" media="screen" title="default" />
];
sub url-munge($_) {
return $_ if m{^ <[a..z]>+ '://'};
return "/type/$_" if m/^<[A..Z]>/;
return "/routine/$_" if m/^<[a..z]>/;
# poor man's <identifier>
if m/ ^ '&'( \w <[[\w'-]>* ) $/ {
return "/routine/$0";
}
return $_;
}
sub p2h($pod) {
pod2html($pod, :url(&url-munge), :$footer, :$head);
}
sub pod-gist(Pod::Block $pod, $level = 0) {
my $leading = ' ' x $level;
my %confs;
my @chunks;
for <config name level caption type> {
my $thing = $pod.?"$_"();
if $thing {
%confs{$_} = $thing ~~ Iterable ?? $thing.perl !! $thing.Str;
}
}
@chunks = $leading, $pod.^name, (%confs.perl if %confs), "\n";
for $pod.content.list -> $c {
if $c ~~ Pod::Block {
@chunks.push: pod-gist($c, $level + 2);
}
elsif $c ~~ Str {
@chunks.push: $c.indent($level + 2), "\n";
} elsif $c ~~ Positional {
@chunks.push: $c.map: {
if $_ ~~ Pod::Block {
*.&pod-gist
} elsif $_ ~~ Str {
$_
}
}
}
}
@chunks.join;
}
sub recursive-dir($dir) {
my @todo = $dir;
gather while @todo {
my $d = @todo.shift;
for dir($d) -> $f {
if $f.f {
take $f;
}
else {
@todo.push($f.path);
}
}
}
}
sub first-code-block(@pod) {
if @pod[1] ~~ Pod::Block::Code {
return @pod[1].content.grep(Str).join;
}
'';
}
sub MAIN(Bool :$debug, Bool :$typegraph = False) {
$*DEBUG = $debug;
say 'Creating html/ subdirectories ...';
for '', <type language routine images op op/prefix op/postfix op/infix
op/circumfix op/postcircumfix op/listop> {
mkdir "html/$_" unless "html/$_".IO ~~ :e;
}
say 'Reading lib/ ...';
my @source = recursive-dir('lib').grep(*.f).grep(rx{\.pod$});
@source .= map: {; .path.subst('lib/', '').subst(rx{\.pod$}, '').subst(:g, '/', '::') => $_ };
say 'Reading type graph ...';
$tg = Perl6::TypeGraph.new-from-file('type-graph.txt');
{
my %h = $tg.sorted.kv.flat.reverse;
@source .= sort: { %h{.key} // -1 };
}
my $dr = Perl6::Documentable::Registry.new;
say 'Processing Pod files ...';
for (0..* Z @source) -> $num, $_ {
my $podname = .key;
my $file = .value;
my $what = $podname ~~ /^<[A..Z]> | '::'/ ?? 'type' !! 'language';
printf "% 4d/%d: % -40s => %s\n", $num, +@source, $file.path, "$what/$podname";
my $pod = eval slurp($file.path) ~ "\n\$=pod";
$pod .= [0];
if $what eq 'language' {
write-language-file(:$dr, :$what, :$pod, :$podname);
}
else {
say pod-gist($pod[0]) if $*DEBUG;
write-type-file(:$dr, :$what, :pod($pod[0]), :$podname);
}
}
say 'Composing doc registry ...';
$dr.compose;
write-disambiguation-files($dr);
write-op-disambiguation-files($dr);
write-operator-files($dr);
write-type-graph-images(:force($typegraph));
write-search-file($dr);
write-index-file($dr);
say 'Writing per-routine files ...';
my %routine-seen;
for $dr.lookup('routine', :by<kind>).list -> $d {
next if %routine-seen{$d.name}++;
write-routine-file($dr, $d.name);
print '.'
}
say '';
say 'Processing complete.';
}
sub write-language-file(:$dr, :$what, :$pod, :$podname) {
spurt "html/$what/$podname.html", p2h($pod);
if $podname eq 'operators' {
my @chunks = chunks-grep($pod.content,
:from({ $_ ~~ Pod::Heading and .level == 2}),
:to({ $^b ~~ Pod::Heading and $^b.level <= $^a.level}),
);
for @chunks -> $chunk {
my $heading = $chunk[0].content[0].content[0];
next unless $heading ~~ / ^ [in | pre | post | circum | postcircum ] fix | listop /;
my $what = ~$/;
my $operator = $heading.split(' ', 2)[1];
$dr.add-new(
:kind<operator>,
:subkind($what),
:name($operator),
:pod($chunk),
:!pod-is-complete,
);
}
}
$dr.add-new(
:kind<language>,
:name($podname),
:$pod,
:pod-is-complete,
);
}
sub write-type-file(:$dr, :$what, :$pod, :$podname) {
my @chunks = chunks-grep($pod.content,
:from({ $_ ~~ Pod::Heading and .level == 2}),
:to({ $^b ~~ Pod::Heading and $^b.level <= $^a.level}),
);
if $tg.types{$podname} -> $t {
$pod.content.push: Pod::Block::Named.new(
name => 'Image',
content => [ "/images/type-graph-$podname.png"],
);
$pod.content.push: pod-link(
'Full-size type graph image as SVG',
"/images/type-graph-$podname.svg",
);
my @mro = $t.mro;
@mro.shift; # current type is already taken care of
for $t.roles -> $r {
next unless %methods-by-type{$r};
$pod.content.push:
pod-heading("Methods supplied by role $r"),
pod-block(
"$podname does role ",
pod-link($r.name, "/type/$r"),
", which provides the following methods:",
),
%methods-by-type{$r}.list,
;
}
for @mro -> $c {
next unless %methods-by-type{$c};
$pod.content.push:
pod-heading("Methods supplied by class $c"),
pod-block(
"$podname inherits from class ",
pod-link($c.name, "/type/$c"),
", which provides the following methods:",
),
%methods-by-type{$c}.list,
;
for $c.roles -> $r {
next unless %methods-by-type{$r};
$pod.content.push:
pod-heading("Methods supplied by role $r"),
pod-block(
"$podname inherits from class ",
pod-link($c.name, "/type/$c"),
", which does role ",
pod-link($r.name, "/type/$r"),
", which provides the following methods:",
),
%methods-by-type{$r}.list,
;
}
}
}
my $d = $dr.add-new(
:kind<type>,
# TODO: subkind
:$pod,
:pod-is-complete,
:name($podname),
);
for @chunks -> $chunk {
my $name = $chunk[0].content[0].content[0];
say "$podname.$name" if $*DEBUG;
next if $name ~~ /\s/;
%methods-by-type{$podname}.push: $chunk;
# determine whether it's a sub or method
my Str $subkind;
{
my %counter;
for first-code-block($chunk).lines {
if ms/^ 'multi'? (sub|method)»/ {
%counter{$0}++;
}
}
if %counter == 1 {
($subkind,) = %counter.keys;
}
if %counter<method> {
write-qualified-method-call(
:$name,
:pod($chunk),
:type($podname),
);
}
}
$dr.add-new(
:kind<routine>,
:$subkind,
:$name,
:pod($chunk),
:!pod-is-complete,
:origin($d),
);
}
spurt "html/$what/$podname.html", p2h($pod);
}
sub chunks-grep(:$from!, :&to!, *@elems) {
my @current;
gather {
for @elems -> $c {
if @current && ($c ~~ $from || to(@current[0], $c)) {
take [@current];
@current = ();
@current.push: $c if $c ~~ $from;
}
elsif @current or $c ~~ $from {
@current.push: $c;
}
}
take [@current] if @current;
}
}
sub pod-with-title($title, *@blocks) {
Pod::Block::Named.new(
name => "pod",
content => [
Pod::Block::Named.new(
name => "TITLE",
content => Array.new(
Pod::Block::Para.new(
content => [$title],
)
)
),
@blocks.flat,
]
);
}
sub pod-block(*@content) {
Pod::Block::Para.new(:@content);
}
sub pod-link($text, $url) {
Pod::FormattingCode.new(
type => 'L',
content => [
join('|', $text, $url),
],
);
}
sub pod-item(*@content, :$level = 1) {
Pod::Item.new(
:$level,
:@content,
);
}
sub pod-heading($name, :$level = 1) {
Pod::Heading.new(
:$level,
:content[pod-block($name)],
);
}
sub write-type-graph-images(:$force) {
unless $force {
my $dest = 'html/images/type-graph-Any.svg'.path;
if $dest.e && $dest.modified >= 'type-graph.txt'.path.modified {
say "Not writing type graph images, it seems to be up-to-date";
say "To force writing of type graph images, supply the --typegraph";
say "option at the command line, or delete";
say "file 'html/images/type-graph-Any.svg'";
return;
}
}
say 'Writing type graph images to html/images/ ...';
for $tg.sorted -> $type {
my $viz = Perl6::TypeGraph::Viz.new-for-type($type);
$viz.to-file("html/images/type-graph-{$type}.svg", format => 'svg');
$viz.to-file("html/images/type-graph-{$type}.png", format => 'png', size => '8,3');
print '.'
}
say '';
say 'Writing specialized visualizations to html/images/ ...';
my %by-group = $tg.sorted.classify(&viz-group);
%by-group<Exception>.push: $tg.types< Exception Any Mu >;
%by-group<Metamodel>.push: $tg.types< Any Mu >;
for %by-group.kv -> $group, @types {
my $viz = Perl6::TypeGraph::Viz.new(:types(@types),
:dot-hints(viz-hints($group)),
:rank-dir('LR'));
$viz.to-file("html/images/type-graph-{$group}.svg", format => 'svg');
$viz.to-file("html/images/type-graph-{$group}.png", format => 'png', size => '8,3');
}
}
sub viz-group ($type) {
return 'Metamodel' if $type.name ~~ /^ 'Perl6::Metamodel' /;
return 'Exception' if $type.name ~~ /^ 'X::' /;
return 'Any';
}
sub viz-hints ($group) {
return '' unless $group eq 'Any';
return '
subgraph "cluster: Mu children" {
rank=same;
style=invis;
"Any";
"Junction";
}
subgraph "cluster: Pod:: top level" {
rank=same;
style=invis;
"Pod::Config";
"Pod::Block";
}
subgraph "cluster: Date/time handling" {
rank=same;
style=invis;
"Date";
"DateTime";
"DateTime-local-timezone";
}
subgraph "cluster: Collection roles" {
rank=same;
style=invis;
"Positional";
"Associative";
"Baggy";
}
';
}
sub write-search-file($dr) {
say 'Writing html/search.html ...';
my $template = slurp("search_template.html");
my @items;
my sub fix-url ($raw) { $raw.substr(1) ~ '.html' };
@items.push: $dr.lookup('language', :by<kind>).sort(*.name).map({
"\{ label: \"Language: {.name}\", value: \"{.name}\", url: \"{ fix-url(.url) }\" \}"
});
@items.push: $dr.lookup('type', :by<kind>).sort(*.name).map({
"\{ label: \"Type: {.name}\", value: \"{.name}\", url: \"{ fix-url(.url) }\" \}"
});
my %seen;
@items.push: $dr.lookup('routine', :by<kind>).grep({!%seen{.name}++}).sort(*.name).map({
"\{ label: \"{ (.subkind // 'Routine').tclc }: {.name}\", value: \"{.name}\", url: \"{ fix-url(.url) }\" \}"
});
sub escape(Str $s) {
$s.trans([</ \\ ">] => [<\\/ \\\\ \\">]);
}
@items.push: $dr.lookup('operator', :by<kind>).map({
qq[\{ label: "$_.human-kind() {escape .name}", value: "{escape .name}", url: "{ fix-url .url }"\}]
});
my $items = @items.join(",\n");
spurt("html/search.html", $template.subst("ITEMS", $items));
}
my %operator_disambiguation_file_written;
sub write-disambiguation-files($dr) {
say 'Writing disambiguation files ...';
for $dr.grouped-by('name').kv -> $name, $p is copy {
print '.';
my $pod = pod-with-title("Disambiguation for '$name'");
if $p.elems == 1 {
$p.=[0] if $p ~~ Array;
if $p.origin -> $o {
$pod.content.push:
pod-block(
pod-link("'$name' is a $p.human-kind()", $p.url),
' from ',
pod-link($o.human-kind() ~ ' ' ~ $o.name, $o.url),
);
}
else {
$pod.content.push:
pod-block(
pod-link("'$name' is a $p.human-kind()", $p.url)
);
}
}
else {
$pod.content.push:
pod-block("'$name' can be anything of the following"),
$p.map({
if .origin -> $o {
pod-item(
pod-link(.human-kind, .url),
' from ',
pod-link($o.human-kind() ~ ' ' ~ $o.name, $o.url),
)
}
else {
pod-item( pod-link(.human-kind, .url) )
}
});
}
my $html = p2h($pod);
spurt "html/$name.html", $html;
if all($p>>.kind) eq 'operator' {
spurt "html/op/$name.html", $html;
%operator_disambiguation_file_written{$p[0].name} = 1;
}
}
say '';
}
sub write-op-disambiguation-files($dr) {
say 'Writing operator disambiguation files ...';
for $dr.lookup('operator', :by<kind>).classify(*.name).kv -> $name, @ops {
next unless %operator_disambiguation_file_written{$name};
my $pod = pod-with-title("Disambiguation for '$name'");
if @ops == 1 {
my $p = @ops[0];
if $p.origin -> $o {
$pod.content.push:
pod-block(
pod-link("'$name' is a $p.human-kind()", $p.url),
' from ',
pod-link($o.human-kind() ~ ' ' ~ $o.name, $o.url),
);
}
else {
$pod.content.push:
pod-block(
pod-link("'$name' is a $p.human-kind()", $p.url)
);
}
}
else {
$pod.content.push:
pod-block("'$name' can be anything of the following"),
@ops.map({
if .origin -> $o {
pod-item(
pod-link(.human-kind, .url),
' from ',
pod-link($o.human-kind() ~ ' ' ~ $o.name, $o.url),
)
}
else {
pod-item( pod-link(.human-kind, .url) )
}
});
}
my $html = p2h($pod);
spurt "html/$name.html", $html;
}
}
sub write-operator-files($dr) {
say 'Writing operator files ...';
for $dr.lookup('operator', :by<kind>).list -> $doc {
my $what = $doc.subkind;
my $op = $doc.name;
my $pod = pod-with-title(
"$what.tclc() $op operator",
pod-block(
"Documentation for $what $op, extracted from ",
pod-link("the operators language documentation", "/language/operators")
),
@($doc.pod),
);
spurt "html/op/$what/$op.html", p2h($pod);
}
}
sub write-index-file($dr) {
say 'Writing html/index.html ...';
my %routine-seen;
my $pod = pod-with-title('Perl 6 Documentation',
Pod::Block::Para.new(
content => ['Official Perl 6 documentation'],
),
# TODO: add more
pod-heading("Language Documentation"),
$dr.lookup('language', :by<kind>).sort(*.name).map({
pod-item( pod-link(.name, .url) )
}),
pod-heading('Types'),
$dr.lookup('type', :by<kind>).sort(*.name).map({
pod-item(pod-link(.name, .url))
}),
pod-heading('Routines'),
$dr.lookup('routine', :by<kind>).sort(*.name).map({
next if %routine-seen{.name}++;
pod-item(pod-link(.name, .url))
}),
);
spurt 'html/index.html', p2h($pod);
}
sub write-routine-file($dr, $name) {
say 'Writing html/routine/$name.html ...' if $*DEBUG;
my @docs = $dr.lookup($name, :by<name>).grep(*.kind eq 'routine');
my $subkind = 'routine';
{
my @subkinds = @docs>>.subkind;
$subkind = @subkinds[0] if all(@subkinds>>.defined) && [eq] @subkinds;
}
my $pod = pod-with-title("Documentation for $subkind $name",
pod-block("Documentation for $subkind $name, assembled from the
following types:"),
@docs.map({
pod-heading(.origin.name ~ '.' ~ .name),
pod-block("From ", pod-link(.origin.name, .origin.url ~ '#' ~ .name)),
.pod.list,
})
);
spurt "html/routine/$name.html", p2h($pod);
}
sub write-qualified-method-call(:$name!, :$pod!, :$type!) {
my $p = pod-with-title(
"Documentation for method $type.$name",
pod-block('From ', pod-link($type, "/type/{$type}#$name")),
@$pod,
);
spurt "html/{$type}.{$name}.html", p2h($p);
}
sub footer-html() {
state $dt = ~DateTime.now;
qq[
<div id="footer">
<p>
Generated on $dt from the sources at
<a href="https://github.com/perl6/doc">perl6/doc on github</a>.
</p>
<p>
This is a work in progress to document Perl 6, and known to be
incomplete. Your contribution is appreciated.
</p>
</div>
];
}

View File

@@ -0,0 +1,76 @@
use v6;
use Test;
# L<S02/Whitespace and Comments>
=begin kwid
= DESCRIPTION
Tests that the List quoting parser properly
ignores whitespace in lists. This becomes important
if your line endings are \x0d\x0a.
Characters that should be ignored are:
\t
\r
\n
\x20
Most likely there are more. James tells me that
the maximum Unicode char is \x10FFFF, so maybe
we should simply (re)construct the whitespace
list via IsSpace or \s on the fly.
Of course, in the parsed result, no item should
contain whitespace.
C<\xA0> is specifically a I<nonbreaking> whitespace
character and thus should B<not> break the list.
=end kwid
#?pugs emit if $?PUGS_BACKEND ne "BACKEND_PUGS" {
#?pugs emit skip_rest "PIL2JS and PIL-Run do not support EVAL() yet.";
#?pugs emit exit;
#?pugs emit }
my @list = <a b c d>;
my @separators = ("\t","\r","\n"," ");
my @nonseparators = (",","/","\\",";","\xa0");
plan +@separators + @nonseparators + 3;
for @separators -> $sep {
my $str = "<$sep" ~ @list.join("$sep$sep") ~ "$sep>";
my @res = EVAL $str;
my $vis = sprintf "%02x", ord $sep;
is( @res, @list, "'\\x$vis\\x$vis' is properly parsed as list whitespace")
};
for @nonseparators -> $sep {
my $ex = @list.join($sep);
my $str = "<" ~$ex~ ">";
my @res = EVAL $str;
my $vis = sprintf "%02x", ord $sep;
#?rakudo emit if $sep eq "\xa0" {
#?rakudo emit todo('\xa0 should not be a separator for list quotes');
#?rakudo emit };
#?niecza emit if $sep eq "\xa0" {
#?niecza emit todo('\xa0 should not be a separator for list quotes');
#?niecza emit };
is( @res, [@list.join($sep)], "'\\x$vis' does not split in a whitespace quoted list")
};
is < foo
>, 'foo', 'various combinations of whitespaces are stripped';
# RT #73772
isa_ok < >, Parcel, '< > (only whitespaces) is empty Parcel';
is < >.elems, 0, ".. and it's really empty";
# vim: ft=perl6

View File

@@ -0,0 +1,32 @@
use Test;
# stress test for lexicals and lexical subs
# See
# http://en.wikipedia.org/w/index.php?title=Man_or_boy_test&oldid=249795453#Perl
my @results = 1, 0, -2, 0, 1, 0, 1, -1, -10, -30;
# if we want to *really* stress-test, we can use a few more tests:
# my @results = 1, 0, -2, 0, 1, 0, 1, -1, -10, -30, -67, -138
# -291, -642, -1446, -3250, -7244, -16065, -35601, -78985;
plan +@results;
sub A($k is copy, &x1, &x2, &x3, &x4, &x5) {
my $B;
$B = sub (*@) { A(--$k, $B, &x1, &x2, &x3, &x4) };
if ($k <= 0) {
return x4($k, &x1, &x2, &x3, &x4, &x5)
+ x5($k, &x1, &x2, &x3, &x4, &x5);
}
return $B();
};
for 0 .. (@results-1) -> $i {
is A($i, sub (*@) {1}, sub (*@) {-1}, sub (*@) {-1}, sub (*@) {1}, sub (*@) {0}),
@results[$i],
"man-or-boy test for start value $i";
}
# vim: ft=perl6

39
samples/RAML/api.raml Normal file
View File

@@ -0,0 +1,39 @@
#%RAML 0.8
title: World Music API
baseUri: http://example.api.com/{version}
version: v1
traits:
- paged:
queryParameters:
pages:
description: The number of pages to return
type: number
- secured: !include http://raml-example.com/secured.yml
/songs:
is: [ paged, secured ]
get:
queryParameters:
genre:
description: filter the songs by genre
post:
/{songId}:
get:
responses:
200:
body:
application/json:
schema: |
{ "$schema": "http://json-schema.org/schema",
"type": "object",
"description": "A canonical song",
"properties": {
"title": { "type": "string" },
"artist": { "type": "string" }
},
"required": [ "title", "artist" ]
}
application/xml:
delete:
description: |
This method will *delete* an **individual song**

View File

@@ -1,12 +0,0 @@
$blue: #3bbfce;
$margin: 16px;
.content_navigation {
color: $blue;
}
.border {
padding: $margin / 2;
margin: $margin / 2;
border: 2px $blue solid;
}

47
samples/XML/pt_BR.ts Normal file
View File

@@ -0,0 +1,47 @@
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.0" language="pt_BR">
<context>
<name>MainWindow</name>
<message>
<location filename="../forms/mainwindow.ui" line="22"/>
<source>United Kingdom</source>
<translation>Reino Unido</translation>
</message>
<message>
<location filename="../forms/mainwindow.ui" line="38"/>
<source>God save the Queen</source>
<translation>Deus salve a Rainha</translation>
</message>
<message>
<location filename="../mainwindow.cpp" line="46"/>
<source>England</source>
<translation>Inglaterra</translation>
</message>
<message>
<location filename="../mainwindow.cpp" line="47"/>
<source>Wales</source>
<translation>Gales</translation>
</message>
<message>
<location filename="../mainwindow.cpp" line="48"/>
<source>Scotland</source>
<translation>Escócia</translation>
</message>
<message>
<location filename="../mainwindow.cpp" line="49"/>
<source>Northern Ireland</source>
<translation>Irlanda Norte</translation>
</message>
<message>
<location filename="../mainwindow.cpp" line="51"/>
<source>Portuguese</source>
<translation>Português</translation>
</message>
<message>
<location filename="../mainwindow.cpp" line="52"/>
<source>English</source>
<translation>Inglês</translation>
</message>
</context>
</TS>

89
samples/XML/some-ideas.mm Normal file
View File

@@ -0,0 +1,89 @@
<map version="0.9.0">
<!-- To view this file, download free mind mapping software FreeMind from http://freemind.sourceforge.net -->
<node COLOR="#000000" CREATED="1385819664217" ID="ID_1105859543" MODIFIED="1385820134114" TEXT="Some ideas on demexp">
<font NAME="SansSerif" SIZE="20"/>
<hook NAME="accessories/plugins/AutomaticLayout.properties"/>
<node COLOR="#0033ff" CREATED="1385819753503" ID="ID_1407588370" MODIFIED="1385819767173" POSITION="right" TEXT="User Interface">
<edge STYLE="sharp_bezier" WIDTH="8"/>
<font NAME="SansSerif" SIZE="18"/>
<node COLOR="#00b439" CREATED="1385819771320" ID="ID_1257512743" MODIFIED="1385819783131" TEXT="Text file">
<edge STYLE="bezier" WIDTH="thin"/>
<font NAME="SansSerif" SIZE="16"/>
</node>
<node COLOR="#00b439" CREATED="1385819783831" ID="ID_997633499" MODIFIED="1385819786761" TEXT="Web">
<edge STYLE="bezier" WIDTH="thin"/>
<font NAME="SansSerif" SIZE="16"/>
</node>
<node COLOR="#00b439" CREATED="1385819787041" ID="ID_204106158" MODIFIED="1385819794885" TEXT="Graphical interface">
<edge STYLE="bezier" WIDTH="thin"/>
<font NAME="SansSerif" SIZE="16"/>
</node>
<node COLOR="#00b439" CREATED="1385819795339" ID="ID_768498137" MODIFIED="1385819800338" TEXT="Email">
<edge STYLE="bezier" WIDTH="thin"/>
<font NAME="SansSerif" SIZE="16"/>
</node>
<node COLOR="#00b439" CREATED="1385819801043" ID="ID_1660630451" MODIFIED="1385819802441" TEXT="SMS">
<edge STYLE="bezier" WIDTH="thin"/>
<font NAME="SansSerif" SIZE="16"/>
</node>
</node>
<node COLOR="#0033ff" CREATED="1385819872899" ID="ID_281388957" MODIFIED="1385819878444" POSITION="left" TEXT="Cordorcet voting module">
<edge STYLE="sharp_bezier" WIDTH="8"/>
<font NAME="SansSerif" SIZE="18"/>
<node COLOR="#00b439" CREATED="1385819880540" ID="ID_1389666909" MODIFIED="1385819948101" TEXT="Input">
<edge STYLE="bezier" WIDTH="thin"/>
<font NAME="SansSerif" SIZE="16"/>
<node COLOR="#990000" CREATED="1385819893834" ID="ID_631111389" MODIFIED="1385819901697" TEXT="Number of votes">
<font NAME="SansSerif" SIZE="14"/>
</node>
<node COLOR="#990000" CREATED="1385819902442" ID="ID_838201093" MODIFIED="1385819910452" TEXT="Number of possible choices">
<font NAME="SansSerif" SIZE="14"/>
</node>
<node COLOR="#990000" CREATED="1385819910703" ID="ID_1662888975" MODIFIED="1385819933316" TEXT="For a vote: number of votes and list of choices">
<font NAME="SansSerif" SIZE="14"/>
</node>
</node>
<node COLOR="#00b439" CREATED="1385819949027" ID="ID_1504837261" MODIFIED="1385819952198" TEXT="Format">
<edge STYLE="bezier" WIDTH="thin"/>
<font NAME="SansSerif" SIZE="16"/>
<node COLOR="#990000" CREATED="1385819955105" ID="ID_647722151" MODIFIED="1385819962151" TEXT="A single file?">
<font NAME="SansSerif" SIZE="14"/>
</node>
<node COLOR="#990000" CREATED="1385819962642" ID="ID_1374756253" MODIFIED="1385819976939" TEXT="Several files (parameters + 1 per vote)?">
<font NAME="SansSerif" SIZE="14"/>
</node>
<node COLOR="#990000" CREATED="1385819977578" ID="ID_979556559" MODIFIED="1385819984309" TEXT="JSON?">
<font NAME="SansSerif" SIZE="14"/>
</node>
</node>
</node>
<node COLOR="#0033ff" CREATED="1385820005408" ID="ID_1886566753" MODIFIED="1385820009909" POSITION="right" TEXT="Technologies">
<edge STYLE="sharp_bezier" WIDTH="8"/>
<font NAME="SansSerif" SIZE="18"/>
<node COLOR="#00b439" CREATED="1385820011913" ID="ID_1291489552" MODIFIED="1385820014698" TEXT="SPARK 2014">
<edge STYLE="bezier" WIDTH="thin"/>
<font NAME="SansSerif" SIZE="16"/>
</node>
<node COLOR="#00b439" CREATED="1385820015481" ID="ID_1825929484" MODIFIED="1385820017935" TEXT="Frama-C">
<edge STYLE="bezier" WIDTH="thin"/>
<font NAME="SansSerif" SIZE="16"/>
</node>
<node COLOR="#00b439" CREATED="1385820018603" ID="ID_253774957" MODIFIED="1385820027363" TEXT="Why3 -&gt; OCaml">
<edge STYLE="bezier" WIDTH="thin"/>
<font NAME="SansSerif" SIZE="16"/>
</node>
</node>
<node COLOR="#0033ff" CREATED="1385820136808" ID="ID_1002115371" MODIFIED="1385820139813" POSITION="left" TEXT="Vote storage">
<edge STYLE="sharp_bezier" WIDTH="8"/>
<font NAME="SansSerif" SIZE="18"/>
<node COLOR="#00b439" CREATED="1385820141400" ID="ID_1882609124" MODIFIED="1385820145261" TEXT="Database">
<edge STYLE="bezier" WIDTH="thin"/>
<font NAME="SansSerif" SIZE="16"/>
</node>
<node COLOR="#00b439" CREATED="1385820146138" ID="ID_1771403777" MODIFIED="1385820154334" TEXT="Text file (XML?)">
<edge STYLE="bezier" WIDTH="thin"/>
<font NAME="SansSerif" SIZE="16"/>
</node>
</node>
</node>
</map>

1
samples/text/messages.fr Normal file
View File

@@ -0,0 +1 @@
the green potato=la pomme de terre verte

1
samples/text/readme.txt Normal file
View File

@@ -0,0 +1 @@
Read me now!

View File

@@ -13,6 +13,9 @@ set +x
mkdir -p ./vendor/gems
# Clean out any unversioned files
git clean -fd
bundle install --local --path ./vendor/gems
bundle exec rake samples
bundle exec rake

View File

@@ -469,16 +469,16 @@ class TestBlob < Test::Unit::TestCase
# Test language detection for files which shouldn't be used as samples
root = File.expand_path('../fixtures', __FILE__)
Dir.entries(root).each do |language|
next unless File.file?(language)
next if language == '.' || language == '..' || File.basename(language) == 'ace_modes.json'
# Each directory contains test files of a language
dirname = File.join(root, language)
Dir.entries(dirname).each do |filename|
next unless File.file?(filename)
# By default the blob searches for the file in the samples;
# thus, we need to give it the absolute path
filepath = File.join(dirname, filename)
next unless File.file?(filepath)
blob = blob(filepath)
assert blob.language, "No language for #{filepath}"
assert_equal language, blob.language.name, blob.name

View File

@@ -11,23 +11,35 @@ class TestHeuristcs < Test::Unit::TestCase
File.read(File.join(samples_path, name))
end
def file_blob(name)
path = File.exist?(name) ? name : File.join(samples_path, name)
FileBlob.new(path)
end
def all_fixtures(language_name, file="*")
Dir.glob("#{samples_path}/#{language_name}/#{file}")
end
def test_no_match
language = []
results = Heuristics.call(file_blob("JavaScript/namespace.js"), language)
assert_equal [], results
end
# Candidate languages = ["C++", "Objective-C"]
def test_obj_c_by_heuristics
# Only calling out '.h' filenames as these are the ones causing issues
all_fixtures("Objective-C", "*.h").each do |fixture|
results = Heuristics.disambiguate_c(fixture("Objective-C/#{File.basename(fixture)}"))
assert_equal Language["Objective-C"], results.first
end
assert_heuristics({
"Objective-C" => all_fixtures("Objective-C", "*.h"),
"C++" => ["C++/render_adapter.cpp", "C++/ThreadedQueue.h"],
"C" => nil
})
end
# Candidate languages = ["C++", "Objective-C"]
def test_cpp_by_heuristics
results = Heuristics.disambiguate_c(fixture("C++/render_adapter.cpp"))
assert_equal Language["C++"], results.first
def test_c_by_heuristics
languages = [Language["C++"], Language["Objective-C"], Language["C"]]
results = Heuristics.call(file_blob("C/ArrowLeft.h"), languages)
assert_equal [], results
end
def test_detect_still_works_if_nothing_matches
@@ -37,104 +49,105 @@ class TestHeuristcs < Test::Unit::TestCase
end
# Candidate languages = ["Perl", "Prolog"]
def test_pl_prolog_by_heuristics
results = Heuristics.disambiguate_pl(fixture("Prolog/turing.pl"))
assert_equal Language["Prolog"], results.first
end
# Candidate languages = ["Perl", "Prolog"]
def test_pl_perl_by_heuristics
results = Heuristics.disambiguate_pl(fixture("Perl/perl-test.t"))
assert_equal Language["Perl"], results.first
def test_pl_prolog_perl_by_heuristics
assert_heuristics({
"Prolog" => "Prolog/turing.pl",
"Perl" => "Perl/perl-test.t",
})
end
# Candidate languages = ["ECL", "Prolog"]
def test_ecl_prolog_by_heuristics
results = Heuristics.disambiguate_ecl(fixture("Prolog/or-constraint.ecl"))
assert_equal Language["Prolog"], results.first
end
# Candidate languages = ["ECL", "Prolog"]
def test_ecl_ecl_by_heuristics
results = Heuristics.disambiguate_ecl(fixture("ECL/sample.ecl"))
assert_equal Language["ECL"], results.first
assert_heuristics({
"ECL" => "ECL/sample.ecl",
"Prolog" => "Prolog/or-constraint.ecl"
})
end
# Candidate languages = ["IDL", "Prolog"]
def test_pro_prolog_by_heuristics
results = Heuristics.disambiguate_pro(fixture("Prolog/logic-problem.pro"))
assert_equal Language["Prolog"], results.first
end
# Candidate languages = ["IDL", "Prolog"]
def test_pro_idl_by_heuristics
results = Heuristics.disambiguate_pro(fixture("IDL/mg_acosh.pro"))
assert_equal Language["IDL"], results.first
def test_pro_prolog_idl_by_heuristics
assert_heuristics({
"Prolog" => "Prolog/logic-problem.pro",
"IDL" => "IDL/mg_acosh.pro"
})
end
# Candidate languages = ["AGS Script", "AsciiDoc"]
def test_asc_asciidoc_by_heuristics
results = Heuristics.disambiguate_asc(fixture("AsciiDoc/list.asc"))
assert_equal Language["AsciiDoc"], results.first
end
# Candidate languages = ["TypeScript", "XML"]
def test_ts_typescript_by_heuristics
results = Heuristics.disambiguate_ts(fixture("TypeScript/classes.ts"))
assert_equal Language["TypeScript"], results.first
end
# Candidate languages = ["TypeScript", "XML"]
def test_ts_xml_by_heuristics
results = Heuristics.disambiguate_ts(fixture("XML/pt_BR.xml"))
assert_equal Language["XML"], results.first
assert_heuristics({
"AsciiDoc" => "AsciiDoc/list.asc",
"AGS Script" => nil
})
end
def test_cl_by_heuristics
languages = ["Common Lisp", "OpenCL"]
languages.each do |language|
all_fixtures(language).each do |fixture|
results = Heuristics.disambiguate_cl(fixture("#{language}/#{File.basename(fixture)}"))
assert_equal Language[language], results.first
end
end
assert_heuristics({
"Common Lisp" => all_fixtures("Common Lisp"),
"OpenCL" => all_fixtures("OpenCL")
})
end
def test_f_by_heuristics
languages = ["FORTRAN", "Forth"]
languages.each do |language|
all_fixtures(language).each do |fixture|
results = Heuristics.disambiguate_f(fixture("#{language}/#{File.basename(fixture)}"))
assert_equal Language[language], results.first
end
end
assert_heuristics({
"FORTRAN" => all_fixtures("FORTRAN"),
"Forth" => all_fixtures("Forth")
})
end
# Candidate languages = ["Hack", "PHP"]
def test_hack_by_heuristics
results = Heuristics.disambiguate_hack(fixture("Hack/funs.php"))
assert_equal Language["Hack"], results.first
assert_heuristics({
"Hack" => "Hack/funs.php",
"PHP" => "PHP/Model.php"
})
end
# Candidate languages = ["Scala", "SuperCollider"]
def test_sc_supercollider_by_heuristics
results = Heuristics.disambiguate_sc(fixture("SuperCollider/WarpPreset.sc"))
assert_equal Language["SuperCollider"], results.first
end
# Candidate languages = ["Scala", "SuperCollider"]
def test_sc_scala_by_heuristics
results = Heuristics.disambiguate_sc(fixture("Scala/node11.sc"))
assert_equal Language["Scala"], results.first
def test_sc_supercollider_scala_by_heuristics
assert_heuristics({
"SuperCollider" => "SuperCollider/WarpPreset.sc",
"Scala" => "Scala/node11.sc"
})
end
def test_fs_by_heuristics
languages = ["F#", "Forth", "GLSL"]
languages.each do |language|
all_fixtures(language).each do |fixture|
results = Heuristics.disambiguate_fs(fixture("#{language}/#{File.basename(fixture)}"))
assert_equal Language[language], results.first
assert_heuristics({
"F#" => all_fixtures("F#"),
"Forth" => all_fixtures("Forth"),
"GLSL" => all_fixtures("GLSL")
})
end
def test_fr_by_heuristics
assert_heuristics({
"Frege" => all_fixtures("Frege"),
"Forth" => all_fixtures("Forth"),
"text" => all_fixtures("text")
})
end
def assert_heuristics(hash)
candidates = hash.keys.map { |l| Language[l] }
hash.each do |language, blobs|
Array(blobs).each do |blob|
result = Heuristics.call(file_blob(blob), candidates)
assert_equal [Language[language]], result, "Failed for #{blob}"
end
end
end
def test_ls_by_heuristics
assert_heuristics({
"LiveScript" => "LiveScript/hello.ls",
"LoomScript" => "LoomScript/HelloWorld.ls"
})
end
def test_ts_by_heuristics
assert_heuristics({
"TypeScript" => all_fixtures("TypeScript", "*.ts"),
"XML" => all_fixtures("XML", "*.ts")
})
end
end
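For reference, the rewritten tests above call Heuristics.call(blob, candidates) directly; the assert_heuristics helper defined at the end of the class wraps exactly that call for each fixture. A minimal usage sketch outside the test suite (the file path is illustrative and assumed to exist on disk):
require 'linguist'
# Candidate languages for a ".pro" file, as in test_pro_prolog_idl_by_heuristics.
candidates = [Linguist::Language["IDL"], Linguist::Language["Prolog"]]
# Wrap the file in a blob and let the heuristics pick the more likely language.
blob = Linguist::FileBlob.new("samples/Prolog/logic-problem.pro")
puts Linguist::Heuristics.call(blob, candidates).map(&:name).inspect
# => ["Prolog"] when the heuristics can disambiguate; [] when they cannot.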

View File

@@ -223,34 +223,21 @@ class TestLanguage < Test::Unit::TestCase
assert_equal [Language['Chapel']], Language.find_by_filename('examples/hello.chpl')
end
def test_find_by_shebang
assert_equal 'ruby', Linguist.interpreter_from_shebang("#!/usr/bin/ruby\n# baz")
{ [] => ["",
"foo",
"#bar",
"#baz",
"///",
"\n\n\n\n\n",
" #!/usr/sbin/ruby",
"\n#!/usr/sbin/ruby"],
['Ruby'] => ["#!/usr/bin/env ruby\n# baz",
"#!/usr/sbin/ruby\n# bar",
"#!/usr/bin/ruby\n# foo",
"#!/usr/sbin/ruby",
"#!/usr/sbin/ruby foo bar baz\n"],
['R'] => ["#!/usr/bin/env Rscript\n# example R script\n#\n"],
['Shell'] => ["#!/usr/bin/bash\n", "#!/bin/sh"],
['Python'] => ["#!/bin/python\n# foo\n# bar\n# baz",
"#!/usr/bin/python2.7\n\n\n\n",
"#!/usr/bin/python3\n\n\n\n"],
["Common Lisp"] => ["#!/usr/bin/sbcl --script\n\n"]
}.each do |languages, bodies|
bodies.each do |body|
assert_equal([body, languages.map{|l| Language[l]}],
[body, Language.find_by_shebang(body)])
end
def test_find_by_interpreter
{
"ruby" => "Ruby",
"Rscript" => "R",
"sh" => "Shell",
"bash" => "Shell",
"python" => "Python",
"python2" => "Python",
"python3" => "Python",
"sbcl" => "Common Lisp"
}.each do |interpreter, language|
assert_equal [Language[language]], Language.find_by_interpreter(interpreter)
end
assert_equal [], Language.find_by_interpreter(nil)
end
def test_find
@@ -321,11 +308,11 @@ class TestLanguage < Test::Unit::TestCase
assert_equal 'css', Language['CSS'].ace_mode
assert_equal 'lsl', Language['LSL'].ace_mode
assert_equal 'javascript', Language['JavaScript'].ace_mode
assert_equal 'none', Language['FORTRAN'].ace_mode
end
def test_ace_modes
assert Language.ace_modes.include?(Language['Ruby'])
assert !Language.ace_modes.include?(Language['FORTRAN'])
end
def test_wrap
@@ -369,4 +356,22 @@ class TestLanguage < Test::Unit::TestCase
message << missing.map { |language| sprintf("%-#{width}s %s", language.name, language.tm_scope) }.sort.join("\n")
assert missing.empty?, message
end
def test_all_languages_have_a_valid_ace_mode
ace_fixture_path = File.join('test', 'fixtures', 'ace_modes.json')
skip("No ace_modes.json file") unless File.exist?(ace_fixture_path)
ace_github_modes = Yajl.load(File.read(ace_fixture_path))
existing_ace_modes = ace_github_modes.map do |ace_github_mode|
File.basename(ace_github_mode["name"], ".js") if ace_github_mode["name"] !~ /_highlight_rules|_test|_worker/
end.compact.uniq.sort.map(&:downcase)
missing = Language.all.reject { |language| language.ace_mode == "none" || existing_ace_modes.include?(language.ace_mode) }
message = "The following languages do not have an Ace mode listed in languages.yml. Please add an Ace mode for all new languages.\n"
message << "If no Ace mode exists for a language, mark the language with `ace_mode: none` in lib/linguist/languages.yml.\n"
width = missing.map { |language| language.name.length }.max
message << missing.map { |language| sprintf("%-#{width}s %s", language.name, language.ace_mode) }.sort.join("\n")
assert missing.empty?, message
end
end

View File

@@ -82,9 +82,4 @@ class TestSamples < Test::Unit::TestCase
end
end
end
def test_shebang
assert_equal "crystal", Linguist.interpreter_from_shebang("#!/usr/bin/env bin/crystal")
assert_equal "python2", Linguist.interpreter_from_shebang("#!/usr/bin/python2.4")
end
end

39
test/test_shebang.rb Normal file
View File

@@ -0,0 +1,39 @@
require_relative "./helper"
class TestShebang < Test::Unit::TestCase
include Linguist
def assert_interpreter(interpreter, body)
assert_equal interpreter, Shebang.interpreter(body)
end
def test_shebangs
assert_interpreter nil, ""
assert_interpreter nil, "foo"
assert_interpreter nil, "#bar"
assert_interpreter nil, "#baz"
assert_interpreter nil, "///"
assert_interpreter nil, "\n\n\n\n\n"
assert_interpreter nil, " #!/usr/sbin/ruby"
assert_interpreter nil, "\n#!/usr/sbin/ruby"
assert_interpreter nil, "#!"
assert_interpreter "ruby", "#!/usr/sbin/ruby\n# bar"
assert_interpreter "ruby", "#!/usr/bin/ruby\n# foo"
assert_interpreter "ruby", "#!/usr/sbin/ruby"
assert_interpreter "ruby", "#!/usr/sbin/ruby foo bar baz\n"
assert_interpreter "Rscript", "#!/usr/bin/env Rscript\n# example R script\n#\n"
assert_interpreter "crystal", "#!/usr/bin/env bin/crystal"
assert_interpreter "ruby", "#!/usr/bin/env ruby\n# baz"
assert_interpreter "bash", "#!/usr/bin/bash\n"
assert_interpreter "sh", "#!/bin/sh"
assert_interpreter "python", "#!/bin/python\n# foo\n# bar\n# baz"
assert_interpreter "python2", "#!/usr/bin/python2.7\n\n\n\n"
assert_interpreter "python3", "#!/usr/bin/python3\n\n\n\n"
assert_interpreter "sbcl", "#!/usr/bin/sbcl --script\n\n"
assert_interpreter "perl", "#! perl"
end
end
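The assertions above pin down the expected behaviour of Shebang.interpreter without showing its implementation here. A minimal sketch that satisfies them, assuming a hypothetical module name (ShebangSketch) and regular expressions of my own; this is not Linguist's actual code:
module ShebangSketch
  # Return the interpreter named on a "#!" first line, or nil if there is none.
  def self.interpreter(data)
    first_line = data.lines.first or return nil
    return nil unless first_line.start_with?("#!")
    tokens = first_line[2..-1].strip.split(/\s+/)
    return nil if tokens.empty?
    # "#!/usr/bin/env ruby" names the interpreter in the second token.
    name = File.basename(tokens.first)
    name = File.basename(tokens[1].to_s) if name == "env"
    return nil if name.empty?
    # Trailing minor versions are dropped: "python2.7" => "python2".
    name.sub(/\.\d+\z/, "")
  end
end
ShebangSketch.interpreter("#!/usr/bin/env bin/crystal")  # => "crystal"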

BIN
vendor/cache/yajl-ruby-1.1.0.gem vendored Normal file

Binary file not shown.

Binary file not shown.