scanner: scan ;- and #-style comments
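
Replace the Go-style //- and /*-style comment scanning with ;- and
#-style line comments: scanComment now reads from the initial ';' or
'#' to the end of the line. findLineEnd, which looked ahead through
comments for a line end, is removed, since every comment now extends
to the end of the line anyway.

Update the tests to match: the disabled COMMENT cases are replaced with
;- and #-style equivalents, the now-inapplicable /*-style error cases
are dropped, and TestScan skips any number of EOL tokens between
expected tokens instead of exactly three. As in the updated token
table, each of the following is scanned as a COMMENT token:

    ; a comment
    # a comment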
diff --git a/scanner/scanner.go b/scanner/scanner.go
index b8dc06f..2908817 100644
--- a/scanner/scanner.go
+++ b/scanner/scanner.go
@@ -155,83 +155,20 @@
 }
 
 func (s *Scanner) scanComment() string {
-	// initial '/' already consumed; s.ch == '/' || s.ch == '*'
-	offs := s.offset - 1 // position of initial '/'
+	// initial [;#] already consumed
+	offs := s.offset - 1 // position of initial [;#]
 
-	if s.ch == '/' {
-		//-style comment
-		s.next()
-		for s.ch != '\n' && s.ch >= 0 {
-			s.next()
-		}
-		if offs == s.lineOffset {
-			// comment starts at the beginning of the current line
-			s.interpretLineComment(s.src[offs:s.offset])
-		}
-		goto exit
-	}
-
-	/*-style comment */
 	s.next()
-	for s.ch >= 0 {
-		ch := s.ch
+	for s.ch != '\n' && s.ch >= 0 {
 		s.next()
-		if ch == '*' && s.ch == '/' {
-			s.next()
-			goto exit
-		}
 	}
-
-	s.error(offs, "comment not terminated")
-
-exit:
+	if offs == s.lineOffset {
+		// comment starts at the beginning of the current line
+		s.interpretLineComment(s.src[offs:s.offset])
+	}
 	return string(s.src[offs:s.offset])
 }
 
-func (s *Scanner) findLineEnd() bool {
-	// initial '/' already consumed
-
-	defer func(offs int) {
-		// reset scanner state to where it was upon calling findLineEnd
-		s.ch = '/'
-		s.offset = offs
-		s.rdOffset = offs + 1
-		s.next() // consume initial '/' again
-	}(s.offset - 1)
-
-	// read ahead until a newline, EOF, or non-comment token is found
-	for s.ch == '/' || s.ch == '*' {
-		if s.ch == '/' {
-			//-style comment always contains a newline
-			return true
-		}
-		/*-style comment: look for newline */
-		s.next()
-		for s.ch >= 0 {
-			ch := s.ch
-			if ch == '\n' {
-				return true
-			}
-			s.next()
-			if ch == '*' && s.ch == '/' {
-				s.next()
-				break
-			}
-		}
-		s.skipWhitespace() // s.insertSemi is set
-		if s.ch < 0 || s.ch == '\n' {
-			return true
-		}
-		if s.ch != '/' {
-			// non-comment token
-			return false
-		}
-		s.next() // consume '/'
-	}
-
-	return false
-}
-
 func isLetter(ch rune) bool {
 	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
 }
diff --git a/scanner/scanner_test.go b/scanner/scanner_test.go
index ffb59f8..7492778 100644
--- a/scanner/scanner_test.go
+++ b/scanner/scanner_test.go
@@ -8,6 +8,7 @@
 	"os"
 	"path/filepath"
 	"runtime"
+	"strings"
 	"testing"
 )
 
@@ -42,9 +43,9 @@
 var tokens = [...]elt{
 	// Special tokens
 	{token.EOL, "\n", special},
-//FIXME
-//	{token.COMMENT, "/* a comment */", special},
-//	{token.COMMENT, "// a comment \n", special},
+
+	{token.COMMENT, "; a comment \n", special},
+	{token.COMMENT, "# a comment \n", special},
 
 	// Identifiers and basic type literals
 //FIXME
@@ -126,8 +127,9 @@
 		Line:     1,
 		Column:   1,
 	}
+	pos, tok, lit := s.Scan()
+outer:
 	for {
-		pos, tok, lit := s.Scan()
 		if lit == "" {
 			// no literal value for non-literal tokens
 			lit = tok.String()
@@ -161,8 +163,8 @@
 		}
 		epos.Offset += len(lit) + len(whitespace)
 		epos.Line += newlineCount(lit) + whitespace_linecount
-		if tok == token.COMMENT && lit[1] == '/' {
-			// correct for unaccounted '/n' in //-style comment
+		if tok == token.COMMENT && strings.HasSuffix(e.lit, "\n") {
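+			// correct for the terminating '\n' that is not part of the comment literal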
 			epos.Offset++
 			epos.Line++
 		}
@@ -170,18 +171,12 @@
 		if tok == token.EOF {
 			break
 		}
-		// skip three EOLs
-		_, tok, lit = s.Scan()
-		if tok != token.EOL {
-			t.Errorf("bad token: got %s, expected %s", lit, tok, token.EOL)
-		}
-		_, tok, lit = s.Scan()
-		if tok != token.EOL {
-			t.Errorf("bad token: got %s, expected %s", lit, tok, token.EOL)
-		}
-		_, tok, lit = s.Scan()
-		if tok != token.EOL {
-			t.Errorf("bad token: got %s, expected %s", lit, tok, token.EOL)
+		// skip EOLs
+		for {
+			pos, tok, lit = s.Scan()
+			if tok != token.EOL {
+				continue outer
+			}
 		}
 	}
 	if s.ErrorCount != 0 {
@@ -509,8 +504,6 @@
 //	{`"`, token.STRING, 0, "string not terminated"},
 //	{"``", token.STRING, 0, ""},
 //	{"`", token.STRING, 0, "string not terminated"},
-//	{"/**/", token.COMMENT, 0, ""},
-//	{"/*", token.COMMENT, 0, "comment not terminated"},
 //	{"\"abc\x00def\"", token.STRING, 4, "illegal character NUL"},
 //	{"\"abc\x80def\"", token.STRING, 4, "illegal UTF-8 encoding"},
 }