expo run:ios - build fails - errors exist in generated swift files - swift

I am following this guide:
https://www.mongodb.com/docs/realm/sdk/react-native/bootstrap-with-expo/
but when I get to the step that says to run expo run:ios it fails with the following errors:
› Skipping dev server
› Planning build
› Executing react-native Pods/FBReactNativeSpec » [CP-User] Generate Specs
❌ (node_modules/expo-modules-core/ios/Swift/Functions/AsyncFunctionComponent.swift:94:17)
92 | }
93 |
> 94 | let queue = queue ?? defaultQueue
| ^ variable used within its own initial value
95 |
96 | queue.async { [body, name] in
97 | let returnedValue: ReturnType?
❌ (node_modules/expo-modules-core/ios/Swift/Views/ConcreteViewProp.swift:42:52)
40 | }
41 | guard let value = try propType.cast(value) as? PropType else {
> 42 | throw Conversions.CastingException<PropType>(value)
| ^ variable declared in 'guard' condition is not usable in its body
43 | }
44 | setter(view, value)
45 | }
❌ (node_modules/expo-modules-core/ios/Swift/Functions/ConcurrentFunctionDefinition.swift:8:33)
6 | */
7 | public final class ConcurrentFunctionDefinition<Args, FirstArgType, ReturnType>: AnyFunction {
> 8 | typealias ClosureType = (Args) async throws -> ReturnType
| ^ consecutive declarations on a line must be separated by ';'
9 |
10 | let body: ClosureType
11 |
❌ (node_modules/expo-modules-core/ios/Swift/Functions/ConcurrentFunctionDefinition.swift:8:34)
6 | */
7 | public final class ConcurrentFunctionDefinition<Args, FirstArgType, ReturnType>: AnyFunction {
> 8 | typealias ClosureType = (Args) async throws -> ReturnType
| ^ expected declaration
9 |
10 | let body: ClosureType
11 |
❌ (node_modules/expo-modules-core/ios/Swift/Functions/ConcurrentFunctionDefinition.swift:56:36)
54 | // swiftlint:disable force_cast
55 | let argumentsTuple = try Conversions.toTuple(arguments) as! Args
> 56 | let returnValue = try await body(argumentsTuple)
| ^ consecutive statements on a line must be separated by ';'
57 |
58 | result = .success(returnValue)
59 | } catch let error as Exception {
❌ (node_modules/expo-modules-core/ios/Swift/Functions/ConcurrentFunctionDefinition.swift:94:49)
92 | public func AsyncFunction<R>(
93 | _ name: String,
> 94 | @_implicitSelfCapture _ closure: @escaping () async throws -> R
| ^ expected ',' separator
95 | ) -> ConcurrentFunctionDefinition<(), Void, R> {
96 | return ConcurrentFunctionDefinition(
97 | name,
❌ (node_modules/expo-modules-core/ios/Swift/Functions/ConcurrentFunctionDefinition.swift:94:62)
92 | public func AsyncFunction<R>(
93 | _ name: String,
> 94 | @_implicitSelfCapture _ closure: @escaping () async throws -> R
| ^ expected ':' following argument label and parameter name
95 | ) -> ConcurrentFunctionDefinition<(), Void, R> {
96 | return ConcurrentFunctionDefinition(
97 | name,
❌ (node_modules/expo-modules-core/ios/Swift/Functions/ConcurrentFunctionDefinition.swift:109:51)
107 | public func AsyncFunction<R, A0: AnyArgument>(
108 | _ name: String,
> 109 | @_implicitSelfCapture _ closure: @escaping (A0) async throws -> R
| ^ expected ',' separator
110 | ) -> ConcurrentFunctionDefinition<(A0), A0, R> {
111 | return ConcurrentFunctionDefinition(
112 | name,
❌ (node_modules/expo-modules-core/ios/Swift/Functions/ConcurrentFunctionDefinition.swift:109:64)
107 | public func AsyncFunction<R, A0: AnyArgument>(
108 | _ name: String,
> 109 | @_implicitSelfCapture _ closure: @escaping (A0) async throws -> R
| ^ expected ':' following argument label and parameter name
110 | ) -> ConcurrentFunctionDefinition<(A0), A0, R> {
111 | return ConcurrentFunctionDefinition(
112 | name,
❌ (node_modules/expo-modules-core/ios/Swift/Functions/ConcurrentFunctionDefinition.swift:124:55)
122 | public func AsyncFunction<R, A0: AnyArgument, A1: AnyArgument>(
123 | _ name: String,
> 124 | @_implicitSelfCapture _ closure: @escaping (A0, A1) async throws -> R
| ^ expected ',' separator
125 | ) -> ConcurrentFunctionDefinition<(A0, A1), A0, R> {
126 | return ConcurrentFunctionDefinition(
127 | name,
... Hundreds of similar errors later...
❌ (node_modules/expo-modules-core/ios/Swift/Views/ViewDefinition.swift:68:3)
66 | public func View<ViewType: UIView>(
67 | _ viewType: ViewType.Type,
> 68 | @ViewDefinition<ViewType>.ElementsBuilder _ elements: @escaping () -> [AnyDefinition]
| ^ struct 'ElementsBuilder' cannot be used as an attribute
69 | ) -> ViewDefinition<ViewType> {
70 | return ViewDefinition(viewType, elements: elements())
71 | }
❌ (node_modules/expo-modules-core/ios/Swift/Views/ViewManagerDefinitionComponents.swift:19:3)
17 | public func Prop<ViewType: UIView, PropType: AnyArgument>(
18 | _ name: String,
> 19 | @_implicitSelfCapture _ setter: @escaping (ViewType, PropType) -> Void
| ^ unknown attribute '_implicitSelfCapture'
20 | ) -> ConcreteViewProp<ViewType, PropType> {
21 | return ConcreteViewProp(
22 | name: name,
❌ (node_modules/expo-modules-core/ios/Swift/Views/ViewManagerDefinitionComponents.swift:34:3)
32 | */
33 | public func OnViewDidUpdateProps<ViewType: UIView>(
> 34 | @_implicitSelfCapture _ closure: @escaping (_ view: ViewType) -> Void
| ^ unknown attribute '_implicitSelfCapture'
35 | ) -> ViewLifecycleMethod<ViewType> {
36 | return ViewLifecycleMethod(type: .didUpdateProps, closure: closure)
37 | }
› Compiling react-native Pods/React-RCTVibration » RCTVibrationPlugins.mm
› Compiling react-native Pods/React-RCTVibration » RCTVibration.mm
› Compiling react-native Pods/React-RCTVibration » React-RCTVibration-dummy.m
› Compiling react-native Pods/React-RCTSettings » RCTSettingsManager.mm
› Compiling react-native Pods/React-RCTSettings » React-RCTSettings-dummy.m
› Compiling react-native Pods/React-RCTSettings » RCTSettingsPlugins.mm
› Compiling react-native Pods/React-RCTNetwork » React-RCTNetwork-dummy.m
› 104 error(s), and 0 warning(s)
I'm not sure what I'm doing wrong. Xcode reports the following error:
CommandError: Failed to build iOS project. "xcodebuild" exited with error code 65.
I have the latest expo cli installed
I tried building the project in Xcode directly; the project did build, but it won't launch on the simulator. Expo opens, but when connecting to the dev server the app just crashes.
I do not understand why running expo run:ios results in a build failure while building in Xcode results in a "success".

The solution is actually kind of silly,
The reason I was able to build successfully in Xcode but not with expo run:ios is that I have two versions of Xcode installed on this machine. One version is Xcode 12.3, and xcrun was pointed to that SDK package. I was able to fix this by running sudo xcode-select --switch /Applications/Xcode.app/Contents/Developer. Double-check that xcrun --show-sdk-path now lists the appropriate path. For good measure, I deleted the Pods folder and the Podfile.lock file and rebuilt the app using expo run:ios.

Related

Different type of base 16?

Base 16 should go from 0 to F, with F being equal to 15 in base 10. But yet, when I use a base-16 converter found on Google (https://www.hexator.com/), it says that F is equal to 46.
Expected results:
0 | 0
1 | 1
2 | 2
3 | 3
4 | 4
5 | 5
6 | 6
7 | 7
8 | 8
9 | 9
a | 10
b | 11
c | 12
d | 13
e | 14
f | 15
Am I misinterpreting something here?
That encoder is converting the ASCII value of the letter 'F' into the hexadecimal representation of it. The ASCII value of 'F' is 70, which is 46 when converted into hexadecimal. See this ascii table.
That converter is converting text into its hex representation, not Hex strings into decimal numbers.

llvm-cov fails to generate report when run on cloud GitLab CI

I have been running the following llvm-cov report command (which ships as part of the Swift toolchains) in Docker images (swift:5.1) on various environments.
BINARY_PATH="..."
PROF_DATA_PATH="..."
IGNORE_FILENAME_REGEX="..."
llvm-cov report \
$BINARY_PATH \
--format=text \
-instr-profile="$PROF_DATA_PATH" \
-ignore-filename-regex="$IGNORE_FILENAME_REGEX"
When the docker image is hosted on any machine aside from GitLab's cloud docker runners, I get the expected code coverage output:
Filename Regions Missed Regions Cover Functions Missed Functions Executed Lines Missed Lines Cover
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
ChannelHandlers/RedisByteDecoder.swift 5 0 100.00% 3 0 100.00% 10 0 100.00%
ChannelHandlers/RedisCommandHandler.swift 15 5 66.67% 8 3 62.50% 45 11 75.56%
ChannelHandlers/RedisMessageEncoder.swift 3 1 66.67% 3 1 66.67% 13 6 53.85%
Commands/BasicCommands.swift 28 4 85.71% 16 2 87.50% 99 7 92.93%
Commands/HashCommands.swift 38 4 89.47% 29 1 96.55% 156 1 99.36%
Commands/ListCommands.swift 56 8 85.71% 48 5 89.58% 217 11 94.93%
Commands/SetCommands.swift 46 12 73.91% 30 4 86.67% 147 4 97.28%
Commands/SortedSetCommands.swift 172 19 88.95% 105 6 94.29% 555 18 96.76%
Commands/StringCommands.swift 23 2 91.30% 21 1 95.24% 100 1 99.00%
Extensions/StandardLibrary.swift 10 2 80.00% 6 1 83.33% 21 1 95.24%
Extensions/SwiftNIO.swift 9 1 88.89% 7 0 100.00% 38 1 97.37%
RESP/RESPTranslator.swift 69 7 89.86% 10 2 80.00% 172 10 94.19%
RESP/RESPValue.swift 39 11 71.79% 14 3 78.57% 69 17 75.36%
RESP/RESPValueConvertible.swift 52 19 63.46% 15 3 80.00% 99 22 77.78%
RedisClient.swift 2 0 100.00% 2 0 100.00% 7 0 100.00%
RedisConnection.swift 72 23 68.06% 47 10 78.72% 228 31 86.40%
RedisErrors.swift 12 4 66.67% 6 1 83.33% 23 3 86.96%
RedisKey.swift 15 9 40.00% 12 6 50.00% 38 20 47.37%
RedisMetrics.swift 9 2 77.78% 9 2 77.78% 23 2 91.30%
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
TOTAL 675 133 80.30% 391 51 86.96% 2060 166 91.94%
However, when the same docker images running the same commands, are hosted with GitLab's cloud runners:
Filename Regions Missed Regions Cover Functions Missed Functions Executed Lines Missed Lines Cover
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
TOTAL 0 0 - 0 0 - 0 0 -
I'm making sure that the code coverage data is provided correctly by the Swift Package Manager through ls -l and in every environment (including GitLab CI) I get:
Profdata: -rw-r--r--. 1 root root 575608 Feb 8 19:51 .build/x86_64-unknown-linux/debug/codecov/default.profdata
Test binary: -rwxr-xr-x. 1 root root 16309424 Feb 8 19:51 .build/x86_64-unknown-linux/debug/redi-stackPackageTests.xctest
This also happens with LLVM-8 and LLVM-9 (LLVM-7 ships w/ Swift 5.1)
For the life of me, I can't figure out why.
Environments I've tested (all running Docker Engine 19+):
+----------------+-----------------+----------------+-------------------+
| | | | |
| HOST | OS | CPU | Generates Report? |
| | | | |
+-----------------------------------------------------------------------+
| iMac 2011 | High Sierra | sandybridge | YES |
+-----------------------------------------------------------------------+
| MBP 2019 | Catalina | skylake | YES |
+-----------------------------------------------------------------------+
| mac mini 2018 | Catalina | skylake | YES |
+-----------------------------------------------------------------------+
| GitHub Actions | 'ubuntu|latest' | skylake|avx512 | YES |
+-----------------------------------------------------------------------+
| GitLab CI | 'tags: docker' | haswell | NO |
+----------------+-----------------+----------------+-------------------+
Relevant bug reports:
Swift
GitLab

how to read date/time fields in brunel visualization

I'm importing the following csv file:
import pandas as pd
from numpy import log, abs, sign, sqrt
import brunel
# Read data
DISKAVGRIO = pd.read_csv("../DISKAVGRIO_nmon.csv")
DISKAVGRIO.head(6)
And the following table:
Hostname | Date-Time | hdisk1342 | hdisk1340 | hdisk1343 | ...
------------ | ----------------------- | ----------- | ------------- | ----------- | ------
host1 | 12-08-2015 00:56:12 | 0.0 | 0.0 | 0.0 | ...
host1 | 12-08-2015 01:11:13 | 0.0 | 0.0 | 0.0 | ...
host1 | 12-08-2015 01:26:14 | 0.0 | 0.0 | 0.0 | ...
host1 | 12-08-2015 01:41:14 | 0.0 | 0.0 | 0.0 | ...
host1 | 12-08-2015 01:56:14 | 0.0 | 0.4 | 4.2 | ...
host1 | 12-08-2015 02:11:14 | 0.0 | 0.0 | 0.0 | ...
Then I try to plot a line graphic and get the following error message:
# Line plot
%brunel data('DISKAVGRIO') x(Date-Time) y(hdisk1342) color(#series) line
And get the following error message:
--------------------------------------------------------------------------- java.lang.RuntimeExceptionPyRaisable Traceback (most recent call last) <ipython-input-4-1c7cb7700929> in <module>()
1 # Line plot
----> 2 get_ipython().magic("brunel data('DISKAVGRIO') x(Date-Time) y(hdisk1342) color(#series) line")
/home/anobre/anaconda3/lib/python3.5/site-packages/IPython/core/interactiveshell.py in magic(self, arg_s)
2161 magic_name, _, magic_arg_s = arg_s.partition(' ')
2162 magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
-> 2163 return self.run_line_magic(magic_name, magic_arg_s)
2164
2165 #-------------------------------------------------------------------------
/home/anobre/anaconda3/lib/python3.5/site-packages/IPython/core/interactiveshell.py in run_line_magic(self, magic_name, line)
2082 kwargs['local_ns'] = sys._getframe(stack_depth).f_locals
2083 with self.builtin_trap:
-> 2084 result = fn(*args,**kwargs)
2085 return result
2086
<decorator-gen-124> in brunel(self, line, cell)
/home/anobre/anaconda3/lib/python3.5/site-packages/IPython/core/magic.py in <lambda>(f, *a, **k)
191 # but it's overkill for just that one bit of state.
192 def magic_deco(arg):
--> 193 call = lambda f, *a, **k: f(*a, **k)
194
195 if callable(arg):
/home/anobre/anaconda3/lib/python3.5/site-packages/brunel/magics.py in brunel(self, line, cell)
42 parts = line.split('::')
43 action = parts[0].strip()
---> 44 datasets_in_brunel = brunel.get_dataset_names(action)
45 self.cache_data(datasets_in_brunel,datas)
46 if len(parts) > 2:
/home/anobre/anaconda3/lib/python3.5/site-packages/brunel/brunel.py in get_dataset_names(brunel_src)
92
93 def get_dataset_names(brunel_src):
---> 94 return brunel_util_java.D3Integration.getDatasetNames(brunel_src)
95
96 def cacheData(data_key, data):
java.lang.RuntimeExceptionPyRaisable: org.brunel.model.VisException: Illegal field name: Date-Time while parsing action text: data('DISKAVGRIO') x(Date-Time) y(hdisk1342) color(#series) line
I'm not sure, but I think the problem is the date/time format. Does anyone know how to read date/time fields?
Try using:
%brunel data('DISKAVGRIO') x(Date_Time) y(hdisk1342) color(#series) line
That is, use an underscore "_" instead of a dash "-" within the field name. Brunel converts characters in field names that interfere with the syntax into underscores for reference within the syntax--but the original field name will appear as is on the displayed axis.

Difference between correctly / incorrectly classified instances in decision tree and confusion matrix in Weka

I have been using Weka’s J48 decision tree to classify frequencies of keywords
in RSS feeds into target categories. And I think I may have a problem
reconciling the generated decision tree with the number of correctly classified
instances reported and in the confusion matrix.
For example, one of my .arff files contains the following data extracts:
@attribute Keyword_1_nasa_Frequency numeric
@attribute Keyword_2_fish_Frequency numeric
@attribute Keyword_3_kill_Frequency numeric
@attribute Keyword_4_show_Frequency numeric
...
@attribute Keyword_64_fear_Frequency numeric
@attribute RSSFeedCategoryDescription {BFE,FCL,F,M, NCA, SNT,S}
@data
0,0,0,34,0,0,0,0,0,40,0,0,0,0,0,0,0,0,0,0,24,0,0,0,0,13,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BFE
0,0,0,10,0,0,0,0,0,11,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BFE
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BFE
...
20,0,64,19,0,162,0,0,36,72,179,24,24,47,24,40,0,48,0,0,0,97,24,0,48,205,143,62,78,
0,0,216,0,36,24,24,0,0,24,0,0,0,0,140,24,0,0,0,0,72,176,0,0,144,48,0,38,0,284,
221,72,0,72,0,SNT
...
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,6,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,S
And so on: there’s a total of 64 keywords (columns) and 570 rows where each one contains the frequency of a keyword in a feed for a day. In this case, there are 57 feeds for
10 days giving a total of 570 records to be classified. Each keyword is prefixed
with a surrogate number and postfixed with ‘Frequency’.
My use of the decision tree is with default parameters using 10x validation.
Weka reports the following:
Correctly Classified Instances 210 36.8421 %
Incorrectly Classified Instances 360 63.1579 %
With the following confusion matrix:
=== Confusion Matrix ===
a b c d e f g <-- classified as
11 0 0 0 39 0 0 | a = BFE
0 0 0 0 60 0 0 | b = FCL
1 0 5 0 72 0 2 | c = F
0 0 1 0 69 0 0 | d = M
3 0 0 0 153 0 4 | e = NCA
0 0 0 0 90 10 0 | f = SNT
0 0 0 0 19 0 31 | g = S
The tree is as follows:
Keyword_22_health_Frequency <= 0
| Keyword_7_open_Frequency <= 0
| | Keyword_52_libya_Frequency <= 0
| | | Keyword_21_job_Frequency <= 0
| | | | Keyword_48_pic_Frequency <= 0
| | | | | Keyword_63_world_Frequency <= 0
| | | | | | Keyword_26_day_Frequency <= 0: NCA (461.0/343.0)
| | | | | | Keyword_26_day_Frequency > 0: BFE (8.0/3.0)
| | | | | Keyword_63_world_Frequency > 0
| | | | | | Keyword_31_gaddafi_Frequency <= 0: S (4.0/1.0)
| | | | | | Keyword_31_gaddafi_Frequency > 0: NCA (3.0)
| | | | Keyword_48_pic_Frequency > 0: F (7.0)
| | | Keyword_21_job_Frequency > 0: BFE (10.0/1.0)
| | Keyword_52_libya_Frequency > 0: NCA (31.0)
| Keyword_7_open_Frequency > 0
| | Keyword_31_gaddafi_Frequency <= 0: S (32.0/1.0)
| | Keyword_31_gaddafi_Frequency > 0: NCA (4.0)
Keyword_22_health_Frequency > 0: SNT (10.0)
My question concerns reconciling the matrix to the tree or vice versa. As far as
I understand the results, a rating like (461.0/343.0) indicates that 461 instances have been classified as NCA. But how can that be when the matrix reveals only 153? I am
not sure how to interpret this so any help is welcome.
Thanks in advance.
The number in parentheses at each leaf should be read as (number of total instances of this classification at this leaf / number of incorrect classifications at this leaf).
In your example for the first NCA leaf, it says there are 461 test instances that were classified as NCA, and of those 461, there were 343 incorrect classifications. So there are 461-343 = 118 correctly classified instances at that leaf.
Looking through your decision tree, note that NCA is also at other leaves. I count 118 + 3 + 31 + 4 = 156 correctly classified instances out of 461 + 3 + 31 + 4 = 499 total classifications of NCA.
Your confusion matrix shows 153 correct classifications of NCA out of 39 + 60 + 72 + 69 + 153 + 90 + 19 = 502 total classifications of NCA.
So there is a slight difference between the tree (156/499) and your confusion matrix (153/502).
Note that if you are running Weka from the command-line, it outputs a tree and a confusion matrix for testing on all the training data and also for testing with cross-validation. Be careful that you are looking at the right matrix for the right tree. Weka outputs both training and test results, resulting in two pairs of matrix and tree. You may have mixed them up.

perl print formatting question

I want to display a table in Perl, the rows and column names for which will be of variable length. I want the columns to be neatly aligned. The problem is the row and column headings are of variable length, so the alignment shifts off for different files.
Here is the code I am using to format :
print "\n ";
foreach (keys(%senseToSenseCountHash))
{
printf "%15s",$_;
}
print "\n";
print "------------------------------------------------------------\n";
my $space = "---";
foreach my $realSense (keys(%actualSenseToWronglyDisambiguatedSense))
{
printf "%s",$realSense;
foreach (keys(%senseToSenseCountHash))
{
if(exists($actualSenseToWronglyDisambiguatedSense{$realSense}[0]{$_}))
{
printf "%15s",$actualSenseToWronglyDisambiguatedSense{$realSense}[0]{$_};
}
else
{
printf "%15s",$space;
}
}
print "\n";
}
The outputs I get are as follows (for different files that I have to test on) :
Microsoft IBM
------------------------------------------------------------
Microsoft 896 120
IBM 66 661
SERVE12 SERVE2 SERVE6 SERVE10
------------------------------------------------------------
SERVE12 319 32 19 8
SERVE2 44 159 39 25
SERVE6 22 9 102 1
SERVE10 14 16 12 494
HARD3 HARD2 HARD1
------------------------------------------------------------
HARD3 68 7 27
HARD2 6 60 90
HARD1 37 69 937
I want to make this output aligned regardless of the row and column name. Can anyone please help?
Thanks so much!
This line:
printf "%s",$realSense;
has no specific width, and is throwing off the alignment.
Found the answer; pasting it here in case anyone wants to use it.
printf "%10s %-2s",'----------','|';
foreach(keys(%senseToSenseCountHash))
{
printf "%s",'----------------';
}
print "\n";
printf "%10s %-2s",' ','|';
foreach(keys(%senseToSenseCountHash))
{
printf "%-14s",$_;
}
print "\n";
printf "%10s %-2s",'----------','|';
foreach(keys(%senseToSenseCountHash))
{
printf "%s",'----------------';
}
print "\n";
foreach my $key (sort { $senseToSenseCountHash{$b} <=>
$senseToSenseCountHash{$a} } keys %senseToSenseCountHash )
{
$maxSense = $senseToSenseCountHash{$key};
last;
}
my $space = "---";
foreach my $realSense (keys(%actualSenseToWronglyDisambiguatedSense))
{
printf "%-10s %-2s",$realSense,'|';
foreach (keys(%senseToSenseCountHash))
{
if(exists($actualSenseToWronglyDisambiguatedSense{$realSense}[0]{$_}))
{
printf "%-15s",$actualSenseToWronglyDisambiguatedSense{$realSense}[0]{$_};
}
else
{
printf "%-15s",$space;
}
}
print "\n";
}
printf "%10s %-2s",'----------','|';
foreach(keys(%senseToSenseCountHash))
{
printf "%s",'----------------';
}
print "\n";
Output :
---------- | ------------------------------------------------
| HARD3 HARD2 HARD1
---------- | ------------------------------------------------
HARD3 | 68 7 27
HARD2 | 6 60 90
HARD1 | 37 69 937
---------- | ------------------------------------------------
---------- | ----------------------------------------------------------------
| SERVE12 SERVE2 SERVE6 SERVE10
---------- | ----------------------------------------------------------------
SERVE12 | 319 32 19 8
SERVE2 | 44 159 39 25
SERVE6 | 22 9 102 1
SERVE10 | 14 16 12 494
---------- | ----------------------------------------------------------------